diff --git "a/6649.jsonl" "b/6649.jsonl" new file mode 100644--- /dev/null +++ "b/6649.jsonl" @@ -0,0 +1,633 @@ +{"seq_id":"462737042","text":"import telegram\nimport config\nimport parserequest\nfrom telegram.error import NetworkError, Unauthorized\nfrom time import sleep\n\nupdate_id = None\n\n\ndef main():\n global update_id\n bot = telegram.Bot(config.token)\n try:\n update_id = bot.getUpdates()[0].update_id\n except IndexError:\n update_id = None\n\n while True:\n try:\n echo(bot)\n except NetworkError:\n sleep(1)\n except Unauthorized:\n update_id += 1\n\n\ndef echo(bot):\n global update_id\n for update in bot.getUpdates(offset=update_id, timeout=10):\n chat_id = update.message.chat_id\n update_id = update.update_id + 1\n\n if update.message:\n list_price = parserequest.get_list_price()\n for price in [x for x in list_price]:\n update.message.reply_text(price)\n print(chat_id)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"iProTechnoBot/old_bot.py","file_name":"old_bot.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"411188968","text":"# Import notes\n\nfrom notes import NoteRoot, NoteCircle, NoteDiamond\nfrom modes import InputMode, RenderMode\n\n### Instrument classes\n\nclass Harp:\n\n def __init__(self):\n\n self.column_count = 5\n self.row_count = 3\n self.chord_image = {}\n self.highlighted_states_image = []\n self.instrument_type = 'harp'\n self.is_highlighted = False\n\n self.sky_inverse_position_map = {\n (0, 0): 'A1', (0, 1): 'A2', (0, 2): 'A3', (0, 3): 'A4', (0, 4): 'A5',\n (1, 0): 'B1', (1, 1): 'B2', (1, 2): 'B3', (1, 3): 'B4', (1, 4): 'B5',\n (2, 0): 'C1', (2, 1): 'C2', (2, 2): 'C3', (2, 3): 'C4', (2, 4): 'C5'\n }\n\n def get_row_count(self):\n return self.row_count\n\n def get_column_count(self):\n return self.column_count\n\n def get_is_highlighted(self):\n return self.is_highlighted\n\n def set_is_highlighted(self, is_highlighted):\n '''\n Expecting a boolean, to determine whether the harp is empty in this frame\n '''\n self.is_highlighted = is_highlighted\n\n def set_chord_image(self, chord_image):\n '''\n The chord_image is a dictionary. The keys are tuples representing the positions of the buttons. The values are dictionaries, where each key is the frame, and the value is a Boolean indicating whether the button is highlighted in that frame.\n '''\n # Ok, but in this case the dict should have keys for all the positions, and shut down buttons should be set to False\n #TODO: Raise TypeError if chord_image is not a dict\n self.chord_image = chord_image\n\n # def update_chord_image(self, index, new_state):\n def append_highlighted_state(self, row_index, column_index, new_state):\n\n '''\n INCOMPLETE IMPLEMENTATION. 
new_state is expected to be a Boolean\n '''\n\n chord_image = self.get_chord_image()\n\n row = chord_image[row_index]\n highlighted_states = row[column_index]\n highlighted_states.append(new_state)\n\n chord_image[index] = highlighted_states #index is undefined\n\n self.set_chord_image(chord_image)\n\n\n def get_chord_image(self):\n return self.chord_image\n\n def ascii_from_chord_image(self, chord_image, instrument_index):\n\n ascii_chord = ''\n for k in chord_image:\n for f in chord_image[k]:\n if chord_image[k][f]==True: # Button is highlighted\n ascii_chord += self.sky_inverse_position_map[k]\n #print(str(k) + ' = ' + ascii_chord)\n return ascii_chord\n\n\n\n def render_in_html(self, chord_image, note_width, instrument_index):\n\n harp_is_empty = not(self.get_is_highlighted())\n\n harp_render = ''\n\n if harp_is_empty:\n harp_render += ''\n else:\n harp_render += ''\n\n for row_index in range(self.get_row_count()):\n\n harp_render += ''\n\n for column_index in range(self.get_column_count()):\n\n harp_render += ''\n\n harp_render += ''\n\n\n harp_render += ''\n\n # Calculate the note's overall index in the harp (0 to 14)\n\n note_index = (row_index * self.get_column_count()) + column_index\n\n note_position = (row_index, column_index)\n\n if note_index % 7 == 0:\n # Note is a root note\n note = NoteRoot()\n elif (note_index % self.get_column_count() == 0 or note_index % self.get_column_count() == 2) or note_index % self.get_column_count() == 4:\n # Note is in an odd column, so it is a circle\n note = NoteCircle()\n else:\n # Note is in an even column, so it is a diamond\n note = NoteDiamond()\n\n note_render = note.render_in_html(note_width, chord_image, note_position, self.get_instrument_type(), note_index, harp_is_empty)\n harp_render += note_render\n harp_render += '
'\n return harp_render\n\n\n def get_instrument_type(self):\n return self.instrument_type\n","sub_path":"python/instrument.py","file_name":"instrument.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"946513","text":"'''\nCreate a function named same_name() that has two parameters named your_name and my_name.\n\nIf our names are identical, return True. Otherwise, return False.'''\n\n\n# Write your same_name function here:\n\n# Uncomment these function calls to test your \ndef same_name(your_name,my_name):\n if your_name==my_name:\n return True\n else: return False\nprint(same_name(\"Colby\", \"Colby\"))\n# should print True\nprint(same_name(\"Tina\", \"Amber\"))\n# should print False\n","sub_path":"03 Control Flows/07 same_name.py","file_name":"07 same_name.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"368341371","text":"# -*- coding: utf-8 -*-\nimport os\nimport subprocess\nimport requests\nimport logging\n\n\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger(__name__)\n\nGITLAB_TOKEN = os.environ.get(\"GITLAB_TOKEN\")\nGITLAB_BUILD_URL = u'https://gitlab.uaprom/uaprom/uaprom/builds/'\nRUN_DEPLOY = (\n u'curl --request POST --header \"PRIVATE-TOKEN: %s\" '\n u'\"https://gitlab.uaprom/api/v3/projects/114/jobs/{job_id}/{action}\"'\n % GITLAB_TOKEN\n)\n\n\ndef get_last_tag():\n log.info(u'Get last tag')\n tags_json =requests.get(\n u'https://gitlab.uaprom/api/v3/projects/114/repository/tags',\n verify=False,\n params={\"private_token\": u'%s' % GITLAB_TOKEN}\n ).json()\n tags = []\n for i in tags_json:\n if len(i['message'].split('.')) == 3:\n if i['message'].startswith('1'):\n tags.append(i['message'])\n return tags[0]\n\n\ndef get_last_pipeline_of_branch(branch, all_pipelines=False):\n log.info(\n u'Get last pipeline of branch \"%s\" (all_pipelines=%s)'\n % (branch, all_pipelines)\n )\n pipelines =requests.get(\n u'https://gitlab.uaprom/api/v3/projects/114/pipelines?per_page=100',\n verify=False,\n params={\"private_token\": u'%s' % GITLAB_TOKEN}\n ).json()\n if all_pipelines:\n log.info(u'Len pipelines: %s' % len(pipelines))\n needful_pipelines = [\n pipeline for pipeline in pipelines if pipeline['ref'] == branch\n ]\n log.info(u'Len needful_pipelines: %s' % len(needful_pipelines))\n log.info(u'needful_pipelines: %s' % needful_pipelines)\n return needful_pipelines\n else:\n for pipeline in pipelines:\n if pipeline['ref'] == branch:\n log.info(u'pipeline: %s' % pipeline)\n return pipeline\n\n\ndef is_running_job(job_name, branch):\n log.info(u'Is running job %s' % job_name)\n pipelines = get_last_pipeline_of_branch(branch, all_pipelines=True)\n for pipeline in pipelines:\n jobs = requests.get(\n u'https://gitlab.uaprom/api/v3/projects/114/pipelines/%s/jobs'\n % pipeline['id'],\n verify=False,\n params={\"private_token\": u'%s' % GITLAB_TOKEN}\n ).json()\n for job in jobs:\n if job['name'] == job_name:\n if job['status'] in ['pending', 'running']:\n log.info(u'Running job %s' % job['id'])\n return job['id']\n\n\ndef get_job(pipline_id, job_name):\n log.info(u'Get job %s in pipeline %s' % (job_name, pipline_id))\n jobs =requests.get(\n u'https://gitlab.uaprom/api/v3/projects/114/pipelines/%s/jobs'\n % pipline_id,\n verify=False,\n params={\"private_token\": u'%s' % GITLAB_TOKEN}\n ).json()\n for job in jobs:\n if job['name'] == job_name:\n log.info(u'job: %s' % job)\n return 
job\n\n\ndef run_start_job(job_id):\n log.info(u'Run start job %s' % job_id)\n subprocess.Popen(\n RUN_DEPLOY.format(job_id=job_id, action=u'play'),\n shell=True,\n stdout=subprocess.PIPE\n )\n log.info(u'Started last commit (job_id: %s)' % job_id)\n\n\ndef restart_job(job_id):\n log.info(u'Restart start job %s' % job_id)\n subprocess.Popen(\n RUN_DEPLOY.format(job_id=job_id, action=u'retry'),\n shell=True,\n stdout=subprocess.PIPE\n )\n log.info(u'Restarted last commit (job_id: %s)' % job_id)\n\n\ndef start_deploy(job_name, branch):\n log.info(u'Branch: %s' % branch)\n log.info(u'Job name: %s' % job_name)\n running_job = is_running_job(job_name, branch)\n message = u'Что-то пошло не так :('\n if running_job:\n message = (\n u\"Сборка %s в gitlab уже *была запущена* \"\n u\"или *ожидает запуска!*\"\n u\"\\n%s%s\" % (job_name, GITLAB_BUILD_URL, running_job)\n )\n else:\n pipelines = get_last_pipeline_of_branch(branch, all_pipelines=True)\n if pipelines:\n n = len(pipelines)\n i = 0\n while i < n:\n log.info(u'Попытка #%s' % i)\n job = get_job(pipelines[i]['id'], job_name)\n log.info(u'Job: %s' % job)\n if job:\n if job['status'] == 'manual':\n run_start_job(job['id'])\n message = (\n u\"Запустил сборку %s (%s) в gitlab!\"\n u\"\\n%s%s\"\n % (job_name, branch, GITLAB_BUILD_URL, job['id'])\n )\n else:\n restart_job(job['id'])\n message = (\n u\"*Перезапустил* сборку %s (%s) в \"\n u\"gitlab!\\n%s%s\"\n % (job_name, branch, GITLAB_BUILD_URL, job['id'])\n )\n break\n else:\n i += 1\n if not job:\n message = (\n u\"*Не получилось запустить сборку!* Возможно нет \"\n u\"pipline ветки {} в которой есть job'а \"\n u\"{}!\".format(branch, job_name)\n )\n else:\n message = (\n u\"*Не получилось запустить сборку!* Возможно новых \"\n u\"коммитов в ветке {}!\".format(branch)\n )\n return message\n","sub_path":"bot/gitlab.py","file_name":"gitlab.py","file_ext":"py","file_size_in_byte":5569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"513447375","text":"# Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom auto_scan_test import OPConvertAutoScanTest\nfrom hypothesis import reproduce_failure\nimport hypothesis.strategies as st\nimport numpy as np\nimport unittest\nimport random\n\n\nclass TestNonZeroConcert(OPConvertAutoScanTest):\n \"\"\"\n ONNX op: NonZero\n OPset version: 9~15\n \"\"\"\n\n def sample_convert_config(self, draw):\n input_shape = draw(\n st.lists(\n st.integers(\n min_value=10, max_value=20), min_size=1, max_size=3))\n input_dtype = draw(st.sampled_from([\"float32\", \"int32\"]))\n\n config = {\n \"op_names\": [\"NonZero\", ],\n \"test_data_shapes\": [input_shape],\n \"test_data_types\": [input_dtype],\n \"inputs_shape\": [input_shape],\n \"min_opset_version\": 9,\n \"inputs_name\": [\"x\"],\n \"outputs_name\": [\"y\"],\n \"delta\": 1e-4,\n \"rtol\": 1e-4,\n \"run_dynamic\": True,\n }\n attrs = {}\n return (config, attrs)\n\n def test(self):\n self.run_and_statis(max_examples=50)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/onnx/test_auto_scan_nonzero.py","file_name":"test_auto_scan_nonzero.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"447333255","text":"from django.shortcuts import render, redirect, get_list_or_404\nfrom django.views.generic import View\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom orm.models import Bobot\nfrom management.bobot.forms import BobotForm\nfrom library.view import ManagementAccessView\n# Create your views here.\n\n\nclass ListBobotView(ManagementAccessView):\n\tdef get(self, request):\n\n\t\ttemplate = 'bobot/index.html'\n\n\t\tform = BobotForm(request.POST or None)\n\t\tbobot = Bobot.objects.all()\n\t\tdata = {\n 'form_mode' : 'add',\n 'form' : form,\n\t\t'bobot' : bobot,\n\t\t}\n\t\treturn render(request, template, data)\n\nclass EditBobotView(ManagementAccessView):\n template = 'bobot/index.html'\n\n def get(self, request, id):\n bobot = Bobot.objects.filter(id=id)\n if not bobot.exists():\n return redirect('bobot:view')\n bobot = bobot.first()\n initial = {\n\n 'id': bobot.id,\n 'nilai_akademik' : bobot.nilai_akademik,\n 'kelas' : bobot.kelas,\n 'karakter' : bobot.karakter,\n 'plomba'\t: bobot.plomba,\n 'hasil_tes' : bobot.hasil_tes,\n }\n\n form = BobotForm(initial=initial)\n bobot = Bobot.objects.all()\n data = {\n 'id':id,\n 'form': form,\n 'form_mode' : 'edit',\n 'bobot' : bobot,\n }\n return render(request, self.template, data)\n\n\n\nclass UpdateBobotView(ManagementAccessView):\n\n def post(self, request):\n \n template = \"bobot/index.html\"\n form = BobotForm(request.POST or None)\n if form.is_valid():\n id = form.cleaned_data['id']\n bobot = Bobot.objects.get(pk=id)\n bobot.nilai_akademik = form.cleaned_data['nilai_akademik']\n bobot.kelas = form.cleaned_data['kelas']\n bobot.karakter = form.cleaned_data['karakter']\n bobot.plomba = form.cleaned_data['plomba']\n bobot.hasil_tes = 
form.cleaned_data['hasil_tes']\n messages.add_message(request, messages.INFO, 'Data Berhasil Diupdate') \n bobot.save(force_update=True)\n return redirect('bobot:view')\n else:\n bobot = bobot.objects.all()\n data = {\n 'form_mode':'edit',\n 'form': form,\n 'bobot': bobot,\n }\n messages.add_message(request, messages.INFO, 'Data Gagal Diupdate !!') \n return render(request, template, data)\n\n","sub_path":"PythonMoora/management/bobot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"636807144","text":"# coding=utf-8\n\nimport json\n\nwith open('processed.json') as f:\n records = json.load(f)\n\nfor i in range(19):\n for j in range(7):\n for k in range(7):\n print(i, j, k, records[i][j][k][0])\n print(i, j, k, records[i][j][k][1])\n\nwith open('final.json', 'w') as f:\n json.dump(records, f)\n","sub_path":"1/3/haha.py","file_name":"haha.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"281736895","text":"#加载飞桨和相关类库\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.dygraph import nn\nimport paddle.fluid.dygraph as dy\nfrom paddle.fluid import layers\nimport numpy as np\nimport os\nfrom PIL import Image\nprint(paddle.__version__)\n\n\n# 一行代码实现动转静。\n# 动静转换的操作非常简单,仅需添加一个装饰器( @to_static ),框架就会自动将动态图的程序,转换为静态图的program,并使用该program训练、保存为静态图模型以实现推理部署。\n\n# import paddle\n# from paddle.static import InputSpec\n# from paddle.fluid.dygraph import Layer\n# from paddle.jit import to_static\n\n\n# class SimpleNet(Layer):\n# def __init__(self):\n# super(SimpleNet, self).__init__()\n# self.linear = paddle.nn.Linear(10, 3)\n\n# @to_static(input_spec=[InputSpec(shape=[None, 10], name='x'), InputSpec(shape=[3], name='y')])\n# def forward(self, x, y):\n# out = self.linear(x)\n# out = out + y\n# return out\n\n\n# net = SimpleNet()\n# paddle.jit.save(net, './simple_net') \n\n# 定义mnist数据识别网络结构,同房价预测网络\nclass MNIST(fluid.dygraph.Layer):\n def __init__(self):\n super(MNIST, self).__init__()\n\n self.cnn = dy.Conv2D(num_channels=3, num_filters=1, filter_size=3, stride=1, padding=1, act='relu')\n \n self.cls = dy.Sequential(\n dy.Linear(input_dim=784, output_dim=128),\n dy.Dropout(p=.2),\n dy.Linear(input_dim=128, output_dim=5),\n )\n # self.cls = dy.Linear(input_dim=784, output_dim=5)\n\n # 定义网络结构的前向计算过程\n def forward(self, x):\n x = self.cnn(x)\n\n b = x.shape[0]\n # print(b)\n x = layers.reshape(x, shape=[b,-1,])\n # print(x.shape)\n x = self.cls(x)\n # print(x.shape)\n return layers.softmax(x, axis=1)\n\n\nif __name__ == '__main__':\n\n # 定义预测过程\n with fluid.dygraph.guard():\n model = MNIST()\n \n # 加载模型参数\n # model_dict, _ = fluid.load_dygraph(\"mnist\")\n # model.load_dict(model_dict)\n\n # 灌入数据\n model.eval()\n tensor_img = np.random.rand(1,3,28,28).astype(np.float32)\n result = model(fluid.dygraph.to_variable(tensor_img))\n # 预测输出取整,即为预测的数字,打印结果\n print(\"本次预测的数字是\", result.numpy().astype('int32'))\n\n\nif __name__ == '__main__':\n\n x = np.load('harset/db5_acc.npy')\n y = np.load('harset/db5_lab.npy')\n\n # # 定义飞桨动态图工作环境\n # with fluid.dygraph.guard():\n # # 声明网络结构\n # model = MNIST()\n # # 启动训练模式\n # model.train()\n # # 定义数据读取函数,数据读取batch_size设置为16\n # train_loader = paddle.batch(paddle.dataset.mnist.train(), batch_size=16)\n # # 定义优化器,使用随机梯度下降SGD优化器,学习率设置为0.001\n # optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.001, parameter_list=model.parameters())\n\n\n # 
通过with语句创建一个dygraph运行的context\n # 动态图下的一些操作需要在guard下进行\n # with fluid.dygraph.guard():\n # model = MNIST()\n # model.train()\n # train_loader = paddle.batch(paddle.dataset.mnist.train(), batch_size=16)\n # optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.001, parameter_list=model.parameters())\n \n # EPOCH_NUM = 10\n # for epoch_id in range(EPOCH_NUM):\n # for batch_id, data in enumerate(train_loader()):\n # #准备数据,格式需要转换成符合框架要求的\n # image_data = np.array([x[0] for x in data]).astype('float32')\n # label_data = np.array([x[1] for x in data]).astype('float32').reshape(-1, 1)\n # # 将数据转为飞桨动态图格式\n # image = fluid.dygraph.to_variable(image_data)\n # label = fluid.dygraph.to_variable(label_data)\n \n # #前向计算的过程\n # predict = model(image)\n \n # #计算损失,取一个批次样本损失的平均值\n # loss = fluid.layers.square_error_cost(predict, label)\n # avg_loss = fluid.layers.mean(loss)\n \n # #每训练了1000批次的数据,打印下当前Loss的情况\n # if batch_id !=0 and batch_id % 1000 == 0:\n # print(\"epoch: {}, batch: {}, loss is: {}\".format(epoch_id, batch_id, avg_loss.numpy()))\n \n # #后向传播,更新参数的过程\n # avg_loss.backward()\n # optimizer.minimize(avg_loss)\n # model.clear_gradients()\n\n # # 保存模型\n # fluid.save_dygraph(model.state_dict(), 'mnist')\n","sub_path":"har_paddle_v1.8/paddle_mnist.py","file_name":"paddle_mnist.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"602532533","text":"# -*- coding: utf-8 -*-\n# @Author: Puffrora\n# @Date: 2019-08-11 11:11:01\n# @Last Modified by: Puffrora\n# @Last Modified time: 2019-08-11 12:06:09\n\n\nclass Solution(object):\n\tdef findAnagrams(self, s, p):\n\t\twindow, need = {}, {}\n\t\tfor i in p:\n\t\t\tneed[i] = need.get(i, 0) + 1\n\t\tleft, right, match = 0, 0, 0\n\t\tres = []\n\t\twhile right < len(s):\n\t\t\tif s[right] in need:\n\t\t\t\twindow[s[right]] = window.get(s[right], 0) + 1\n\t\t\t\tif window[s[right]] == need[s[right]]:\n\t\t\t\t\tmatch += 1\n\t\t\tright += 1\n\n\t\t\twhile match == len(need):\n\t\t\t\tif right - left == len(p):\n\t\t\t\t\tres.append(left)\n\t\t\t\tif s[left] in need:\n\t\t\t\t\twindow[s[left]] -= 1\n\t\t\t\t\tif window[s[left]] < need[s[left]]:\n\t\t\t\t\t\tmatch -= 1\n\t\t\t\tleft += 1\n\n\t\treturn res","sub_path":"Leetcode/leetcode438 找到字符串中所有字母异位词.py","file_name":"leetcode438 找到字符串中所有字母异位词.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"522787315","text":"#-*- coding: utf-8 -*-\nimport redis\n\nclass RedisProxy:\n\n\tdef __init__ (self, db=None):\n\t\t\"\"\"\n\t\tThis class deals with the storing, creating, deleting, getting object from the proxy_list stored in the\n\t\tredis database.\n\t\tArgs:\n\t\t\t\n\t\t\tFor unhealthy_proxies pass redis_list_name = \"unhealthy_proxies\" as an argument when initiating this class\n\n\t\t\"\"\"\n\t\tif not db:\n\t\t\tself.redis_connection = redis.StrictRedis(host='localhost', port=6379, db=15)\n\t\telse:\n\t\t\tself.redis_connection = redis.StrictRedis(host='localhost', port=6379, db=db)\n\n\t\t\n\tdef store_proxy_list(self, proxy_list, status):\n\t\t\"\"\"\n\t\tproxy_list is the list of the proxies which will be stored in the redis proxies list\n\t\tEach element is in the form of \n\t\t{\"ip\": ip, \"port\": 1080, \"type\": Socks4, \"country\": Brazil, \"latency\": 30, \"reliability\": 90}\n\n\t\tstatus: healthy or unhealhty\n\t\tif status != \"healthy\":\n\t\t\traise StandardError(\"not a valid status for 
proxy\")\n\t\t\n\t\tif status != \"unhealthy\":\n\t\t\traise StandardError(\"not a valid status for proxy\")\n\t\t\"\"\"\n\t\twith self.redis_connection.pipeline() as pipe:\n\t\t\ttry:\n\t\t\t\tfor proxy in proxy_list:\n\t\t\t\t\tproxy[\"status\"] = status\n\t\t\t\t\tpipe.hmset(proxy.get(\"ip\"), proxy)\n\t\t\t\tpipe.execute()\n\t\t\texcept Exception as e:\n\t\t\t\traise StandardError(e)\n\n\tdef total_proxies(self):\n\t\tproxy_list = self.redis_connection.keys()\n\t\treturn proxy_list\n\n\n\tdef proxy_details(self, proxy):\n\t\t\"\"\"\n\t\tReturn keys and its values for the related proxy\n\t\t\"\"\"\n\t\tproxy_details = self.redis_connection.hgetall(proxy)\n\t\treturn proxy_details\n\n\n\tdef delete_proxy(self, proxy):\n\t\t\"\"\"\n\t\tDelete proxy\n\t\t\"\"\"\n\t\tself.redis_connection.delete(proxy)\n\t\treturn \n\n\n\tdef healthy_proxies(self):\n\t\t\"\"\"\n\t\treturns the list of healthy proxies of the form\n\t\t[{'country': '\\xc2\\xa0Mexico', 'ip': '187.163.164.233', 'latency': '30', 'port': '1080', \n\t\t'reliability': '100', 'status': 'healthy', 'type': 'Socks4'},\n\n\t\t{'country': '\\xc2\\xa0Pakistan', 'ip': '221.120.222.69', 'latency': '30', 'port': '1080', \n\t\t'reliability': '100', 'status': 'healthy', 'type': 'Socks5'}]\n\n\t\t\"\"\"\n\t\tproxy_list = [self.redis_connection.hgetall(key) for key in self.redis_connection.keys() \n\t\t\t\tif self.redis_connection.hget(key, \"status\") == \"healthy\"]\n\n\n\t\treturn proxy_list\n\t\n\tdef unhealthy_proxies(self):\n\t\t\"\"\"\n\t\treturns the list of unhealthy proxies\n\t\t[{'country': '\\xc2\\xa0Mexico', 'ip': '187.163.164.233', 'latency': '30', 'port': '1080', \n\t\t'reliability': '100', 'status': 'unhealthy', 'type': 'Socks4'},\n\n\t\t{'country': '\\xc2\\xa0Pakistan', 'ip': '221.120.222.69', 'latency': '30', 'port': '1080', \n\t\t'reliability': '100', 'status': 'unhealthy', 'type': 'Socks5'}]\n\t\t\"\"\"\n\t\tproxy_list = [self.redis_connection.hgetall(key) for key in self.redis_connection.keys() \n\t\t\t\tif self.redis_connection.hget(key, \"status\") == \"unhealthy\"]\n\t\treturn proxy_list\n\n\n\tdef update_status(self, ip, status):\n\t\t\"\"\"\n\t\tThis method updates the status of the proxy present in the database\n\t\t\"\"\"\n\t\tif not status in (\"healthy\", \"unhealthy\"):\n\t\t\traise StandardError(\"Status that has been provided is not a valid one\")\n\n\t\ttry:\n\t\t\tself.redis_connection.hset(ip, \"status\", status)\n\n\t\texcept Exception as e:\n\t\t\traise StandardError(e)\n\n\n\n","sub_path":"proxies/redis_storage.py","file_name":"redis_storage.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"565694602","text":"\"\"\"added colums to acquaintances table\n\nRevision ID: 514a39d06e75\nRevises: 54c23ecaee46\nCreate Date: 2015-08-24 15:16:31.227226\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '514a39d06e75'\ndown_revision = '54c23ecaee46'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column(u'users_acquaintance', sa.Column('acquaintance_user_first_name', sa.String(length=2000), nullable=True))\n op.add_column(u'users_acquaintance', sa.Column('acquaintance_user_last_name', sa.String(length=2000), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column(u'users_acquaintance', 'acquaintance_user_last_name')\n op.drop_column(u'users_acquaintance', 'acquaintance_user_first_name')\n ### end Alembic commands ###\n","sub_path":"alembic/versions/514a39d06e75_added_colums_to_acquaintances_table.py","file_name":"514a39d06e75_added_colums_to_acquaintances_table.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"325896869","text":"# -*- coding:utf-8 -*\nfrom Time import Ming_time\nimport sys\nimport os\nimport os.path\nimport time,datetime\nfrom ExcelProcess import ming_file\nsys.path.append(\"..\")\nfrom ExcelProcess import excel\nreload(sys)\nsys.setdefaultencoding('utf8')\n\n\nfile_name = unicode(r'D:\\Python\\PEclub\\pingjiao\\2017~2018学年第二学期.xlsx','utf8')\ndata = excel.read_excel(filename=file_name)\n\n\nteacher_number = {}\nteacher_q_score = {}\nteacher_t_score = {}\n\nfor i in range(2, len(data)):\n teacher_name = data[i][9]\n if teacher_name in teacher_number.keys():\n teacher_number[teacher_name] += 1\n else:\n teacher_number[teacher_name] = 1\n\n q_equal_number = {u'完全同意':5,u'同意':4,u'一般':3,u'不同意':2,u'完全不同意':1}\n for j in range(11,39):\n select = data[i][j]\n if teacher_name in teacher_q_score.keys():\n teacher_q_score[teacher_name] += q_equal_number[select]\n else:\n teacher_q_score[teacher_name] = q_equal_number[select]\n\n if teacher_name in teacher_t_score.keys():\n teacher_t_score[teacher_name] = (float(teacher_t_score[teacher_name]) + float(data[i][40])) / 2\n else:\n teacher_t_score[teacher_name] = data[i][40]\n\n\nteacher_list = teacher_number.keys()\nteacher_number_score = {}\n\ntotal_score = {}\nfor teacher_name in teacher_list:\n teacher_number_score[teacher_name] = teacher_number[teacher_name] * (100/46)\n teacher_q_score[teacher_name] = teacher_q_score[teacher_name] / teacher_number[teacher_name]\n teacher_q_score[teacher_name] = teacher_q_score[teacher_name] * (100.0/145)\n\n total_score[teacher_name] = (float(teacher_number_score[teacher_name]) * 0.2) + (float(teacher_q_score[teacher_name]) * 0.6) + (float(teacher_t_score[teacher_name]) * 0.2)\n\n\nscore = []\nfor name in total_score.keys():\n score.append([name,teacher_number[name],total_score[name]])\n\n\n\ndes_file_name = unicode(r'D:\\Python\\PEclub\\pingjiao\\2017~2018学年第二学期总分.xlsx','utf8')\nexcel.write_excel(des_filename=des_file_name,data=score,sheet_name='success')\n\n\n\n\n\n\n\n","sub_path":"PEclub/pingjiao/FormatScore.py","file_name":"FormatScore.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"70126073","text":"\"\"\" Helper functions for converting JSON to ARL objects\n\n\"\"\"\n\nfrom astropy.units import Unit\n\nfrom data_models.data_model_helpers import *\n\nimport numpy\n\ndef json_to_skycoord(d):\n \"\"\"Convert JSON string to SkyCoord\n \n e.g. \"phasecentre\": {\n \"ra\": {\"value\": 30.0, \"unit\": \"deg\"},\n \"dec\": {\"value\": -60.0, \"unit\": \"deg\"},\n \"frame\": \"icrs\",\n \"equinox\": \"j2000\"}\n\n :param d:\n :return:\n \"\"\"\n return SkyCoord(ra=json_to_quantity(d[\"ra\"]),\n dec=json_to_quantity(d[\"dec\"]),\n equinox=d[\"equinox\"],\n frame=d[\"frame\"])\n\n\ndef json_to_quantity(q):\n \"\"\"Convert JSON string to Quantity\n \n e.g. 
\"cellsize\": {\"value\": 0.001, \"unit\": \"rad\"}\n\n :param q:\n :return:\n \"\"\"\n value = float(q[\"value\"])\n unit = q[\"unit\"]\n assert isinstance(unit, str), \"unit must be string\"\n unit = Unit(q[\"unit\"])\n return Quantity(value, unit)\n\ndef json_to_linspace(l):\n \"\"\"Convert JSON string to numpy.linspace\n \n e.g. \"frequency\": {\"start\": 0.9e8,\"stop\": 1.1e8,\"steps\": 7}\n \n :param l:\n :return:\n \"\"\"\n nsteps = int(l[\"steps\"])\n assert nsteps >= 0, \"Number of steps cannot be less than zero %s\" % str(l)\n return numpy.linspace(l[\"start\"], l[\"stop\"], nsteps)\n","sub_path":"workflows/arlexecute/processing_component_interface/arl_json/json_helpers.py","file_name":"json_helpers.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"525686501","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: D:\\dev\\PyCharm Projects\\NCryptoClient\\NCryptoClient\\UI\\ui_contacts_list.py\n# Compiled at: 2018-04-19 21:08:40\n# Size of source mod 2**32: 4551 bytes\n\"\"\"\nModule for the list of contacts (Widget).\n\"\"\"\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\nclass UiContactsList(QListWidget):\n __doc__ = '\\n UI-class which contains a list of buttons, each of which is a user contact.\\n '\n\n def __init__(self, main_window, parent=None):\n \"\"\"\n Constructor. Initializes all GUI and links logic to them.\n @param main_window: reference to the parent window (window itself).\n @param parent: ссылка на родительский класс (window panel).\n \"\"\"\n super().__init__(parent)\n self._main_window = main_window\n self.setResizeMode(QListView.Adjust)\n self.setObjectName('contacts_lb')\n self._last_keyboard_event = None\n self._last_mouse_event = None\n self.add_contact('Log')\n\n def keyPressEvent(self, *args, **kwargs):\n \"\"\"\n Registers keyboard buttons pressing events and writes them in the variable.\n @param args: additional parameters (list).\n @param kwargs: additional parameters (dictionary).\n @return: -\n \"\"\"\n self._last_keyboard_event = args[0]\n\n def mousePressEvent(self, *args, **kwargs):\n \"\"\"\n Registers mouse buttons pressing events and writes them in the variable.\n @param args: additional parameters (list).\n @param kwargs: additional parameters (dictionary).\n @return: -\n \"\"\"\n self._last_mouse_event = args[0]\n\n def add_contact(self, chat_name):\n \"\"\"\n When initializing main window components, this function adds list of user contacts.\n All data is being received from the server.\n @param chat_name: contact name.\n @return: -\n \"\"\"\n index = self.find_contact_widget(chat_name)\n if index:\n return\n item = QListWidgetItem()\n item.setSizeHint(QSize(item.sizeHint().width(), 24))\n button = QPushButton(chat_name)\n button.setContextMenuPolicy(Qt.CustomContextMenu)\n button.customContextMenuRequested.connect(lambda _, local_contact_name=chat_name: self.show_context_menu(local_contact_name))\n button.clicked.connect(lambda _, local_contact_name=chat_name: self._main_window.open_tab(local_contact_name))\n self.addItem(item)\n self.setItemWidget(item, button)\n\n def delete_contact(self, chat_name):\n \"\"\"\n Deletes contact from the list.\n @param chat_name: contact name.\n @return: -\n \"\"\"\n self._main_window.close_tab(chat_name)\n index = self.find_contact_widget(chat_name)\n if index is not None:\n 
self.takeItem(index)\n\n def find_contact_widget(self, chat_name):\n \"\"\"\n Searches for contact widget in the list of contacts.\n @param chat_name: contact name.\n @return: index of contact widget.\n \"\"\"\n contacts_amount = self.count()\n if contacts_amount == 1:\n if self.itemWidget(self.item(0)).text() == chat_name:\n return 0\n else:\n return\n for i in range(0, contacts_amount):\n widget = self.itemWidget(self.item(i))\n if widget.text() == chat_name:\n return i\n\n def show_context_menu(self, chat_name):\n \"\"\"\n Shows context menu on the mouse left button clicking.\n @param chat_name: contact name.\n @return: -\n \"\"\"\n menu = QMenu(self)\n remove_action = menu.addAction('Remove')\n remove_action.triggered.connect(lambda _, local_chat_name=chat_name: self._main_window.remove_contact_by_login(local_chat_name))\n menu.exec_(self.mapToGlobal(QPoint(self._last_mouse_event.x(), self._last_mouse_event.y())))","sub_path":"pycfiles/NCryptoClient-0.5.1-py2.py3-none-any/ui_contacts_list.cpython-36.py","file_name":"ui_contacts_list.cpython-36.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"618145820","text":"\"\"\"\nRepresentation of polynomial using a linked list\n\"\"\"\n\n\nclass PolyNode:\n def __init__(self, coefficient=None, exp=None):\n self.coefficient = coefficient\n self.exp = exp\n self.next = None\n\n\nclass PolyLinkedList:\n def __init__(self):\n self.head = None\n\n def create(self):\n last = None\n num = int(input(\"Enter number of terms: \"))\n print(\"Enter each term with coefficient and exponent\")\n for i in range(num):\n coefficient, exp = map(int, input().split())\n new_node = PolyNode(coefficient, exp)\n if self.head is None:\n self.head = last = new_node\n else:\n last.next = new_node\n last = new_node\n\n def display(self):\n p = self.head\n while p:\n print(\"{}x^{}\".format(p.coefficient, p.exp), end='+')\n p = p.next\n print()\n\n def eval(self, x):\n p = self.head\n val = 0\n while p:\n val += p.coefficient * pow(x, p.exp)\n p = p.next\n return val\n\n\ndef main():\n pl = PolyLinkedList()\n pl.create()\n pl.display()\n x = 4\n val = pl.eval(x)\n print(\"Evaluation of the given function {} is: {}\".format(x, val))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"linked_list/polynomial_ll.py","file_name":"polynomial_ll.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"37806708","text":"#encoding: utf-8\r\nfrom bencode import bencode, bdecode\r\nimport socket\r\n\r\nfrom hashlib import sha1\r\nfrom random import randint\r\nfrom struct import unpack\r\nfrom socket import inet_aton, inet_ntoa\r\n\r\nBOOTSTRAP_NODES = [\r\n (\"router.bittorrent.com\", 6881),\r\n (\"dht.transmissionbt.com\", 6881),\r\n (\"router.utorrent.com\", 6881)\r\n] \r\nTID_LENGTH = 4\r\nDHT_PORT = 6881\r\n\r\ndef entropy(bytes):\r\n s = \"\"\r\n for i in range(bytes):\r\n s += chr(randint(0, 255))\r\n return s\r\n\r\ndef random_id():\r\n hash = sha1()\r\n hash.update( entropy(20) )\r\n return hash.digest()\r\n\r\ndef decode_nodes(nodes):\r\n n = []\r\n length = len(nodes)\r\n if (length % 26) != 0: \r\n return n\r\n for i in range(0, length, 26):\r\n nid = nodes[i:i+20]\r\n ip = inet_ntoa(nodes[i+20:i+24])\r\n port = unpack(\"!H\", nodes[i+24:i+26])[0]\r\n n.append( (nid, ip, port) )\r\n return n\r\n\r\nclass KRPC(object):\r\n def __init__(self):\r\n self.types = {\r\n \"r\": 
self.response_received,\r\n \"q\": self.query_received\r\n }\r\n self.actions = {\r\n \"get_peers\": self.get_peers_received,\r\n \"announce_peer\": self.announce_peer_received,\r\n }\r\n\r\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n self.socket.bind((\"0.0.0.0\", DHT_PORT))\r\n\r\n def response_received(self, res, address):\r\n self.find_node_handler(res)\r\n\r\n def query_received(self, res, address):\r\n try:\r\n self.actions[res[\"q\"]](res, address)\r\n except KeyError:\r\n pass\r\n\r\n def send_krpc(self, msg, address):\r\n try:\r\n self.socket.sendto(bencode(msg), address)\r\n except:\r\n pass\r\n\r\nclass Client(KRPC):\r\n def __init__(self):\r\n KRPC.__init__(self)\r\n\r\n def find_node(self, address, nid=None):\r\n nid = random_id()\r\n tid = entropy(TID_LENGTH)\r\n msg = {\r\n \"t\": tid,\r\n \"y\": \"q\",\r\n \"q\": \"find_node\",\r\n \"a\": {\"id\": nid, \"target\": nid}\r\n }\r\n self.send_krpc(msg, address)\r\n\r\n def find_node_handler(self, res):\r\n try:\r\n nodes = decode_nodes(res[\"r\"][\"nodes\"])\r\n for node in nodes:\r\n (nid, ip, port) = node\r\n if len(nid) != 20: continue\r\n self.find_node( (ip, port), nid )\r\n except KeyError:\r\n pass\r\n\r\n def joinDHT(self):\r\n for address in BOOTSTRAP_NODES: self.find_node(address)\r\n\r\n def start(self):\r\n self.joinDHT()\r\n\r\n while True:\r\n try:\r\n (data, address) = self.socket.recvfrom(65536)\r\n res = bdecode(data)\r\n self.types[res[\"y\"]](res, address)\r\n except Exception:\r\n pass\r\n\r\nclass Server(Client):\r\n def __init__(self, master):\r\n Client.__init__(self)\r\n self.master = master\r\n\r\n def get_peers_received(self, res, address):\r\n try:\r\n infohash = res[\"a\"][\"info_hash\"]\r\n self.master.log(infohash)\r\n except KeyError:\r\n pass\r\n\r\n def announce_peer_received(self, res, address):\r\n try:\r\n infohash = res[\"a\"][\"info_hash\"]\r\n self.master.log(infohash)\r\n except KeyError:\r\n pass\r\n\r\n#using example\r\nclass Master(object):\r\n def __init__(self, f):\r\n self.f = f\r\n\r\n def log(self, infohash):\r\n self.f.write(infohash.encode(\"hex\")+\"\\n\")\r\n self.f.flush()\r\ntry:\r\n f = open(\"infohash.log\", \"a\")\r\n m = Master(f)\r\n s = Server(Master(f))\r\n s.start() \r\nexcept KeyboardInterrupt:\r\n s.socket.close()\r\n f.close()","sub_path":"simDHT.py","file_name":"simDHT.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"141062663","text":"from StringIO import StringIO\nimport json\nimport math\nimport random\nimport subprocess\nimport time\nimport urllib\nimport uuid\nimport pycurl\nimport ss2_config\n\nclass shieldsquareRequest:\n\n\t_zpsbd0 = \"false\"\n\t_zpsbd1 = \"\"\n\t_zpsbd2 = \"\"\n\t_zpsbd3 = \"\"\n\t_zpsbd4 = \"\"\n\t_zpsbd5 = \"\"\n\t_zpsbd6 = \"\"\n\t_zpsbd7 = \"\"\n\t_zpsbd8 = \"\"\n\t_zpsbd9 = \"\"\n\t_zpsbda = \"\"\n\t__uzma = \"\"\n\t__uzmb = 0\n\t__uzmc = \"\"\n\t__uzmd = 0\n\nclass shieldsquareCurlResponseCode:\n\n\terror_string = \"\"\n\tresponsecode = 0\n\n\nclass shieldsquareResponse:\n\n\tpid = \"\"\n\tresponsecode= 0\n\turl = \"\"\n\treason =\"\"\n\n\nclass shieldsquareCodes:\n\n\tALLOW = 0\n\tCAPTCHA = 2\n\tBLOCK = 3\n\tFFD = 4\n\tALLOW_EXP = -1\n\ndef shieldsquare_ValidateRequest( shieldsquare_username, shieldsquare_calltype, shieldsquare_pid , request):\n\n\tshieldsquare_low = 10000\n\tshieldsquare_high = 99999\n\tshieldsquare_a = 1\n\tshieldsquare_b = 3\n\tshieldsquare_c = 7\n\tshieldsquare_d = 1\n\tshieldsquare_e = 
5\n\tshieldsquare_f = 10\n\tshieldsquare_time = int(time.time())\n\tshieldsquare_request = shieldsquareRequest()\n\tshieldsquare_RETURNCODES = shieldsquareCodes()\n\tshieldsquare_response = shieldsquareResponse()\n\tshieldsquare_response.dynamic_JS = \"var __uzdbm_c = 2+2\"\n\tshieldsquare_curl_response = shieldsquareCurlResponseCode()\n\tshieldsquare_service_url = 'http://' + ss2_config._ss2_domain + '/getRequestData'\n\tcookie_value_dict = dict()\n\tshieldsquare_ex_time = 3600*24*365*10 + 1*1*3*60*60\n\t\n\tif( ss2_config._timeout_value > 1000 ):\n\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW_EXP\n\t\tshieldsquare_response.reason = \"ShieldSquare Timeout cant be greater then 1000 Milli seconds\"\n\t\treturn shieldsquare_response.__dict__,None;\n\n\tif len(shieldsquare_pid) == 0:\n\t\tshieldsquare_pid = shieldsquare_generate_pid(ss2_config._sid,request)\n\n\tif '__uzma' in request.COOKIES:\n\t\tcookie_value_dict[\"__uzma\"] = {\"value\":request.COOKIES.get(\"__uzma\"),\"age\":shieldsquare_ex_time}\t\n\t\tshieldsquare_request.__uzma = request.COOKIES.get(\"__uzma\")\n\telse:\n\t\tshieldsquare_uzma = uuid.uuid1()\n\t\tcookie_value_dict[\"__uzma\"] = {\"value\":str(shieldsquare_uzma),\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzma = str(shieldsquare_uzma)\n\n\tif '__uzmc' in request.COOKIES:\n\t\tshieldsquare_uzmc = request.COOKIES.get(\"__uzmc\")\n\t\tshieldsquare_uzmc = shieldsquare_uzmc[shieldsquare_e:]\n\t\tshieldsquare_uzmc = shieldsquare_uzmc[:-shieldsquare_e]\n\t\tshieldsquare_a = (int(shieldsquare_uzmc) - shieldsquare_c) / shieldsquare_b\n\t\tshieldsquare_a += 1\n\t\tshieldsquare_uzmc= str(random.randint(shieldsquare_low, shieldsquare_high)) + str(shieldsquare_c+shieldsquare_a*shieldsquare_b) + str(random.randint(shieldsquare_low, shieldsquare_high))\t\n\t\tcookie_value_dict[\"__uzmc\"] = {\"value\":shieldsquare_uzmc,\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzmc = shieldsquare_uzmc\n\t\n\telse:\n\t\tshieldsquare_uzmc= str(random.randint(shieldsquare_low, shieldsquare_high)) + str(shieldsquare_c+shieldsquare_a*shieldsquare_b) + str(random.randint(shieldsquare_low, shieldsquare_high))\n\t\tcookie_value_dict[\"__uzmc\"] = {\"value\":shieldsquare_uzmc,\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzmc = shieldsquare_uzmc\n\t\t\n\tif '__uzmb' in request.COOKIES:\n\t\tcookie_value_dict[\"__uzmb\"] = {\"value\":request.COOKIES.get(\"__uzmb\"),\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzmb = request.COOKIES.get(\"__uzmb\")\n\t\n\telse:\n\t\tcookie_value_dict[\"__uzmb\"] = {\"value\":int(time.time()),\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzmb = shieldsquare_time\n\t\n\tif '__uzmd' in request.COOKIES:\n\t\tcookie_value_dict[\"__uzmd\"] = {\"value\":shieldsquare_time,\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzmd = shieldsquare_time\n\t\n\telse:\n\t\tcookie_value_dict[\"__uzmd\"] = {\"value\":int(time.time()),\"age\":shieldsquare_ex_time}\n\t\tshieldsquare_request.__uzmd = shieldsquare_time\n\t\t\n\tif(ss2_config._mode == \"Active\"):\n\t\tshieldsquare_request._zpsbd0 = 'true'\n\t\n\telse:\n\t\tshieldsquare_request._zpsbd0 = 'false'\n\t\n\tshieldsquare_request._zpsbd1 = ss2_config._sid\n\tshieldsquare_request._zpsbd2 = shieldsquare_pid\n\tshieldsquare_request._zpsbd3 = ''\n\tshieldsquare_request._zpsbd4 = ''\n\tshieldsquare_request._zpsbd5 = ''\n\tshieldsquare_request._zpsbd6 = ''\n\tshieldsquare_request._zpsbd7 = ''\n\t\n\tshieldsquare_request._zpsbd3 = 
request.META.get('HTTP_REFERER')\t\n\tshieldsquare_request._zpsbd4 = request.path\n\tshieldsquare_request._zpsbd5 = request.COOKIES.get(ss2_config._sessid) \n\tshieldsquare_request._zpsbd6 = request.META.get(ss2_config._ipaddress)\n\tshieldsquare_request._zpsbd7 = request.META.get('HTTP_USER_AGENT')\n\t\n\tshieldsquare_request._zpsbd8 = shieldsquare_calltype\n\tshieldsquare_request._zpsbd9 = shieldsquare_username\n\tshieldsquare_request._zpsbda = shieldsquare_time\n\tshieldsquare_json_obj = json.dumps(shieldsquare_request.__dict__)\n\tshieldsquare_response.pid =shieldsquare_pid\n\tshieldsquare_response.url =ss2_config._js_url\n\tif(ss2_config._mode == \"Active\"):\n\t\tshieldsquare_curl_response = shieldsquare_post_sync(shieldsquare_service_url, shieldsquare_json_obj, ss2_config._timeout_value)\n\t\tif(str(shieldsquare_curl_response[1]) != '200'):\n\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW_EXP\n\t\t\tshieldsquare_response.reason = shieldsquare_curl_response[0]\n\t\telse:\n\t\t\tshieldsquare_response_from_ss = json.loads(str(shieldsquare_curl_response[0]))\n\t\t\tshieldsquare_response.dynamic_JS = shieldsquare_response_from_ss['dynamic_JS']\n\t\t\tn=int(shieldsquare_response_from_ss['ssresp'])\n\t\t\tif n == 0:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW\n\t\t\telif n==1:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.MONITOR\n\t\t\telif n==2:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.CAPTCHA\n\t\t\telif n==3:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.BLOCK\n\t\t\telif n==4:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.FFD\n\t\t\telse:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW_EXP\n\t\t\t\tshieldsquare_response.reason = str(shieldsquare_curl_response[1])\n\telse:\n\t\tif(ss2_config._async_http_post == 'true'):\n\t\t\terror_code=shieldsquare_post_async(shieldsquare_service_url, shieldsquare_json_obj,str(ss2_config._timeout_value))\n\t\t\tif(str(error_code[1])!='None'):\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW_EXP\n\t\t\t\tshieldsquare_response.reason = \"Request Timed Out/Server Not Reachable\"\n\t\t\telse:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW\n\t\telse:\n\t\t\tshieldsquare_curl_response=shieldsquare_post_sync(shieldsquare_service_url, shieldsquare_json_obj,ss2_config._timeout_value)\n\n\t\t\tif(str(shieldsquare_curl_response[1])!='200'):\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW_EXP\n\t\t\t\tshieldsquare_response.reason = str(shieldsquare_curl_response[0])\n\t\t\telse:\n\t\t\t\tshieldsquare_response.responsecode = shieldsquare_RETURNCODES.ALLOW\n\t\t\t\tshieldsquare_response_from_ss = json.loads(str(shieldsquare_curl_response[0]))\n\t\t\t\tshieldsquare_response.dynamic_JS = shieldsquare_response_from_ss['dynamic_JS']\n\t\t\t\t\n\treturn shieldsquare_response.__dict__,cookie_value_dict;\n\ndef shieldsquare_post_async(url, payload, timeout):\n\tdata = urllib.quote(payload)\n\tcmd = 'curl --fail --silent -X POST -H \"Accept: Application/json\" -H \"Content-Type: application/json\" --connect-timeout 1 -m '+ str(1) + ' ' + url + \" -d '\"+ data + \"'\" +\" &\"\n\tp = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)\n\t(output, err) = p.communicate()\n\tresponse=[output,err]\n\treturn response;\n\ndef shieldsquare_post_sync(url, params, timeout):\n\tdata = 
urllib.quote(params)\n\tstorage = StringIO()\n\tc = pycurl.Curl()\n\tc.setopt(pycurl.URL, url)\n\tc.setopt(pycurl.TIMEOUT_MS, timeout)\n\tc.setopt(pycurl.NOSIGNAL, 1)\n\tc.setopt(pycurl.VERBOSE, False)\n\tc.setopt(pycurl.WRITEFUNCTION, storage.write)\n\tc.setopt(pycurl.HTTPHEADER, ['Accept: application/json'])\n\tc.setopt(pycurl.POST, 1)\n\tc.setopt(pycurl.POST, 1)\n\tc.setopt(pycurl.POSTFIELDS, data)\n\ttry:\n\t\tresponse=c.perform()\n\t\tcontent = storage.getvalue()\n\t\tresponse=[content,c.getinfo(c.RESPONSE_CODE)]\n\texcept:\n\t\tresponse=[\"Request Timed Out/Server Not Reachable\",\"0\"]\n\tc.close()\n\treturn response;\n\ndef microtime(get_as_float = False):\n\tif get_as_float:\n\t\treturn time.time();\n\telse:\n\t\treturn '%f %d' % math.modf(time.time());\n\ndef shieldsquare_generate_pid(shieldsquare_sid,request):\n\tt=microtime()\n\ttm=t.split(\" \")\n\tp1,p2,p3,p4,p5 = shieldsquare_sid.split(\"-\")\n\tsid_min = num = int(p4,16);\n\trmstr1= \"00000000\" + \"%x\" % int(tm[1])\n\trmstr2= \"0000\" + \"%x\" % int(round(float(tm[0]) * 65536))\n\treturn '%08s-%04x-%04s-%04s-%04x%04x%04x' % (shieldsquare_IP2Hex(request),sid_min,rmstr1[-4:],rmstr2[-4:],\n\t\t\trandom.randint(0,0xffff), random.randint(0,0xffff), random.randint(0,0xffff));\n\n\ndef shieldsquare_IP2Hex(request):\n\thexx=\"\"\n\tip = request.META.get(ss2_config._ipaddress)\n\tpart=ip.split('.')\n\thexx=\"\"\n\tfor i in range(0,len(part)):\n\t\tdt = \"0\" + \"%x\" % int(part[i])\n\t\thexx = hexx + dt[-2:]\n\n\treturn hexx;\n\ndef set_default(obj):\n\tif isinstance(obj, set):\n\t\treturn list(obj)\n\traise TypeError\n\n\ndef set_cookie_in_response(response,cookie_values_dict):\n\t\n\tfor cookie_name in cookie_values_dict:\n\t\tresponse.set_cookie(cookie_name,cookie_values_dict[cookie_name][\"value\"], max_age=cookie_values_dict[cookie_name][\"age\"])\n\treturn response","sub_path":"connector/ss2.py","file_name":"ss2.py","file_ext":"py","file_size_in_byte":9232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"426706404","text":"\"\"\"\r\n\n\nImagine you took all the numbers between 0 and `n` and concatenated them\ntogether into a long string. How many digits are there between 0 and `n`?\nWrite a function that can calculate this.\n\nThere are 0 digits between 0 and 1, there are 9 digits between 0 and 10 and\nthere are 189 digits between 0 and 100.\n\n### Examples\n\n digits(1) ➞ 0\n \n digits(10) ➞ 9\n \n digits(100) ➞ 189\n \n digits(2020) ➞ 6969\n\n### Notes\n\nThe numbers are going to be rather big so creating that string won't be\npractical.\n\n\"\"\"\r\n\ndef digits(num):\n s = 0\n k = 1\n occ = 9\n n = 10\n while num >= n:\n s += k*occ\n k += 1\n occ *= 10\n n *= 10\n return s + (num-n//10)*k\n\n","sub_path":"j9zed4GnykS48W6vh_3.py","file_name":"j9zed4GnykS48W6vh_3.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"424710238","text":"\"\"\"opengenusWeb URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. 
Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path, include\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\nfrom django.views.generic import TemplateView\r\n\r\nfrom pages.views import InternListView,InternDetailView\r\n\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n path('', TemplateView.as_view(template_name=\"home.html\"), name=\"home\"),\r\n path('index', TemplateView.as_view(template_name=\"index.html\")),\r\n path('cosmos', TemplateView.as_view(template_name=\"cosmos.html\"), name=\"cosmos\"),\r\n path('quark', TemplateView.as_view(template_name=\"quark.html\"), name=\"quark\"),\r\n path('search', TemplateView.as_view(template_name=\"search.html\"), name=\"search\"),\r\n path('iq', TemplateView.as_view(template_name=\"iq.html\"), name=\"iq\"),\r\n path('discuss', TemplateView.as_view(template_name=\"discuss.html\"), name=\"discuss\"),\r\n # path('intern/', internDetailView),\r\n path('school/', include('schools.urls', namespace=\"schools\")),\r\n path('intern/', InternListView.as_view()),\r\n path('intern/', InternDetailView.as_view()),\r\n path('intern/search/', InternListView.as_view()),\r\n path('faq', TemplateView.as_view(template_name=\"faq.html\"), name=\"faq\"),\r\n\r\n path('tinymce/', include('tinymce.urls')),\r\n]\r\n\r\nif settings.DEBUG:\r\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\r\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\r\n","sub_path":"opengenusWeb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"356376318","text":"from kivy.app import App\r\nfrom kivy.lang import Builder\r\nfrom kivy.uix.widget import Widget\r\nfrom kivy.uix.button import Button\r\nfrom kivy.clock import Clock\r\nfrom kivy.uix.label import Label\r\nfrom kivy.factory import Factory\r\nfrom kivy.uix.relativelayout import RelativeLayout\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.uix.stacklayout import StackLayout\r\nfrom kivy.properties import StringProperty, DictProperty, ListProperty\r\nimport SimulateOutside\r\n\r\n\"\"\"\r\nThis is a custom class for dynamically creating buttons and closing them.\r\nThis is for tag lists in our program.\r\n\"\"\"\r\n\r\nBuilder.load_string('''\r\n:\r\n size_hint: (None, None)\r\n text: self.ourText\r\n width: self.texture_size[0] + 69\r\n height: 29\r\n pos: (50, 300)\r\n background_normal: ''\r\n ArtistLayout:\r\n size: (root.width, root.height)\r\n pos: self.parent.pos\r\n Label:\r\n size_hint: (None, 1)\r\n width: self.texture_size[0] + 40\r\n pos: (0, 0)\r\n canvas.before:\r\n Color:\r\n rgba: .5, .5, .5, 1\r\n Rectangle:\r\n pos: self.pos\r\n size: self.size\r\n text: root.ourText\r\n background_normal: '..\\pics\\BlankUpTiny.png'\r\n background_down: '..\\pics\\BlankDownTiny.png'\r\n group: 'test'\r\n Button:\r\n size_hint: (None, 1)\r\n width: 29\r\n pos: (root.texture_size[0] + 40, 0)\r\n background_normal: '..\\pics\\closeUpTiny.png'\r\n background_down: '..\\pics\\closeDownTiny.png'\r\n group: 'test'\r\n\r\n:\r\n\tspacing: 5, 5\r\n''')\r\n\r\nclass ReadWriteArtistList(StackLayout):\r\n c_taglist = ['cat', 'funny', 'jump', 'fail', 'animals']\r\n dynamic_ids = DictProperty({}) # declare class attribute, dynamic_ids\r\n\r\n def 
__init__(self, **kwargs):\r\n super(ReadWriteArtistList, self).__init__(**kwargs)\r\n Clock.schedule_once(lambda dt: self.populateList(), timeout=0.1)\r\n\r\n def getTarget(self, p_arg):\r\n # gets the id of the last custom button. Used to accessing it.\r\n return [x for x in self.children if str(x.__class__.__name__) == p_arg]\r\n\r\n def populateList(self):\r\n #this adds all the tags to our tag list\r\n f_taglist = SimulateOutside.getArtists(SimulateOutside.getActiveFilePath())\r\n for i_tag in f_taglist:\r\n #similar to addNewArtist() but doesn't just includes pre-existing artists in the gui\r\n i_id = \"Tag:\" + i_tag\r\n i_newArtist = DynamicTag(id=i_id,\r\n ourText=i_tag)\r\n self.add_widget(i_newArtist)\r\n self.dynamic_ids[i_id] = i_newArtist\r\n i_newArtist.children[0].children[0].bind(on_release=self.delayedClose)\r\n\r\n def wipeArtistList(self):\r\n #this just cleans the gui of artists. It doesn't actually edit any data\r\n for item in self.dynamic_ids:\r\n print(\"wipeArtistList():\", item)\r\n\r\n def addNewArtist(self, p_arg):\r\n f_id = \"Tag:\"+p_arg\r\n f_newArtist = DynamicTag(id=f_id,\r\n ourText=p_arg)\r\n\r\n #this adds the tag to the file before we add it to our gui\r\n #this should theoretically stop the function if we tried adding a duplicate tag\r\n try:\r\n if SimulateOutside.addArtist(SimulateOutside.getActiveFilePath(), p_arg)==False:\r\n print(\"ReadWriteArtistList.addNewArtist(): could not add tag \\\"\", p_arg, \"\\\"\", sep='')\r\n return False\r\n #TODO: remove this part once the outside function can reliably test if we're adding a duplicate tag\r\n if f_id in self.dynamic_ids:\r\n # We don't want duplicate tags\r\n print(\"ReadWriteArtistList.addNewArtist(): We already have this tag\")\r\n return False\r\n except:\r\n print(\"ReadWriteArtistList.addNewArtist(): error adding tag\")\r\n return False\r\n\r\n self.add_widget(f_newArtist)\r\n self.dynamic_ids[f_id] = f_newArtist\r\n f_newArtist.children[0].children[0].bind(on_release=self.delayedClose)\r\n return True\r\n\r\n def closeTarget(self, p_targetID):\r\n #removes a tag from the file and our user interface\r\n try:\r\n # this first tries to remove the tag from the file using out metadata library\r\n if SimulateOutside.removeArtist(SimulateOutside.getActiveFilePath(), p_targetID[4:]):\r\n # if that succeeds, we try removing it from the list of dynamic tags displayed\r\n f_target = self.dynamic_ids[p_targetID]\r\n #print(\"ReadWriteArtistList.closeTarget(): closing\", p_targetID)\r\n if f_target != None:\r\n self.remove_widget(f_target)\r\n del self.dynamic_ids[p_targetID]\r\n except KeyError:\r\n print(\"ReadWriteArtistList.closeTarget(): key not in dictionary. 
Weird\")\r\n print(\"\\tIDs:\", self.dynamic_ids)\r\n print(\"\\ttried:\", p_targetID)\r\n return True\r\n\r\n def delayedClose(self, arg):\r\n #print(\"ReadWriteArtistList.delayedClose() arg:\\t\", arg)\r\n #print(\"ReadWriteArtistList.delayedClose() type:\\t\", type(arg))\r\n #without lambda here, this would pass the timeout arguement to our function.\r\n Clock.schedule_once(lambda dt: self.closeTarget(arg.parent.parent.id), timeout=0.01)\r\n\r\n def getArtistList(self):\r\n f_entrys = []\r\n for entry in self.dynamic_ids:\r\n f_entrys.append(self.dynamic_ids[entry].ourText)\r\n return f_entrys\r\n\r\nclass DynamicTag(Label):\r\n ourText = StringProperty(\"\")\r\n\r\n def __init__(self, **kwargs):\r\n super(DynamicTag, self).__init__(**kwargs)\r\n\r\n def debugSize(self):\r\n f_textBtn= self.children[0].children[1]\r\n f_closeBtn = self.children[0].children[0]\r\n print(\"FrameStartX:\", self.pos[0], end=\"\\t\\t\\t\")\r\n print(\"FrameEndX:\", self.pos[0] + self.width)\r\n print(\"\\tTextBtnStartX:\", f_textBtn.pos[0], end=\"\\t\")\r\n print(\"\\tTextBtnEndX:\", f_textBtn.pos[0]+f_textBtn.width)\r\n print(\"\\tCloseBtnStartX:\", f_closeBtn.pos[0], end=\"\\t\")\r\n print(\"\\tCloseBtnEndX:\", f_closeBtn.pos[0]+f_closeBtn.width)\r\n print()\r\n print(\"TextBtnWidthX:\", f_textBtn.width, end=\"\\t\\t\")\r\n print(\"TextBtnTextureX:\", f_textBtn.texture_size[0], end=\"\\t\")\r\n print(\"TextBtnExtraX:\", f_textBtn.width-f_textBtn.texture_size[0])\r\n print()\r\n print(\"FrameWidth:\", self.width, end=\"\\t\\t\")\r\n print(\"TotalButtonWidth:\", f_textBtn.width + f_closeBtn.width)\r\n print(\"FrameBlackSpace:\", self.width-(f_textBtn.width + f_closeBtn.width))\r\n\r\nclass ArtistLayout(RelativeLayout):\r\n def __init__(self, **kwargs):\r\n super(ArtistLayout, self).__init__(**kwargs)\r\n\r\nFactory.register('ReadWriteArtistList', cls=ReadWriteArtistList)","sub_path":"guiTesting/ReadWriteArtistList.py","file_name":"ReadWriteArtistList.py","file_ext":"py","file_size_in_byte":6955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"323405993","text":"import pandas as pands\r\nimport matplotlib.pyplot as grph_plot\r\n\r\ndata_frame = pands.read_csv('visitors_resources.csv')\r\ndata_frame.head(12)\r\ndata_frame = data_frame.set_index('2019 Year')\r\n\r\n\r\nfig, axes_obj = grph_plot.subplots()\r\ndata_frame.plot(kind='area', ax=axes_obj)\r\ngrph_plot.ylabel('visitors')\r\naxes_obj.grid(color='gray', linestyle='-', alpha=0.3)","sub_path":"visitors_resources.py","file_name":"visitors_resources.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"177550693","text":"# 981. 
Time Based Key-Value Store\n\n\nclass TimeMap:\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.time_map = {}\n\n def set(self, key: str, value: str, timestamp: int) -> None:\n if key not in self.time_map:\n self.time_map[key] = []\n self.time_map[key].append([value, timestamp])\n\n def get(self, key: str, timestamp: int) -> str:\n if key not in self.time_map:\n return \"\"\n value_arr = self.time_map[key]\n if timestamp >= value_arr[0][1]:\n l = 0\n r = len(value_arr)-1\n while l <= r:\n mid = l + (r - l) // 2\n if value_arr[mid][1] == timestamp:\n return value_arr[mid][0]\n elif value_arr[mid][1] > timestamp:\n r = mid - 1\n else:\n l = mid + 1\n return value_arr[r][0]\n return \"\"\n\n\n# Your TimeMap object will be instantiated and called as such:\n# obj = TimeMap()\n# obj.set(key,value,timestamp)\n# param_2 = obj.get(key,timestamp)","sub_path":"answers/TimeMap.py","file_name":"TimeMap.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"341282111","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/7/16 16:59\n\n@author: vincent\nGiven a sorted linked list, delete all duplicates such that each element appear only once.\n\nExample 1:\n\nInput: 1->1->2\nOutput: 1->2\nExample 2:\n\nInput: 1->1->2->3->3\nOutput: 1->2->3\n\"\"\"\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def deleteDuplicates(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if head == None or head.next == None:\n return head\n head.next = self.deleteDuplicates(head.next)\n return head.next if head.val == head.next.val else head","sub_path":"双指针/83. Remove Duplicates from Sorted List.py","file_name":"83. Remove Duplicates from Sorted List.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"165352209","text":"\n__author__ = \"Timothy Alexander, Joshua Gisi\"\n__copyright__ = \"Copyright 2019, Project Money Tree\"\n__version__ = \"0.0.1\"\n__email__ = \"TJEnterprises2019@gmail.com\"\n__status__ = \"Development\"\n\nimport os\nfrom Support.Companion import sqlCompanion as companion\nfrom Support.treasureMap import Map\nfrom tkinter import *\nimport configparser\n\n\n\nclass backTest:\n\n def __init__(self, M1candles):\n \"\"\"\n Initialize class-specific data. 
Make sure to edit the config file as well.\n :param M1candles: An array of candle objects\n \"\"\"\n self.M1Candles = M1candles\n self.markers = []\n self.purchases = []\n self.entryState = \"No Order\"\n self.riskPrice = -1\n self.sl = -1\n self.money = 100\n self.moneyInTrade = 0\n self.leverage = 20\n self.risk = 0.02\n\n self.configs = configparser.ConfigParser()\n config_file = os.path.join(os.path.dirname(__file__), 'HistoryAnalysisConfig.ini')\n self.configs.read(config_file)\n\n self.openConditionObj = []\n self.closeConditionObj = []\n self.initClasses(list(self.configs.get('conditions', 'openConditions').split(', ')),list(self.configs.get('conditions', 'closeConditions').split(', ')))\n\n self.delayCandles = self.configs.getint('control', 'delayCandles')\n self.run(M1candles)\n\n\n\n\n\n\n\n def run(self, M1candles):\n \"\"\"\n A buying/selling simulator for back testing Forex strategies\n :param M1candles: An array of candle objects\n :return:\n \"\"\"\n candlesUpToCurrent = []\n for curCandle in M1candles:\n candlesUpToCurrent.append(curCandle)\n\n\n checkConditions = self.openConditions(curCandle, candlesUpToCurrent)\n\n if self.entryState == \"No Order\" and (checkConditions == \"long\" or checkConditions == \"short\") and self.delayCandles <= 0:\n self.entryState = \"Order Fulfilled\"\n from Conditions.createOrder import condition as createOrder\n order = createOrder(curCandle, candlesUpToCurrent, checkConditions, backtestRef=self)\n self.sl = order.sl\n self.purchases.append({'openDT': curCandle.datetime, 'openPrice': curCandle.close, 'pos': checkConditions})\n for con in self.closeConditionObj:\n con.setup(curCandle, candlesUpToCurrent, order.position)\n\n elif self.entryState == \"Order Fulfilled\" and self.closeConditions(curCandle, candlesUpToCurrent, order.position):\n self.entryState = \"No Order\"\n\n if self.delayCandles >= 0: self.delayCandles -= 1\n\n self.paintMap()\n\n\n\n\n\n\n\n def initClasses(self, openConditionList, closeConditionList):\n \"\"\"\n Run the open/close condition classes specified in the config.ini to initialize their data\n :param openConditionList: A list of open condition classes that all must be met in order for a new position to occur\n :param closeConditionList: A list of close condition classes that all must be met in order for a close position to occur\n :return:\n \"\"\"\n for con in openConditionList:\n name = \"condition\"\n package = \"Conditions.\"+con\n obj = getattr(__import__(package,fromlist=[name]), name)\n self.openConditionObj.append(obj(self))\n\n for con in closeConditionList:\n name = \"condition\"\n package = \"Conditions.\" + con\n obj = getattr(__import__(package, fromlist=[name]), name)\n self.closeConditionObj.append(obj(self))\n\n\n\n\n\n\n\n def openConditions(self, curCandle, candlesUpToCurrent):\n \"\"\"\n Check if the open conditions are met\n :param curCandle:\n :param candlesUpToCurrent:\n :return:\n \"\"\"\n votesToBuy = 0\n votesToSell = 0\n for con in self.openConditionObj:\n checkConditions = con.run(curCandle, candlesUpToCurrent)\n if checkConditions == 'long':\n votesToBuy += 1\n elif checkConditions == 'short':\n votesToSell += 1\n elif checkConditions == 'NA':\n votesToSell += 0\n votesToBuy += 0\n else:\n votesToBuy = -10000\n votesToSell = -10000\n\n if votesToSell == 0 and votesToBuy == 0:\n return False\n elif votesToSell == 0 and votesToBuy > 0:\n return \"long\"\n elif votesToSell > 0 and votesToBuy == 0:\n return \"short\"\n\n return False\n\n\n\n\n\n\n\n def closeConditions(self, curCandle, 
candlesUpToCurrent, position):\n \"\"\"\n Check if the close conditions are met\n :param curCandle:\n :param candlesUpToCurrent:\n :param position:\n :return:\n \"\"\"\n for con in self.closeConditionObj:\n if not con.run(curCandle, candlesUpToCurrent, position):\n return False\n return True\n\n\n\n\n\n\n\n def addMarker(self, datetime, name, color, text=None):\n \"\"\"\n Adds a marker to an array which will end up in the Treasure map class to be painted\n :param datetime:\n :param name: type of marker\n :param color:\n :param text:\n :return:\n \"\"\"\n self.markers.append({'datetime':datetime, 'type':name, 'color':color, 'text':text})\n\n\n\n\n\n\n\n def addHorizontalLine(self, datetime, name, pipOffset=0, color=\"RED\", price=0):\n \"\"\"\n Adds a marker to an array which will end up in the Treasure map class to be painted\n :param datetime:\n :param name:\n :param pipOffset:\n :param color:\n :param price:\n :return:\n \"\"\"\n self.markers.append({'datetime':datetime, 'type':name, 'pipOffset': pipOffset, 'color': color, 'price':price})\n\n\n\n\n\n\n\n def paintMap(self):\n \"\"\"\n Create a tKinter canvas and populate it\n :return:\n \"\"\"\n root = Tk()\n Map(root, self.M1Candles, self.purchases, markers=self.markers, indicators=[]) # {'name':'EMA(100)', 'color':'BLUE'}, {'name':'EMA(150)', 'color':'RED'}\n root.mainloop()\n\n\n\n\n\nif __name__ == '__main__':\n Data = companion(\"C:\\\\Users\\\\treeb\\\\OneDrive\\\\Desktop\\\\BackTestData.db\")\n dataSet = Data.getDataByDatetime(\"EUR_USD\", printReturn=False, granularity='M1',\n startDatetime='2017-01-05 02:00:00', endDatetime='2018-04-06 02:05:00', indicators=[])\n\n\n backTest(dataSet)\n Data.closeConnection()","sub_path":"BacktestingSoftware/HistoryAnalysis.py","file_name":"HistoryAnalysis.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"461169600","text":"from pathlib import Path\n\ndef list_files(path,valid_extensions,recursive=True):\n \"\"\"\n List all files in a directory given extensions.\n \"\"\"\n globber = Path(path).rglob if recursive else Path(path).glob\n if valid_extensions is None:\n return list(globber(\"*.*\"))\n # glob patterns have no alternation, so collect the matches of one pattern per extension\n files = []\n for ext in valid_extensions:\n files.extend(globber(\"*.\" + ext))\n return files\n\n\ndef list_images(path,valid_extensions=[\"jpg\", \"jpeg\", \"png\", \"bmp\", \"tif\", \"tiff\"],recursive=True):\n \"\"\"\n List all images in a directory recursively.\n pass required extensions to the valid_extensions parameter to filter the files.\n \"\"\"\n images_list=list_files(path,valid_extensions,recursive=recursive)\n return images_list\n\nif __name__ == \"__main__\":\n import argparse\n myparser = argparse.ArgumentParser(description='List all images in a directory.')\n myparser.add_argument('path',metavar='path',type=str,help='Path to the directory of Images.')\n args=myparser.parse_args()\n print(list_images(args.path))","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"649580909","text":"class Solution(object):\n def myAtoi(self, str):\n \"\"\"\n :type str: str\n :rtype: int\n \"\"\"\n #deal with inputs which have invalid characters in the middle. 
eg: \" -0012a42\"\n for i,c in enumerate(str.strip()):\n if (not c.isdigit()) and not (i == 0 and c in '+-'):\n str=str.strip()[:i]\n break\n try:\n ret=int(str)\n return ret if -2147483648<=ret<=2147483647 else (-2147483648,2147483647)[ret>0]\n except:\n return 0\n","sub_path":"8_StringtoInteger(atoi)_M.py","file_name":"8_StringtoInteger(atoi)_M.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"3385748","text":"#!/usr/bin/env python3\n\"\"\"Autoencoders\"\"\"\n\nimport tensorflow.keras as keras\n\n\n# reparameterization trick\n# instead of sampling from Q(z|X), sample epsilon = N(0,I)\n# z = z_mean + sqrt(var) * epsilon\n\n\ndef sampling(args):\n \"\"\"Reparameterization trick by sampling from an isotropic unit Gaussian.\n # Arguments\n args (tensor): mean and log of variance of Q(z|X)\n # Returns\n z (tensor): sampled latent vector\n \"\"\"\n\n z_mean, z_log_var = args\n batch = keras.backend.shape(z_mean)[0]\n dim = keras.backend.int_shape(z_mean)[1]\n # by default, random_normal has mean = 0 and std = 1.0\n epsilon = keras.backend.random_normal(shape=(batch, dim))\n return z_mean + keras.backend.exp(0.5 * z_log_var) * epsilon\n\n\ndef autoencoder(input_dims, hidden_layers, latent_dims):\n \"\"\"\n :param input_dims: is an integer containing\n the dimensions of the model input\n :param hidden_layers: is a list containing the number\n of nodes for each hidden layer in the encoder, respectively\n :param latent_dims: is an integer containing the\n dimensions of the latent space representation\n :return: encoder, decoder, auto\n \"\"\"\n input_image = keras.Input(shape=(input_dims,))\n output = keras.layers.Dense(hidden_layers[0],\n activation='relu')(input_image)\n z_mean = keras.layers.Dense(latent_dims)(output)\n z_log_var = keras.layers.Dense(latent_dims)(output)\n z = keras.layers.Lambda(sampling,\n output_shape=(latent_dims, ))([z_mean, z_log_var])\n\n input_decoder = keras.Input(shape=(latent_dims,))\n out_decoder = keras.layers.Dense(hidden_layers[-1],\n activation='relu')(input_decoder)\n\n for layer in range(len(hidden_layers) - 2, -1, -1):\n out_decoder = keras.layers.Dense(hidden_layers[layer],\n activation='relu')(out_decoder)\n decoder_out = keras.layers.Dense(input_dims,\n activation='sigmoid')(out_decoder)\n\n encoder = keras.models.Model(inputs=input_image,\n outputs=[z, z_mean, z_log_var])\n decoder = keras.models.Model(inputs=input_decoder,\n outputs=decoder_out)\n\n full_encoder = encoder(input_image)[0]\n full_decoder = decoder(full_encoder)\n auto = keras.models.Model(inputs=input_image,\n outputs=full_decoder)\n\n def loss(y_in, y_out):\n \"\"\" custom loss function \"\"\"\n reconstruction_loss = keras.backend.binary_crossentropy(y_in, y_out)\n reconstruction_loss = keras.backend.sum(reconstruction_loss, axis=1)\n kl_loss = (1 + z_log_var - keras.backend.square(z_mean)\n - keras.backend.exp(z_log_var))\n kl_loss = -0.5 * keras.backend.sum(kl_loss, axis=1)\n return reconstruction_loss + kl_loss\n\n auto.compile(optimizer='Adam',\n loss=loss)\n\n return encoder, decoder, auto\n","sub_path":"unsupervised_learning/0x04-autoencoders/3-variational.py","file_name":"3-variational.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"645562369","text":"#!/usr/bin/env python\n#-*-coding:utf-8-*-\n\nfrom flask import Module, flash, request, g, current_app, abort, redirect, \\\n url_for, session, 
render_template\nfrom rtiss import app\nimport rtiss.utils.db as db\nimport rtiss.utils.auth as auth\n\n@app.route(\"/message/index\")\n@auth.login_required\ndef message_index():\n page= int(request.args.get('page')) if request.args.get('page') else 1\n username=session['user']['username']\n users=db.select_all(\"SELECT username,name FROM T_User\")\n messages = db.select_all(\"SELECT T_Message.*,T_User.name FROM T_Message,T_User WHERE ( public=TRUE OR %s=ANY(at) \\\n OR author=%s ) AND T_Message.author=T_User.username ORDER BY id DESC LIMIT 10 OFFSET %s;\",\\\n (username,username,10*(page-1)))\n return render_template('message/index.html',messages=messages,users=users,page=page)\n\n@app.route(\"/message/new\", methods=(\"POST\",))\n@auth.login_required\ndef message_new():\n author=session['user']['username']\n if 'public' in request.form:\n public=True\n db.execute(\"INSERT INTO T_Message(author,content,public) VALUES(%s,%s,TRUE);\",\\\n (author,request.form['content']))\n else:\n public=False\n users = '{'+','.join(request.form.getlist('usernames'))+'}'\n db.execute(\"INSERT INTO T_Message(author,content,public,at,call) VALUES(%s,%s,False,%s,%s);\",\\\n (author,request.form['content'],users,users))\n return redirect(url_for('message_index'))\n\n@app.route(\"/message//delete\", methods=(\"POST\",))\n@auth.login_required\ndef message_delete(id):\n db.execute(\"DELETE FROM T_Message WHERE id=%s\",[id])\n return redirect(url_for('message_index'))\n\n@app.route(\"/message//read\", methods=(\"POST\",))\n@auth.login_required\ndef message_read(id):\n username=session['user']['username']\n db.execute(\"UPDATE T_Message SET call=ARRAY_REMOVE(call,%s) WHERE id=%s\",(username,id))\n return redirect(url_for('message_index'))\n\n@app.route(\"/message/manage\")\n@auth.superadmin_required\ndef message_manage():\n messages = db.select_all(\"SELECT T_Message.*,T_User.name FROM T_Message,T_User WHERE T_Message.author=T_User.username\")\n return render_template('message/manage.html',messages=messages)\n","sub_path":"rtiss/views/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"484733291","text":"import util\nimport numpy as np\nimport math\n\ndef pbc_fix(coordinate_placeholder, cellparameter):\n if abs(coordinate_placeholder) > (cellparameter/2.0):\n if coordinate_placeholder >= 0:\n new_coordinate = coordinate_placeholder - cellparameter\n else:\n new_coordinate = coordinate_placeholder + cellparameter\n else:\n new_coordinate = coordinate_placeholder\n return new_coordinate\n\ndef move(molecule, cellparameter):\n new_atom_position = []\n move_distance = np.random.random_sample(3)\n sign = np.random.random_integers(1,2)\n if sign == 2:\n move_distance = -1 * move_distance\n move_distance = cellparameter * move_distance\n for atom in range(0, len(molecule)):\n position_placeholder = []\n position_placeholder.append(molecule[atom][0])\n for coordinate in range(1, len(molecule[atom])):\n coordinate_placeholder = float(molecule[atom][coordinate]) + move_distance[coordinate-1]\n new_coordinate = pbc_fix(coordinate_placeholder, cellparameter)\n position_placeholder.append(new_coordinate)\n new_atom_position.append(position_placeholder)\n log_text = 'The molecule has undergone a random move of ' + str(move_distance[0]) + ' angstrom in the x direction, ' + str(move_distance[1]) + ' angstrom in the y direction, and ' + str(move_distance[2]) + ' angstrom in the z direction. 
\\n'\n return new_atom_position, log_text\n\ndef angle(molecule, cellparameter):\n axis_choose = np.random.random_integers(1,3)\n angle_choose = np.random.uniform(0.0, np.pi)\n sign = np.random.random_integers(1,2)\n if sign == 2:\n angle_choose = -1 * angle_choose\n new_atom_position = []\n normalised_for_head_atoms = []\n head_atoms = molecule[0][1:4]\n for temp in range(0, len(molecule)):\n normalised_for_head_atoms_build = []\n normalised_for_head_atoms_build.append(molecule[temp][0])\n normalised_for_head_x = molecule[temp][1] - head_atoms[0]\n normalised_for_head_y = molecule[temp][2] - head_atoms[1]\n normalised_for_head_z = molecule[temp][3] - head_atoms[2]\n normalised_for_head_x = pbc_fix(normalised_for_head_x, cellparameter)\n normalised_for_head_y = pbc_fix(normalised_for_head_y, cellparameter)\n normalised_for_head_z = pbc_fix(normalised_for_head_z, cellparameter)\n normalised_for_head_atoms_build.append(normalised_for_head_x)\n normalised_for_head_atoms_build.append(normalised_for_head_y)\n normalised_for_head_atoms_build.append(normalised_for_head_z)\n normalised_for_head_atoms.append(normalised_for_head_atoms_build)\n centre_of_mass = util.cen_of_mas(normalised_for_head_atoms)\n normalised_atoms = []\n for temp in range(0, len(molecule)):\n normalised_atoms_build = []\n normalised_atoms_build.append(molecule[temp][0])\n normalised_x = normalised_for_head_atoms[temp][1] - centre_of_mass[0]\n normalised_y = normalised_for_head_atoms[temp][2] - centre_of_mass[1]\n normalised_z = normalised_for_head_atoms[temp][3] - centre_of_mass[2]\n normalised_atoms_build.append(normalised_x)\n normalised_atoms_build.append(normalised_y)\n normalised_atoms_build.append(normalised_z)\n normalised_atoms.append(normalised_atoms_build)\n if axis_choose == 1:\n axis_label = 'X'\n for atom in range(0, len(normalised_atoms)):\n new_atom_position_build = []\n new_atom_position_build.append(normalised_atoms[atom][0])\n x_placeholder = (normalised_atoms[atom][1]) + centre_of_mass[0] + head_atoms[0]\n y_placeholder = (float(normalised_atoms[atom][2]) * np.cos(angle_choose) - float(normalised_atoms[atom][3]) * np.sin(angle_choose)) + centre_of_mass[1] + head_atoms[1]\n z_placeholder = (float(normalised_atoms[atom][2]) * np.sin(angle_choose) + float(normalised_atoms[atom][3]) * np.cos(angle_choose)) + centre_of_mass[2] + head_atoms[2]\n new_x = pbc_fix(x_placeholder, cellparameter)\n new_y = pbc_fix(y_placeholder, cellparameter)\n new_z = pbc_fix(z_placeholder, cellparameter)\n new_atom_position_build.append(new_x)\n new_atom_position_build.append(new_y)\n new_atom_position_build.append(new_z)\n new_atom_position.append(new_atom_position_build)\n elif axis_choose == 2:\n axis_label = 'Y'\n for atom in range(0, len(normalised_atoms)):\n new_atom_position_build = []\n new_atom_position_build.append(normalised_atoms[atom][0])\n x_placeholder = (float(normalised_atoms[atom][3]) * np.sin(angle_choose) + float(normalised_atoms[atom][1]) * np.cos(angle_choose)) + centre_of_mass[0] + head_atoms[0]\n y_placeholder = (normalised_atoms[atom][2]) + centre_of_mass[1] + head_atoms[1]\n z_placeholder = (float(normalised_atoms[atom][3]) * np.cos(angle_choose) - float(normalised_atoms[atom][1]) * np.sin(angle_choose)) + centre_of_mass[2] + head_atoms[2]\n new_x = pbc_fix(x_placeholder, cellparameter)\n new_y = pbc_fix(y_placeholder, cellparameter)\n new_z = pbc_fix(z_placeholder, cellparameter)\n new_atom_position_build.append(new_x)\n new_atom_position_build.append(new_y)\n new_atom_position_build.append(new_z)\n 
new_atom_position.append(new_atom_position_build)\n elif axis_choose == 3:\n axis_label = 'Z'\n for atom in range(0, len(normalised_atoms)):\n new_atom_position_build = []\n new_atom_position_build.append(normalised_atoms[atom][0])\n x_placeholder = (float(normalised_atoms[atom][1]) * np.cos(angle_choose) - float(normalised_atoms[atom][2]) * np.sin(angle_choose)) + centre_of_mass[0] + head_atoms[0]\n y_placeholder = (float(normalised_atoms[atom][1]) * np.sin(angle_choose) + float(normalised_atoms[atom][2]) * np.cos(angle_choose)) + centre_of_mass[1] + head_atoms[1]\n z_placeholder = (normalised_atoms[atom][3]) + centre_of_mass[2] + head_atoms[2]\n new_x = pbc_fix(x_placeholder, cellparameter)\n new_y = pbc_fix(y_placeholder, cellparameter)\n new_z = pbc_fix(z_placeholder, cellparameter)\n new_atom_position_build.append(new_x)\n new_atom_position_build.append(new_y)\n new_atom_position_build.append(new_z)\n new_atom_position.append(new_atom_position_build)\n log_text = 'The molecule has undergone a random rotation of ' + str(angle_choose) + ' radians in the ' + axis_label + ' axis. \\n'\n return new_atom_position, log_text\n\n\ndef randomise(atoms, num, cellparameter):\n new_atom_position = []\n burn_atoms = atoms\n for duplicate in range(0, int(num)):\n moved_atoms, log_text = move(burn_atoms, cellparameter)\n angle_atoms, log_text = angle(moved_atoms, cellparameter)\n new_atom_position.append(angle_atoms)\n return new_atom_position\n","sub_path":"modules/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":6993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"623005348","text":"'''\n14. Longest Common Prefix\nEasy\n\nWrite a function to find the longest common prefix string amongst an array of strings.\n\nIf there is no common prefix, return an empty string \"\".\n\nExample 1:\n\nInput: [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\nExample 2:\n\nInput: [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: There is no common prefix among the input strings.\nNote:\n\nAll given inputs are in lowercase letters a-z.\n'''\n\nstrs=[\"flower\",\"flow\",\"flight\"]\nres= ''\ntemp={}\ni=0\nwhile True:\n try:\n temp = [s[i] for s in strs]\n if len(set(temp))==1:\n res= res + temp[0]\n i+=1\n else:\n break\n except:\n break\nprint (res)\n\n\n'''\nstrs=[\"flower\",\"flow\",\"flight\"]\nresult = '' # initialize\ni = 0 # marks which character position to take\nwhile True:\n try: #\n temp = [s[i] for s in strs] # take the character at position i of every string and put them into a list\n if len(set(temp)) == 1: # if they are all the same\n result = result+temp[0] # append it to result\n i+=1 # advance the position by one\n else:\n break # otherwise break out of the loop\n except:\n break # on error, break out of the loop; e.g. for input [\"\"], s[0] inside temp = [s[i] for s in strs] raises because s is empty and has no s[0]\nprint(result)\n\n'''\n","sub_path":"2.14. Longest Common Prefix.py","file_name":"2.14. 
Longest Common Prefix.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"193341287","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('courses', '0017_auto_20150614_1342'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='category',\n name='parent_category',\n ),\n migrations.RemoveField(\n model_name='certificate',\n name='name',\n ),\n migrations.RemoveField(\n model_name='certificate',\n name='symbol',\n ),\n migrations.AddField(\n model_name='certificate',\n name='type',\n field=models.CharField(max_length=4, default='NI', choices=[('CC', 'Certificate of Completion'), ('CA', 'Certificate of Accomplishment'), ('HCC', 'Honor Code Certificate'), ('VC$', 'Verified Certificate'), ('VCA$', 'Verified Certificate of Accomplishment'), ('SA', 'Statement of Accomplishment'), ('SP$', 'Statement of Participation'), ('CM', 'Certificate of Mastery'), ('NI', 'No Information About Certificate Available'), ('NC', 'No Certificate')]),\n ),\n migrations.AddField(\n model_name='mooc',\n name='platform_key',\n field=models.CharField(blank=True, max_length=150, null=True),\n ),\n migrations.AlterField(\n model_name='mooc',\n name='certificates',\n field=models.ManyToManyField(to='courses.Certificate'),\n ),\n migrations.DeleteModel(\n name='Category',\n ),\n ]\n","sub_path":"courses/migrations/0018_auto_20150614_1452.py","file_name":"0018_auto_20150614_1452.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"260036418","text":"from pwn import *\nfrom subprocess import Popen, PIPE\n\nimport random\nimport re\n\ncontext(arch = 'i386', os = 'linux')\n\n\nr = remote(\"pwn.sunshinectf.org\", 20001)\np = Popen(['./a.out'], stdin=PIPE, stdout=PIPE, stderr=PIPE)\noutput, err = p.communicate(b\"\")\n\nfor x in output.split(b\" \"):\n r.sendline(x)\n result = r.recvuntil(b\".\")\n if b\"How\" in result:\n break\nr.interactive()","sub_path":"bePrepared/prepared.py","file_name":"prepared.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"19995340","text":"RESOURCE_COLLECTION = 'competences_types'\n\n_schema = {\n\n 'attributes': {'type': 'list'},\n 'categories': {'type': 'list'},\n 'checked_by': {'type': 'string'}, # String?\n 'children': {'type': 'list'},\n 'code': {'type': 'string'},\n 'colorcode': {'type': 'string'},\n 'id': {'type': 'integer',\n 'required': True},\n 'meta_type': {'type': 'string'},\n 'type_id': {'type': 'integer'},\n 'type_sa_id': {'type': 'integer'},\n 'description': {'type': 'string'},\n 'duration': {'type': 'string'},\n 'durations': {'type': 'list'},\n 'files': {'type': 'list'},\n 'instructors': {'type': 'list'},\n 'languages_available': {'type': 'list'},\n 'locale': {'type': 'string'},\n 'max_age': {'type': 'string'},\n 'min_age': {'type': 'string'},\n 'modified': {'type': 'string'},\n 'organisations': {'type': 'list'},\n 'pre_requisites': {'type': 'list'},\n 'short_description': {'type': 'string'},\n 'sports': {'type': 'list'},\n 'title': {'type': 'string'},\n 'valid_for': {'type': 'string'},\n 'weight': {'type': 'integer'},\n\n}\n\ndefinition = {\n 'url': 'competences/types',\n 'item_title': 'Competences Types',\n 'datasource': {'source': RESOURCE_COLLECTION,\n 
},\n 'additional_lookup': {\n 'url': 'regex(\"\\d{1,9}\")',\n 'field': 'id',\n },\n 'extra_response_fields': ['id'],\n 'versioning': False,\n 'resource_methods': ['GET', 'POST'],\n 'item_methods': ['GET', 'PATCH', 'PUT'],\n 'mongo_indexes': {'type_id': ([('id', 1)], {'background': True}),\n 'title': ([('title', 'text')], {'background': True})\n },\n 'schema': _schema\n}\n","sub_path":"domain/competences_types.py","file_name":"competences_types.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"389789954","text":"import geetest\nfrom selenium import webdriver\n\nWEBDRIVER = \"Chrome\"\n# WEBDRIVER = \"PhantomJS\"\n\nif __name__ == \"__main__\":\n if WEBDRIVER == \"PhantomJS\":\n webdriver.DesiredCapabilities.PHANTOMJS['phantomjs.page.settings.userAgent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'\n driver = webdriver.PhantomJS(\"./webdriver/PhantomJS\")\n else:\n driver = webdriver.Chrome(\"./webdriver/chromedriver\")\n\n cracker = geetest.GeetestCrack(driver)\n for _ in range(100):\n try:\n cracker.crack()\n except Exception as e:\n print(e)\n","sub_path":"industry_and_commerce.py","file_name":"industry_and_commerce.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"41128464","text":"import configparser\nimport os\nimport inspect\nimport logging\n\n__config = configparser.ConfigParser()\n\n__env = 'unknown'\n\n\ndef __is_empty(any_structure: object) -> object:\n if any_structure:\n return False\n else:\n return True\n\n\ndef load_config(environment) -> object:\n \"\"\"\n\n :param environment:\n :return:\n \"\"\"\n __logger = logging.getLogger(__name__)\n __logger.info(\"inside load_config\")\n\n try:\n cwd = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))\n # print(\"cwd: \" + cwd)\n path = cwd + \"/\" + \"config.ini\"\n\n global __config\n __config.read(path, encoding='utf-8')\n\n global __env\n __env = environment\n # print(\"env set as: \" + __env)\n except:\n import traceback\n __logger.error(\"UNABLE TO READ CONFIGURATION!!!!!!!!!!!!\")\n __logger.error(traceback.format_exc())\n\n\ndef get_config(key):\n \"\"\"\n\n :param key:\n :return:\n \"\"\"\n global __env\n global __config\n __logger = logging.getLogger(__name__)\n\n __logger.info(\"inside get_config get \" + key + \" for env \" + __env)\n\n config_value = \"unknown\"\n env = __env\n\n if not __is_empty(__config):\n if __config.has_section(env):\n if key in __config[env]:\n config_value = __config[env][key]\n\n __logger.info(\"config_value: \")\n __logger.info(config_value)\n return config_value\n","sub_path":"common/configure/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"519120543","text":"import torch\nimport torch.nn as nn\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport numpy as np\nimport os\nimport argparse\nimport time\nfrom PIL import Image\nfrom tensorboardX import SummaryWriter\nfrom unet import Unet\nimport pickle\nimport evaluate\n\n# parsing the arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('optFlow', help='Path to the video testing folder')\nparser.add_argument('dataset', help='Name of 
dataset')\n\nargs = parser.parse_args()\n\ndataset_dir = args.optFlow\ndataset = str(args.dataset)\n\n\nfrom tensorboardX import SummaryWriter\n\n\ndef readFlow(name):\n if name.endswith('.pfm') or name.endswith('.PFM'):\n return readPFM(name)[0][:, :, 0:2]\n\n f = open(name, 'rb')\n\n header = f.read(4)\n if header.decode(\"utf-8\") != 'PIEH':\n raise Exception('Flow file header does not contain PIEH')\n\n width = np.fromfile(f, np.int32, 1).squeeze()\n height = np.fromfile(f, np.int32, 1).squeeze()\n flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2 ))\n\n return flow.astype(np.float32)\n\n\ndef loadFlow(name):\n Flow = readFlow(name)\n Flow = torch.from_numpy(Flow)\n Flow = Flow.permute(2,0,1)\n\n optVar = torch.autograd.Variable(Flow.cuda())\n return optVar\n\ndef normalizedData(inTensor):\n inTensor[inTensor > 20.0 ] = 20.0\n inTensor[ inTensor < -20.0] = -20.0\n inTensor = torch.div(inTensor, 20.0)\n return inTensor\n\n\n\nunet = Unet()\nunet = unet.cuda()\nMSE = torch.nn.MSELoss()\noptimizer = torch.optim.SGD(unet.parameters(), 0.0001)\nepochs = 60\n\nwriter = SummaryWriter('logs/'+ str(args.dataset) +'_trained_manual_unsq_sgd')\n\nvideo_names = sorted(os.listdir(dataset_dir + 'optical_flow/'))\n\nnumHis = 4\nk = 0\nlr = 0.0001\nfor epoch in range(0,epochs):\n\n for vid in range(len(video_names)):\n\n opts = sorted(os.listdir(str(dataset_dir) + 'optical_flow/' + video_names[vid] + '/'))\n\n for i in range(numHis, len(opts)):\n k += 1\n opt_tensor1 = loadFlow(dataset_dir + 'optical_flow/' + video_names[vid] + '/' + opts[i-4])\n opt_tensor2 = loadFlow(dataset_dir + 'optical_flow/' + video_names[vid] + '/' + opts[i-3])\n opt_tensor3 = loadFlow(dataset_dir + 'optical_flow/' + video_names[vid] + '/' + opts[i-2])\n opt_tensor4 = loadFlow(dataset_dir + 'optical_flow/' + video_names[vid] + '/' + opts[i-1])\n\n opt_target = loadFlow(dataset_dir + 'optical_flow/' + video_names[vid] + '/' + opts[i])\n opt_target = torch.unsqueeze(opt_target,0)\n opt_target = normalizedData(opt_target)\n opt_target = torch.autograd.Variable(opt_target) # torch.from_numpy(nextFlow)\n opt_target = opt_target.cuda()\n\n mergedTensor1 = torch.cat((opt_tensor1, opt_tensor2), 0)\n mergedTensor2 = torch.cat((opt_tensor3, opt_tensor4), 0)\n\n inputFlow = torch.cat((mergedTensor1, mergedTensor2), 0)\n inputFlow = torch.unsqueeze(inputFlow, 0)\n inputFlow = normalizedData(inputFlow)\n inputFlow = torch.autograd.Variable(inputFlow) # torch.from_numpy(prvFlow)\n inputFlow = inputFlow.cuda()\n\n\n prdctFlow = unet(inputFlow)\n\n loss = MSE(prdctFlow, opt_target)\n\n print('Dataset: {} Loss: {} Epoch: {} Iteration: {} Remaning Epoch: {} Learning Rate: {} '.format(args.dataset, loss.item(), epoch + 1, k, epochs - 1, lr))\n writer.add_scalar('train_loss_' + str(args.dataset), loss.item(), k)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if epoch % 3 == 0:\n torch.save(unet, 'checkpoints/'+ str(args.dataset) + '/trained_manual/NET_batch_'+ '_epoch_' + str(epoch) + '_' +str(lr) + 'MSE_trainedManual_SGD'+ '.pt')\n\n\n\n\n\n\n\n","sub_path":"code/unet/trmn.py","file_name":"trmn.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"637027833","text":"import numpy as np\nfrom PySide import QtGui, QtCore\nimport sharppy.sharptab as tab\nfrom sharppy.sharptab.constants import *\nimport datetime\n\n## routine written by Kelton Halbert\n## keltonhalbert@ou.edu\n\n__all__ = ['backgroundText', 
'plotText']\n\nclass backgroundText(QtGui.QFrame):\n '''\n Handles drawing the background frame onto a QPixmap.\n Inherits a QtGui.QFrame Object.\n '''\n def __init__(self):\n super(backgroundText, self).__init__()\n self.initUI()\n\n def initUI(self):\n '''\n Initializes frame variables such as padding,\n width, height, etc, as well as the QPixmap\n that contains the frame drawing.\n '''\n ## set the frame stylesheet\n self.setStyleSheet(\"QFrame {\"\n \" background-color: rgb(0, 0, 0);\"\n \" border-width: 1px;\"\n \" border-style: solid;\"\n \" border-color: #3399CC;}\")\n ## set the frame padding\n ## set the height/width variables\n self.lpad = 0; self.rpad = 0\n self.tpad = 5; self.bpad = 0\n self.wid = self.size().width()\n self.hgt = self.size().height()\n self.tlx = self.rpad; self.tly = self.tpad\n self.brx = self.wid; self.bry = self.hgt\n ## do a DPI check to make sure\n ## the text is sized properly!\n fsize = np.floor(.06 * self.hgt)\n self.tpad = np.floor(.03 * self.hgt)\n ## set the font, get the metrics and height of the font\n self.label_font = QtGui.QFont('Helvetica')\n self.label_font.setPixelSize(fsize)\n self.label_metrics = QtGui.QFontMetrics( self.label_font )\n self.label_height = self.label_metrics.xHeight() + self.tpad\n ## the self.ylast variable is used as a running sum for\n ## text placement.\n self.ylast = self.label_height\n ## initialize the QPixmap that will be drawn on.\n self.plotBitMap = QtGui.QPixmap(self.width()-2, self.height()-2)\n self.plotBitMap.fill(QtCore.Qt.black)\n ## plot the background frame\n self.plotBackground()\n \n def draw_frame(self, qp):\n '''\n Draws the background frame and the text headers for indices.\n '''\n ## initialize a white pen with thickness 1 and a solid line\n pen = QtGui.QPen(QtCore.Qt.white, 1, QtCore.Qt.SolidLine)\n qp.setPen(pen)\n qp.setFont(self.label_font)\n ## set the horizontal grid to be the width of the frame\n ## divided into 8 spaces\n x1 = self.brx / 8\n y1 = 1\n ## draw the header and the indices using a loop.\n ## This loop is a 'horizontal' loop that will plot\n ## the text for a row, keeping the vertical placement constant.\n count = 0\n titles = ['PCL', 'CAPE', 'CINH', 'LCL', 'LI', 'LFC', 'EL']\n for title in titles:\n rect = QtCore.QRect(x1*count, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, title)\n count += 1\n qp.drawLine(0, self.label_height, self.brx, self.label_height)\n \n def resizeEvent(self, e):\n '''\n Handles when the window gets resized.\n '''\n self.initUI()\n\n def plotBackground(self):\n '''\n Handles drawing the text background onto\n the QPixmap.\n '''\n ## initialize a QPainter objext\n qp = QtGui.QPainter()\n qp.begin(self.plotBitMap)\n ## draw the frame\n self.draw_frame(qp)\n qp.end()\n\n\nclass plotText(backgroundText):\n '''\n Handles plotting the indices in the frame.\n Inherits a backgroundText Object that contains\n a QPixmap with the frame drawn on it. All drawing\n gets done on this QPixmap, and then the QPixmap\n gets rendered by the paintEvent function.\n '''\n def __init__(self, prof):\n '''\n Initialize the data from a Profile object passed to \n this class. 
It then takes the data it needs from the\n Profile object and converts them into strings that\n can be used to draw the text in the frame.\n \n Parameters\n ----------\n prof: a Profile Object\n \n '''\n ## get the surface based, most unstable, and mixed layer\n ## parcels to use for indices, as well as the sounding\n ## profile itself.\n self.sfcparcel = prof.sfcpcl\n self.mlparcel = prof.mlpcl\n self.fcstpcl = prof.fcstpcl\n self.muparcel = prof.mupcl\n self.prof = prof;\n \n \n ## either get or calculate the indices, round to the nearest int, and\n ## convert them to strings.\n ## K Index\n self.k_idx = tab.utils.INT2STR( prof.k_idx )\n ## precipitable water\n self.pwat = tab.utils.FLOAT2STR( prof.pwat, 2 )\n ## 0-3km agl lapse rate\n self.lapserate_3km = tab.utils.FLOAT2STR( prof.lapserate_3km, 1 )\n ## 3-6km agl lapse rate\n self.lapserate_3_6km = tab.utils.FLOAT2STR( prof.lapserate_3_6km, 1 )\n ## 850-500mb lapse rate\n self.lapserate_850_500 = tab.utils.FLOAT2STR( prof.lapserate_850_500, 1 )\n ## 700-500mb lapse rate\n self.lapserate_700_500 = tab.utils.FLOAT2STR( prof.lapserate_700_500, 1 )\n ## convective temperature\n self.convT = tab.utils.INT2STR( prof.convT )\n ## sounding forecast surface temperature\n self.maxT = tab.utils.INT2STR( prof.maxT )\n #fzl = str(int(self.sfcparcel.hght0c))\n ## 100mb mean mixing ratio\n self.mean_mixr = tab.utils.FLOAT2STR( prof.mean_mixr, 1 )\n ## 150mb mean rh\n self.low_rh = tab.utils.INT2STR( prof.low_rh )\n self.mid_rh = tab.utils.INT2STR( prof.mid_rh )\n ## calculate the totals totals index\n self.totals_totals = tab.utils.INT2STR( prof.totals_totals )\n self.dcape = tab.utils.INT2STR( prof.dcape )\n self.drush = tab.utils.INT2STR( prof.drush )\n self.sigsevere = tab.utils.INT2STR( prof.sig_severe )\n self.mmp = tab.utils.FLOAT2STR( prof.mmp, 2 )\n self.esp = tab.utils.FLOAT2STR( prof.esp, 1 )\n self.wndg = tab.utils.FLOAT2STR( prof.wndg, 1 )\n self.tei = tab.utils.INT2STR( prof.tei )\n \n super(plotText, self).__init__()\n\n def resizeEvent(self, e):\n '''\n Handles when the window is resized.\n \n Parameters\n ----------\n e: an Event Object\n '''\n super(plotText, self).resizeEvent(e)\n self.plotData()\n \n def paintEvent(self, e):\n '''\n Handles when the window gets painted.\n This renders the QPixmap that the backgroundText\n Object contains. For the actual drawing of the data,\n see the plotData function.\n \n Parameters\n ----------\n e: an Event Object\n \n '''\n super(plotText, self).paintEvent(e)\n qp = QtGui.QPainter()\n qp.begin(self)\n qp.drawPixmap(1, 1, self.plotBitMap)\n qp.end()\n\n def plotData(self):\n '''\n Handles the drawing of the text onto the QPixmap.\n This is where the actual data gets plotted/drawn.\n '''\n ## initialize a QPainter object\n qp = QtGui.QPainter()\n qp.begin(self.plotBitMap)\n ## draw the indices\n self.drawConvectiveIndices(qp)\n self.drawIndices(qp)\n self.drawSevere(qp)\n qp.end()\n \n def drawSevere(self, qp):\n '''\n This handles the severe indices, such as STP, sig hail, etc.\n \n Parameters\n ----------\n qp: QtGui.QPainter object\n \n '''\n ## initialize a pen to draw with.\n pen = QtGui.QPen(QtCore.Qt.yellow, 1, QtCore.Qt.SolidLine)\n qp.setFont(self.label_font)\n color_list = [QtGui.QColor(CYAN), QtGui.QColor(DBROWN), QtGui.QColor(LBROWN), QtGui.QColor(WHITE), QtGui.QColor(YELLOW), QtGui.QColor(RED), QtGui.QColor(MAGENTA)]\n ## needs to be coded.\n x1 = self.brx / 10\n y1 = self.ylast + self.tpad\n ship = tab.utils.FLOAT2STR( self.prof.ship, 1 )\n stp_fixed = tab.utils.FLOAT2STR( self.prof.stp_fixed, 1 )\n stp_cin = tab.utils.FLOAT2STR( self.prof.stp_cin, 1 )\n right_scp = tab.utils.FLOAT2STR( self.prof.right_scp, 1 )\n \n labels = ['Supercell = ', 'STP (cin) = ', 'STP (fix) = ', 'SHIP = ']\n indices = [right_scp, stp_cin, stp_fixed, ship]\n for label, index in zip(labels,indices):\n rect = QtCore.QRect(x1*7, y1, x1*8, self.label_height)\n if label == labels[0]: # STP uses a different color scale\n if float(index) >= 19.95:\n pen = QtGui.QPen(color_list[6], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 11.95:\n pen = QtGui.QPen(color_list[5], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 1.95:\n pen = QtGui.QPen(color_list[3], 1, QtCore.Qt.SolidLine)\n elif float(index) >= .45:\n pen = QtGui.QPen(color_list[2], 1, QtCore.Qt.SolidLine)\n elif float(index) >= -.45:\n pen = QtGui.QPen(color_list[1], 1, QtCore.Qt.SolidLine)\n elif float(index) < -.45:\n pen = QtGui.QPen(color_list[0], 1, QtCore.Qt.SolidLine)\n elif label == labels[1]: # STP effective\n if float(index) >= 8:\n pen = QtGui.QPen(color_list[6], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 4:\n pen = QtGui.QPen(color_list[5], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 2:\n pen = QtGui.QPen(color_list[4], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 1:\n pen = QtGui.QPen(color_list[3], 1, QtCore.Qt.SolidLine)\n elif float(index) >= .5:\n pen = QtGui.QPen(color_list[2], 1, QtCore.Qt.SolidLine)\n elif float(index) < .5:\n pen = QtGui.QPen(color_list[1], 1, QtCore.Qt.SolidLine)\n elif label == labels[2]: # STP fixed\n if float(index) >= 7:\n pen = QtGui.QPen(color_list[6], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 5:\n pen = QtGui.QPen(color_list[5], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 2:\n pen = QtGui.QPen(color_list[4], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 1:\n pen = QtGui.QPen(color_list[3], 1, QtCore.Qt.SolidLine)\n elif float(index) >= .5:\n pen = QtGui.QPen(color_list[2], 1, QtCore.Qt.SolidLine)\n else:\n pen = QtGui.QPen(color_list[1], 1, QtCore.Qt.SolidLine)\n elif label == labels[3]: # SHIP\n if float(index) >= 5:\n pen = QtGui.QPen(color_list[6], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 2:\n pen = QtGui.QPen(color_list[5], 1, QtCore.Qt.SolidLine)\n elif float(index) >= 1:\n pen = QtGui.QPen(color_list[4], 1, QtCore.Qt.SolidLine)\n elif float(index) >= .5:\n pen = 
QtGui.QPen(color_list[3], 1, QtCore.Qt.SolidLine)\n else:\n pen = QtGui.QPen(color_list[1], 1, QtCore.Qt.SolidLine)\n qp.setPen(pen)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, label + index)\n y1 += (self.label_height)\n \n def drawIndices(self, qp):\n '''\n Draws the non-parcel indices.\n \n Parameters\n ----------\n qp: QtGui.QPainter object\n \n '''\n qp.setFont(self.label_font)\n ## make the initial x point relative to the width of the frame.\n x1 = self.brx / 10\n rpad = 5\n tpad = 5\n\n ## Now we have all the data we could ever want. Time to start drawing\n ## them on the frame.\n ## This starts with the left column.\n \n if self.prof.pwv_flag == -3:\n color = QtGui.QColor('#FF7F00')\n elif self.prof.pwv_flag == -2:\n color = QtGui.QColor('#EE9A00')\n elif self.prof.pwv_flag == -1:\n color = QtGui.QColor('#FFDAB9')\n elif self.prof.pwv_flag == 0:\n color = QtGui.QColor('#FFFFFF')\n elif self.prof.pwv_flag == 1:\n color = QtGui.QColor('#98FB98')\n elif self.prof.pwv_flag == 2:\n color = QtGui.QColor('#66CD00')\n else:\n color = QtGui.QColor('#00FF00')\n \n ## draw the first column of text using a loop, keeping the horizontal\n ## placement constant.\n y1 = self.ylast + self.tpad\n colors = [color, QtGui.QColor(WHITE), QtGui.QColor(WHITE), QtGui.QColor(WHITE), QtGui.QColor(WHITE), QtGui.QColor(WHITE)]\n texts = ['PW = ', 'MeanW = ', 'LowRH = ', 'MidRH = ', 'DCAPE = ', 'DownT = ']\n indices = [self.pwat + 'in', self.mean_mixr + 'g/kg', self.low_rh + '%', self.mid_rh + '%', self.dcape, self.drush + 'F']\n for text, index, c in zip(texts, indices, colors):\n rect = QtCore.QRect(rpad, y1, x1*4, self.label_height)\n pen = QtGui.QPen(c, 1, QtCore.Qt.SolidLine)\n qp.setPen(pen)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, text + index)\n y1 += (self.label_height)\n\n ## middle-left column\n y1 = self.ylast + self.tpad\n texts = ['K = ', 'TT = ', 'ConvT = ', 'maxT = ', 'ESP = ', 'MMP = ']\n indices = [self.k_idx, self.totals_totals, self.convT + 'F', self.maxT + 'F', self.esp, self.mmp]\n for text, index in zip(texts, indices):\n rect = QtCore.QRect(x1*3.5, y1, x1*4, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, text + index)\n y1 += (self.label_height)\n\n ## middle-right column\n y1 = self.ylast + self.tpad\n texts = ['WNDG = ', 'TEI = ', '', '', '', 'SigSvr = ']\n indices = [self.wndg, self.tei, '', '', '', self.sigsevere + ' m3/s3']\n for text, index in zip(texts, indices):\n rect = QtCore.QRect(x1*6, y1, x1*4, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, text + index)\n y1 += (self.label_height)\n self.ylast = y1\n qp.drawLine(0, y1+2, self.brx, y1+2)\n qp.drawLine(x1*7-5, y1+2, x1*7-5, self.bry )\n \n ## lapserate window\n y1 = self.ylast + self.tpad\n texts = ['Sfc-3km AGL LR = ', '3-6km AGL LR = ', '850-500mb LR = ', '700-500mb LR = ']\n indices = [self.lapserate_3km + ' C/km', self.lapserate_3_6km + ' C/km', self.lapserate_850_500 + ' C/km', self.lapserate_700_500 + ' C/km']\n for text, index in zip(texts, indices):\n rect = QtCore.QRect(rpad, y1, x1*8, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft, text + index)\n y1 += (self.label_height)\n\n\n def drawConvectiveIndices(self, qp):\n '''\n This handles the drawing of the parcel indices.\n \n Parameters\n ----------\n qp: QtGui.QPainter object\n \n '''\n ## initialize a white pen with thickness 1 and a solid line\n pen = QtGui.QPen(QtCore.Qt.white, 1, QtCore.Qt.SolidLine)\n qp.setPen(pen)\n qp.setFont(self.label_font)\n ## make the initial x pixel coordinate relative to the frame\n ## width.\n x1 = self.brx / 8\n y1 = self.ylast + self.tpad\n ## get the indices rounded to the nearest int, convert to strings\n ## Start with the surface based parcel.\n sfc_bplus = tab.utils.INT2STR( self.sfcparcel.bplus )\n sfc_bminus = tab.utils.INT2STR( self.sfcparcel.bminus )\n sfc_lclhght = tab.utils.INT2STR( self.sfcparcel.lclhght )\n sfc_limax = tab.utils.INT2STR( self.sfcparcel.li5 )\n sfc_lfchght = tab.utils.INT2STR( self.sfcparcel.lfchght )\n sfc_elhght = tab.utils.INT2STR( self.sfcparcel.elhght )\n ## get the forecast surface parcel\n fcst_bplus = tab.utils.INT2STR( self.fcstpcl.bplus )\n fcst_bminus = tab.utils.INT2STR( self.fcstpcl.bminus )\n fcst_lclhght = tab.utils.INT2STR( self.fcstpcl.lclhght )\n fcst_limax = tab.utils.INT2STR( self.fcstpcl.li5 )\n fcst_lfchght = tab.utils.INT2STR( self.fcstpcl.lfchght )\n fcst_elhght = tab.utils.INT2STR( self.fcstpcl.elhght )\n ## Now get the mixed layer parcel indices\n ml_bplus = tab.utils.INT2STR( self.mlparcel.bplus )\n ml_bminus = tab.utils.INT2STR( self.mlparcel.bminus )\n ml_lclhght = tab.utils.INT2STR( self.mlparcel.lclhght )\n ml_limax = tab.utils.INT2STR( self.mlparcel.li5 )\n ## check and see if the lfc is there\n ml_lfchght = tab.utils.INT2STR( self.mlparcel.lfchght )\n ml_elhght = tab.utils.INT2STR( self.mlparcel.elhght )\n ## get the most unstable parcel indices\n mu_bplus = tab.utils.INT2STR( self.muparcel.bplus )\n mu_bminus = tab.utils.INT2STR( self.muparcel.bminus )\n mu_lclhght = tab.utils.INT2STR( self.muparcel.lclhght )\n mu_limax = tab.utils.INT2STR( self.muparcel.li5 )\n ## make sure the lfc is there\n mu_lfchght = tab.utils.INT2STR( self.muparcel.lfchght )\n mu_elhght = tab.utils.INT2STR( self.muparcel.elhght )\n\n ## Now that we have all the data, time to plot the text in their\n ## respective columns.\n \n ## PCL type\n texts = ['SFC', 'FCST', 'ML', 'MU']\n for text in texts:\n rect = QtCore.QRect(0, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n ## CAPE\n y1 = self.ylast + self.tpad\n texts = [sfc_bplus, fcst_bplus, ml_bplus, mu_bplus]\n for text in texts:\n rect = QtCore.QRect(x1*1, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n ## CINH\n y1 = self.ylast + self.tpad\n texts = [sfc_bminus, fcst_bminus, ml_bminus, mu_bminus]\n for text in texts:\n rect = QtCore.QRect(x1*2, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n ## LCL\n y1 = self.ylast + self.tpad\n texts = [sfc_lclhght, fcst_lclhght, ml_lclhght, mu_lclhght]\n for text in texts:\n rect = QtCore.QRect(x1*3, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n ## LI\n y1 = self.ylast + self.tpad\n texts = [sfc_limax, fcst_limax, ml_limax, mu_limax]\n for text in texts:\n rect = QtCore.QRect(x1*4, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n ## LFC\n y1 = self.ylast + self.tpad\n texts = [sfc_lfchght, fcst_lfchght, ml_lfchght, mu_lfchght]\n for text in texts:\n rect = QtCore.QRect(x1*5, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n ## EL\n y1 = 
self.ylast + self.tpad\n texts = [sfc_elhght, fcst_elhght, ml_elhght, mu_elhght]\n for text in texts:\n rect = QtCore.QRect(x1*6, y1, x1*2, self.label_height)\n qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, text)\n y1 += (self.label_height)\n self.ylast = y1\n qp.drawLine(0, y1+2, self.brx, y1+2)\n\n\n\n","sub_path":"sharppy/viz/thermo.py","file_name":"thermo.py","file_ext":"py","file_size_in_byte":19945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"169334766","text":"from .opensourceprojects import OpenSourceProjects\nfrom .blogposts import BlogPosts\nfrom .posttweet import PostTweet\nfrom .postlinkedin import PostLinkedIn\n\n\nclass ReviveSocialMedia:\n\n _OSS_MESSAGE = 'OSS Project: {name} is {description}. Check it out! {url} #reviveposts'\n _BLOG_MESSAGE = 'Blog Post: {name}. Check it out! {url} #reviveposts'\n\n def blog(self):\n random_blog = BlogPosts().get()\n try:\n message = self._BLOG_MESSAGE.format(\n name=random_blog['title'],\n url=random_blog['link']\n )\n PostTweet().post(message)\n PostLinkedIn().post(\n message,\n random_blog['title'],\n random_blog['link']\n )\n except:\n self.blog()\n\n def oss(self):\n random_project = OpenSourceProjects().get()\n try:\n tweet = self._OSS_MESSAGE.format(name=random_project['name'], description=random_project['description'], url=random_project['url'])\n if 'documentation' in random_project:\n tweet = tweet + ' Docs: {}'.format(random_project['documentation'])\n if 'repository' in random_project:\n tweet = tweet + ' Repo: {}'.format(random_project['repository'])\n if 'type' in random_project:\n tweet = tweet + ' #{}'.format(random_project['type'])\n PostTweet().post(tweet)\n PostLinkedIn().post(\n tweet, \n random_project['name'],\n random_project['url']\n )\n except:\n self.oss()\n","sub_path":"revivesocialmedia/revivesocialmedia.py","file_name":"revivesocialmedia.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"566352502","text":"class Armor:\n\n def __init__(self,name:str,size=0,weight=0,power=0,\n stamina=0,health=0,defense=0):\n self.name = name\n self.size = size\n self.weight = weight\n self.power = power\n self.stamina = stamina\n self.health = health\n self.defense = defense\n self.__armor_type = None\n\n\n def armor_type(self,type):\n if type == 'rare':\n self.power *= 1.1\n elif type == 'legendary':\n self.power *= 1.7\n self.__armor_type = type\n\n\n def __repr__(self):\n return self.name\n","sub_path":"armor.py","file_name":"armor.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"383061552","text":"import sys\nimport timeit\nimport pprint\n\nsys.stdin = open('배열의_분할', 'r')\n\nstart_time = timeit.default_timer()\n\n\ndef find_state():\n global N_size, S_number, index, state\n state = 0\n while index + 1 < N_size:\n diff = S_number[index + 1] - S_number[index]\n if diff > 0:\n state = 1\n index += 1\n break\n elif diff < 0:\n state = -1\n index += 1\n break\n else:\n index += 1\n\n\ndef find_index():\n global answer, N_size, S_number, index, state\n if state > 0:\n while index + 1 < N_size:\n diff = S_number[index + 1] - S_number[index]\n if diff >= 0:\n index += 1\n else:\n answer += 1\n index += 1\n break\n elif state < 0:\n while index + 1 < N_size:\n diff = S_number[index + 1] - S_number[index]\n if diff <= 0:\n index += 1\n else:\n answer += 1\n 
index += 1\n break\n else:\n index += 1\n\n\nfor testCase in range(int(input())):\n answer = 1\n N_size = int(input())\n S_number = list(map(int, input().split()))\n index, state = 0, 0\n while index < N_size:\n find_state()\n find_index()\n print(\"#{} {}\".format(testCase + 1, answer))\n\nend_time = timeit.default_timer()\n\nprint('running time: {}'.format(end_time - start_time))\n\n# 1 1\n# 2 2\n# 3 5\n","sub_path":"SWEA/____완료____/Code/D5/배열의_분할/배열의_분할.py","file_name":"배열의_분할.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"186126343","text":"from django.shortcuts import render,redirect\nfrom django.views.generic.base import View\nfrom django.http import JsonResponse, HttpResponse\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic import ListView\nfrom .models import Movie,Category,Actor,Ganre,Rating\nfrom .forms import RewiewForm\nfrom django.db.models import Q\nfrom .RatingForm import ReviewForm,RatingForm\n\n\nclass GanreYear():\n def get_genres(self):\n return Ganre.objects.all()\n \n def get_years(self):\n return Movie.objects.filter(draft=False).values(\"year\")\n\n\nclass MoviesView(GanreYear,ListView):\n model=Movie\n queryset=Movie.objects.filter(draft=False)\n paginate_by = 3\n\n\nclass MovieDetailView(GanreYear,DetailView):\n model=Movie\n slug_field=\"url\"\n\n def get_context_data(self,**kwargs):\n contex=super().get_context_data(**kwargs)\n contex[\"star_form\"]=RatingForm()\n return contex\n\nclass AddReview(View):\n def post(self,request,pk):\n form=RewiewForm(request.POST)\n movie=Movie.objects.get(id=pk)\n if form.is_valid():\n form=form.save(commit=False)\n if request.POST.get(\"parent\",None):\n form.parent_id=int(request.POST.get(\"parent\"))\n form.movie=movie\n # print(pk,type(pk))\n # print(form.name)\n form.save()\n return redirect(movie.absoluteUrl())\n\n\nclass ActorView(GanreYear,DetailView):\n model=Actor\n template_name='movies/actor.html'\n slug_field=\"name\"\n\nclass FilterMoviesView(GanreYear,ListView):\n paginate_by=2\n\n def get_queryset(self):\n queryset=Movie.objects.filter(\n Q(year__in=self.request.GET.getlist('year')) |\n Q(genres__in=self.request.GET.getlist('genre'))\n ).distinct()\n return queryset\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context[\"year\"] = ''.join([f\"year={x}&\" for x in self.request.GET.getlist(\"year\")])\n context[\"genre\"] = ''.join([f\"genre={x}&\" for x in self.request.GET.getlist(\"genre\")])\n return context\n\n# class JsonFilterMoviesView(ListView):\n# def get_queryset(self):\n# queryset = Movie.objects.filter(\n# Q(year__in=self.request.GET.getlist(\"year\")) |\n# Q(genres__in=self.request.GET.getlist(\"genre\"))\n# ).distinct().values(\"title\", \"tagline\", \"url\", \"poster\")\n# return queryset\n\n# def get(self, request, *args, **kwargs):\n# queryset = list(self.get_queryset())\n# return JsonResponse({\"movies\": queryset}, safe=False)\n\nclass AddStarRating(View):\n \"\"\"Add a rating to a movie\"\"\"\n\n def get_client_ip(self, request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip\n\n def post(self, request):\n form = RatingForm(request.POST)\n if form.is_valid():\n Rating.objects.update_or_create(\n ip=self.get_client_ip(request),\n movie_id=int(request.POST.get(\"movie\")),\n 
defaults={'star_id': int(request.POST.get(\"star\"))}\n )\n return HttpResponse(status=201)\n else:\n return HttpResponse(status=400)\n\n\nclass Search(ListView):\n \"\"\"Search for movies\"\"\"\n paginate_by = 3\n\n def get_queryset(self):\n return Movie.objects.filter(title__icontains=self.request.GET.get(\"q\"))\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context[\"q\"] = f'q={self.request.GET.get(\"q\")}&'\n return context","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"526702412","text":"import urllib.parse as parse\nimport os.path as path\nfrom bs4 import BeautifulSoup\nimport requests\n\ndef getFileName(url) :\n p = parse.urlparse(url).path\n return path.basename(p)\n\ndef getHostname(url, withProtocol = False):\n p = parse.urlparse(url)\n if withProtocol:\n return \"{}://{}\".format(p.scheme, p.hostname)\n else:\n return p.hostname\n\ndef get_true_url(url):\n # import urllib.parse as parse\n # import os.path as path\n\n # print(\">>>>>>>>>>>>>>>>>>>\", parse.urlparse(url).hostname)\n res = requests.get(url)\n soup = BeautifulSoup(res.text, 'html.parser')\n\n sel = \"iframe#mainFrame\"\n iframe = soup.select(sel)\n\n print(iframe, len(iframe))\n print(\"---------------------------\")\n host = getHostname(url)\n uri = iframe[0].get(\"src\")\n print(\"origin url : \", host + uri)\n\n origin_url = urljoin(getHostname(url, True), uri)\n print(origin_url)\n\n return host + uri\n\ndef urljoin(url, path):\n return parse.urljoin(url, path)\n \ndef get_iframe_src(url):\n\n html = requests.get(url).text\n soup = BeautifulSoup(html, 'html.parser')\n\n selector = \"iframe[src]\"\n sss = soup.select_one(selector)\n path = sss.get(\"src\") \n host = getHostname(url)\n\n origin_url = \"https://\" + host + \"/\" + path\n print(origin_url)\n return origin_url\n\n# url = \"https://blog.naver.com/baekmg1988/221405485574\"\n# origin_url = get_true_url(url)\n\n\n# if __name__ == '__main__':\n\n# print(getFileName(url))\n# print(getHostname(url))\n# print(getHostname(url, true))","sub_path":"scraping/scraping_url_utils.py","file_name":"scraping_url_utils.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"506477016","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# \n# XPath selector\n#\n# based on Scrapy selectors https://github.com/scrapy/scrapy/tree/master/scrapy/selector\n#\n\nimport re\nfrom lxml import etree\nimport six\n\ndef flatten(x):\n \"\"\"flatten(sequence) -> list\n\n Returns a single, flat list which contains all elements retrieved\n from the sequence and all recursively contained sub-sequences\n (iterables).\n\n Examples:\n >>> flatten([1, 2, [3,4], (5,6)])\n [1, 2, 3, 4, 5, 6]\n >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])\n [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]\"\"\"\n\n result = []\n for el in x:\n if hasattr(el, \"__iter__\"):\n result.extend(flatten(el))\n else:\n result.append(el)\n return result\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group 
the entire regex matching is returned\n \"\"\"\n\n if isinstance(regex, basestring):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n return strings\n\nclass XPathSelectorList(list):\n\n def __getslice__(self, i, j):\n return self.__class__(list.__getslice__(self, i, j))\n\n def select(self, xpath):\n return self.__class__(flatten([x.select(xpath) for x in self]))\n\n def re(self, regex):\n return flatten([x.re(regex) for x in self])\n\n def extract(self):\n return [x.extract() for x in self]\n\n def extract_unquoted(self):\n return [x.extract_unquoted() for x in self]\n\nclass XPathSelector(object):\n \n def __init__(self, html_content=None, base_url='', _root=None, _expr=None, namespaces=None):\n ''' init\n '''\n self.namespaces = namespaces\n parser = self._parser(recover=True, encoding='utf-8')\n if html_content is not None:\n _root = etree.fromstring(html_content, parser=parser, base_url=base_url)\n self._root = _root\n self._expr = _expr\n\n def select(self, xpath):\n '''returns a list of new selectors.\n '''\n try:\n xpathev = self._root.xpath\n except AttributeError:\n return XPathSelectorList([])\n\n try:\n result = xpathev(xpath, namespaces=self.namespaces)\n except etree.XPathError:\n raise ValueError(\"Invalid XPath: %s\" % xpath)\n\n if type(result) is not list:\n result = [result]\n\n result = [self.__class__(_root=x, _expr=xpath, namespaces=self.namespaces)\n for x in result]\n return XPathSelectorList(result)\n\n def re(self, regex):\n return extract_regex(regex, self.extract())\n \n def extract(self):\n try:\n return etree.tostring(self._root, method=self._tostring_method, encoding=six.u, with_tail=False)\n except (AttributeError, TypeError):\n if self._root is True:\n return u'1'\n elif self._root is False:\n return u'0'\n else:\n return self._root\n\n def register_namespace(self, prefix, uri):\n if self.namespaces is None:\n self.namespaces = {}\n self.namespaces[prefix] = uri\n\n def __str__(self):\n data = repr(self.extract()[:40])\n return \"<%s xpath=%r data=%s>\" % (type(self).__name__, self._expr, data)\n\n __repr__ = __str__\n\nclass XmlXPathSelector(XPathSelector):\n __slots__ = ()\n _parser = etree.XMLParser\n _tostring_method = 'xml'\n\nclass HtmlXPathSelector(XPathSelector):\n __slots__ = ()\n _parser = etree.HTMLParser\n _tostring_method = 'html'\n\n\nif __name__ == '__main__':\n def tests_html():\n ''' HTML tests '''\n \n html_content = '''\n \n \n \n Example website\n \n \n \n \n \n '''\n hxs = HtmlXPathSelector(html_content) \n assert hxs.select('//title/text()').extract() == [u'Example website']\n assert hxs.select('//base/@href').extract() == [u'http://example.com/']\n assert hxs.select('//div/@id').extract() == [u'images']\n assert hxs.select('//a[@href=\"image2.html\"]/img/@src').extract() == [u'image2_thumb.jpg']\n result = [u'image1_thumb.jpg', u'image2_thumb.jpg', u'image3_thumb.jpg', u'image4_thumb.jpg', u'image5_thumb.jpg']\n assert hxs.select('//a').select('img/@src').extract() == result\n \n links = hxs.select('//a[contains(@href, \"image\")]')\n assert links.extract() == [\n u'Name: My image 1
',\n u'Name: My image 2 ',\n u'Name: My image 3 ',\n u'Name: My image 4 ',\n u'Name: My image 5 
',\n ]\n\n results = [\n ([u'image1.html'], [u'image1_thumb.jpg']),\n ([u'image2.html'], [u'image2_thumb.jpg']),\n ([u'image3.html'], [u'image3_thumb.jpg']),\n ([u'image4.html'], [u'image4_thumb.jpg']),\n ([u'image5.html'], [u'image5_thumb.jpg']),\n ]\n for index, link in enumerate(links):\n args = (link.select('@href').extract(), link.select('img/@src').extract())\n assert args == results[index]\n \n \n def tests_xml():\n ''' XML tests '''\n \n xml_content = '''\n \n \n \n \n 10\n 20\n 30\n 40\n 50\n \n \n \n '''\n xxs = XmlXPathSelector(xml_content)\n assert xxs.select('//counter1/text()').extract() == [u'10']\n assert xxs.select('//object/@name').extract() == [u'object_1']\n\n def tests():\n tests_html()\n tests_xml()\n\n tests()\n \n","sub_path":"packages/xpathselectors.py","file_name":"xpathselectors.py","file_ext":"py","file_size_in_byte":7270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"280265279","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\nimg0 = cv2.imread('futbol.jpg') # resim eklenir\n\n\ngray = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY) # Gri Skala\n\n\nimg = cv2.GaussianBlur(gray,(3,3),0) # Gürültü kaldırma\n\n\n# laplace sobel islemleri\nlaplacian = cv2.Laplacian(img,cv2.CV_64F)\nsobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5) # x\nsobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=5) # y\n\n\n# tek görselde tablo olarak gösterme işlemi\nplt.subplot(2,2,1),plt.imshow(img,cmap = 'gray')\nplt.title('Original'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,2,2),plt.imshow(laplacian,cmap = 'gray')\nplt.title('Laplacian'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,2,3),plt.imshow(sobelx,cmap = 'gray')\nplt.title('Sobel X'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,2,4),plt.imshow(sobely,cmap = 'gray')\nplt.title('Sobel Y'), plt.xticks([]), plt.yticks([])\n\nplt.show()","sub_path":"farkli_kenar_bulma/kenar_bulma_1.py","file_name":"kenar_bulma_1.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"315896555","text":"from laspy.file import File\nimport gdal\nimport numpy as np\nimport os\n\n\ndef imageLoad(filename):\n im = gdal.Open(filename, gdal.GA_ReadOnly)\n band = im.GetRasterBand(1)\n img = band.ReadAsArray(0, 0, im.RasterXSize, im.RasterYSize)\n transform = im.GetGeoTransform()\n return img, transform\n\n\ndef getNoDataValue(filename):\n im = gdal.Open(filename, gdal.GA_ReadOnly)\n band = im.GetRasterBand(1)\n nodata = band.GetNoDataValue()\n return nodata\n\n\ndef imageWarp(file_from: str, file_to: str, offset=None, interp_method: int = gdal.gdalconst.GRA_Bilinear):\n image_from = gdal.Open(file_from, gdal.GA_ReadOnly)\n image_to = gdal.Open(file_to, gdal.GA_ReadOnly)\n\n # Apply registration offset\n if offset is not None:\n # Move input to memory to apply registration offset\n mem_drv0 = gdal.GetDriverByName('MEM')\n image_tmp = mem_drv0.Create('', image_from.RasterXSize,\n image_from.RasterYSize, 1, gdal.GDT_Float32)\n image_tmp.SetGeoTransform(image_from.GetGeoTransform())\n image_tmp.SetProjection(image_from.GetProjection())\n image_tmp.GetRasterBand(1).WriteArray(\n image_from.ReadAsArray(0, 0, image_from.RasterXSize,\n image_from.RasterYSize))\n NDV = image_from.GetRasterBand(1).GetNoDataValue()\n if NDV is not None:\n image_tmp.GetRasterBand(1).SetNoDataValue(NDV)\n\n offset = np.asarray(offset)\n transform = image_from.GetGeoTransform()\n transform = np.asarray(transform)\n 
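        # Note (added): GDAL GeoTransform elements 0 and 3 hold the raster's top-left x/y origin,
        # so adding the (x, y) registration offset to them shifts the image georeferencing without resampling.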
transform[0] += offset[0]\n transform[3] += offset[1]\n image_tmp.SetGeoTransform(transform)\n else:\n image_tmp = image_from\n\n # Create outout image\n mem_drv = gdal.GetDriverByName('MEM')\n destination = mem_drv.Create('', image_to.RasterXSize, image_to.RasterYSize, 1,\n gdal.GDT_Float32)\n\n destination.SetProjection(image_to.GetProjection())\n destination.SetGeoTransform(image_to.GetGeoTransform())\n\n gdal.ReprojectImage(image_tmp, destination, image_from.GetProjection(),\n image_to.GetProjection(), interp_method)\n\n image_out = destination.GetRasterBand(1).ReadAsArray(0, 0, destination.RasterXSize, destination.RasterYSize)\n\n return image_out\n\n\ndef arrayToGeotiff(image_array, out_file_name, reference_file_name, NODATA_VALUE):\n \"\"\" Used to save rasterized dsm of point cloud \"\"\"\n reference_image = gdal.Open(reference_file_name, gdal.GA_ReadOnly)\n transform = reference_image.GetGeoTransform()\n projection = reference_image.GetProjection()\n\n driver = gdal.GetDriverByName('GTiff')\n out_image = driver.Create(out_file_name + '.tif', image_array.shape[1],\n image_array.shape[0], 1, gdal.GDT_Float32)\n if out_image is None:\n print('Could not create output GeoTIFF')\n\n out_image.SetGeoTransform(transform)\n out_image.SetProjection(projection)\n\n out_band = out_image.GetRasterBand(1)\n out_band.SetNoDataValue(NODATA_VALUE)\n out_band.WriteArray(image_array, 0, 0)\n out_band.FlushCache()\n out_image.FlushCache()\n # Ignore pep warning here, aids in memory management performance\n out_image = None\n\n return\n\n\n# Load LAS file and generate max DSM in memory\ndef lasToRaster(las_filename, transform, shape_out, NODATA):\n # Load LAS file\n test_las = File(las_filename, mode='r')\n\n x = test_las.x\n y = test_las.y\n z = test_las.z\n\n # Project to output image space\n # TODO: call map2pix\n map_to_pix = gdal.InvGeoTransform(transform)\n x0 = np.round(map_to_pix[0] + x * map_to_pix[1] + y * map_to_pix[2])\n y0 = np.round(map_to_pix[3] + x * map_to_pix[4] + y * map_to_pix[5])\n\n x0 = x0.astype(int)\n y0 = y0.astype(int)\n\n # Generate MAX value DSM\n raster = np.zeros(shape_out, np.float32) + NODATA\n for ii in range(0, x0.size):\n if (x0[ii] >= 0) & (x0[ii] < raster.shape[1]) & (y0[ii] >= 0) & (\n y0[ii] < raster.shape[0]):\n if z[ii] > raster[y0[ii], x0[ii]]:\n raster[y0[ii], x0[ii]] = z[ii]\n\n return raster\n\n\n# refMat is a GDAL GeoTransform format\ndef map2pix(reference_matrix, points_list):\n x_origin = reference_matrix[0]\n y_origin = reference_matrix[3]\n pixel_width = reference_matrix[1]\n pixel_height = -reference_matrix[5]\n\n xy = np.zeros(shape=(len(points_list), 2))\n\n xy[:, 0] = (np.round((points_list[:, 0] - x_origin) / pixel_width))\n xy[:, 1] = (np.round((y_origin - points_list[:, 1]) / pixel_height))\n\n return xy\n","sub_path":"core3dmetrics/geometrics/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"648601961","text":"from importlib import import_module\r\nfrom collections import OrderedDict\r\nfrom engine.misc import abrir_json\r\nfrom engine.globs import ModData\r\nfrom types import FunctionType\r\nfrom .r_composites import *\r\nfrom .composites import *\r\nfrom .decorators import *\r\nfrom .leaves import Leaf\r\n\r\n\r\nclass BehaviourTree:\r\n # this is a container.\r\n nodes = []\r\n tree_structure = None\r\n to_check = None\r\n node_set = False\r\n shared_context = {}\r\n status = None\r\n entity = None\r\n\r\n 
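    # Note (added): per-tree name -> callable cache, filled by load_script_information()
    # from the external script modules listed in the tree's 'head' section.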
_loaded_functions = None\r\n\r\n def __init__(self, entity, tree_data):\r\n if self.tree_structure is not None:\r\n self.tree_structure.clear()\r\n self.nodes = []\r\n self.shared_context = {}\r\n self._loaded_functions = {}\r\n\r\n special = tree_data['head'].pop('special')\r\n self.load_script_information(tree_data['head'].pop('script'))\r\n self.tree_structure = OrderedDict()\r\n self.entity = entity\r\n tree_data = self.analyze_tree(tree_data['body'])\r\n for key in [str(i) for i in range(len(tree_data))]:\r\n node = None\r\n process = None\r\n data = tree_data[key]\r\n idx = int(key)\r\n self.tree_structure[idx] = []\r\n\r\n name = data['name']\r\n if 'children' in data: # composite\r\n self.tree_structure[idx].extend(data['children'])\r\n if name == 'Selector':\r\n node = Selector(self, idx, data['children'])\r\n elif name == 'Sequence':\r\n node = Sequence(self, idx, data['children'])\r\n elif name == 'Parallel':\r\n sucess_value, failure_value = 0, 0\r\n parallel = special.get('Parallel', False)\r\n if parallel and parallel['ID'] == idx:\r\n sucess_value = special['Parallel'].get('Sucess_value', 0)\r\n failure_value = special['Parallel'].get('Failure_value', 0)\r\n node = Parallel(self, idx, data['children'], sucess_value, failure_value)\r\n elif name == 'RSelector':\r\n node = RandomSelector(self, idx, data['children'])\r\n elif name == 'RSequence':\r\n node = RandomSequence(self, idx, data['children'])\r\n\r\n elif 'child' in data: # decorator\r\n self.tree_structure[idx].append(int(data['child']))\r\n if name == 'Repeater':\r\n times = 0\r\n repeater = special.get('Repeater', False)\r\n if repeater and repeater['ID'] == idx:\r\n times = special['Repeater']['times']\r\n node = Repeater(self, idx, data['child'], times=times)\r\n elif name == 'UntilFail':\r\n node = UntilFail(self, idx, data['child'])\r\n elif name == 'Succeeder':\r\n node = Succeeder(self, idx, data['child'])\r\n elif name == 'Inverter':\r\n node = Inverter(self, idx, data['child'])\r\n elif name == 'Failer':\r\n node = Failer(self, idx, data['child'])\r\n elif name == 'UntilSuccess':\r\n node = UntilSuccess(self, idx, data['child'])\r\n\r\n else: # leaf\r\n if name in globals():\r\n process = globals()[name]\r\n\r\n elif name in self._loaded_functions:\r\n process = self._loaded_functions[name]\r\n\r\n if isinstance(process, FunctionType):\r\n node = Leaf(self, idx, name)\r\n node.set_process(process)\r\n\r\n elif issubclass(process, Leaf):\r\n node = process(self, idx, name)\r\n\r\n self.nodes.append(node)\r\n\r\n self.set_parents()\r\n self.set_children()\r\n self.to_check = [self.nodes[0]]\r\n\r\n def __repr__(self):\r\n return 'BehaviourTree'\r\n\r\n def load_script_information(self, head_data):\r\n for script in head_data:\r\n ruta = ModData.pkg_scripts.replace('.', '/') + '/' + script\r\n modulo = import_module('.'.join([ModData.pkg_scripts, script.replace('/', '.')]), ruta)\r\n for name in head_data[script]:\r\n if hasattr(modulo, name):\r\n self._loaded_functions[name] = getattr(modulo, name)\r\n\r\n def analyze_tree(self, tree_data):\r\n key = None\r\n new_tree = None\r\n\r\n for key in [str(i) for i in range(len(tree_data))]:\r\n idx = int(key)\r\n name = tree_data[key]['name']\r\n if name == 'ExtenderLeaf':\r\n extension = abrir_json(ModData.mobs + 'behaviours/' + tree_data[key]['tree'] + '.json')\r\n head = extension.pop('head')\r\n body = extension.pop('body')\r\n new_tree = self.extend_tree(body, idx)\r\n self.load_script_information(head)\r\n break\r\n\r\n if new_tree:\r\n del tree_data[key]\r\n 
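            # Note (added): splice the renumbered extension nodes in place of the removed
            # ExtenderLeaf entry; extend_tree() already offset their child indices by idx.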
tree_data.update(new_tree)\r\n return tree_data\r\n\r\n @staticmethod\r\n def extend_tree(new_body, idx):\r\n new_tree = {}\r\n for kex in new_body:\r\n if 'children' in new_body[kex]:\r\n for i in range(len(new_body[kex]['children'])):\r\n new_body[kex]['children'][i] += idx\r\n elif 'child' in new_body[kex]:\r\n new_body[kex]['child'] += idx\r\n idy = str(int(kex) + idx)\r\n new_tree[idy] = new_body[kex]\r\n return new_tree\r\n\r\n def set_parents(self):\r\n for idx in self.tree_structure.keys():\r\n if len(self.tree_structure[idx]):\r\n node = self.nodes[idx]\r\n for idxn in self.tree_structure[idx]:\r\n self.nodes[idxn].set_parent(node)\r\n\r\n def set_children(self):\r\n for idx in self.tree_structure.keys():\r\n if len(self.tree_structure[idx]):\r\n if hasattr(self.nodes[idx], 'children'):\r\n for idxn in self.nodes[idx].children:\r\n node = self.nodes[idxn]\r\n index = self.nodes[idx].children.index(idxn)\r\n self.nodes[idx].children[index] = node\r\n\r\n elif hasattr(self.nodes[idx], 'child'):\r\n idxn = self.tree_structure[idx][0]\r\n node = self.nodes[idxn]\r\n self.nodes[idx].child = node\r\n\r\n def set_to_check(self, *nodes):\r\n if self.node_set is False:\r\n self.to_check = [*nodes]\r\n self.node_set = True\r\n\r\n def set_context(self, key, value):\r\n self.shared_context[key] = value\r\n\r\n def get_context(self, key, default_value=False):\r\n if key in self.shared_context:\r\n return self.shared_context[key]\r\n else:\r\n return default_value\r\n\r\n def clear_context(self):\r\n self.shared_context.clear()\r\n\r\n def erase_keys(self, *keys):\r\n \"\"\"\r\n This method erases the indicated keys from the shared context. It works as clear_context(), but selectively.\r\n \"\"\"\r\n for key in keys:\r\n if key in self.shared_context:\r\n del self.shared_context[key]\r\n\r\n def preserve_keys(self, *keys):\r\n \"\"\"\r\n This method erases all keys from the shared conext, except those which are preserved.\r\n \"\"\"\r\n preserved = {}\r\n for key in keys:\r\n preserved[key] = self.shared_context[key]\r\n\r\n self.clear_context()\r\n for key in preserved:\r\n self.set_context(key, preserved[key])\r\n\r\n def set_status(self, status):\r\n \"\"\"\"\r\n Sets the status of the entire tree. Otherwise, the status is None.\r\n \"\"\"\r\n self.status = status\r\n\r\n def reset(self):\r\n self.status = None\r\n self.to_check = self.nodes[0]\r\n self.clear_context()\r\n for node in self.nodes:\r\n node.reset()\r\n\r\n def update(self):\r\n if self.status is None:\r\n for node in self.to_check:\r\n node.update()\r\n self.node_set = False\r\n else:\r\n return self.status\r\n","sub_path":"engine/mobs/behaviortrees/behaviour_tree.py","file_name":"behaviour_tree.py","file_ext":"py","file_size_in_byte":8222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"18233515","text":"import pandas as pd\nfrom alpha_vantage.timeseries import TimeSeries\nimport time\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport datetime\nfrom api_key import api_key\n\n####\n#### Work in progress\n\n\nclass StockData:\n def __init__(self):\n self.api_key = api_key\n self.df = {}\n # self.today = pd.Timestamp(datetime.date.today())\n\n def indicator(self):\n # calculate total pv, total volume and VMAP\n self.df['6. pv total'] = (\n ((self.df['2. high'] + self.df['3. low'] + self.df['4. close']) / 3) * self.df['5. volume']).cumsum()\n self.df['7. volume total'] = self.df['5. volume'].cumsum()\n self.df['8. VWAP'] = self.df['6. pv total'] / \\\n self.df['7. 
volume total']\n\n def clean_data(self):\n # return yesterday's data\n yesterday = pd.Timestamp(\n datetime.date.today() - datetime.timedelta(days=1))\n print(str(yesterday))\n self.df = self.df[self.df.index > yesterday\n ].sort_index(ascending=False)\n print('data cleaned!')\n\n def TimeSeries(self, plot=False):\n ts = TimeSeries(key=self.api_key, output_format='pandas')\n self.df, meta_data = ts.get_intraday(\n symbol='MSFT', interval='1min', outputsize='full')\n\n print('dataframe created')\n self.clean_data()\n self.indicator()\n self.df = self.df.drop(['2. high', '3. low', '5. volume',\n '6. pv total', '7. volume total'], axis=1)\n print(self.df)\n self.df.plot()\n plt.title('Intraday Time Series (1 min)')\n plt.grid()\n plt.show()\n\n # if plot == True:\n\n\ndf = StockData()\ndf.TimeSeries(plot=True)\n","sub_path":"stock_alpha_vantage.py","file_name":"stock_alpha_vantage.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"355149324","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 15 17:27:16 2016\n\n@author: cornkle\n\"\"\"\n\nimport os\nimport numpy as np\nfrom scipy.ndimage.measurements import label\nfrom utils import u_mann_kendall as mk\n\n\ndef locate(pattern, root_path, exclude=None):\n strg = []\n llist = os.listdir(root_path)\n llist.sort()\n for file in llist:\n if file.endswith(pattern):\n filepath = os.path.join(root_path, file)\n\n try:\n if exclude in filepath:\n continue\n except TypeError:\n pass\n strg.append(os.path.join(root_path, file))\n return strg\n\n\n\ndef distance(x1, y1, x2, y2):\n return np.sqrt((x1 - x2) *(x1 - x2) + (y1 - y2) * (y1 - y2))\n\n\ndef closest_point(point, points):\n dist_2 = np.sum((points - point) * (points - point), axis=1)\n return np.argmin(dist_2)\n\n\n\"\"\"create one unique integer from two positive integers\n Cantor pairing function\"\"\"\ndef unique_of_pair(x,y):\n\n uni = (x + y) * (x + y + 1) / 2 + y\n return uni\n\n\n\"\"\"\nFind all indices within the local circle of radius\nInput:\nx: x index of center point\ny: y index of center point\nradius: radius in pixels, floats are handled including the farthest point\nReturns a tuple of (y index, x index)\n\"\"\"\ndef draw_circle(x, y, radius):\n\n xloc1 = np.arange(x - radius, x + radius + 1)\n yloc1 = np.arange(y - radius, y + radius + 1)\n xloc, yloc = np.meshgrid(xloc1, yloc1)\n distloc = ( ((xloc - x) * (xloc - x)) + ((yloc - y) * (yloc - y)) )**.5\n\n indloc = (distloc <= radius).nonzero()\n ycirc = indloc[0] - radius + y\n xcirc = indloc[1] - radius + x\n\n return (ycirc, xcirc)\n\n\n\"\"\"\nFind all indices within the local circle of radius\nInput:\nx: x index of center point\ny: y index of center point\nradius: radius in pixels, floats are handled including the farthest point\nReturns a tuple of (y index, x index)\n\"\"\"\ndef draw_ellipse(x, y, short, long):\n\n\n xloc = np.arange(x-np.round(short), x+np.round(short)+1)\n yloc = np.arange(y-np.round(long), y+np.round(long)+1)[:,None]\n #xloc, yloc = np.meshgrid(xloc1, yloc1)\n distloc = ((xloc - x)/short)**2 + ((yloc - y)/long)**2 <=1\n\n pos = np.where(distloc)\n\n ycirc = pos[0]+y\n xcirc = pos[1]+x\n\n return (ycirc, xcirc)\n\n\n\"\"\"\nFind all indices within the local circle of radius but remove indeces that\nare out of an area box, specified with an 2d array.\nInput:\nx: x index of center point\ny: y index of center point\nradius: radius in pixels, floats are handled including the farthest point\nReturns a tuple 
of (y index, x index)\n\"\"\"\ndef draw_cut_circle(x, y, radius, array):\n\n ycirc, xcirc = draw_circle(x, y, radius)\n noky = np.where(ycirc >= array.shape[0]) # if the circle is off the edge\n if noky[0].size > 0:\n ycirc = np.delete(ycirc, noky)\n xcirc = np.delete(xcirc, noky)\n\n nokx = np.where(xcirc >= array.shape[1])\n if nokx[0].size > 0:\n ycirc = np.delete(ycirc, nokx)\n xcirc = np.delete(xcirc, nokx)\n\n return (ycirc, xcirc)\n\n\n\"\"\"\nFind all indices creating the ring of a local circle of radius but remove indeces that\nare out of an area box, specified with an 2d array.\nInput:\nx: x index of center point\ny: y index of center point\nradius: radius in pixels, floats are handled including the farthest point\nReturns a tuple of (y index, x index)\n\"\"\"\ndef draw_ring(x, y, inrad, outrad, array):\n\n in_ycirc, in_xcirc = draw_cut_circle(x, y, inrad, array)\n out_ycirc, out_xcirc = draw_cut_circle(x, y, outrad, array)\n\n in_uni=unique_of_pair(in_xcirc, in_ycirc)\n out_uni = unique_of_pair(out_xcirc, out_ycirc)\n\n inter = np.in1d(out_uni, in_uni, assume_unique=True)\n\n if np.sum(inter) != 0:\n nok = np.where(inter)\n out_ycirc = np.delete(out_ycirc, nok)\n out_xcirc = np.delete(out_xcirc, nok)\n\n return (out_ycirc, out_xcirc)\n\n\ndef cut_kernel(array, xpos, ypos, dist_from_point):\n \"\"\"\n This function cuts out a kernel from an existing array and allows the kernel to exceed the edges of the input\n array. The cut-out area is shifted accordingly within the kernel window with NaNs filled in\n :param array: 2darray\n :param xpos: middle x point of kernel\n :param ypos: middle y point of kernel\n :param dist_from_point: distance to kernel edge to each side\n :return: 2d array of the chosen kernel size.\n \"\"\"\n\n if array.ndim != 2:\n raise IndexError('Cut kernel only allows 2D arrays.')\n\n kernel = np.zeros((dist_from_point*2+1, dist_from_point*2+1)) * np.nan\n\n if xpos - dist_from_point >= 0:\n xmin = 0\n xmindist = dist_from_point\n else:\n xmin = (xpos - dist_from_point) * -1\n xmindist = dist_from_point + (xpos - dist_from_point)\n\n if ypos - dist_from_point >= 0:\n ymin = 0\n ymindist = dist_from_point\n else:\n ymin = (ypos - dist_from_point) * -1\n ymindist = dist_from_point + (ypos - dist_from_point)\n\n if xpos + dist_from_point < array.shape[1]:\n xmax = kernel.shape[1]\n xmaxdist = dist_from_point + 1\n else:\n xmax = dist_from_point - (xpos - array.shape[1])\n xmaxdist = dist_from_point - (xpos + dist_from_point - array.shape[1])\n\n if ypos + dist_from_point < array.shape[0]:\n ymax = kernel.shape[0]\n ymaxdist = dist_from_point + 1\n else:\n ymax = dist_from_point - (ypos - array.shape[0])\n ymaxdist = dist_from_point - (ypos + dist_from_point - array.shape[0])\n\n cutk = array[ypos - ymindist: ypos + ymaxdist, xpos - xmindist: xpos + xmaxdist]\n\n\n kernel[ymin: ymax, xmin:xmax] = cutk\n\n return kernel\n\ndef cut_kernel_3d(array, xpos, ypos, dist_from_point):\n \"\"\"\n This function cuts out a kernel from an existing array and allows the kernel to exceed the edges of the input\n array. 
The cut-out area is shifted accordingly within the kernel window with NaNs filled in\n :param array: 2darray\n :param xpos: middle x point of kernel\n :param ypos: middle y point of kernel\n :param dist_from_point: distance to kernel edge to each side\n :return: 2d array of the chosen kernel size.\n \"\"\"\n\n if array.ndim != 3:\n raise IndexError('Cut kernel3d only allows 3D arrays.')\n\n kernel = np.zeros((array.shape[0], dist_from_point*2+1, dist_from_point*2+1)) * np.nan\n\n if xpos - dist_from_point >= 0:\n xmin = 0\n xmindist = dist_from_point\n else:\n xmin = (xpos - dist_from_point) * -1\n xmindist = dist_from_point + (xpos - dist_from_point)\n\n if ypos - dist_from_point >= 0:\n ymin = 0\n ymindist = dist_from_point\n else:\n ymin = (ypos - dist_from_point) * -1\n ymindist = dist_from_point + (ypos - dist_from_point)\n\n if xpos + dist_from_point < array.shape[2]:\n xmax = kernel.shape[2]\n xmaxdist = dist_from_point + 1\n else:\n xmax = dist_from_point - (xpos - array.shape[2])\n xmaxdist = dist_from_point - (xpos + dist_from_point - array.shape[2])\n\n if ypos + dist_from_point < array.shape[1]:\n ymax = kernel.shape[1]\n ymaxdist = dist_from_point + 1\n else:\n ymax = dist_from_point - (ypos - array.shape[1])\n ymaxdist = dist_from_point - (ypos + dist_from_point - array.shape[1])\n\n cutk = array[:, ypos - ymindist: ypos + ymaxdist, xpos - xmindist: xpos + xmaxdist]\n\n\n kernel[:, ymin: ymax, xmin:xmax] = cutk\n\n return kernel\n\n\n\ndef blob_define(array, thresh, min_area=None, max_area=None, minmax_area=None):\n array[array >= thresh] = 0 # T threshold maskout\n array[np.isnan(array)] = 0 # set ocean nans to 0\n\n labels, numL = label(array)\n\n u, inv = np.unique(labels, return_inverse=True)\n n = np.bincount(inv)\n\n goodinds = u[u!=0]\n\n if min_area != None:\n goodinds = u[(n>=min_area) & (u!=0)]\n badinds = u[nmax_area]\n\n if minmax_area != None:\n goodinds = u[(n <= minmax_area[1]) & (u != 0) & (n>=minmax_area[0])]\n badinds = u[(n > minmax_area[1]) | (n < minmax_area[0])]\n\n for b in badinds:\n pos = np.where(labels==b)\n labels[pos]=0\n\n return labels, goodinds\n\n\n\ndef linear_trend(x, eps=0.001, alpha=0.01):\n\n #pf = np.polyfit(np.arange(len(x)), x, 1)\n pf, slope, int, p, ind = mk.test(np.arange(len(x)),x.squeeze().values, eps=eps, alpha=alpha, Ha='upordown')\n\n # we need to return a dataarray or else xarray's groupby won't be happy\n\n if ind == 1:\n issig = slope\n else:\n issig = np.nan\n\n return issig\n\n\n\n\n\n\n\n\n","sub_path":"utils/u_arrays.py","file_name":"u_arrays.py","file_ext":"py","file_size_in_byte":8689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"493758866","text":"from datetime import date\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.core.paginator import Paginator\n\nfrom bookclub.forms import DiscussionForm\nfrom bookclub.models import Book\n\n\ndef all_books(request):\n upcoming_books = Book.objects.filter(read_by__gte=date.today()).order_by('read_by')[:3]\n previous_books = Book.objects.filter(read_by__lt=date.today()).order_by('-read_by')[:3]\n\n return render(request, 'bookclub/all_books.html', {'upcoming_books': upcoming_books, 'previous_books': previous_books})\n\n\ndef book_detail(request, pk):\n book = get_object_or_404(Book, pk=pk)\n discussion_form = DiscussionForm()\n discussion_open = False\n\n if book.read_by <= date.today():\n discussion_open = True\n\n if request.method == \"POST\":\n form = DiscussionForm(request.POST, 
request.FILES)\n if form.is_valid():\n opinion = form.save(commit=False)\n opinion.author = request.user\n opinion.book = book\n opinion.save()\n\n return redirect('book_detail', pk=book.pk)\n\n return render(request, 'bookclub/book_detail.html', {'book': book, 'discussion_open': discussion_open, 'discussion_form': discussion_form})\n\n\ndef book_list(request):\n books = Book.objects.all()\n paginator = Paginator(books, 5)\n\n page_number = request.GET.get('page')\n page_books = paginator.get_page(page_number)\n\n return render(request, 'bookclub/book_list.html', {'page_books': page_books})\n\n","sub_path":"bookclub/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"330863735","text":"from tkinter import *\r\nimport os\r\n\r\n#Window Declaration\r\nafter_register=Tk()\r\nafter_register.geometry(\"1360x1900\")\r\nafter_register.title(\"Welcome\")\r\n\r\n#setting of the background wallpaper\r\n\r\nbckground_label = Label(height=1000, width=1900)\r\nimage1 = PhotoImage(file=\"bank_home.gif\")\r\nbckground_label.config(image = image1)\r\nbckground_label.image = image1\r\nbckground_label.place(x=0, y=0)\r\nbckground_label.pack()\r\n\r\n#login button for further continuation\r\ndef login_button():\r\n after_register.destroy()\r\n os.system('python login.py')\r\n\r\n#Displaying some text regarding first login\r\nid = Label(text=\"Hello Dear User, Thank You for Trusting us. \\n You are being rewarded 5000 rs for \\n joining with us. Press the Continue button \\n for proceeding. \\n Happy Banking! :)\", font=('Verdana', 20))\r\nid.place(x = 400, y = 100)\r\n\r\n#setting up the login button\r\n\r\nlogin = Button(text = \"Continue\", font=('Verdana', 30), fg = \"BLUE\", command=login_button)\r\nlogin.place(x = 600, y = 600)\r\n\r\n\r\nafter_register.mainloop()\r\n","sub_path":"after_register.py","file_name":"after_register.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"16611068","text":"from ImageRecord import ImageRecord\nimport os\n\n\nclass SiteRecord:\n \"\"\"\n The SiteRecord class represents a record of a site. This record contains details relevant to the site observations.\n\n A 'SiteRecord' is formed from a root site source directory. 
Within this directory we expect to find image\n observations taken at the given site.\n \"\"\"\n\n def __init__(self, the_dir):\n \"\"\"\n :param the_dir: directory name for site, includes trailing /\n \"\"\"\n self.directory = the_dir\n self.include = True\n self.images = []\n self._load_from_folder()\n self.alias = self._get_site_alias()\n\n def _get_site_alias(self):\n \"\"\"\n A site can have an 'alias' or nickname / shorthand name for the site which may be useful to the reader\n :return: string representing the alias to be used for the site\n \"\"\"\n return os.path.basename(os.path.split(self.directory)[0])\n\n def _load_from_folder(self):\n \"\"\"\n This is how we keep a SiteRecord in sync with the folder.\n First we add any new directories found on the filesystem into the data structure.\n\n Then we check to see if there are any entires in the data structure whose representative files have since been\n removed from the filesystem\n :return: nothing\n \"\"\"\n self._add_new_from_folder()\n self._remove_missing_from_folder()\n\n def _add_new_from_folder(self):\n \"\"\"\n Add new images found in directory which are non existent in the site\n :return: none\n \"\"\"\n count = len(self.images) + 1\n\n for file in [os.path.join(self.directory, x) for x in os.listdir(self.directory)]:\n if file not in [x.image_path for x in self.images]:\n self.images.append(ImageRecord(count, file))\n count += 1\n\n self.images.sort()\n\n def _remove_missing_from_folder(self):\n \"\"\"\n Remove images from the structure which are no longer found in the directory\n :return: none\n \"\"\"\n for image in self.images:\n if not os.path.exists(image.image_path):\n self.images.remove(image)\n\n def __str__(self):\n \"\"\"\n Get a string representation of this site\n :return:\n \"\"\"\n base = \"site: \" + self.alias + \"\\n\"\n\n for x in self.images:\n base += str(x)\n\n return base\n","sub_path":"src/SiteRecord.py","file_name":"SiteRecord.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"214304888","text":"import sys, os, statistics, math\n\ndef main():\n proposition = True\n for m in range(4,1000):\n low = max(4,m-1)\n print(m,low)\n for n in range (low,1000):\n x = generateX(m,n)\n y = generateY(m,n)\n proposition = checkProposition(x,y)\n if not proposition :\n print(\"FAIL: m \",m,\" n \",n)\n return\n\ndef meanAbsDiff(r):\n n=len(r)\n mad=0\n for i in range(n):\n for j in range(n):\n mad = mad + abs(r[i] - r[j])\n return mad/(n^2)\n\ndef avgAbsDev(r):\n n = len(r)\n avg = statistics.mean(r)\n ad=0\n for i in range(n):\n ad = ad + abs(r[i] - avg)\n return ad/n\n\ndef stDev(r):\n n = len(r)\n avg = statistics.mean(r)\n sd = 0\n for i in range(n):\n sd = sd + math.pow(r[i] - avg,2)\n return math.sqrt(sd/n)\n\ndef gini(r):\n n = len(r)\n summ = sum(r)\n g = 0\n for i in range(n):\n for j in range(n):\n g = g + abs(r[i] - r[j])\n return g/(2*n*summ)\n\ndef checkProposition(x,y):\n flag = True\n if (meanAbsDiff(x)>meanAbsDiff(y)):\n print(x,y,\"meanAbsDiff\")\n flag = False\n if (avgAbsDev(x)>avgAbsDev(y)):\n print(x,y,\"avgAbsDev\")\n flag = False\n if (stDev(x)>stDev(y)):\n print(x,y,\"stDev\")\n flag = False\n if (gini(x)>gini(y)):\n print(x,y,\"gini\")\n flag = False\n return flag\n\ndef generateX(m,n):\n r=[]\n r.append(m-3)\n r.append(m-1)\n for i in range(n-2): \n r.append(m-2)\n return r\n\n\ndef generateY(m,n):\n r=[]\n for i in range(m-2): \n r.append(m-2-i)\n for i in range(n-m+2): \n r.append(0)\n 
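    # Note (added): the zero padding above brings y up to length n, matching the length of the sequence generateX builds.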
return r\n\n# old utility methods \ndef getMofY(rY):\n return rY[0]+2\n\ndef mySumStDev(r):\n n = len(r)\n m = getMofY(r)\n avg = statistics.mean(r)\n sd = 0\n for i in range(1,m-2+1):\n sd = sd + math.pow(i,2)\n for i in range(1,m-2+1):\n sd = sd - 2*avg*i\n sd = sd + (m-2)*(math.pow(avg,2))\n sd = sd + (n-m+2)*(math.pow(avg,2))\n return sd\n\ndef stComput(r):\n n = len(r)\n m = getMofY(r)\n avg = statistics.mean(r)\n sd = (m-2)*(m-1)*(2*m-3)/6 - avg*(m-2)*(m-1)+ avg**2*(m-2)+(n-m+2)*avg**2\n sd = (m-2)*(m-1)*(2*m-3)/6 - (m-2)**2*(m-1)**2/(4*n)\n return sd\n\ndef myAvgAbsDevY(r,m):\n n=len(r)\n ad=0\n avg= 0\n for i in range(2,m-1+1):\n avg = avg + (m-i)\n avg = avg/n\n for i in range(2,m-1+1):\n ad = ad + abs((m-i)- avg)\n ad = ad + (n-m+2)*(avg)\n return ad\n\ndef avgAbsDevComputation(r):\n n = len(r)\n m = getMofY(r)\n ad = 0\n avg = statistics.mean(r)\n floor = math.floor(avg)\n for i in range(floor+1,m-2+1):\n ad = ad + (i-avg)\n for i in range(1,floor+1):\n ad = ad - (i-avg)\n ad = ad + (n-m+2)*(avg)\n return ad\n\ndef testMathAd(r):\n n = len(r)\n m = getMofY(r)\n avg = statistics.mean(r)\n floor = math.floor(avg)\n x = (-((floor-m+2)*(floor+m-1)/2)-(m-2-floor)*avg-((floor*(floor+1))/2) + floor*avg+(n-m+2)*avg)\n x = floor*(-(floor+m-1)/2+(m-2)/2+2*avg-(floor+1)/2)+2*avg*(n-m+2)\n return floor*(2*avg-floor-1)+ 2*avg*(n-m+2)\n\ndef myMeanAbsDiffY(r,m):\n n=len(r)\n sum = 0\n for j in range(m-3+1):\n for i in range(m-3-j+1):\n sum = sum + i\n sum = sum + (m-2-j)*(n-m+2)\n for i in range(j+1):\n sum = sum + i\n temp=0\n for j in range(m-2+1):\n temp = temp + j\n sum = sum + (temp * (n-m+2) )\n return sum/(n^2)\n\n\n\n\nif __name__ == \"__main__\":\n main()\n ","sub_path":"spreadMeasures.py","file_name":"spreadMeasures.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"320782262","text":"def get_methods(a_class):\n return [method for method in a_class.__dict__.keys() if\n callable(getattr(a_class, method)) and not\n method.startswith('_') and method != 'init']\n\n\nclass Group(object):\n def __init__(self, sim, groups, agent_class=None):\n self.sim = sim\n self.num_managers = sim.processes\n self._processor_groups = sim._processor_groups\n self.groups = groups\n self.do = (self.execute_parallel\n if sim.processes > 1\n else self.execute_serial)\n\n self.agent_class = agent_class\n for method in dir(agent_class):\n if method[0] != '_':\n setattr(self, method,\n eval('lambda self=self, *argc, **kw: self.do(\"%s\", *argc, **kw)' %\n method))\n\n self.panel_serial = 0\n self.last_action = \"Begin_of_Simulation\"\n\n def __add__(self, g):\n return Group(self.sim, self.groups + g.groups, self.agent_class)\n\n def __radd__(self, g):\n if isinstance(g, Group):\n return self.__add__(g)\n else:\n return self\n\n def execute_serial(self, command, *args, **kwargs):\n self.last_action = command\n self.sim.messagess[-1].clear()\n out_messages = self._processor_groups[0].execute(\n self.groups, command, [], args, kwargs)\n self.sim.messagess = out_messages\n return out_messages[-1]\n\n def execute_parallel(self, command, *args, **kwargs):\n self.last_action = command\n self.sim.messagess[-1].clear()\n parameters = ((pg, self.groups, command, self.sim.messagess[pgid], args, kwargs)\n for pgid, pg in enumerate(\n self._processor_groups))\n out = self.sim.pool.map(execute_wrapper, parameters, chunksize=1)\n for pgid in range(self.num_managers):\n self.sim.messagess[pgid].clear()\n for out_messages in 
out:\n for pgid, messages in enumerate(out_messages):\n self.sim.messagess[pgid].extend(messages)\n return self.sim.messagess[-1]\n\n def panel_log(self, variables=[], possessions=[], func={}, len=[]):\n \"\"\" panel_log(.) writes a panel of variables and possessions\n of a group of agents into the database, so that it is displayed\n in the gui.\n\n Args:\n possessions (list, optional):\n a list of all possessions you want to track as 'strings'\n variables (list, optional):\n a list of all variables you want to track as 'strings'\n func (dict, optional):\n accepts lambda functions that execute functions. e.G.\n :code:`func = lambda self: self.old_money - self.new_money`\n len (list, optional):\n records the length of the list or dictionary with that name.\n\n Example in start.py::\n\n for round in simulation.next_round():\n firms.produce_and_sell()\n firms.panel_log(possessions=['money', 'input'],\n variables=['production_target', 'gross_revenue'])\n households.buying()\n \"\"\"\n self.do('_panel_log', variables, possessions, func, len, self.last_action)\n\n def agg_log(self, variables=[], possessions=[], func={}, len=[]):\n \"\"\" agg_log(.) writes a aggregate data of variables and possessions\n of a group of agents into the database, so that it is displayed\n in the gui.\n\n Args:\n possessions (list, optional):\n a list of all possessions you want to track as 'strings'\n variables (list, optional):\n a list of all variables you want to track as 'strings'\n func (dict, optional):\n accepts lambda functions that execute functions. e.G.\n :code:`func = lambda self: self.old_money - self.new_money`\n len (list, optional):\n records the length of the list or dictionary with that name.\n\n Example in start.py::\n\n for round in simulation.next_round():\n firms.produce_and_sell()\n firms.agg_log(possessions=['money', 'input'],\n variables=['production_target', 'gross_revenue'])\n households.buying()\n \"\"\"\n self.do('_agg_log', variables, possessions, func, len)\n\n\ndef execute_wrapper(inp):\n # processor_group.execute(self.groups, command, messages[pgid])\n return inp[0].execute(inp[1], inp[2], inp[3], inp[4], inp[5])\n","sub_path":"abce/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"257110798","text":"\"\"\"2행 N열\n스티커를 떼면 좌우상하 스티커 모두 사용할 수 없게 됨\n점수의 합이 최대가 되도록 스티커를 떼려 함\n\n케이스(1) 1칸 이전 대각선에서 올 경우\n케이스(2) 2칸 이전 대각선에서 올 경우\n\n위 두 케이스 중 존재하는 케이스에 대한 최댓값과 현재 상태값의 합\n\"\"\"\nimport sys\ninput = sys.stdin.readline\n\nt = int(input())\nanswers = []\n\nfor _ in range(t):\n n_cols = int(input())\n arr = [list(map(int, input().strip().split())) for _ in range(2)]\n scores = [[0] * n_cols for _ in range(2)]\n\n for c in range(n_cols):\n for r, diag in zip([0, 1], [1, 0]):\n if c == 0:\n scores[r][c] += arr[r][c]\n\n elif c == 1:\n scores[r][c] += arr[r][c] + scores[diag][c-1]\n \n else:\n scores[r][c] += arr[r][c] + max(scores[diag][c-1], scores[diag][c-2])\n\n answers.append(max(scores[0][n_cols-1], scores[1][n_cols-1]))\n\nfor a in answers:\n print(a)","sub_path":"dp/9465.py","file_name":"9465.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"505907443","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom sympy import *\nimport matplotlib.pyplot as pt\n\n\n# In[2]:\n\n\n#Laplace transform\n#It transforms a function of a real variable t (often time) to a 
function of a complex variable s (complex frequency).\n#the Laplace transform is a useful tool for dealing with linear systems described by ODEs. \n#the Laplace transform is defined for a larger class of functions than the related Fourier transform.\n\n\n# In[3]:\n\n\n#Basic and standard laplase can be found by integrating the function with exp(-st) from 0 to infinity with repect to t\nt,s,n,a,b=symbols('t s n a b')\nf=[1,t,t**n,exp(a*t),exp(-a*t),sin(a*t),cos(a*t),sinh(a*t),cosh(a*t)]\n\n\n# In[4]:\n\n\nintegrate(f[0]*exp(-s*t),(t,0,oo))\n\n\n# In[5]:\n\n\nintegrate(f[1]*exp(-s*t),(t,0,oo))\n\n\n# In[6]:\n\n\nintegrate(f[2]*exp(-s*t),(t,0,oo))\n\n\n# In[7]:\n\n\nintegrate(f[3]*exp(-s*t),(t,0,oo))\n\n\n# In[8]:\n\n\nintegrate(f[4]*exp(-s*t),(t,0,oo))\n\n\n# In[9]:\n\n\nintegrate(f[5]*exp(-s*t),(t,0,oo))\n\n\n# In[10]:\n\n\nintegrate(f[6]*exp(-s*t),(t,0,oo))\n\n\n# In[11]:\n\n\nintegrate(f[7]*exp(-s*t),(t,0,oo))\n\n\n# In[12]:\n\n\nintegrate(f[8]*exp(-s*t),(t,0,oo))\n\n\n# In[13]:\n\n\n#first shifting property is to simply equations with exp(at)\n#convert s----->(s-a)\n\n\n# In[14]:\n\n\nf=exp(a*t)*sin(b*t)\n#find laplace of sin(b*t)\nf1=sin(b*t)\nlaplace=integrate(f1*exp(-s*t),(t,0,oo))\n#s---->s-a\nlaplace.subs(s,s-a)\n\n\n# In[15]:\n\n\nimport numpy as np\nfrom sympy.integrals import laplace_transform\nfrom sympy.abc import t,s,a,b\n\n\n# In[16]:\n\n\nf=cos(t)\nlistu=[]\nfor i in np.arange(0,8*np.pi,0.2):\n listu.append(f.subs(t,i))\npt.plot(np.arange(0,8*np.pi,0.2),listu,label=\"f(t) graph\")\ng=laplace_transform(f,t,s)\ng=g[0]\nlisty=[]#g(s) values\nfor i in np.arange(0,8*np.pi,0.2):\n listy.append(g.subs(s,i))\npt.plot(np.arange(0,8*np.pi,0.2),listy,label=\"f(s) graph\")\npt.legend()\n\n\n# In[17]:\n\n\nf=exp(a*t)*sin(b*t)\n#can solve this easily by first shifting property\n#take f1\nf1=sin(b*t)\ng1=laplace_transform(f1,t,s)\ng1=g1[0]\ng2=g1.subs(s,s-a)\ng=laplace_transform(f,t,s)\ng=g[0]\nif g.equals(g2):\n print('true')\n print(\"laplace transform of exp(a*t)*sin(b*t) is\",g)\n print(\"laplace transform of sin(b*t) and then substituting s with s-a is\",g2)\n\nelse:\n print('false')\n\n\n# In[38]:\n\n\nlisty=[]\nlistu=[]\nlisti=[]\n#let a=1 b=2\ng=g.subs([(a,1),(b,2)])\ng1=g1.subs([(a,1),(b,2)])\ng2=g2.subs([(a,1),(b,2)])\n\nfor i in np.arange(0,8*np.pi,0.2):\n listy.append(g1.subs(s,i))\nfor i in np.arange(0,8*np.pi,0.2):\n listu.append(g2.subs(s,i))\nfor i in np.arange(0,8*np.pi,0.2):\n listi.append(g.subs(s,i))\nfig, (ax1, ax2,ax3) = pt.subplots(1,3)\nfig.suptitle('First shit visuallisation')\nax1.plot(np.arange(0,8*np.pi,0.2),listy)\nax2.plot(np.arange(0,8*np.pi,0.2),listu)\nax3.plot(np.arange(0,8*np.pi,0.2),listi)\nax1.set_title('Laplace before shifting')\nax2.set_title('Laplace after shifting')\nax3.set_title('Laplace done normally')\nprint('See after shifting both graphs become same')\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"python_project.py","file_name":"python_project.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"195453863","text":"# Get the input email from user\nemail = input('What is your email address?: ').strip()\n\n# Slice out the username\nusername = email[:email.index('@')]\n\n# Slice out the domain\ndomain = email[email.index('@') + 1:]\n\n# Format the out put\noutput = 'Your username is {} and your domain is {}'\noutput = output.format(username, domain)\n\n# Print the 
message\nprint(output)\n","sub_path":"slicer.py","file_name":"slicer.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"298176971","text":"'''\nconfig for factory test rig\n'''\n\nfrom math import *\n\nGDB=\"arm-none-eabi-gdb\"\nFMU_BMAGIC_SERIAL=\"B5DEADF0\"\nIO_BMAGIC_SERIAL=\"B5DFADF1\"\n\nFMU_JTAG=\"/dev/serial/by-id/usb-Black_Sphere_Technologies_Black_Magic_Probe_%s-if00\" % FMU_BMAGIC_SERIAL\nIO_JTAG=\"/dev/serial/by-id/usb-Black_Sphere_Technologies_Black_Magic_Probe_%s-if00\" % IO_BMAGIC_SERIAL\n\nFMU_DEBUG=\"/dev/serial/by-id/usb-Black_Sphere_Technologies_Black_Magic_Probe_%s-if02\" % FMU_BMAGIC_SERIAL\n\nFW_IO=\"FW/px4io.elf\"\nBL_IO=\"FW/px4io_bl.elf\"\n\nFW_FMU=\"FW/firmware-test.elf\"\nBL_FMU=\"FW/px4fmuv3_bl.elf\"\n\nCPUID_IO=\"STM32, Medium density\"\nCPUID_FMU=\"STM32F4xx\"\n\nUSB_DEV_TEST=\"/dev/serial/by-id/usb-3D_Robotics_PX4_FMU_v2.x_0-if00\"\nUSB_DEV_REFERENCE=\"/dev/serial/by-id/usb-3D_Robotics_PH_REFERENCE_0-if00\"\n\nFTDI_POWER=\"/dev/serial/by-id/usb-FTDI_TTL232R_FTFX6YMW-if00-port0\"\n\nNUM_ACCELS=3\nNUM_GYROS=3\n\nREMOTE_MONITOR=\"10.26.1.200:16550\"\nREMOTE_MONITOR2=\"10.26.1.200:16551\"\n\nROTATION_LEVEL_TOLERANCE = 3.0\nROTATION_TOLERANCE = 5.0\n\nGYRO_TOLERANCE = radians(0.2)\n\nPRESSURE_TOLERANCE = 10\nTEMPERATURE_TOLERANCE = 20\nVOLTAGE_TOLERANCE = 0.4\n\n# what channels control pitch and yaw in body frame\n\n# yaw in body frame\nYAW_CHANNEL = 5\n# +100 change == -20 degrees\nYAW_SCALE = -22.0 / 100\n\n\n\n\n# pitch in earth frame\nPITCH_CHANNEL = 6\n# +100 change == -34 degrees\nPITCH_SCALE = -34.0/100\n\n# acceptable modes when the test board is idle\nIDLE_MODES = [\"RTL>\",\"CIRCLE>\",\"MANUAL>\",\"STABILIZE>\"]\n\n\nclass Rotation(object):\n def __init__(self, chan1, chan2, roll, pitch):\n self.chan1 = chan1\n self.chan2 = chan2\n self.roll = roll\n self.pitch = pitch\n\n# servo positions for different orientations of boards in the test jig\n# the columns are:\n# servo5 PWM\n# servo6 PWM\n# expected roll\n# expected pitch\nROTATIONS = {\n 'level' : Rotation(1272, 1687, 0, 0),\n 'right' : Rotation(855, 1420, 90, 0),\n 'left' : Rotation(1660, 1420, -90, 0),\n 'up' : Rotation(1260, 1420, None, 90),\n 'down' : Rotation(1274, 1950, None, -90),\n 'back' : Rotation(1255, 1180, 180, 0)\n }\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"15733326","text":"'''\n1 + 2 + ... + n-1 + n + n-1 + ... + 2 + 1\n이 합은 n**2임\n1. 따라서 주어진 길이에서 n**2만큼 빼주고\n2. 
남은 값은 n보다 작거나 같은 수로 빼주면 될듯\n'''\n\nn = int(input()) # 반복 횟수\n\nfor _ in range(n):\n x, y = map(int, input().split()) # x좌표, y좌표\n lenth = y - x # 거리 계산\n test = 0 # 장치 작동 횟수\n n = 1 # 위 주석에서 언급한 n\n while n**2 <= lenth:\n n += 1\n n -= 1\n test += 2*n - 1 # n만큼 횟수 추가\n lenth -= n**2 # n**2만큼 뺀 나머지\n \n while True:\n if lenth == 0: # 다 끝난 경우\n print(test)\n break\n elif lenth >= n:\n lenth -= n\n test += 1\n else: # lenth < n\n lenth = 0\n test += 1","sub_path":"1_백준/1_단계별학습/08_기본_수학_1/10_1011.py","file_name":"10_1011.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"307874797","text":"from conans import ConanFile, python_requires, tools, CMake\nimport shutil\nimport os\n\npyreq = python_requires(\"pyreq/1.0.0@tdelame/stable\")\n\nclass openimageio(pyreq.CMakeConanFile):\n description = \"library for reading and writing images, and a bunch of related classes, utilities, and applications.\"\n license = \"BSD 3-Clause License\"\n url = \"https://sites.google.com/site/openimageio/\"\n\n name = \"OpenImageIO\"\n version = \"2.1.10.1\"\n\n settings = \"os\"\n\n def build_requirements(self):\n \"\"\"Define build-time requirements.\"\"\"\n self.requires(\"boost-headers/1.70.0@tdelame/stable\")\n super(openimageio, self).build_requirements()\n\n def requirements(self):\n \"\"\"Define runtime requirements.\"\"\"\n self.requires(\"OpenEXR/2.4.0@tdelame/stable\")\n self.requires(\"tiff/4.1.0@tdelame/stable\")\n self.requires(\"libjpeg/9c@tdelame/stable\")\n self.requires(\"libpng/1.6.37@tdelame/stable\")\n self.requires(\"zlib/1.2.11@tdelame/stable\")\n self.requires(\"boost-filesystem/1.70.0@tdelame/stable\")\n self.requires(\"boost-thread/1.70.0@tdelame/stable\")\n self.requires(\"TBB/2019-U6@tdelame/stable\")\n self.requires(\"bzip2/1.0.8@tdelame/stable\")\n self.requires(\"freetype/2.9.1@tdelame/stable\")\n\n def source(self):\n \"\"\"Retrieve source code.\"\"\"\n url = \"https://github.com/OpenImageIO/oiio/archive/Release-{}.tar.gz\".format(self.version)\n directory = \"oiio-Release-{}\".format(self.version)\n tools.get(url)\n os.rename(directory, self._source_subfolder)\n\n def cmake_definitions(self):\n \"\"\"Setup CMake definitions.\"\"\"\n boost_lib_paths = []\n boost_libs = []\n for component in [\"filesystem\", \"thread\"]:\n dep = self.deps_cpp_info[\"boost-{}\".format(component)]\n boost_lib_paths.extend(dep.lib_paths)\n boost_libs.extend(dep.libs)\n \n definition_dict = {\n \"OIIO_BUILD_TESTS\": False,\n \"OIIO_BUILD_TOOLS\": False,\n \"OIIO_THREAD_ALLOW_DCLP\": True,\n \n \"EMBEDPLUGINS\": True,\n \"INSTALL_DOCS\": False,\n \"BUILD_DOCS\": False,\n \"USE_STD_REGEX\": True,\n\n \"USE_PYTHON\": False,\n \"USE_HDF5\": False,\n \"USE_OpenColorIO\": False,\n \"USE_OpenCV\": False,\n \"USE_DCMTK\": False,\n \"USE_Field3D\": False,\n \"USE_Libheif\": False,\n \"USE_LibRaw\": False,\n \"USE_Webp\": False,\n \"USE_Nuke\": False,\n \"USE_R3DSDK\": False,\n \"USE_OpenGL\": False,\n \"USE_OpenVDB\": False,\n \"USE_PTex\": False,\n \"USE_Qt5\": False,\n \"USE_Libsquish\": False,\n \"USE_OpenJpeg\": False,\n \"USE_FFmpeg\": False,\n \"USE_GIF\": False,\n \"USE_JPEGTurbo\": False,\n\n \"BOOST_CUSTOM\": True,\n \"Boost_VERSION\": \"1.70.0\",\n \"Boost_INCLUDE_DIRS\": self.deps_cpp_info[\"boost-headers\"].include_paths[0],\n \"Boost_LIBRARY_DIRS\": \";\".join([\"{}\".format(path) for path in boost_lib_paths]),\n \"Boost_LIBRARIES\": \";\".join([\"{}\".format(lib) for lib in boost_libs]),\n\n \"ZLIB_ROOT\": 
self.deps_cpp_info[\"zlib\"].rootpath,\n \n \"PNG_ROOT\": self.deps_cpp_info[\"libpng\"].rootpath,\n\n \"TIFF_ROOT\": self.deps_cpp_info[\"tiff\"].rootpath,\n\n \"Freetype_ROOT\": self.deps_cpp_info[\"freetype\"].rootpath,\n\n \"BZip2_ROOT\": self.deps_cpp_info[\"bzip2\"].rootpath,\n\n\n \"OpenEXR_ROOT\": self.deps_cpp_info[\"OpenEXR\"].rootpath,\n\n \"JPEG_ROOT\": self.deps_cpp_info[\"libjpeg\"].rootpath,\n\n \"CMAKE_CXX_FLAGS\": \"-fPIC -Wno-error=deprecated -m64 -O3\"\n }\n\n\n self.add_default_definitions(definition_dict)\n return definition_dict\n\n def build(self):\n # this project expect is very picky about cmake invocation...\n build_dir = os.path.join(self._source_subfolder, \"build\")\n package_dir = os.path.abspath(self.package_folder)\n os.makedirs(build_dir)\n os.makedirs(package_dir)\n\n compile_command = 'cmake ../ -G\"Ninja\" -DCMAKE_INSTALL_PREFIX=\"{package_folder}\" {definitions}'.format(\n package_folder=package_dir,\n definitions=\" \".join(['-D{0}=\"{1}\"'.format(key, value) for key, value in self.cmake_definitions().items()]))\n\n install_command = 'ninja install'\n\n with tools.chdir(build_dir):\n self.run(compile_command)\n self.run(install_command)\n\n def package(self):\n os.rename(\n os.path.join(self.package_folder, \"lib64\"),\n os.path.join(self.package_folder, \"lib\"))\n self.package_licenses()\n self.clean_package() \n \n def package_info(self):\n \"\"\"Edit package info.\"\"\"\n super(openimageio, self).package_info()\n self.cpp_info.libs = [\"OpenImageIO\", \"OpenImageIO_Util\"]\n","sub_path":"OpenImageIO/OpenImageIO-2.1.10.1.py","file_name":"OpenImageIO-2.1.10.1.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"187636885","text":"from webob import Request\r\nrequests=[]\r\nwikipedia = Request.blank(\"wikipedia.org\")\r\nwikipedia.host = 'wikipedia.org'\r\nwikipedia.environ[\"SERVER_NAME\"] = 'wikipedia.org'\r\nwikipedia.accept = \"text/html\"\r\nwikipedia.user_agent = \"User-Agent: Mozilla/5.0 (X11; U; Linux i686; ru; rv:1.9b5) Gecko/2008050509 Firefox/3.0b5\"\r\nrequests.append(wikipedia)\r\n\r\nhttpbin1 = Request.blank(\"ip\")\r\nhttpbin1.host = 'httpbin.org'\r\nhttpbin1.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin1.accept = '*/*'\r\nrequests.append(httpbin1)\r\n\r\nhttpbin2 = Request.blank(\"get?foo=bar&1=2&2/0&error=True\")\r\nhttpbin2.host = 'httpbin.org'\r\nhttpbin2.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin2.accept = '*/*'\r\nrequests.append(httpbin2)\r\n\r\nhttpbin3 = Request.blank(\"post\")\r\nhttpbin3.host = 'httpbin.org'\r\nhttpbin3.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin3.method = 'POST'\r\ncontent = \"foo=bar&1=2&2%2F0=&error=True\".encode('ascii')\r\nhttpbin3.content_type = \"application/x-www-form-urlencoded\"\r\nhttpbin3.body = content\r\nhttpbin3.content_length = len(content)\r\nhttpbin3.headers['Connection'] = 'close'\r\nrequests.append(httpbin3)\r\n\r\nhttpbin4 = Request.blank('cookies/set?country=Ru')\r\nhttpbin4.host = 'httpbin.org'\r\nhttpbin4.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin4.accept = '*/*'\r\nhttpbin4.headers['Connection'] = 'close'\r\nrequests.append(httpbin4)\r\n\r\nhttpbin5 = Request.blank(\"cookies\")\r\nhttpbin5.host = 'httpbin.org'\r\nhttpbin5.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin5.accept = '*/*'\r\nhttpbin5.headers['Connection'] = 'close'\r\nrequests.append(httpbin5)\r\n\r\nhttpbin6 = Request.blank('redirect/4')\r\nhttpbin6.host = 
'httpbin.org'\r\nhttpbin6.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin6.accept = '*/*'\r\nhttpbin6.headers['Connection'] = 'close'\r\nrequests.append(httpbin6)\r\n\r\nhttpbin7 = Request.blank(\"post\")\r\nhttpbin7.host = 'httpbin.org'\r\nhttpbin7.environ[\"SERVER_NAME\"] = 'httpbin.org'\r\nhttpbin7.method = 'POST'\r\ncontent = \"firstname=Nikita&lastname=Ragozin&group=fo340001&message=empty_message\".encode('ascii')\r\nhttpbin7.content_length = len(content)\r\nhttpbin7.content_type = \"application/x-www-form-urlencoded\"\r\nhttpbin7.body = content\r\nhttpbin7.headers['Connection'] = 'close'\r\nrequests.append(httpbin7)\r\n\r\nfor request in requests:\r\n\tresponce = request.get_response()\r\n\tresponce.content_type = 'text/plain'\r\n\tresponce.charset = 'utf-8'\r\n\tprint(responce)\r\n\tprint(\"\\n\\n------------\\n\\n\")\r\n\r\n","sub_path":"requestWebob.py","file_name":"requestWebob.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"545829863","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom utils import create_ui,genrate_new_images_on_existing_model, plot_loss, save_checkpoint, load_checkpoint,initialize_weights,create_tensor_board_dirs\nfrom model import Discriminator, Generator\nimport my_config\nfrom datetime import datetime\n\n## ----------settings for models are in config.py file -----------------------\n##-----------change this parmater from \"WGAN\" to \"DC_GAN\" to alternate between models. --------------------\n\n\nconfig=create_ui (my_config)\nMODEL=config.MODEL\nif MODEL['name']==\"Wgan-GP\":\n Model=config.WGAN\nelse:\n Model=config.DC_GAN\n\ndevice_name = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\ndevice = torch.device(device_name) \nModel['device']=device\nprint( \"the current device is : \" +device_name)\n\n\n\n\n############# define model and do pre procssing #####################\ntransforms = transforms.Compose(\n [\n transforms.Resize(Model['image_size']),\n transforms.ToTensor(),\n transforms.Normalize(\n (0.5,),(0.5,))\n ]\n)\ndataset = datasets.FashionMNIST(root=\"dataset/\",train=True, transform=transforms, download=True)\nloader = DataLoader(\n dataset,\n batch_size=Model['batch_size'],\n shuffle=True,\n)\n\nload_flag= Model['General']['load_existing_mode']\nif load_flag:\n gen,disc, epoch,trained= load_checkpoint( Model['General']['load_dir'],Model['name'])\n if not trained:\n my_config.NUM_EPOCHS=epoch\n else: \n real=loader.__iter__().next()[0]\n date=datetime.now().strftime(\"%m_%d_%H_%M\")\n for i in range(Model['General'] ['number_of_real_images']):\n torchvision.utils.save_image(real[i],f\"real_image_from_model_{Model['name']}_At_{date}_{i}.png\")\n genrate_new_images_on_existing_model(Model,gen) \n exit(0) \n# initialize gen and disc, note: discriminator should be called critic (since it no longer outputs between [0, 1])\n# for connivance of alternate between models is name remain disc\nif not load_flag:\n gen = Generator(Model).to(device)\n disc = Discriminator(Model).to(device)\ninitialize_weights(gen)\ninitialize_weights(disc)\n\n# initializate optimizer\nopt_gen = optim.Adam(gen.parameters(), lr=Model['lr_gen'], betas=(Model['beta1'], Model['beta2']))\nopt_disc = optim.Adam(disc.parameters(), lr=Model['lr_disc'], betas=(Model['beta1'], 
Model['beta2']))\n\n\n############# end define and pre procssing ##############################\n\n# for tensorboard plotting- demonstre improvement by loss graphs and image genration process\nwriter_real,writer_fake,writer_gen_loss,writer_disc_loss=create_tensor_board_dirs(Model)\n\nNUM_EPOCHS= my_config.NUM_EPOCHS\nfixed_noise = torch.randn(Model['batch_size'], Model['z_dim'], 1, 1).to(device)\nnum_of_batches=len(loader)//Model['batch_size']\nstep = 0\nD_loss=[]\nG_loss=[]\nfor epoch in range(NUM_EPOCHS):\n #apply train mode to models \n gen.train()\n disc.train()\n for batch_idx, (real,_) in enumerate(loader):\n if len(real) < Model['batch_size']:# not take in consider the last partial batch \n break \n real = real.to(device)\n torch.autograd.set_detect_anomaly(True)\n for _ in range(Model['disc_iter']):\n noise = torch.randn(Model['batch_size'], Model['z_dim'], 1, 1).to(device) #BCHW \n fake = gen(noise)\n loss_disc=disc.calculate_disc_loss(disc,real,fake)\n disc.zero_grad()\n loss_disc.backward(retain_graph=True)\n opt_disc.step()\n loss_gen = gen.calculate_gen_loss(disc,fake)\n gen.zero_grad()\n loss_gen.backward(retain_graph=True)\n opt_gen.step()\n\n # Print losses occasionally and print to tensorboard\n if batch_idx % 100 == 0 and batch_idx > 0:\n print(\n f\"Epoch [{epoch}/{NUM_EPOCHS}] Batch {batch_idx}/{len(loader)} \\\n Loss D: {loss_disc:.4f}, loss G: {loss_gen:.4f}\"\n )\n with torch.no_grad():\n gen.eval()\n disc.eval()\n fake = gen(fixed_noise)\n # take out (up to) 32 examples\n img_grid_real = torchvision.utils.make_grid(real[:32], normalize=True)# normalize is for return to range of [0,1 ]\n img_grid_fake = torchvision.utils.make_grid(fake[:32], normalize=True)\n D_loss.append(loss_disc.item())\n G_loss.append(loss_gen.item())\n writer_real.add_image(\"Real\", img_grid_real, global_step=step)\n writer_fake.add_image(\"Fake\", img_grid_fake, global_step=step)\n writer_gen_loss.add_scalar('Discriminator Loss', D_loss[-1], global_step=step)\n writer_disc_loss.add_scalar('Generator Loss ',G_loss[-1], global_step=step)\n\n # writer_disc_loss.add_graph(gen,fixed_noise)\n # writer_disc_loss.add_graph(disc,real)\n writer_real.flush()\n writer_fake.flush()\n writer_gen_loss.flush()\n writer_disc_loss.flush()\n\n\n step += 1\nwriter_real.close()\nwriter_fake.close()\nwriter_gen_loss.close()\nwriter_disc_loss.close()\nprint(\"Training is finish!... 
save the train results and plot loss :)\")\nsave_checkpoint(gen,disc,None,True)\nplot_loss(G_loss,D_loss,gen.model)\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"426947913","text":"# Licensed under the Prefect Community License, available at\n# https://www.prefect.io/legal/prefect-community-license\n\n\nimport asyncio\n\nimport pendulum\nimport pytest\n\nfrom prefect_server import api, config, utilities\nfrom prefect_server.database import models\nfrom prefect_server.utilities.exceptions import Unauthorized\n\n\nasync def test_create_logs(flow_run_id):\n\n where_clause = {\n \"flow_run_id\": {\"_eq\": flow_run_id},\n }\n logs_count = await models.Log.where(where_clause).count()\n\n dt = pendulum.now()\n await api.logs.create_logs([dict(flow_run_id=flow_run_id)])\n\n assert await models.Log.where(where_clause).count() == logs_count + 1\n log = await models.Log.where(where_clause).first(\n {\"timestamp\", \"level\", \"task_run_id\"}\n )\n\n assert log.timestamp > dt\n assert log.level == \"INFO\"\n assert log.task_run_id is None\n\n\nasync def test_create_logs_with_task_run_id(flow_run_id, task_run_id):\n\n where_clause = {\n \"flow_run_id\": {\"_eq\": flow_run_id},\n \"task_run_id\": {\"_eq\": task_run_id},\n }\n logs_count = await models.Log.where(where_clause).count()\n\n await api.logs.create_logs(\n [dict(flow_run_id=flow_run_id, task_run_id=task_run_id,)]\n )\n\n assert await models.Log.where(where_clause).count() == logs_count + 1\n log = await models.Log.where(where_clause).first({\"task_run_id\"})\n\n assert log.task_run_id == task_run_id\n\n\nasync def test_create_logs_with_info(flow_run_id):\n\n where_clause = {\n \"flow_run_id\": {\"_eq\": flow_run_id},\n }\n logs_count = await models.Log.where(where_clause).count()\n\n timestamp = pendulum.datetime(2018, 1, 1)\n info = {\"lineno\": 5}\n level = \"ERROR\"\n name = \"Test\"\n message = \"test message\"\n\n pendulum.now()\n await api.logs.create_logs(\n [\n dict(\n flow_run_id=flow_run_id,\n timestamp=timestamp,\n info=info,\n level=level,\n name=name,\n message=message,\n )\n ]\n )\n\n assert await models.Log.where(where_clause).count() == logs_count + 1\n log = await models.Log.where(where_clause).first(\n {\"timestamp\", \"level\", \"name\", \"message\", \"info\"}\n )\n\n assert log.timestamp == timestamp\n assert log.level == level\n assert log.info == info\n assert log.message == message\n assert log.name == name\n\n\nasync def test_create_logs_with_bad_flow_run_ids_still_inserts_good_logs(flow_run_id):\n where_clause = {\n \"flow_run_id\": {\"_eq\": flow_run_id},\n }\n logs_count = await models.Log.where(where_clause).count()\n\n dt = pendulum.now()\n await api.logs.create_logs(\n [\n dict(flow_run_id=flow_run_id),\n dict(flow_run_id=\"\"),\n dict(flow_run_id=flow_run_id, message=\"foo\"),\n dict(flow_run_id=None),\n ]\n )\n\n assert await models.Log.where(where_clause).count() == logs_count + 2\n","sub_path":"server/tests/api/test_logs.py","file_name":"test_logs.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"184222856","text":"import aiohttp\nimport asyncio\n\n\nasync def fetch_resp_text(session, url):\n '''return http request text'''\n async with session.get(url) as resp:\n assert resp.status == 200\n return await resp.text()\n\n\nasync def main():\n async with 
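A hedged sketch of one common way to cap fan-out when gathering many fetches like the tasks below; the URL list and the limit are placeholders, not part of the original script:

import asyncio
import aiohttp

async def fetch_limited(session, semaphore, url):
    # The semaphore keeps at most `limit` requests in flight at once.
    async with semaphore:
        async with session.get(url) as resp:
            return await resp.text()

async def fetch_all(urls, limit=2):
    semaphore = asyncio.Semaphore(limit)
    async with aiohttp.ClientSession() as session:
        return await asyncio.gather(*(fetch_limited(session, semaphore, u) for u in urls))

# asyncio.get_event_loop().run_until_complete(fetch_all(['http://python.org']))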
aiohttp.ClientSession() as session:\n html = await fetch_resp_text(session, 'http://python.org')\n print(html)\n\n\nasync def webget(url):\n async with aiohttp.ClientSession() as session:\n html = await fetch_resp_text(session, url)\n print(url)\n\n\nurl_list = ['http://python.org', 'http://www.baidu.com','http://www.qq.com']\nloop = asyncio.get_event_loop()\n# loop.run_until_complete(main())\ntasks = [webget(host) for host in url_list]\ntasks = [asyncio.ensure_future(webget(host)) for host in url_list]\nloop.run_until_complete(asyncio.wait(tasks))\n","sub_path":"Thread/ansy.py","file_name":"ansy.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"23331596","text":"#!/usr/bin/env python\n#####################################################################################################################################\n# imports ###########################################################################################################################\n#####################################################################################################################################\nimport os\nimport logging\nimport re\nimport json\nfrom contextlib import closing\nfrom requests import get \nfrom requests.exceptions import RequestException\nfrom bs4 import BeautifulSoup\n\n#####################################################################################################################################\n# Helper functions ##################################################################################################################\n#####################################################################################################################################\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at 'url' by making a HTTP GET request. \n If the content-type of response is some kind of HTML/XML, return the \n text content, otherwise return None. \n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n log_error('Error during request to {0} : {1}'.format(url,str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise\n \"\"\"\n content_types = (\"html\",\"json\",\"csv\")\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and any(ct in content_type for ct in content_types))\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors\n This function just prints them, but you can \n make it do anything.\n \"\"\"\n print(e)\n\n#####################################################################################################################################\n#####################################################################################################################################\n#####################################################################################################################################\n\n\n# url we want to scrape\nurl = 'https://archives.library.illinois.edu/archon/?p=collections/findingaid&id=4719&q=correspondence&rootcontentid=83972#id83972'\n\n# Get the site content\nresponse = simple_get(url)\n\n# Parse the site content\nsoup = BeautifulSoup(response, 'html.parser')\n\n# Get the description list (e.g. 
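For illustration, a hedged usage sketch that assumes the simple_get/log_error helpers defined above are in scope; the URL is a placeholder:

raw_html = simple_get('https://example.com')  # returns page content or None
if raw_html is not None:
    page = BeautifulSoup(raw_html, 'html.parser')
    print(page.title.string if page.title else 'no <title> element')
else:
    log_error('fetch failed or non-HTML response')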
Series 2: Amateurism)\ndescription_list = soup.find('div', {\"id\": \"famain\"} ).dl\ndata_tag = description_list.dt\ndata_tag_title = data_tag.text\n\n# All boxes for this series\ndata_boxes = description_list.dd.dl\n\n# A list of all the box titles\nbox_titles = data_boxes.findChildren(['dt'], recursive=False)\n\n# a list (of lists) of all boxes content \nbox_contents = data_boxes.find_all(['dd'], recursive=False)\n\nfile_name = \"data.csv\"\nif os.path.exists(file_name):\n    os.remove(file_name)\n\nprint(\"Writing data file...\")\nf = open(file_name, \"w+\")\n# set the CSV separation character\nf.write(\"sep=|\\n\")\n\n# loop over all the box titles and the box content\nfor box_title, box_content in zip(box_titles, box_contents):\n    box_title_text = box_title.text # get the box title text\n    box_items = box_content.dl.findChildren(['dt'], recursive = False) # a list of all the top level items\n    for box_item in box_items:\n        item_text = box_item.text\n        line = \"{} | {} | {} \\n\".format(data_tag_title, box_title_text, item_text)\n        f.write(line)\n\nf.close()\nprint(\"Done!\")","sub_path":"basic_scraper.py","file_name":"basic_scraper.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"234033467","text":"#!/usr/bin/env python3\n\nimport sys\n\nimport creator\nimport messages\n\ndef main():\n\tdispatcher(sys.argv[1:])\n\ndef dispatcher(argv):\n    try:\n        command = argv[0]\n    except IndexError:\n        messages.helpMessage(\"No command specified\")\n        sys.exit()\n    if command in (\"-n\", \"new\"):\n        creator.dispatcher(argv[1:])\n    else:\n        messages.helpMessage(\"The specified command is unknown\")\n        sys.exit()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"latexhelper.py","file_name":"latexhelper.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"162754770","text":"import requests, sys\nfrom bs4 import BeautifulSoup\n\nnextLink = 'http://artedosdados.blogspot.com.br/2013/08/python-manipulando-strings-extraindo_6.html'\ntry:\n    c = requests.get(nextLink)\n    soup = BeautifulSoup(c.text, 'lxml')\n    # print the page title\n    print(soup.title.string)\n    print('-----------------------')\nexcept:\n    print('Error opening the page', sys.exc_info()[0])\n# return the content in bold\ntags = soup.find_all('b')\nfor tag in tags:\n    print (tag.string)","sub_path":"WebScraping /Capturando_elementos_em_negrito.py","file_name":"Capturando_elementos_em_negrito.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"445114143","text":"from optparse import make_option\nfrom django.core.management.base import BaseCommand, CommandError\nfrom avocado.models import DataCategory, DataConcept, DataField\nimport csv\n\n\ndef annotate_datafield(field_name, annotation):\n    '''Presently working with a limited subset of redcap fields.\n    Most fields won't be found.\n\n    '''\n    try:\n        c = DataField.objects.filter(name__iexact = field_name)[0]\n        c.description = annotation\n        c.save()\n        print('--> Annotated field {}'.format(field_name))\n    except Exception as E:\n        pass\n        ## print('Error while annotating {}: {}'.format(field_name, E))\n\n\ndef load_annotation(filename):\n    with open(filename, 'r') as csvfile:\n        reader = csv.DictReader(csvfile, delimiter = '\\t')\n        for row in reader:\n            try:\n                field_name = row['field_label']\n                ep = float(row['EP'])\n                ea 
=float(row['EA'])\n                if (ep + ea ) > 0:\n                    prct_prov = round(100*(ep / (ep + ea)), 1)\n                    prct_miss = 100 - prct_prov\n                    prct_prov_str = str(prct_prov)\n                    prct_miss_str = str(prct_miss)\n                else:\n                    prct_prov_str = 'NA'\n                    prct_miss_str = 'NA'\n\n                annotation = 'Data completeness: (Provided | Missing | % Provided | % Missing) = ({} | {} | {}% | {}%)'.format(row['EP'], row['EA'], prct_prov_str, prct_miss_str)\n                annotate_datafield(field_name, annotation)\n            except Exception as Ex:\n                print('Error while building annotation for {}'.format(field_name))\n                print(Ex)\n\n\nclass Command(BaseCommand):\n    \"\"\"Annotate concept based on external file\n\n    \"\"\"\n    help = 'Annotate data concept'\n\n    option_list = BaseCommand.option_list + (\n        make_option(\n            '--stats_file',\n            dest='stats_file',\n            help='File containing stats on fields.',\n        ),\n    )\n\n    def handle(self, *args, **options):\n        print('Start...')\n        filename = options['stats_file']\n        load_annotation(filename)\n        print('Done.')\n","sub_path":"ibemc/management/commands/load_annotation.py","file_name":"load_annotation.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"7750028","text":"'''Old LAS Parser'''\n\nimport re\nimport os\nfrom collections import deque\n\n\ndef getSection(match):\n    '''get section name from first Upper character'''\n    return {\n        'A': 'ascii',\n        'C': 'curve',\n        'V': 'version',\n        'W': 'well',\n        'P': 'parameter',\n        'O': 'other'\n    }[match]\n\n\nparameterRule = re.compile(r'([^\\\.]*)\\\.([^\\\s]*)\\\s*([^:]*):([^\\\n]*)')\n\n\ndef parseLAS(lines):\n    '''Pass in raw las file lines'''\n    sep = None\n    version = None\n    wrap = None\n    strt = None\n    stop = None\n    step = None\n    null = None\n    curves = []\n    currentSection = None\n    for i, line in enumerate(lines):\n\n        # Check for Section Delimiter Character ~\n        if line.strip().startswith('~'):\n            try:\n                currentSection = getSection(line.strip()[1:2].upper())\n            except KeyError:\n                raise LASParseError(\n                    \"Unknown Section: {} at Line#: {}\".format(line.strip(), i))\n        if line.strip().startswith('#') or currentSection == 'other':\n            yield('comment', line)\n        elif currentSection != 'ascii':\n            match = parameterRule.match(line)\n            if match:\n                # Split common line format into pieces and clean\n                parameter, unit, value, description = map(\n                    str.strip, match.groups())\n                if version is not None and version < 2:\n                    value, description = description, value\n                if currentSection == 'version':\n                    if parameter.upper() == 'WRAP':\n                        wrap = value\n                    elif parameter.upper() == 'VERS':\n                        # Try to float value so we can compare it numerically\n                        try:\n                            version = float(value)\n                        except ValueError:\n                            version = value\n                    elif parameter.upper() == 'SEP':\n                        sep = value\n                elif currentSection == 'well':\n                    if parameter.upper() == 'STRT':\n                        strt = value\n                    elif parameter.upper() == 'STOP':\n                        stop = value\n                    elif parameter.upper() == 'STEP':\n                        step = value\n                    elif parameter.upper() == 'NULL':\n                        null = value\n                elif currentSection == 'curve':\n                    # build list so we can use these later\n                    curves.append(parameter.strip())\n                yield(currentSection, (parameter, unit, value, description))\n        else:\n            # handle ascii block\n            firstLine = True\n            for i, line in enumerate(lines, i):\n                if sep is None:\n                    values = line.split()\n                else:\n                    values = line.split(sep)\n                if len(values) != len(curves):\n                    raise LASParseError(\"Mismatch Length of Curves: {} and Values: {} for Line#: {}\".format(\n                        values[0], curves[0], line))\n                else:\n                    if firstLine:\n                        firstLine = False\n                        if float(strt) != 
float(values[0]):\n if float(stop) == float(values[0]):\n raise LASParseError(\"Stop Value: {} matches First Value: {} in Reference: {} for Line#: {}\".format(\n values[0], curves[0], line))\n else:\n raise LASParseError(\"Start Value: {} does not match First Value: {} in Reference: {} for Line#: {}\".format(\n strt, values[0], curves[0], line))\n yield (currentSection, values)\n\n if float(stop) != float(values[0]):\n raise LASParseError(\"Stop Value: {} does not match Last Value: {} in Reference: {} for Line#: {}\".format(\n stop, values[0], curves[0], line))\n\n\nclass LASParseError(Exception):\n pass\n\n\nif __name__ == '__main__':\n folderPath = os.path.realpath(os.path.join(\"Development\", \"LAS\",\n \"LAS Files\"))\n\n for root, dirs, files in os.walk(folderPath, topdown=False):\n for filename in files:\n if filename.lower().endswith('.las'):\n filepath = os.path.join(root, filename)\n with open(filepath, 'r') as las_file:\n try:\n deque(parseLAS(las_file))\n # print('\\n'.join(map(str, parseLAS(lashan))))\n except LASParseError as e:\n print(e, filepath)\n # exc_type, exc_obj, tb = sys.exc_info()\n # f = tb.tb_frame\n # lineno = tb.tb_lineno\n # python_filename = f.f_code.co_filename\n # print 'EXCEPTION {}, FILE {},\n # {}'.format(type(e).__name__,filepath, e)\n","sub_path":"Development/PYTHON/LASParser.py","file_name":"LASParser.py","file_ext":"py","file_size_in_byte":5090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"311827848","text":"#!/usr/bin/env python\n\n\n# Given a singly linked list, return a random node's value from the linked list. Each node must have the same probability of being chosen.\n#\n# Follow up:\n# What if the linked list is extremely large and its length is unknown to you? Could you solve this efficiently without using extra space?\n#\n# Example:\n#\n# // Init a singly linked list [1,2,3].\n# ListNode head = new ListNode(1);\n# head.next = new ListNode(2);\n# head.next.next = new ListNode(3);\n# Solution solution = new Solution(head);\n#\n# // getRandom() should return either 1, 2, or 3 randomly. 
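A small standalone check of the parameter-line regex used by the LAS parser above; the sample LAS line is invented for the demo:

import re
# Same pattern as the parameter rule above: mnemonic . unit  value : description
rule = re.compile(r'([^\.]*)\.([^\s]*)\s*([^:]*):([^\n]*)')
m = rule.match('STRT.M      1670.000000 : START DEPTH')
if m:
    parameter, unit, value, description = map(str.strip, m.groups())
    print((parameter, unit, value, description))  # ('STRT', 'M', '1670.000000', 'START DEPTH')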
Each element should have equal probability of returning.\n# solution.getRandom();\n\n\n### Tag: Reservoir sampling ###\n\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n \n \n def __init__(self, head):\n \"\"\"\n http://www.geeksforgeeks.org/reservoir-sampling/\n \"\"\"\n self.head = head\n \n def getRandom(self):\n import random\n \n selected_item = self.head\n \n if self.head.next == None:\n return selected_item.val\n \n current = self.head.next\n index = 1\n k = 1 # only select 1 item\n \n while current:\n j = random.randint(0, index)\n \n if j < k:\n selected_item = current\n \n current = current.next\n index += 1\n \n return selected_item.val\n\n # def __init__(self, head):\n # \"\"\"\n # @param head The linked list's head.\n # Note that the head is guaranteed to be not null, so it contains at least one node.\n # :type head: ListNode\n # \"\"\"\n # p = head\n # self.items = []\n # while p:\n # self.items.append(p.val)\n # p = p.next\n #\n # def getRandom(self):\n # \"\"\"\n # Returns a random node's value.\n # :rtype: int\n # \"\"\"\n # import random\n # return random.choice(self.items)\n \n\n# Your Solution object will be instantiated and called as such:\nobj = Solution(head)\nparam_1 = obj.getRandom()","sub_path":"382_linked_list_random_node.py","file_name":"382_linked_list_random_node.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"338651701","text":"from traceback import format_exception\nfrom discord.ext import commands\nfrom glob import glob\nfrom utils import checks\n\nclass Core(commands.Cog):\n def __init__(self, potato):\n self.potato = potato\n\n @staticmethod\n def get_traceback(exception, limit=None, chain=True):\n return ''.join(format_exception(\n type(exception),\n exception,\n exception.__traceback__,\n limit=limit,\n chain=chain)\n )\n\n def get_modules_list(self):\n modules = glob(\"modules/**/**.py\", recursive=True)\n modules = [m.replace(\"/\", \".\").replace(\"modules.\", \"\").replace(\".py\", \"\") for m in modules]\n return modules\n\n def get_modules(self):\n modules = self.get_modules_list()\n new_modules = []\n for module in modules:\n if module in self.potato.settings[\"modules\"]:\n new_modules.append(\"+ \" + module)\n else:\n new_modules.append(\"- \" + module)\n return new_modules\n\n def get_full_module_name(self, name):\n modules = self.get_modules_list()\n for module in self.get_modules_list():\n if module.endswith(name):\n return module\n return name\n\n @commands.command()\n @checks.is_owner()\n async def reload(self, ctx, module_name):\n await self.unload(ctx, module_name)\n await self.load(ctx, module_name)\n\n @commands.command()\n @checks.is_owner()\n async def load(self, ctx, module_name):\n \"\"\"Load a module.\"\"\"\n module_name = self.get_full_module_name(module_name)\n try:\n self.potato.load_module(module_name)\n return await ctx.send(\"Module loaded sucessfully.\")\n except Exception as e:\n msg = 'Unable to load; the module caused a `{}`:\\n```py\\n{}\\n```'\\\n .format(type(e).__name__, self.get_traceback(e))\n return await ctx.send(msg)\n\n @commands.command()\n @checks.is_owner()\n async def unload(self, ctx, module_name):\n \"\"\"Unload a module.\"\"\"\n module_name = self.get_full_module_name(module_name)\n try:\n self.potato.unload_module(module_name)\n return await ctx.send(\"Module unloaded sucessfully.\")\n except 
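Since the docstring above points at reservoir sampling, here is a generic, hedged sketch of the same idea over any iterable (not the author's linked-list version): the i-th item replaces the current pick with probability 1/i, which leaves every item with overall probability 1/n.

import random

def reservoir_pick(iterable):
    # Keep exactly one item without knowing the length in advance.
    pick = None
    for i, item in enumerate(iterable, 1):
        if random.randint(1, i) == 1:
            pick = item
    return pick

# Rough uniformity check: each value should come up about a third of the time.
# counts = {}
# for _ in range(30000):
#     v = reservoir_pick([1, 2, 3])
#     counts[v] = counts.get(v, 0) + 1
# print(counts)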
Exception as e:\n return await ctx.send(\"Unable to load; the module isn't loaded.\")\n\n @commands.command()\n @checks.is_owner()\n async def modules(self, ctx):\n \"\"\"List modules.\"\"\"\n modules = sorted(self.get_modules())\n message = \"```diff\\n\"\n message += \"\\n\".join(modules)\n message += \"```\"\n await ctx.send(message)\n\n\ndef setup(potato):\n \"\"\"Setup the Core module.\"\"\"\n potato.setup_module(Core(potato))\n","sub_path":"modules/default/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"123049152","text":"# Copyright (c) Meta Platforms, Inc. and affiliates.\n\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nfp16 = dict(loss_scale=512.)\n# model settings\nmodel = dict(\n type='CascadeRCNN',\n pretrained=None,\n backbone=dict(\n type='ConvNeXt',\n in_chans=3,\n depths=[3, 3, 27, 3],\n dims=[128, 256, 512, 1024],\n drop_path_rate=0.6,\n layer_scale_init_value=1.0,\n out_indices=[0, 1, 2, 3],\n ),\n neck=dict(\n type='FPN',\n in_channels=[128, 256, 512, 1024],\n out_channels=256,\n num_outs=5),\n rpn_head=dict(\n type='RPNHead',\n in_channels=256,\n feat_channels=256,\n anchor_generator=dict(\n type='AnchorGenerator',\n scales=[4],\n # ratios=[0.5, 1.0, 2.0],\n ratios=[0.2, 0.5, 1.2, 3.5],\n strides=[4, 8, 16, 32, 64]),\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0]),\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n roi_head=dict(\n type='CascadeRoIHead',\n num_stages=3,\n stage_loss_weights=[1, 0.5, 0.25],\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head=[\n dict(\n type='ConvFCBBoxHead',\n num_shared_convs=4,\n num_shared_fcs=1,\n in_channels=256,\n conv_out_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=1,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2]),\n reg_class_agnostic=False,\n reg_decoded_bbox=True,\n norm_cfg=dict(type='SyncBN', requires_grad=True),\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),\n dict(\n type='ConvFCBBoxHead',\n num_shared_convs=4,\n num_shared_fcs=1,\n in_channels=256,\n conv_out_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=1,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0., 0., 0., 0.],\n target_stds=[0.05, 0.05, 0.1, 0.1]),\n reg_class_agnostic=False,\n reg_decoded_bbox=True,\n norm_cfg=dict(type='SyncBN', requires_grad=True),\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),\n dict(\n type='ConvFCBBoxHead',\n num_shared_convs=4,\n num_shared_fcs=1,\n in_channels=256,\n conv_out_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=1,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0., 0., 0., 0.],\n target_stds=[0.033, 0.033, 0.067, 0.067]),\n reg_class_agnostic=False,\n reg_decoded_bbox=True,\n norm_cfg=dict(type='SyncBN', requires_grad=True),\n loss_cls=dict(\n 
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='GIoULoss', loss_weight=10.0))\n ]),\n # model training and testing settings\n train_cfg = dict(\n rpn=dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.3,\n min_pos_iou=0.3,\n match_low_quality=True,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=256,\n pos_fraction=0.5,\n neg_pos_ub=-1,\n add_gt_as_proposals=False),\n allowed_border=0,\n pos_weight=-1,\n debug=False),\n rpn_proposal=dict(\n nms_pre=2000,\n max_per_img=2000,\n nms=dict(type='nms', iou_threshold=0.7),\n min_bbox_size=0),\n rcnn=[\n dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.3,\n neg_iou_thr=0.3,\n min_pos_iou=0.3,\n match_low_quality=False,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False),\n dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.4,\n neg_iou_thr=0.4,\n min_pos_iou=0.4,\n match_low_quality=False,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False),\n dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n match_low_quality=False,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False)\n ]),\n test_cfg=dict(\n rpn=dict(\n nms_pre=1000,\n max_per_img=1000,\n nms=dict(type='nms', iou_threshold=0.7),\n min_bbox_size=0),\n rcnn=dict(\n score_thr=0.1,\n nms=dict(type='nms', iou_threshold=0.5),\n max_per_img=100)))\n\n\n# dataset settings\ndataset_type = 'ShipDataset'\ndata_root = 'data/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n # dict(type='LoadImageFromFile'),\n # dict(type='LoadAnnotations', with_bbox=True),\n dict(type='Mosaic', img_scale=(256, 256), pad_val=0.0, prob=0.5),\n dict(type='Resize',\n img_scale=[(256, 256), (512, 512)],\n multiscale_mode='range',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.0, direction='horizontal'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n # img_scale=(512, 512),\n img_scale=[(256, 256), (384, 384), (512, 512)],\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\n\ntrain_dataset = dict(\n type='MultiImageMixDataset',\n dataset=dict(\n type=dataset_type,\n ann_file='/data/user_data/annotations/train.json',\n img_prefix='/data/raw_data/training_dataset/A/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True)\n ],\n filter_empty_gt=False,\n ),\n pipeline=train_pipeline)\n\ndata = dict(\n samples_per_gpu=4,\n workers_per_gpu=2,\n train=train_dataset,\n val=dict(\n type=dataset_type,\n ann_file='/data/user_data/annotations/train.json',\n img_prefix='/data/raw_data/training_dataset/A/',\n pipeline=test_pipeline),\n test=dict(\n type=dataset_type,\n 
ann_file='/data/user_data/annotations/testA.json',\n img_prefix='/data/raw_data/test_dataset/测试集/',\n pipeline=test_pipeline))\n\nevaluation = dict(interval=32, metric='bbox', iou_thrs=[0.5])\n\n# optimizer\noptimizer = dict(constructor='LearningRateDecayOptimizerConstructor',\n type='AdamW',\n lr=0.0002, betas=(0.9, 0.999), weight_decay=0.05,\n paramwise_cfg={'decay_rate': 0.8,\n 'decay_type': 'layer_wise',\n 'num_layers': 12})\noptimizer_config = dict(grad_clip=None)\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=0.001,\n step=[27, 33])\n\nrunner = dict(type='EpochBasedRunner', max_epochs=32)\n\ncheckpoint_config = dict(interval=32)\n# yapf:disable\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook'),\n # dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\ncustom_hooks = [dict(type='NumClassCheckHook')]\n\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = '/data/user_data/pretrained/cascade_mask_rcnn_convnext_base_22k_3x.pth'\nresume_from = None\nworkflow = [('train', 1)]\nwork_dir = '/data/user_data/train_work_dirs/cascade_convnext_base_large_scale_only_train_data_36ep_imagenet22k_pretrain_noflip_anchor4_5_3_3_4_5_ratio_mosaic'","sub_path":"configs/ship/cascade_convnext_base_large_scale_onlytraindata_noflip_anchor4_ratio_mosaic.py","file_name":"cascade_convnext_base_large_scale_onlytraindata_noflip_anchor4_ratio_mosaic.py","file_ext":"py","file_size_in_byte":10286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"456330130","text":"import matplotlib.pyplot as plt\r\nimport xlrd\r\nfrom pylab import *\r\nfrom xlrd import open_workbook\r\nx_data = []\r\ny_data = []\r\nx_volte=[]\r\ntemp=[]\r\nwb = open_workbook('my_data.xlsx')\r\n\r\nfor s in wb.sheets():\r\n print('Sheets:', s.name)\r\n for row in range(s.nrows):\r\n print('the row is:', row)\r\n values = []\r\n for col in range(s.ncols):\r\n values.append(s.cell(row,col).value)\r\n print(values)\r\n x_data.append(values[0])\r\n y_data.append(values[1])\r\n\r\nplt.plot(x_data, y_data, 'bo-', label='Phase curve', linewidth=1)\r\nplt.title('TR14 phase detector')\r\nplt.legend(loc='upper left')\r\n\r\nax = gca()\r\nax.spines['right'].set_color('none')\r\nax.spines['top'].set_color('none')\r\nax.xaxis.set_ticks_position('bottom')\r\nax.spines['bottom'].set_position(('data', 0))\r\nax.yaxis.set_ticks_position('left')\r\nax.spines['left'].set_position(('data', 0))\r\n\r\nplt.xlabel('input-deg')\r\nplt.ylabel('output-V')\r\n\r\nplt.show()\r\nprint('over!')","sub_path":"example_xlrd.py","file_name":"example_xlrd.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"444737377","text":"from flask_wtf import FlaskForm\nfrom wtforms import BooleanField, IntegerField, SelectField, SubmitField, validators\n\nclass BaseList(FlaskForm):\n base_field = SelectField('Base')\n submit = SubmitField()\n\nclass CommodList(FlaskForm):\n commo = SelectField('Commodity')\n submit = SubmitField()","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"276892109","text":"# -*- coding: utf-8 -*-\n\nimport psycopg2\nfrom datetime import datetime\nimport requests\nimport json\nimport time\nimport sys\n\nconn = psycopg2.connect(\"dbname=postgres user=postgres password=Ef9iei!!\")\n\n\n\n\n\ndef main():\n 
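For reference, the row-by-row loop in the xlrd plotting example above can be condensed with col_values; a sketch assuming the same two-column my_data.xlsx layout:

from xlrd import open_workbook
wb = open_workbook('my_data.xlsx')  # same workbook as in the example above
sheet = wb.sheets()[0]
x_data = sheet.col_values(0)  # entire first column
y_data = sheet.col_values(1)  # entire second column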
# Determine the stop date\n    cur = conn.cursor()\n\n    cur.execute(\"select \\\"VALUE\\\" from SETTINGS where \\\"PARAMETER\\\" = 'MIN_DATE';\")\n\n    date_str = str(cur.fetchone())[2:12]\n\n    datetime_object = datetime.strptime(date_str, '%Y-%m-%d')\n\n    unix_stop_time = time.mktime(datetime_object.timetuple())\n\n    cur.execute(\"select \\\"match_id\\\" from bot_matches\")\n\n    db_matches_temp = cur.fetchall()\n    db_matches = []\n\n    for db_match in db_matches_temp:\n        db_matches.append(int(str(db_match)[1:-2]))\n\n    while True:\n\n        cur.execute(\"select \\\"VALUE\\\" from SETTINGS where \\\"PARAMETER\\\" = 'LAST_RECORDED_MATCH';\")\n\n        try:\n            LAST_RECORDED_MATCH = cur.fetchone()[0]\n        except TypeError:\n            LAST_RECORDED_MATCH = None\n\n        if LAST_RECORDED_MATCH is None:\n\n            matches_opendota = requests.get('https://api.opendota.com/api/proMatches/')\n\n            mode = 'continue'\n\n        else:\n\n            matches_opendota = requests.get('https://api.opendota.com/api/proMatches?less_than_match_id=' + LAST_RECORDED_MATCH )\n\n        # Decode the JSON response\n        matches_json = json.loads(matches_opendota.text)\n\n        for match in matches_json:\n\n            # If this match is already in the database, skip it\n\n\n\n            if match['match_id'] in db_matches:\n                print(\"Skipped \" + str(match['match_id']))\n\n                cur.execute(\"update settings set \\\"VALUE\\\" = '\" + str(\n                    match['match_id']) + \"' where \\\"PARAMETER\\\" = 'LAST_RECORDED_MATCH'\")\n\n                conn.commit()\n\n                continue\n\n            #print(str(match['start_time'])+ \" \" + str(unix_stop_time) )\n\n            if match['start_time']< unix_stop_time:\n                cur.execute(\"update settings set \\\"VALUE\\\" = NULL where \\\"PARAMETER\\\" = 'LAST_RECORDED_MATCH'\")\n                conn.commit()\n                sys.exit(\"Download complete\")\n\n            try:\n                dire_team_id = str(match['dire_team_id'])\n\n                if dire_team_id == \"None\":\n                    dire_team_id = \"NULL\"\n\n            except KeyError:\n                dire_team_id = \"NULL\"\n\n            try:\n                radiant_team_id = str(match['radiant_team_id'])\n\n                if radiant_team_id == \"None\":\n                    radiant_team_id = \"NULL\"\n\n            except KeyError:\n                radiant_team_id = \"NULL\"\n\n\n            try:\n                dire_team_name = str(match['dire_name'])\n\n                dire_team_name = \"'\" + dire_team_name.replace(\"'\", \"''\") + \"'\"\n\n            except KeyError:\n                dire_team_name = \"NULL\"\n\n            try:\n                radiant_team_name = str(match['radiant_name'])\n\n                radiant_team_name = \"'\" + radiant_team_name.replace(\"'\", \"''\") + \"'\"\n\n            except KeyError:\n                radiant_team_name = \"NULL\"\n\n\n\n            cur.execute(\"INSERT INTO public.BOT_MATCHES\"\n                        \"(MATCH_ID, DURATION, RADIANT_SCORE, RADIANT_WIN, DIRE_TEAM_ID, DIRE_TEAM_NAME, RADIANT_TEAM_ID, RADIANT_TEAM_NAME, START_TIME) \"\n                        \"VALUES (\"\n                        + str(match['match_id']) + \",\"\n                        + str(match['duration']) + \",\"\n                        + str(match['radiant_score']) + \",\"\n                        + str(int(match['radiant_win'])) + \",\"\n                        + dire_team_id + \",\"\n                        + dire_team_name + \",\"\n                        + radiant_team_id + \",\"\n                        + radiant_team_name + \",\"\n                        + str(match['start_time']) +\")\"\n\n                        )\n\n            cur.execute(\"update settings set \\\"VALUE\\\" = '\"+ str(match['match_id']) + \"' where \\\"PARAMETER\\\" = 'LAST_RECORDED_MATCH'\")\n            conn.commit()\n            LAST_RECORDED_MATCH = match['match_id']\n\n            match_opendota = requests.get('https://api.opendota.com/api/matches/' + str(match['match_id']))\n            match_json = json.loads(match_opendota.text)\n\n\n            #startapp_players\n\n            try:\n                for player in match_json['players']:\n                    cur.execute(\"INSERT INTO public.BOT_PLAYERS\"\n                                \"(MATCH_ID, ACCOUNT_ID, ASSISTS, DEATHS, GOLD_PER_MIN, HERO_ID, KILLS, XP_PER_MIN, WIN, LOSE, NAME, ISRADIANT, HERO_DAMAGE) \"\n                                \"VALUES (\"\n                                + 
str(match['match_id']) + \",\"\n                                + str(player['account_id']) + \",\"\n                                + str(player['assists']) + \",\"\n                                + str(player['deaths']) + \",\"\n                                + str(player['gold_per_min'])+ \",\"\n                                + str(player['hero_id']) + \",\"\n                                + str(player['kills']) + \",\"\n                                + str(player['xp_per_min']) + \",\"\n                                + str(player['win']) + \",\"\n                                + str(player['lose']) + \",\"\n                                + \"'\" + str(player['name']) + \"',\"\n                                + str(int(player['isRadiant'])) + \",\"\n                                + str(player['hero_damage']) +\")\"\n\n                                )\n                    conn.commit()\n\n            except TypeError:\n                continue\n\n\n\n\n            conn.commit()\n\n            print(\"Recorded \" + str(match['match_id']))\n\n\n\n\n    cur.close()\n\n    conn.close()\n\ndef some_function():\n    try:\n        main()\n        return True\n    except (KeyError, TypeError):\n        main()\n        return False\n\n\nwhile True:\n    if some_function():\n        break\n    else:\n        time.sleep(5)\n        continue\n\n\n","sub_path":"0 Get Historic Data Matches.py","file_name":"0 Get Historic Data Matches.py","file_ext":"py","file_size_in_byte":5985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"68676162","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom libs.con import Con\nimport pdb\n\nCATEGORY_PARAM='category'\nPAGE_PARAM='page'\nCHUNK=settings.PRODUCTS_CHUNK\n\nclass Base:\n\n\tdef __init__(self):\n\t\t# Making connection to Odoo server\n\t\tself._con = Con(settings.ODOO_URL, settings.ODOO_DB, settings.ODOO_USERNAME, settings.ODOO_PASSORD)\n\n\t\"\"\"\n\t\t@Description:\n\t\t\tGet product categories\n\t\t@Input:\n\t\t\troot_url:\n\t\t@Output:\n\t\t\tcates = [{'id': ,'name':, 'url':, \n\t\t\t\t\t\t'child': [ {'id': , 'name':, 'url':}]}]\n\t\"\"\"\n\tdef get_product_cates(self, root_url='/'):\n\t\t# if a 'Saleable' product category exists, use it; otherwise get all categories\n\t\t# We will get two levels\n\t\tcate_ids = []\n\t\tsaleable_cate_id = self._con.execute('product.category', 'search',[[['name', '=', 'Saleable']]])\n\t\tif saleable_cate_id:\n\t\t\tcate_ids = self._con.execute('product.category', 'search',[[['parent_id', '=', saleable_cate_id[0]]]])\n\t\telse:\n\t\t\t# get all categories from Odoo\n\t\t\tcate_ids = self._con.execute('product.category', 'search',[[]])\n\t\tcates = self._con.execute('product.category', 'read',[cate_ids], {'fields': ['name']})\n\t\tfor cate in cates:\n\t\t\turl = root_url + \"%s/%s\" % (CATEGORY_PARAM, str(cate['id']))\n\t\t\tcate['url'] = url\n\t\t\tchild_cate_ids = self._con.execute('product.category', 'search',[[['parent_id', '=', cate['id']]]])\n\t\t\tif child_cate_ids:\n\t\t\t\tchild_cates = self._con.execute('product.category', 'read',[child_cate_ids], {'fields': ['name']})\n\t\t\t\tfor child_cate in child_cates:\n\t\t\t\t\turl = root_url + \"%s/%s\" % (CATEGORY_PARAM, str(cate['id']))\n\t\t\t\t\tchild_cate['url'] = url\n\t\t\t\tcate['child'] = child_cates\n\t\treturn cates\n\n\tdef get_product_ids(self, categ_id=None, offset=None, limit=None, sale_ok=True, order=\"id DESC\"):\n\t\tparams = [['sale_ok', '=', sale_ok]]\n\t\toffset_par = {\"order\":order}\n\t\tif offset:\n\t\t\toffset_par['offset'] = offset\n\t\tif limit:\n\t\t\toffset_par['limit'] = limit \n\t\tif categ_id:\n\t\t\tparams.append(['categ_id', '=', categ_id])\n\t\tproduct_ids = self._con.execute('product.template', 'search', [params], offset_par)\n\t\treturn product_ids\n\n\t\"\"\"\n\t\t@Description:\n\t\t\tget products by id. 
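The OpenDota loader above interpolates values into SQL by hand (including manual quote doubling); a hedged sketch of the parameterized form psycopg2 supports, which delegates quoting to the driver (cur and match are assumed to exist as in that script, and only a few columns are shown):

cur.execute(
    "INSERT INTO public.BOT_MATCHES (MATCH_ID, DURATION, RADIANT_SCORE, RADIANT_WIN) "
    "VALUES (%s, %s, %s, %s)",
    (match['match_id'], match['duration'], match['radiant_score'], int(match['radiant_win'])),
)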
If fields has 'product_multi_images_ids', it will get images of product\n\t\t@Output:\n\t\t\tproducts = [{'id': ,'product_multi_images_ids': }]\n\t\"\"\"\n\tdef get_products(self, product_ids, fields):\n\t\ttry:\n\t\t\tfields.index('product_multi_images_ids')\n\t\t\tneed_images = True\n\t\texcept:\n\t\t\tneed_images = False\n\t\tproducts = self._con.execute('product.template', 'read',[product_ids], {'fields': fields})\n\t\tif need_images:\n\t\t\tfor product in products:\n\t\t\t\tmulti_images = self._con.execute('product.multi.images', 'read',[product['product_multi_images_ids']], {'fields': ['name', 'image']})\n\t\t\t\tproduct['product_multi_images_ids'] = multi_images\n\t\treturn products\n\nclass HomeView(Base):\n\n\t\"\"\"\n\t\t@Description:\n\t\t\tget products and group by category\n\t\t@Input:\n\t\t\tlimit: limit number of products is showed on one line\n\t\t@Output:\n\t\t\tproducts_group = [{'id': , 'name': name of categ, 'url':, 'products': [{ 'id':, ..}]}]\n\t\"\"\"\n\tdef get_products_group(self, categ_num=5, limit=CHUNK):\n\t\tproduct_fields = ['name', 'list_price', 'product_multi_images_ids']\n\t\tcates = self.get_product_cates()\n\t\tactive = True\n\t\tfor cate in cates[0:categ_num-1]:\n\t\t\tproduct_ids = self.get_product_ids(cate['id'], 0, limit)\n\t\t\tcate['products'] = self.get_products(product_ids, product_fields)\n\t\t\tcate['active'] = active\n\t\t\tif active:\n\t\t\t\tactive = False\n\t\treturn cates\n\n\tdef get_recommened_products(self, limit=20, chunk=CHUNK):\n\t\trecommened_products= []\n\t\tproduct_ids = self.get_product_ids(limit=limit)\n\t\tproducts = self.get_products(product_ids, ['name', 'list_price', 'product_multi_images_ids'])\n\t\tchunk_size = len(products)/chunk\n\t\tanchor = 0\n\t\tfor i in range(chunk_size):\n\t\t\tgroup = {'id':i, 'active':False}\n\t\t\tif i == 0:\n\t\t\t\tgroup['active'] = True\n\t\t\tif i == chunk_size -1:\n\t\t\t\tgroup['products'] = products[anchor:]\n\t\t\telse:\n\t\t\t\tgroup['products'] = products[anchor:anchor + 3]\n\t\t\tanchor +=3\n\t\t\trecommened_products.append(group)\n\t\treturn recommened_products\n\n\t# Create your views here.\n\tdef shop(self, request, categ_id=None):\n\t\tif categ_id and int(categ_id) < 0:\n\t\t\tcateg_id = None\n\t\telse:\n\t\t\tcateg_id = int(categ_id)\n\t\tproduct_ids = self.get_product_ids(categ_id, 0, CHUNK)\n\t\tproducts = self.get_products(product_ids, ['name', 'list_price', 'product_multi_images_ids'])\n\n\t\t# get total number of products\n\t\tproducts_num = len(self.get_product_ids(categ_id))\n\t\tcontext = {'products':products,\n\t\t\t\t\t# get categories\n\t\t\t\t\t'cates': self.get_product_cates(),\n\t\t\t\t\t'load_more': {'total_products': products_num, 'categ_id':categ_id, 'chunk': CHUNK},\n\t\t\t\t\t}\n\t\treturn render(request, 'goldtree/shop.html', context)\n\n\t# Create your views here.\n\tdef index(self, request):\n\t\toffset = int(request.GET.get('page',1))\n\t\tcateg_id = request.GET.get('category', -1)\n\t\tif categ_id and int(categ_id) < 0:\n\t\t\tcateg_id = None\n\t\telse:\n\t\t\tcateg_id = int(categ_id)\n\t\tif int(offset) < 1:\n\t\t\toffset = 1\n\t\t#Offset begin with 0 in the coding but interface will show 1\n\t\toffset = offset - 1\n\t\tproduct_ids = self.get_product_ids(categ_id, offset, 6)\n\t\tproducts = self.get_products(product_ids, ['name', 'list_price', 'product_multi_images_ids'])\n\n\t\t#get products which are grouped by category\n\t\tproducts_group = self.get_products_group(5, 4)\n\n\t\t#get recommened products\n\t\trecommened_products = 
self.get_recommened_products(9)\n\n\t\tcontext = {\n\t\t\t\t'products':products,\n\t\t\t\t'cates': self.get_product_cates(),\n\t\t\t\t'products_group': products_group,\n\t\t\t\t'recommened_products': recommened_products,\n\t\t\t\t}\n\t\treturn render(request, 'goldtree/index.html', context)\n","sub_path":"goldtree/views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"189131726","text":"import unittest\nimport distance_calculator as calculator\n\n\nclass EuclideanDistanceTestSuite(unittest.TestCase):\n def setUp(self):\n self.calculator = calculator.EuclideanDistance()\n\n def test_should_return_correct_distance(self):\n instance1 = [6, 5]\n instance2 = [1, 1]\n weights = [3, 2]\n correct_result = 10.344\n\n self.assertAlmostEqual(self.calculator.calculate(instance1, instance2, weights), correct_result, delta=0.001)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"euclidean_distance_test.py","file_name":"euclidean_distance_test.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"553507892","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport streamlit as st\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier#\nfrom sklearn.metrics import classification_report\n\nst.title(\"Machine learning and medical diagnosis\")\n\ndf = pd.read_csv('data/BRCA.csv')\ndata = df.copy()\nst.write(\n \"This dataset consists of a group of breast cancer patients, who had surgery to remove their tumour. The dataset consists of the following variables:\")\nst.write(data.head())\n\n# Data and feature engineering\ndf = df.drop([334, 335, 336, 337, 338, 339, 340])\ndata = data.drop([334, 335, 336, 337, 338, 339, 340])\ndf = df.drop('Date_of_Last_Visit', axis=1)\ndf['Patient_Status'] = df['Patient_Status'].replace('Dead', 'Deceased')\n\nohe = pd.get_dummies(data[['Gender', 'Tumour_Stage', 'Histology', 'ER status', 'PR status',\n 'HER2 status', 'Surgery_type']])\n\ndata = data.drop(['Gender', 'Patient_ID', 'Tumour_Stage', 'Histology', 'ER status', 'PR status',\n 'HER2 status', 'Surgery_type', 'Date_of_Surgery', 'Date_of_Last_Visit'], axis = 1)\n\nle = LabelEncoder()\ndata['patient_status'] = le.fit_transform(data['Patient_Status'])\ndata = data.drop('Patient_Status', axis = 1)\n\ndata = data.join(ohe)\n\ndata['er_status_positive'], data['pr_status_positive'], data['her2_status_negative'], data['her2_status_positive'] = \\\n data['ER status_Positive'], data['PR status_Positive'], data['HER2 status_Negative'], data['HER2 status_Positive']\n\ndata = data.drop(['ER status_Positive',\n 'PR status_Positive', 'HER2 status_Negative', 'HER2 status_Positive'], axis = 1)\ndata = data.drop(['er_status_positive', 'pr_status_positive'], axis = 1)\n\ncut = data[data['patient_status'] <= 1]\ndata = cut\n\n# Missing data\nst.title(\"Reviewing our dataset\")\nst.write(\"Let's explore our data to see if anything is missing. Before any analysis can begin we need to ensure data is of sufficient quality. 
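The anchor arithmetic in get_recommened_products above is the usual list-chunking pattern; a generic, hedged sketch:

def chunked(items, size):
    # Yield consecutive slices of at most `size` elements.
    for start in range(0, len(items), size):
        yield items[start:start + size]

print(list(chunked([1, 2, 3, 4, 5, 6, 7], 3)))  # [[1, 2, 3], [4, 5, 6], [7]]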
As the strength of our prediction will be reflected in the quality of our data!\")\n\ndef missingdata():\n    plt.figure(figsize=(10, 10), dpi = 250)\n    g = sns.heatmap(data.isnull(), cmap='RdBu')\n    g.set_xlabel(\"Features\")\n    g.set_ylabel(\"Index\")\n    g.set_title('Missing data by feature')\n    st.pyplot(g.figure)\n\nmissingdata()\n\nst.write(\"Great, no data is missing!\")\n\n\n# Univariate analysis\n\nst.text(\"\")\nst.write(\n    \"From these data we want to predict the patient status (aka the target variable). Let's look at the target variable in detail:\")\n\n\ndef countplot ():\n    plt.figure(figsize = (10, 7.5), dpi = 250)\n    p = sns.countplot(df['Patient_Status'], palette = 'Paired')\n    p.set_xlabel(\"Patient status\")\n    p.set_ylabel(\"Count\")\n    st.pyplot(p.figure)\n\n\ncountplot()\n\nst.text(\"\")\nst.title(\"Basic descriptive analysis\")\nst.write(\"Using basic descriptive statistics we can generate basic insights into our data!\")\nst.write(data.describe())\n\n# Correlation analysis\nst.text(\"\")\nst.write(\n    \"To explore how our data correlates we can call the Pandas internal correlation function. This function takes three method arguments, so feel free to explore how correlations change per method!\")\nmethods = ['Spearman', 'Pearson', 'Kendall']\nselection = st.selectbox('Please select correlation method:', methods)\n\nif selection == 'Pearson':\n    i = 'pearson'\nelif selection == 'Spearman':\n    i = 'spearman'\nelse:\n    i = 'kendall'\n\nst.write(data.corr(method = i))\n\n# Model development\nst.write(\n    \"After some basic data and feature engineering (that I'll spare you from!) we can start building a basic ML model to set our baseline performance. First, we must define our training set:\")\n\nX = data.drop('patient_status', axis = 1)\ny = data['patient_status']\n\nst.write(X)\nst.text(\"\")\nst.write(\"And the target variable we are trying to predict:\")\nst.write(y)\n\nst.write(\"Now that we have our data defined, we'll use a selection of models and see which performs best out of the box! For this we'll need classification algorithms, let's see how they perform!\")\n\nalgorithms = ['Logistic regression', 'Kneighbours classifier', 'Random Forest Classifier']\nselection = st.selectbox('Please select classification algorithm:', algorithms)\n\nif selection == 'Logistic regression':\n    i = LogisticRegression\nelif selection == 'Kneighbours classifier':\n    i = KNeighborsClassifier\nelse:\n    i = RandomForestClassifier\n#\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 101)\n\n\ndef modeldev(i, j, k, l, m):\n    model = i()\n    model.fit(j, k)\n    y_pred = model.predict(l)\n    rep = classification_report(m, y_pred, output_dict = True)\n    rep = pd.DataFrame(rep)\n    rep = rep.drop('support', axis=0)\n    st.write(rep)\n\nmodeldev(i, X_train, y_train, X_test, y_test)\n\nst.write(\"Here we are, results from our trained model!\")\n\n\n\n\n# st.write(\n#     \"One parameter we can look to change is K, the value representing the count of nearest neighbours, and its value is vital to developing a model with good classification capability. 
\")\n\n\n# error_rate = []\n# for i in range(1,40):\n# knn = KNeighborsClassifier(n_neighbors=i)\n# knn.fit(X_train,y_train)\n# pred_i = knn.predict(X_test)\n# error_rate.append(np.mean(pred_i != y_test))\n#\n# optimalk = pd.DataFrame({\n# 'k': range(1,40),\n# 'error_rate': error_rate\n# })\n# plt.figure(figsize = (10, 10), dpi = 200)\n# plt.title(\"Error rate by value of K\")\n# plt.ylabel(\"Error rate\")\n# plt.xlabel(\"Value of K\")\n# p = sns.lineplot(range(1, 40), optimalk['error_rate'], markers = True)\n# st.text(\"\")\n# st.pyplot(p.figure)\n# st.write(\"As we can see the minimum error is: {} at K = {}\".format(min(error_rate), error_rate.index(min(error_rate))))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"301964992","text":"def solution(A):\n cnt = {}\n for num in A:\n key_ = str(num)\n if not (key_ in cnt):\n cnt[key_] = 1\n else:\n cnt[key_] += 1\n result = list(cnt.items())\n for item in result:\n if item[1] % 2 == 1:\n return int(item[0])","sub_path":"jinkyuhan/codility_lesson2_OddOccurrencesInArray.py","file_name":"codility_lesson2_OddOccurrencesInArray.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"296541180","text":"# -*- coding:utf-8 -*-\n\"\"\"\n@author: lijingxin\n@contact: lijingxin666@gmail.com\n@site: https://github.com/lijingxin666\n@time: Created on 5:28 PM 5/7/20\n\nQuestion: \n\n\"\"\"\nfrom BinarySearchTree import BinarySearchTree\nfrom BinarySearchTree import Node\nfrom F14031IterativeGet import AdvBST1\nfrom F14032IterativeAdd import AdvBST2\n\nclass AdvBST3(AdvBST2):\n # # Traversal Methods\n # def print_inorder(self):\n # self._print_inorder(self._root)\n # print('')\n #\n # def _print_inorder(self, node):\n # if (node is None):\n # return\n # self._print_inorder(node._left)\n # print('[', node._item, ']', end=\" \")\n # self._print_inorder(node._right)\n\n def printInorderIterative(self):\n node = self._root\n stack = []\n\n while True:\n while (node is not None): # 退出的时候 说明找到最左下的点了\n stack.append(node)\n node = node._left\n if len(stack) == 0: # stack为空 退出\n return\n # stack 不为空 pop 及 打印, node往右\n node = stack.pop()\n print('[', node._item, ']', end=\" \")\n node = node._right\n\nbst = AdvBST3()\nnumbers = [6, 4, 8, 7, 9, 2, 1, 3, 5, 13, 11, 10, 12]\nfor i in numbers:\n bst.add(i)\n#bst.print_inorder()\nbst.printInorderIterative()","sub_path":"Algorithm_PY/ch14/F14033IterativeInorderTraversal.py","file_name":"F14033IterativeInorderTraversal.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"272633513","text":"from keypoints import part_names, pose_chain, part_name_to_id_map\nfrom utils import (get_image_coords, clamp, add_vectors, get_offset_point,\n within_radius_of_corresponding_point)\n\n\nparent_children_id_tuples = [\n (part_name_to_id_map[parent_joint_name], part_name_to_id_map[child_joint_name])\n for parent_joint_name, child_joint_name in pose_chain\n]\nparent_to_child_edges = tuple([t[1] for t in parent_children_id_tuples])\nchild_to_parent_edges = tuple([t[0] for t in parent_children_id_tuples])\n\n\ndef decode_pose(root, heatmap_scores, offsets, output_stride, displacements_fwd, displacements_bwd):\n num_parts = heatmap_scores.shape[2]\n num_edges = len(parent_to_child_edges)\n # TODO: check if this is 
buggy\n instance_keypoints = [None] * num_parts\n\n # Start the new detection instance at the position of root.\n root_score, root_part = -root[0], root[1] # The `-` is due to python not having a max heap 🤮\n root_point = get_image_coords(root_part, output_stride, offsets)\n instance_keypoints[root_part['keypoint_id']] = {\n 'score': root_score, 'part': part_names[root_part['keypoint_id']], 'position': root_point\n }\n\n # Decode the part positions upwards in the tree, following the backward displacements.\n # TODO: This is absolutely disgusting code, please rewrite.\n for edge in reversed(range(num_edges)):\n # TODO: have some doubts bout this code\n source_keypoint_id = parent_to_child_edges[edge]\n target_keypoint_id = child_to_parent_edges[edge]\n if instance_keypoints[source_keypoint_id] and not instance_keypoints[target_keypoint_id]:\n instance_keypoints[target_keypoint_id] = traverse_to_target_keypoint(\n edge, instance_keypoints[source_keypoint_id], target_keypoint_id, heatmap_scores,\n offsets, output_stride, displacements_bwd\n )\n\n for edge in range(num_edges):\n source_keypoint_id = child_to_parent_edges[edge]\n target_keypoint_id = parent_to_child_edges[edge]\n if instance_keypoints[source_keypoint_id] and not instance_keypoints[target_keypoint_id]:\n instance_keypoints[target_keypoint_id] = traverse_to_target_keypoint(\n edge, instance_keypoints[source_keypoint_id], target_keypoint_id, heatmap_scores,\n offsets, output_stride, displacements_fwd\n )\n\n return instance_keypoints\n\n\ndef traverse_to_target_keypoint(edge_id, source_keypoint, target_keypoint_id, heatmap_scores,\n offsets, output_stride, displacements):\n\n height, width, _ = heatmap_scores.shape\n\n # Nearest neighbor interpolation for the source->target displacements.\n source_keypoint_indices = get_strided_index_near_point(\n source_keypoint['position'], output_stride, height, width\n )\n\n # Get displacement vector located at our source_keypoint's heatmap x & y coordinates\n displacement = get_displacement(edge_id, source_keypoint_indices, displacements)\n\n # Add that vector to out source_keypoint\n displaced_point = add_vectors(source_keypoint['position'], displacement)\n\n # Get the heatmap x & y coordinates for the resulting vector\n displaced_point_indices = get_strided_index_near_point(displaced_point, output_stride,\n height, width)\n\n # Find the offset vector of said coordinates\n # TODO: Shouldn't we be using Hough voting here??\n offset_point = get_offset_point(displaced_point_indices['y'], displaced_point_indices['x'],\n target_keypoint_id, offsets)\n\n # Add it to our previously displayed point to find our target_keypoint!\n target_keypoint = add_vectors(\n {'x': displaced_point_indices['x'] * output_stride,\n 'y': displaced_point_indices['y'] * output_stride},\n offset_point # TODO: I refactored it a bit here, check in case something fails\n )\n\n score = heatmap_scores[\n displaced_point_indices['y'], displaced_point_indices['x'], target_keypoint_id\n ]\n\n return {'position': target_keypoint, 'part': part_names[target_keypoint_id], 'score': score}\n\n\ndef get_strided_index_near_point(point, output_stride, height, width):\n # TODO: Isn't this clamp unnecesary?\n return {\n 'y': int(clamp(round(point['y'] / output_stride), 0, height - 1)),\n 'x': int(clamp(round(point['x'] / output_stride), 0, width - 1))\n }\n\n\ndef get_displacement(edge_id, point, displacements):\n num_edges = int(displacements.shape[2] / 2) # TODO: convert to int?\n return {\n 'y': displacements[point['y'], point['x'], 
edge_id],\n        'x': displacements[point['y'], point['x'], num_edges + edge_id]\n    }\n\n\ndef get_instance_score(existing_poses, squared_nms_radius, instance_keypoints):\n    # TODO is this generated score used at all? (Maybe I added some bugs here).\n    not_overlapped_keypoint_scores = 0.0\n    for keypoint_id, keypoint in enumerate(instance_keypoints):\n        if not within_radius_of_corresponding_point(existing_poses, squared_nms_radius,\n                                                    keypoint['position'], keypoint_id):\n            not_overlapped_keypoint_scores += keypoint['score']\n\n    return not_overlapped_keypoint_scores / len(instance_keypoints)\n","sub_path":"decode_pose.py","file_name":"decode_pose.py","file_ext":"py","file_size_in_byte":5318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"373257486","text":"#Create layers for Finding Point and Line Features in Water Bodies and Water smaller than 1.4 acres\r\n#6/9/2014\r\n\r\nimport arcpy\r\narcpy.env.overwriteOutput = True\r\n\r\ninpolyFC = arcpy.GetParameterAsText (0) #Input Soil Polygons\r\ninsfptsFC = arcpy.GetParameterAsText(1) # Input Special Feature Points\r\ninsflnFC = arcpy.GetParameterAsText(2) #Input Special Feature Lines\r\n#MUSYM = arcpy.GetParameterAsText (3)# Choose MUSYM attribute\r\nAcres = arcpy.GetParameterAsText (3) #Choose Acre Size\r\nworkspace = arcpy.GetParameterAsText (4)# Choose Workspace\r\n\r\n\r\n#Add Field\r\n\r\narcpy.AddField_management(inpolyFC, \"ACRES\", \"DOUBLE\",)\r\n\r\n#Calculate Field\r\n\r\narcpy.CalculateField_management(inpolyFC, \"ACRES\", '!Shape.area@ACRES!', \"PYTHON_9.3\")\r\n\r\n\r\n#Select all Water bodies\r\narcpy.SelectLayerByAttribute_management (inpolyFC, \"NEW_SELECTION\", \" MUSYM = 'W' \")\r\n#Make a layer from the feature class\r\narcpy.MakeFeatureLayer_management (inpolyFC, \"soil_w_lyr\")\r\n\r\n#Select points in Water polygons\r\narcpy.SelectLayerByLocation_management (insfptsFC, \"COMPLETELY_WITHIN\", \"soil_w_lyr\", \"\", \"ADD_TO_SELECTION\")\r\n\r\n#Write the selected features to a new featureclass\r\narcpy.CopyFeatures_management(insfptsFC, workspace+'\\\\'+\"SFP_in_W\")\r\n\r\n#Select Lines in Water polygons\r\narcpy.SelectLayerByLocation_management (insflnFC, \"INTERSECT\", \"soil_w_lyr\", \"\", \"ADD_TO_SELECTION\")\r\n\r\n#Export the selected features to a new featureclass\r\narcpy.CopyFeatures_management(insflnFC, workspace+'\\\\'+\"SFL_in_W\")\r\n\r\n#Select water polygons smaller than the user-supplied Acres threshold\r\narcpy.SelectLayerByAttribute_management (\"soil_w_lyr\", \"NEW_SELECTION\", \"ACRES < \" + Acres)\r\n\r\n#Export the selected features to a new featureclass\r\narcpy.CopyFeatures_management(\"soil_w_lyr\", workspace+'\\\\'+\"Small_W\")\r\n\r\n#Clear Selected Features\r\narcpy.SelectLayerByAttribute_management (inpolyFC, \"CLEAR_SELECTION\")\r\narcpy.SelectLayerByAttribute_management (insfptsFC, \"CLEAR_SELECTION\")\r\narcpy.SelectLayerByAttribute_management (insflnFC, \"CLEAR_SELECTION\")","sub_path":"geo_surreal/create_layers_finding_pt_ln_musym_acres_size_06122014.py","file_name":"create_layers_finding_pt_ln_musym_acres_size_06122014.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"371706523","text":"\nfrom PyQt5 import QtWidgets\n\nfrom Presenter import Presenter\nfrom Model import Model\nfrom View import View\n\nclass Setup(object):\n    \n    def __init__(self):\n        # Creating Model and View\n        self.__view = View()\n        self.__model = Model()\n\n        # Creating Presenter\n        
self.__presenter = Presenter(self.__model, self.__view)\n \n def show_window(self):\n self.__view.show()\n\n\nif __name__ == '__main__':\n import sys\n app = QtWidgets.QApplication(sys.argv)\n\n manager = Setup()\n manager.show_window()\n \n sys.exit(app.exec_())","sub_path":"MVP/Manager.py","file_name":"Manager.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"119942128","text":"from flask import Flask, render_template, url_for, request, session, redirect\r\nfrom flask_pymongo import PyMongo, pymongo\r\nfrom bson.objectid import ObjectId\r\nfrom operator import itemgetter\r\nimport bcrypt\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['MONGO_DBNAME'] = 'SampleProject'\r\napp.config['MONGO_URI'] = 'mongodb://127.0.0.1:27017/SampleProject'\r\n\r\nfrom app import routes\r\n\r\nmongo = PyMongo(app)\r\n\r\n@app.route('/')\r\n@app.route('/Home')\r\ndef Home():\r\n\treturn render_template('Home.html', title='Home')\r\n\r\n@app.route('/Register', methods=['POST', 'GET'])\r\ndef Register():\r\n\tif request.method == 'POST':\r\n\t\tUser = mongo.db.User\r\n\t\texisting_user = User.find_one({'username' : request.form['username']})\r\n\r\n\t\tif not existing_user:\r\n\t\t\thashpass = bcrypt.hashpw(request.form['password'].encode('utf-8'), bcrypt.gensalt())\r\n\t\t\tUser.insert({'username': request.form['username'], 'password': hashpass, 'first_name': request.form['first_name'], 'last_name': request.form['last_name'], 'email': request.form['email']})\r\n\t\t\tlogin_user = User.find_one({'username': request.form['username']})\r\n\t\t\tsession['userid'] = str(login_user['_id'])\r\n\t\t\tsession['username'] = login_user['username']\r\n\t\t\tsession['first_name'] = login_user['first_name']\r\n\t\t\tsession['last_name'] = login_user['last_name']\r\n\t\t\tsession['email'] = login_user['email']\r\n\t\t\treturn redirect(url_for('MembersHome'))\r\n\t\treturn 'That username already exists!'\r\n\treturn render_template('register.html', title='Register')\r\n\r\n@app.route('/Login', methods=['POST', 'GET'])\r\ndef Login():\r\n\tif request.method == 'POST':\r\n\t\tUser = mongo.db.User\r\n\t\tlogin_user = User.find_one({'username' : request.form['username']})\r\n\r\n\t\tif login_user:\r\n\t\t\tif bcrypt.hashpw(request.form['password'].encode('utf-8'), login_user['password'].encode('utf-8')) == login_user['password'].encode('utf-8'):\r\n\t\t\t\tsession['userid'] = str(login_user['_id'])\r\n\t\t\t\tsession['username'] = login_user['username']\r\n\t\t\t\tsession['first_name'] = login_user['first_name']\r\n\t\t\t\tsession['last_name'] = login_user['last_name']\r\n\t\t\t\tsession['email'] = login_user['email']\r\n\t\t\t\treturn redirect(url_for('MembersHome'))\r\n\treturn render_template('Login.html', title='Login')\r\n\r\n@app.route('/MembersHome')\r\ndef MembersHome():\r\n\tif not session.get('username'):\r\n\t\treturn redirect(url_for('Login'))\r\n\tquery_0 = session['first_name']\r\n\tDivision = mongo.db.Division\r\n\tmultiquery_test_17 = Division.find({}).limit(0)\r\n\tmultiquery_17 = []\r\n\tfor mq in multiquery_test_17:\r\n\t\tmultiquery_17.append(mq)\r\n\treturn render_template('MembersHome.html', title='MembersHome', query_0=query_0, multiquery_17=multiquery_17)\r\n\r\n@app.route('/newDivision', methods=['POST', 'GET'])\r\ndef newDivision():\r\n\tif not session.get('username'):\r\n\t\treturn redirect(url_for('Login'))\r\n\tif request.method == 'POST':\r\n\t\tDivision = mongo.db.Division\r\n\t\texists = 
Division.find_one({'Name': request.form['Name']})\r\n\r\n\t\tif not exists:\r\n\t\t\tDivision.insert({'Name': request.form['Name']})\r\n\r\n\treturn render_template('newDivision.html', title='newDivision')\r\n\r\n@app.route('/Newinvoice', methods=['POST', 'GET'])\r\ndef Newinvoice():\r\n\tif not session.get('username'):\r\n\t\treturn redirect(url_for('Login'))\r\n\tif request.method == 'POST':\r\n\t\tInvoice = mongo.db.Invoice\r\n\t\tAmountNoTax_calculated = int(request.form['Amount']) * 0.8\r\n\t\texists = Invoice.find_one({'Description': request.form['Description'], 'Amount': float(request.form['Amount']), 'AmountNoTax': AmountNoTax_calculated, 'Claimant': request.form['Claimant'], 'Division': request.form['Division'], 'Date': request.form['Date']})\r\n\r\n\t\tif not exists:\r\n\t\t\tInvoice.insert({'Description': request.form['Description'], 'Amount': float(request.form['Amount']), 'AmountNoTax': AmountNoTax_calculated, 'Claimant': request.form['Claimant'], 'Division': request.form['Division'], 'Date': request.form['Date']})\r\n\r\n\tUser = mongo.db.User\r\n\tClaimant_options = User.find({})\r\n\tDivision = mongo.db.Division\r\n\tDivision_options = Division.find({})\r\n\treturn render_template('Newinvoice.html', title='Newinvoice', Claimant_options=Claimant_options, Division_options=Division_options)\r\n\r\n@app.route('/Division/')\r\ndef Division(Divisionid):\r\n\tif not session.get('username'):\r\n\t\treturn redirect(url_for('Login'))\r\n\tDivision = mongo.db.Division\r\n\tpage_Division = Division.find_one({'_id': ObjectId(Divisionid)})\r\n\tquery_0 = page_Division['Name']\r\n\tobjectReset = mongo.db.objectReset\r\n\tmultiquery_test_24 = objectReset.find({'Division': str(page_Division['_id'])}).sort('Date', pymongo.DESCENDING).limit(0)\r\n\tmultiquery_24 = []\r\n\tfor mq in multiquery_test_24:\r\n\t\tmultiquery_24.append(mq)\r\n\treturn render_template('Division.html', title='Division', query_0=query_0, multiquery_24=multiquery_24)\r\n\r\n@app.route('/logout')\r\ndef logout():\r\n\tsession.pop('username', None)\r\n\tsession.pop('userid', None)\r\n\tsession.pop('first_name', None)\r\n\tsession.pop('last_name', None)\r\n\tsession.pop('email', None)\r\n\treturn redirect(url_for('Home'))\r\n\r\nif __name__ == \"__main__\":\r\n\tapp.run()\r\n\r\napp.secret_key = '\\xc4\\xd1\\xc8@g[\\x04\\xbfpu\\th&,\\x1b\\xb5\\x18\\x0e\\x06\\xbc\\xad\"*\\xa8'","sub_path":"sample/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"98931367","text":"import numpy as np\nfrom scipy.integrate import quad\nfrom matplotlib import pyplot as plt\n\n#potenital function for a charged finite bar, 1/4*pi factor not included!\ndef phi(x1,x2,y,z):\n return np.log(np.abs( (x2+np.sqrt(x2**2+z**2+y**2))/(x1+np.sqrt(x1**2+z**2+y**2)) ))\n\n# defines a square magnet (x,y,z) are \"center\" coordinates, X,Y,Z are the total size\ndef square_mag(X,Y,Z,x,y,z):\n int1= quad(lambda t:phi(-X/2+x,X/2+x,t+y,z-Z/2),-Y/2,Y/2)[0]\n int2= -quad(lambda t:phi(-X/2+x,X/2+x,t+y,z+Z/2),-Y/2,Y/2)[0]\n \n return int1+int2\n\ndist=2.0\n\n# defines the arrangement of the magnets, and transforms coordinates\n# here: a single centralized cube\ndef total_phi(xx,yy,zz):\n total= square_mag(10.0,10.0,0.01,-xx,-yy,-zz)\n# total+=square_mag(10.0,10.0,1.0,-xx,-yy,-dist-zz)\n return total\n\neps=0.0001\n\n# calculates the gradient of total_phi\ndef grad(x,y,z):\n delta=np.array([total_phi(x+eps,y,z)-total_phi(x-eps,y,z),\n 
total_phi(x,y+eps,z)-total_phi(x,y-eps,z),\n total_phi(x,y,z+eps)-total_phi(x,y,z-eps)])\n return delta/2.0/eps\n# test: potential is identically zero in the center of a cube magnet:\nprint (\"test: potential in center of a cube magnet:\",square_mag(1.0,1.0,1.0,0,0,0))\nprint (\"test: field in the center of a sheet of magnet is \",np.pi*4)\nprint (grad(0,0,0))\n\n\n# defines the arrangement of the magnets, and transforms coordinates\n# here:unit cubic magnet\ndef total_phi(xx,yy,zz):\n total= square_mag(1,1,1,-xx,-yy,-zz)\n \n return total\nref_B=grad(0,0,0.51)[2]\nprint(\"field on the surface of unit magnet\",ref_B)\n\n# defines the arrangement of the magnets, and transforms coordinates\n# two slabs of magnet, equal distance from center\ndef total_phi(xx,yy,zz):\n total= square_mag(10.0,10.0,1,-xx,-yy,dist-zz)\n total+=square_mag(10.0,10.0,1.0,-xx,-yy,-dist-zz)\n return total\n\n\nx=np.linspace(-5,5,21)\n\n\n#plots filed for x and z, distance is fixed\ndef calc_plot_Bz(zz):\n Bz=[]\n for i in range(0,21):\n Bz.append(grad(x[i],0,zz)[2])\n Bz=np.array(Bz)\n #Bz=Bz/np.max(np.abs(Bz))\n #Bz=Bz/np.abs(Bz[10])\n print(np.max(np.abs(Bz)))\n plt.plot(x,Bz,label=str(dist-zz-0.5))\n\n#dist=0.001+0.05\n#calc_plot_Bz()\n\n# distance (from center!) is fixed to minimum fluctuation value\ndist= 2.75\nzz=0.0\nfor j in range(0,11):\n calc_plot_Bz(zz)\n zz+=0.21\n\n#phi=total_phi(np.array([0,0]),np.array([0,0]),np.array([0,0]))\n#phii=square_mag(1.0,1.0,1.0,np.array([0,0]),0,1.0)\n#Bz=-grad(x,0,0)[3]\nplt.legend()\nplt.grid()\nplt.show()\n#print (square_mag(1.0,1.0,1.0,0,0,.001))\n","sub_path":"magnet/magnet2.py","file_name":"magnet2.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"287245654","text":"#!/usr/bin/env python3\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\n\ndef parse_txs():\n print(\"Get transactions ids...\")\n accounts = {\"0xd079c22e9c63341bf839a8634e8892c430d724cf\",\n \"0xae506bb28ed79b29c6968ab527d1efdc5f399331\"}\n txs = set()\n page = 0\n stop = False\n with open(\"txs.txt\", \"r\") as f:\n old_txs = set(map(lambda x: x.strip(), f.readlines()))\n while 1:\n print(\"page: {}\".format(page))\n r = requests.get('https://etherscan.io/txs?a=0xAE506bb28Ed79b29c6968Ab527d1eFdc5f399331&p={}'.format(page))\n soup = BeautifulSoup(r.text, 'lxml')\n addrs = soup.find_all(\"span\", {\"class\": \"address-tag\"})\n if len(addrs) == 0:\n break\n for addr in addrs:\n if addr.text in accounts:\n continue\n if addr.text in old_txs:\n stop = True\n break\n txs.add(addr.text)\n if stop:\n break\n page += 1\n print(\"Get {} new transactions ids\".format(len(txs)))\n with open(\"txs.txt\", \"a\") as f:\n [f.write(tx + \"\\n\") for tx in txs]\n with open(\"data.txt\", \"a\") as f:\n for i, tx in enumerate(txs):\n print(i)\n tx = tx.strip()\n r = requests.get('https://etherscan.io/tx/{}'.format(tx))\n soup = BeautifulSoup(r.text, 'lxml')\n data = soup.find(\"textarea\", {\"class\": \"form-control\"})\n f.write(data.text + \"\\n\") if data is not None else print(tx)\n print(\"Get transactions data... 
OK\")\n\n\ndef parse_data():\n print(\"Get variables from data...\")\n with open(\"data.txt\", \"r\") as f:\n s = f.readlines()\n print(len(s))\n with open(\"transactions.txt\", \"w\") as f:\n for tx in s:\n tx = tx.strip()[10:]\n txid = tx[:32]\n am = tx[64:128]\n time = tx[129:]\n f.write(\"{} {} {}\\n\".format(txid, int(\"0x\" + am, 0) / 10 ** 8, datetime.fromtimestamp(int(\"0x\" + time, 0))))\n print(\"Get variables from data... OK\")\n\n\ndef sort_data():\n with open(\"transactions.txt\", \"r\") as f:\n s = f.readlines()\n s = list(map(lambda x: x.split(), s))\n s = sorted(s, key=lambda x: (x[2], x[3]))\n with open(\"info.txt\", \"w\") as f:\n [f.write(x[0] + 4 * \" \" + x[1] + (14 - len(x[1])) * \" \" + x[2] + (14 - len(x[2])) * \" \" + \\\n x[3] + \"\\n\") for x in s]\n\n\ndef sort_by_amount():\n with open(\"transactions.txt\", \"r\") as f:\n s = f.readlines()\n s = list(map(lambda x: x.split(), s))\n s = sorted(s, key=lambda x: float(x[1]), reverse=True)\n with open(\"top.txt\", \"w\") as f:\n [f.write(x[0] + 4 * \" \" + x[1] + (14 - len(x[1])) * \" \" + x[2] + (14 - len(x[2])) * \" \" + \\\n x[3] + \"\\n\") for x in s[:10]]\n f.write(str(sum([float(x[1]) for x in s[10:]])))\n\n\ndef group_by_day():\n with open(\"transactions.txt\", \"r\") as f:\n s = f.readlines()\n s = list(map(lambda x: x.split(), s))\n s = sorted(s, key=lambda x: (x[2], x[3]))\n days_info = []\n cur_date = s[0][2]\n day_tx = []\n for tx in s:\n if tx[2] == cur_date:\n day_tx.append(tx)\n else:\n days_info.append(day_tx)\n cur_date = tx[2]\n day_tx = []\n days_info.append(day_tx)\n with open(\"info_by_day.txt\", \"w\") as f:\n f.write(\"Date\\t\\t\\tAmount\\n\")\n for x in days_info:\n f.write(x[0][2] + \"\\t\\t\" + str(sum(float(el[1]) for el in x)) + \"\\n\")\n\n\nif __name__ == \"__main__\":\n parse_txs()\n parse_data()\n sort_by_amount()\n group_by_day()\n","sub_path":"Statistics/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"394935414","text":"from scipy.stats import mode\nimport numpy as np\nimport plotly.offline as py\nimport plotly.graph_objs as go\nfrom plotly import tools\n\ndef plot_attention_map(x_words, y_words, weights, average_weights=True, show_max_only=False):\n\n if average_weights:\n mean_weights = np.mean(weights, axis=-1)\n weights = [mean_weights]\n\n traces = []\n for weight_count, weight in enumerate(weights):\n if show_max_only:\n weight = (weight == weight.max(axis=-1)[:, None]).astype(int)\n\n color_centroid = np.around(weights, decimals=4)\n color_centroid = mode(color_centroid, axis=None)[0][0]\n print(color_centroid)\n traces.append(go.Heatmap(z=weight,\n zmin= color_centroid - np.var(weight)/100,\n zmax= color_centroid + np.var(weight)/100,\n x=list(x for x in range(len(x_words))),\n y=list(y for y in range(len(y_words))),\n showscale=True,\n colorbar=dict(x=0.45 if len(weights) > 1 and weight_count%2 == 0 else 1.0,\n # y=0.45 if weight_count%2 == 0 else 1, len= 0.45\n )\n ))\n if len(weights) == 1:\n layout = {}\n layout.update({'yaxis': {'ticktext': y_words,\n 'tickvals': list(y for y in range(len(y_words))),\n 'tickmode': 'array', 'autorange': 'reversed'}})\n layout.update({'xaxis': {'ticktext': x_words,\n 'tickvals': list(x for x in range(len(x_words))),\n 'tickmode': 'array',\n 'tickangle': -90}})\n fig = go.Figure(traces, layout=layout)\n else:\n fig = tools.make_subplots(rows=(len(weights)+1)//2, cols=2, shared_yaxes=False, shared_xaxes=False, 
print_grid=False)\n\n layout = {'height': (18*len(y_words))*(len(weights)//2 + 1)}\n for trace_count, trace in enumerate(traces):\n fig.append_trace(trace, (trace_count//2)+1, (trace_count%2)+1)\n\n layout.update({'yaxis' + str(trace_count+1): {'ticktext': y_words,\n 'tickvals': list(y for y in range(len(y_words))),\n 'tickmode': 'array', 'autorange': 'reversed'}})\n layout.update({'xaxis' + str(trace_count+1): {'ticktext': x_words,\n 'tickvals': list(x for x in range(len(x_words))) ,\n 'tickmode':'array'}})\n\n fig['layout'].update(layout)\n py.plot(fig, image=\"svg\")\n\n# Place the blue cube on top of the green cube","sub_path":"keras_transformer/utils/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"639438477","text":"#!/usr/bin/env python3\n\"\"\" https://github.com/UWPCEWebPython/flask-mailroom \"\"\"\n\nimport os\n\nfrom flask import Flask, render_template, request, redirect, url_for\n\nfrom model import Donor, Donation\n\napp = Flask(__name__) # pylint: disable=invalid-name\n\n\n@app.route('/', methods=['GET'])\ndef home():\n \"\"\" Redirect to donations page \"\"\"\n return redirect(url_for('donations'))\n\n\n@app.route('/donations', methods=['GET', 'POST'])\ndef donations():\n \"\"\" Prompt for donations and display current donations \"\"\"\n if request.method == 'POST':\n donor_name = request.form['donor_name']\n donation_amount = request.form['donation_amount']\n print(f\"donor_name {donor_name} donation_amount {donation_amount}\")\n try:\n donor = Donor.get(Donor.name == donor_name)\n except Donor.DoesNotExist:\n donor = Donor(name=donor_name)\n donor.save()\n\n donation = Donation(donor=donor, value=donation_amount)\n donation.save()\n\n # README says to redirect to home page, but that just redirects here.\n # So, just fall through to what would be the GET processing.\n\n dons = Donation.select()\n return render_template('donations.jinja2', donations=dons)\n\n\nif __name__ == \"__main__\":\n PORT = int(os.environ.get(\"PORT\", 6738))\n app.run(host='0.0.0.0', port=PORT)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"371577664","text":"import unittest\n\n\n# A child is running up a staircase with n steps, and can hop either 1 step, 2 steps, or 3 steps at a time.\n# Implement as method to count how many possible ways the child can run up the stairs.\n\n\ndef staircase_count(nSteps):\n if nSteps < 0:\n return 0\n if nSteps == 0:\n return 1\n total = 0\n for step in [1, 2, 3]:\n total += staircase_count(nSteps - step)\n return total\n\n\ndef staircase_list(nSteps, li=None, output=None):\n if li is None:\n li = []\n if output is None:\n output = []\n if sum(li) > nSteps:\n return None\n if sum(li) == nSteps:\n output.append(li)\n return output\n for step in [1, 2, 3]:\n staircase_list(nSteps, li=li + [step], output=output)\n return output\n\n\nclass StaircaseTest(unittest.TestCase):\n\n def assertNestedListEquals(self, nested_list, expected):\n for a, b in zip(nested_list, expected):\n self.assertListEqual(a, b)\n\n def test_staircase_count(self):\n self.assertEqual(1, staircase_count(1))\n self.assertEqual(2, staircase_count(2))\n self.assertEqual(4, staircase_count(3))\n self.assertEqual(7, staircase_count(4))\n\n def test_staircase_list(self):\n self.assertNestedListEquals([[1]], staircase_list(1))\n 
self.assertNestedListEquals([[1, 1], [2]], staircase_list(2))\n self.assertNestedListEquals([[1, 1, 1], [1, 2], [2, 1], [3]], staircase_list(3))\n self.assertNestedListEquals([[1, 1, 1, 1], [1, 1, 2], [1, 2, 1], [1, 3], [2, 1, 1], [2, 2], [3, 1]], staircase_list(4))\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"python/d&c/staircase.py","file_name":"staircase.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"156329728","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom teachers.models import Teacher\nfrom django.db.models import Q\nfrom teachers.forms import TeachersAddForm\n\n\ndef generate_teacher(request):\n teacher = Teacher.generate_teacher()\n return HttpResponse(f'{teacher.get_info()}')\n\n\ndef teachers(request):\n queryset = Teacher.objects.all()\n response = ''\n\n filtr_param = request.GET.get('filtr_param')\n if filtr_param:\n queryset = queryset.filter(\n Q(first_name__contains=filtr_param) | Q(last_name__contains=filtr_param) | Q(email__contains=filtr_param)\n )\n # __contains --> like '%blabla%'\n # __endswith --> like '%blabla'\n # __startswith --> like 'blabla%'\n # __istarts/ends/--> регистронезависимый поиск\n\n for teacher in queryset:\n response += teacher.get_info() + '
<br>'\n    return render(request,\n                  'teachers_list.html',\n                  context={'teachers_list': response})\n\n\ndef teacher_add(request):\n    if request.method == 'POST':\n        form = TeachersAddForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return HttpResponseRedirect('/teachers/')\n    else:\n        form = TeachersAddForm()\n\n    return render(request,\n                  'teacher_add.html',\n                  context={'form': form})","sub_path":"Src/teachers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"328440874","text":"import FWCore.ParameterSet.Config as cms\nimport FWCore.ParameterSet.VarParsing as VarParsing\n\nprocess = cms.Process(\"OWNPARTICLES\")\noptions = VarParsing.VarParsing('analysis')\noptions.inputFiles = \"file:/home/t3-ku/janguian/CMSSW_10_6_8/src/KUsoftMVA/test/0ACB220F-DB5C-3449-83F5-E04858176001.root\"\noptions.outputFile = \"defaultout.root\"\noptions.maxEvents = 100\noptions.parseArguments()\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.maxEvents = cms.untracked.PSet( input=cms.untracked.int32(options.maxEvents ))\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 5000\n\n\nprocess.source = cms.Source(\"PoolSource\", \n    fileNames = cms.untracked.vstring(options.inputFiles)\n)\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n    fileName = cms.untracked.string(options.outputFile)\n    ,outputCommands = cms.untracked.vstring('drop *',\n                                            \"keep *_offlineSlimmedPrimaryVertices_*_*\",\n                                            \"keep *_slimmedMuons_*_*\",\n                                            \"keep *_slimmedElectrons_*_*\",\n                                            \"keep *_packedPFCandidates_*_*\",\n                                            \"keep *_packedGenParticles_*_*\")\n                                            \n) \nprocess.e = cms.EndPath(process.out)\n\n\n","sub_path":"MuonAnalysis/test/createMiniAODNtuple.py","file_name":"createMiniAODNtuple.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"107819694","text":"#import sys\n#sys.path.append('../')\n#from P32.main import gcd\n\n#def totient_phi(num):\n#    count = 0\n#    for i in range(1,num):\n#        if gcd(i,num) == 1:\n#            count += 1\n#    return count\n\n\n######################## corrected code ##########################\nimport sys\nsys.path.append('../')\nfrom P33.main import is_coprime\n\ndef totient_phi(num):\n    count = 0\n    for i in range(1,num):\n        if is_coprime(i,num):\n            count +=1\n    return count\n","sub_path":"P34/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"443773411","text":"__author__ = 'nick meng'\r\n# Character matching\r\n# Single-character classes:\r\n# '\\d' matches one digit, '\\w' matches one digit or letter\r\n# '\\s' matches one whitespace character, '.' matches any character\r\n# Quantifiers for longer matches:\r\n# '*' means any number of characters (including zero)\r\n# '+' means at least one character\r\n# '?' means zero or one character\r\n# '{n}' means exactly n characters, '{n,m}' means n to m characters\r\n# For exact matching, [] can be used to express a range\r\n# '^' matches the start of a line, '$' matches the end of a line\r\n# The re module\r\n#\r\ns='ABC\\\\-001'# the corresponding regex string becomes 'ABC\\-001'\r\n\r\nimport re\r\nre.match(r'^\\d{3}\\-\\d{3,8}$','010-1234')\r\n# match() tests whether the string matches; on success it returns a Match object, otherwise None.\r\n# match(pattern, string, flags=0)\r\ntest='用户输入的字符串'\r\nif re.match(r'正则表达式',test):\r\n    print('ok')\r\nelse:\r\n    print('failed')\r\n# Splitting strings\r\n#\r\ns='a b c'\r\ns1=s.split(' ')\r\nprint(s1)\r\ns2=re.split(r'\\s+','a b c')\r\nprint(s2)\r\ns3=re.split(r'[\\s\\,]+','a,b, c d')\r\nprint(s3)\r\ns4=re.split(r'[\\s\\,\\:]+','a,b:: c 
d')\r\nprint(s4)\r\n\r\n# Grouping\r\n# Regular expressions can extract substrings from a string\r\nm=re.match(r'^(\\d{3})-(\\d{3,8})$','010-12345')\r\nm.group(0)\r\nm.group(1)\r\nm.group(2)\r\n# If the regex defines groups, call group() on the Match object to extract the substrings; the argument is the group's ordinal, and 0 means the original string\r\n#\r\nt='19:05:30'\r\nm=re.match(r'^(0[0-9]|1[0-9]|2[0-3]|[0-9])\\:(0[0-9]|1[0-9]|2[0-9]|3[0-9]|4[0-9]|5[0-9]|[0-9])\\:(0[0-9]|1[0-9]|2[0-9]|3[0-9]|4[0-9]|5[0-9]|[0-9])$', t)\r\nm1=m.groups()\r\nprint(m1)\r\n\r\n# Greedy matching:\r\n# Regex matching is greedy by default, i.e. it matches as many characters as possible\r\nw=re.match(r'^(\\d+)(0*)$','102300').groups()\r\nprint(w)\r\ns=re.match(r'^(\\d+?)(0*)$','102300').groups()# use '?' for non-greedy matching\r\nprint(s)\r\n# Compilation: pre-compiling produces a Regular Expression object\r\nre_telephone=re.compile(r'^(\\d{3})-(\\d{3,8})$')\r\nr1=re_telephone.match('010-12345').groups()\r\nprint(r1)","sub_path":"MyProject/firstprogarm/regexdemo.py","file_name":"regexdemo.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"56928209","text":"#!/usr/bin/env python\n\n__author__ = 'tgiguere'\n\nfrom pyon.public import log\nfrom interface.objects import DatasetDescriptionDataSamplingEnum\nfrom eoi.agent.handler.base_external_data_handler import *\nfrom eoi.agent.utils import ArrayIterator\nimport numpy\n\nclass HfrRadialDataHandler(BaseExternalDataHandler):\n\n    _data_array = None\n    _number_of_records = 0\n    _variables = []\n    _attributes = {}\n\n    def __init__(self, data_provider=None, data_source=None, ext_dataset=None, *args, **kwargs):\n        BaseExternalDataHandler.__init__(self, data_provider, data_source, ext_dataset, *args, **kwargs)\n\n        self._variables[:] = []\n        self._load_attributes(data_source)\n        self._load_values(data_source)\n\n    def _load_attributes(self, filename=''):\n        #looping through the whole file to get the attributes; not sure if this is such a good idea\n        with open(filename, 'r') as f:\n            in_table_data = False\n            correct_table_type = False\n            for line in f:\n                if line.startswith('%TableType:'):\n                    parsed_line = line.partition(': ')\n                    if parsed_line[2].startswith('LLUV'):\n                        correct_table_type = True\n                    else:\n                        correct_table_type = False\n                if line.startswith('%TableStart:'):\n                    in_table_data = True\n                if line.startswith('%TableEnd:') and in_table_data:\n                    in_table_data = False\n                    correct_table_type = False\n\n\n                if not (in_table_data):\n                    self._parse_attribute(line, correct_table_type)\n            f.close()\n\n    def _parse_attribute(self, line='', correct_table_type=False):\n        #strip out leading %\n        new_line = line.replace('%', '')\n\n        parsed_line = new_line.partition(':')\n        if parsed_line[0] == 'TableColumnTypes' and correct_table_type:\n            cols = parsed_line[2].split(' ')\n            for col in cols:\n                if not col == '' and not col == '\\n':\n                    self._variables.append(col)\n        elif not parsed_line[0].startswith('Table'):\n            if not parsed_line[2] == '':\n                self._attributes[parsed_line[0]] = parsed_line[2].replace('\\n', '')\n\n    def _load_values(self, filename=''):\n        a = numpy.loadtxt(fname=filename, comments='%')\n\n        self._data_array = {}\n        index = 0\n        for column in self._variables:\n            self._data_array[column] = a[:,index]\n            index += 1\n\n        self._number_of_records = a.shape[0]\n\n    def acquire_data(self, var_name=None, slice_=()):\n        if var_name in self._variables:\n            vars = [var_name]\n        else:\n            vars = self._variables\n\n        if not isinstance(slice_, tuple): slice_ = (slice_,)\n\n        for vn in vars:\n            var = self._data_array[vn]\n\n            ndims = len(var.shape)\n            # Ensure the slice_ is the appropriate length\n            if len(slice_) < ndims:\n                slice_ += (slice(None),) * (ndims-len(slice_))\n\n            arri = ArrayIterator(var, 
self._block_size)[slice_]\n for d in arri:\n if d.dtype.char is \"S\":\n # Obviously, we can't get the range of values for a string data type!\n rng = None\n elif isinstance(d, numpy.ma.masked_array):\n # TODO: This is a temporary fix because numpy 'nanmin' and 'nanmax'\n # are currently broken for masked_arrays:\n # http://mail.scipy.org/pipermail/numpy-discussion/2011-July/057806.html\n dc = d.compressed()\n if dc.size == 0:\n rng = None\n else:\n rng = (numpy.nanmin(dc), numpy.nanmax(dc))\n else:\n rng = (numpy.nanmin(d), numpy.nanmax(d))\n yield vn, arri.curr_slice, rng, d\n\n return\n\n def get_attributes(self, scope=None):\n \"\"\"\n Returns a dictionary containing the name/value pairs for all attributes in the given scope.\n @param scope The name of a variable in this dataset. If no scope is provided, returns the global_attributes for the dataset\n \"\"\"\n #Since there are no variable attributes in this file, just return the global ones.\n return self._attributes\n\n def get_attribute(self, attr_name=''):\n if attr_name in self._attributes:\n return self._attributes[attr_name]\n else:\n return ''\n\n def get_variables(self):\n return self._variables\n","sub_path":"eoi/agent/handler/hfr_radial_data_handler.py","file_name":"hfr_radial_data_handler.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"317480359","text":"def memoize(func, *args):\n memo = {}\n\n def wrapped(n, a):\n if n not in memo:\n memo[n] = func(n, a)\n\n return memo[n]\n\n return wrapped\n\n\n@memoize\ndef can_construct(n, a):\n if n == \"\":\n return True\n\n for each in a:\n # don't take out from middle because it would create\n # a string of characters that do not necessarily appear in the original\n # string. It is better to take out from prefix because you preserve\n # the adjacent strings in the original. 
Also, the candidate strings\n        # have to match the prefix or suffix if they are able to construct the\n        # original string\n        if n.startswith(each):\n            if can_construct(n[len(each) :], a) is True:\n                return True\n\n    return False\n\n\nif __name__ == \"__main__\":\n    print(can_construct(\"abcdef\", [\"ab\", \"abc\", \"cd\", \"def\", \"abcd\"]))\n    print(can_construct(\"enterapotentpot\", [\"a\", \"p\", \"ent\", \"enter\", \"ot\", \"o\", \"t\"]))\n    print(\n        can_construct(\n            \"eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeef\",\n            [\"e\", \"ee\", \"eee\", \"eeee\", \"eeeee\", \"eeeeee\"],\n        )\n    )\n","sub_path":"algorithms/dynamic_programming/can_construct.py","file_name":"can_construct.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"361940259","text":"#!/usr/bin/python3\n\nimport hashlib\n\nSECRET_KEY = \"ckczppom\"\n\n\ndef hash_mine(number, zero_count):\n    # md5 requires bytes under Python 3, so encode the concatenated string first\n    digest = hashlib.md5((SECRET_KEY + str(number)).encode()).hexdigest()\n    return digest.startswith(str(0) * zero_count)\n\n\nif __name__ == \"__main__\":\n    count = 0\n    while True:\n        if hash_mine(count, 5):\n            break\n\n        # Protip: Include this line, otherwise the problem takes a really,\n        # really long time to run...\n        count += 1\n\n    print(\"First hash that starts with five zeroes is: {}\".format(count))\n\n    while True:\n        if hash_mine(count, 6):\n            break\n\n        count += 1\n\n    print(\"First hash that starts with six zeroes is: {}\".format(count))\n","sub_path":"day4/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"458208230","text":"#_main_.py - Whack-a-Mouse (打耗子)\nfrom math import *\nimport time\nfrom sys import exit\nimport pygame\n\npygame.init()\npygame.display.set_caption(\"打耗子\")\n\nscreen = pygame.display.set_mode((640, 480))\nscreen_pic = pygame.image.load(\"grass\")\n#screen_pic = pygame.transform.scale(screen_pic, (640, 480))\n\ndule = pygame.image.load(\"dude\")\ndule = pygame.transform.flip(dule , False, True)\n\ncastle = pygame.image.load(\"castle\")\n\n\n\ndule = pygame.transform.rotate(dule , 0)\n\nscreen.blit(screen_pic, (0, 0))\nscreen.blit(castle, (30, 30))\nscreen.blit(castle, (30, 180))\nscreen.blit(castle, (30, 330))\nscreen.blit(dule, (100, 100))\npygame.display.update()\n\ntime.sleep(3)\n\ndule = pygame.transform.rotate(dule , -30)\n\nscreen.blit(screen_pic, (0, 0))\nscreen.blit(castle, (30, 30))\nscreen.blit(castle, (30, 180))\nscreen.blit(castle, (30, 330))\nscreen.blit(dule, (100, 100))\npygame.display.update()\n\n\ncat_x = 100\ncat_y = 100\nmouse_x = 201\nmouse_y = 302\na = mouse_y -cat_y\nb = mouse_x - cat_x\nangle = atan(a / b)\nangle = angle * 180 / 3.141592654\nangle = 0 - angle\n\nis_start = 1\n\nwhile True:\n\tif is_start == 1:\n\n\t\tcat_x = 100\n\t\tcat_y = 100\n\t\ta = mouse_y - cat_y\n\t\tb = mouse_x - cat_x\n\t\tangle_old = angle\n\t\tangle = atan(a / b)\n\t\tangle = angle * 180 / 3.141592654\n\t\tangle = 0 - angle\n\n\t\t# angle = atan((mouse[1]-123)/(mouse[0]-132))#*180/3.14159265\n\t\tif angle_old == angle:\n\t\t\tdule = pygame.transform.rotate(dule , angle)\n\t\tmouse_x, mouse_y = pygame.mouse.get_pos()\n\t\tscreen.blit(screen_pic, (0, 0))\n\t\tscreen.blit(castle, (30, 30))\n\t\tscreen.blit(castle, (30, 180))\n\t\tscreen.blit(castle, (30, 330))\n\t\tscreen.blit(dule, (100, 100))\n\t\t#time.sleep(1)\n\telif is_start == 2:\n\t\tscreen.bilt()\n\tfor event in pygame.event.get():\n\t\tif event.type == 
pygame.QUIT:\n\t\t\tpygame.quit()\n\t\t\texit()\n\tpygame.display.update()\n","sub_path":"angle.py","file_name":"angle.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"37229291","text":"import pandas as pd\nimport numpy as np\n\ndef findpeaks(data, spacing=1, limit=None):#fonction de détéction des pics à partir d'un signal( les points depassant un seuil=limit)\n\n \n len = data.size\n x = np.zeros(len + 2 * spacing)\n data = np.squeeze(data)\n x[:spacing] = data[0] - 1.e-6\n x[-spacing:] = data[-1] - 1.e-6\n x[spacing:spacing + len] = data\n peak_candidate = np.zeros(len)\n peak_candidate[:] = True\n for s in range(spacing):\n start = spacing - s - 1\n h_b = x[start: start + len] # before\n start = spacing\n h_c = x[start: start + len] # central\n start = spacing + s + 1\n h_a = x[start: start + len] # after\n peak_candidate = np.logical_and(peak_candidate, np.logical_and(h_c > h_b, h_c > h_a))\n\n ind = np.argwhere(peak_candidate)\n ind = ind.reshape(ind.size)\n if limit is not None:\n ind = ind[data[ind] > limit]\n return ind","sub_path":"ECG_TELNET_Pythonanywhere/code/findpeaks.py","file_name":"findpeaks.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"615531604","text":"#\n# Copyright The NOMAD Authors.\n#\n# This file is part of NOMAD. See https://nomad-lab.eu for further info.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\n\nfrom nomad.datamodel import EntryArchive\nfrom atkparser import ATKParser\n\n\ndef approx(value, abs=0, rel=1e-6):\n return pytest.approx(value, abs=abs, rel=rel)\n\n\n@pytest.fixture(scope='module')\ndef parser():\n return ATKParser()\n\n\ndef test_scf(parser):\n archive = EntryArchive()\n parser.parse('tests/data/Si2.nc', archive, None)\n\n sec_run = archive.section_run[0]\n assert sec_run.program_version == 'ATK 2016.0.3'\n\n sec_method = sec_run.section_method[0]\n assert sec_method.smearing_width == 300\n assert sec_method.section_XC_functionals[1].XC_functional_name == 'LDA_C_PZ'\n\n sec_system = sec_run.section_system[0]\n assert sec_system.lattice_vectors[1][0].magnitude == approx(2.7153e-10)\n assert sec_system.atom_positions[1][0].magnitude == approx(1.35765e-10)\n assert sec_system.atom_labels == ['Si', 'Si']\n\n sec_scc = sec_run.section_single_configuration_calculation[0]\n assert sec_scc.energy_total.magnitude == approx(-5.73249938e-17)\n assert sec_scc.energy_XC.magnitude == approx(-3.41975673e-17)\n","sub_path":"tests/test_atkparser.py","file_name":"test_atkparser.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"271814013","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Constants for y0.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Protocol, TypeVar\n\n__all__ = [\n \"NodeType\",\n \"NodeProtocol\",\n]\n\n\nclass 
NodeProtocol(Protocol):\n \"\"\"Represents what can be a node in a mixed graph.\"\"\"\n\n def __hash__(self) -> int:\n ...\n\n def __lt__(self, other) -> bool:\n ...\n\n\nNodeType = TypeVar(\"NodeType\", bound=NodeProtocol)\n","sub_path":"src/y0/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"110023837","text":"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nfrom absl import app\nimport numpy as np\n\nimport pyspiel\n\nfrom open_spiel.python import rl_environment\nfrom open_spiel.python.algorithms import random_agent\nfrom open_spiel.python.algorithms import tabular_qlearner\nfrom open_spiel.python.project.part_1.cross_learning import cross_learner\n\ndef train_agents(env, agents, nbep):\n prob_array = []\n for ep in range(nbep):\n time_step = env.reset()\n while not time_step.last():\n output0 = agents[0].step(time_step)\n output1 = agents[1].step(time_step)\n prob_array = []\n prob_array.append(output0.probs)\n prob_array.append(output1.probs)\n actions = [output0.action, output1.action]\n time_step = env.step(actions)\n # Episode is over, step all agents with final info state.\n agents[0].step(time_step)\n agents[1].step(time_step)\n# print(time_step.rewards)\n return prob_array\ndef train_agents_simultaneous_single(env, agents, nbep):\n for ep in range(nbep):\n time_step = env.reset()\n while not time_step.last():\n output0 = agents[0].step(time_step)\n output1 = agents[1].step(time_step)\n actions = [output0.action, output1.action]\n time_step = env.step(actions)\n # Episode is over, step all agents with final info state.\n agents[0].step(time_step)\n agents[1].step(time_step)\n\n\n\ndef evaluate_agents(env, agents):\n time_step = env.reset()\n while not time_step.last():\n output0 = agents[0].step(time_step, is_evaluation = True)\n output1 = agents[1].step(time_step)\n action000 = output0.action\n actions = [output0.action, output1.action]\n time_step = env.step(actions)\n # Episode is over, step all agents with final info state.\n agents[0].step(time_step)\n agents[1].step(time_step)\n return action000\n\n\ndef create_game(name):\n if name == \"PD\":\n return pyspiel.create_matrix_game([[3,0],[5,1]], [[3,5],[0,1]])\n elif name == \"BOS\":\n return pyspiel.create_matrix_game(\"battle_of_sexes\", \"The Battle of The Sexes\",\n [\"LW\", \"WL\"], [\"LW\", \"WL\"],\n [[1, 0], [0, 1/2]], [[1/2, 0], [0, 1]])\n elif name == \"MP\":\n return pyspiel.create_matrix_game(\"matching_pennies\", \"Matching Pennies\",\n [\"Heads\", \"Tails\"], [\"Heads\", \"Tails\"],\n [[0, 1], [1, 0]], [[1, 0], [0, 1]])\n elif name == \"RPS\":\n return pyspiel.create_matrix_game(\n [[0.0, -0.25, 0.5], [0.25, 0.0, -0.05], [-0.5, 0.05, 0.0]],\n [[0.0, 0.25, -0.5], [-0.25, 0.0, 0.05], [0.5, -0.05, 0.0]])\ndef create_environment(game):\n return rl_environment.Environment(game)\n\n\n\ndef execute_scenarios_probs(env, nb, start):\n agents = [\n tabular_qlearner.QLearner(player_id=0, num_actions=3, step_size=0.5, epsilon=0.05, discount_factor=1.0 ),\n tabular_qlearner.QLearner(player_id=1, num_actions=3, step_size=0.5, epsilon=0.05, discount_factor=1.0)\n\n ]\n agents[0]._q_values['[0.0]'][0] = start[0]\n agents[1]._q_values['[0.0]'][0] = start[1]\n train_agents_simultaneous_single(env, agents, nb)\n return evaluate_agents(env, agents)\n\n\ndef rewardCounter(totalSum, reward, payOffMatrix):\n\n if 
reward == payOffMatrix[0][0]:\n totalSum[0] +=1\n elif reward == payOffMatrix[0][1]:\n totalSum[1] +=1\n elif reward == payOffMatrix[1][0]:\n totalSum[2] +=1\n elif reward == payOffMatrix[1][1]:\n totalSum[3] +=1\n\n\ndef rewardCounter2(totalSum, reward, payOffMatrix):\n\n if reward == payOffMatrix[0][0]:\n totalSum[0][0] +=1\n elif reward == payOffMatrix[0][1]:\n totalSum[0][1] +=1\n elif reward == payOffMatrix[0][2]:\n totalSum[0][2] +=1\n elif reward == payOffMatrix[1][0]:\n totalSum[1][0] +=1\n elif reward == payOffMatrix[1][1]:\n totalSum[1][1] +=1\n elif reward == payOffMatrix[1][2]:\n totalSum[1][2] +=1\n elif reward == payOffMatrix[2][0]:\n totalSum[2][0] +=1\n elif reward == payOffMatrix[2][1]:\n totalSum[2][1] +=1\n elif reward == payOffMatrix[2][2]:\n totalSum[2][2] += 1\n\ndef rewardCounter3(totalSum, reward, qsdf):\n if reward == 0:\n totalSum[0] +=1\n elif reward == 1:\n totalSum[1] +=1\n elif reward == 2:\n totalSum[2] += 1\n\n\ndef create_payoff(name):\n if name == \"PD\":\n return [[3,3],[0,5]], [[5,0],[1,1]]\n if name == \"BOS\":\n return [[1,1/2],[0,0]], [[0,0],[1/2,1]]\n if name == \"RPS\":\n return [[[0,0],[-0.25,25],[0.5,-0.5]], [[0.25,-0.25],[0,0],[-0.05,0.05]], [[-0.5,0.5],[0.05,-0.05],[0,0]]]\n if name == \"MP\":\n return [[0,1],[1,0]], [[1,0],[0,1]]\n\n\ndef main(_):\n name = \"RPS\"\n game = create_game(name)\n payoff = create_payoff(name)\n env = create_environment(game)\n totalsum = np.zeros(4)\n #sum = [[0,0,0],[0,0,0],[0,0,0]]\n for i in range(1000):\n print(i)\n rewardCounter3(totalsum, execute_scenarios_probs(env, 1000, (0, 0)), payoff)\n print(totalsum)\n\n\nif __name__ == \"__main__\":\n app.run(main)","sub_path":"open_spiel/python/Project/part_1/nash/nash.py","file_name":"nash.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"24682482","text":"from flask import Blueprint, render_template\nfrom flask_login import login_required,current_user\nfrom flask.helpers import url_for\nfrom werkzeug.datastructures import cache_property\nfrom website.models import Category, Product, Receipt\nfrom flask import Blueprint, render_template, request, make_response, jsonify, redirect, url_for\nfrom . 
import db\nimport sys\nimport json\nfrom sqlalchemy import desc\n\nviews = Blueprint('views', __name__)\n\n@views.route('/main', methods = ['POST', 'GET'])\ndef main():\n try:\n if current_user.is_admin == 1:\n return redirect(url_for('views.admin'))\n except:\n print()\n if request.method == 'POST':\n req = request.get_json()\n # print(req, file=sys.stdout)\n if req['command'] == 'get_products':\n sort = req[\"sort\"]\n sort_order= req[\"sort_order\"]\n product_name= req[\"product_name\"]\n product_cats= req[\"product_categories\"]\n lower_bound, upper_bound = req['price_range']\n # print(product_name, file=sys.stdout)\n # print(req, file=sys.stdout)\n \n \n if product_name not in ('', ' ', None, {}):\n products = Product.query.filter_by(name = product_name).all()\n else:\n if sort == 'sold':\n products = Product.query.order_by(desc(Product.sold_number)).all()\n elif sort == 'date':\n products = Product.query.order_by(desc(Product.date)).all()\n else:\n if sort_order == 'desc':\n products = Product.query.order_by(desc(Product.price)).all()\n else:\n products = Product.query.order_by(Product.price).all()\n\n res_products = []\n for product in products:\n if product_cats not in ('', ' ', []) and product.category not in product_cats:\n continue\n if product.price < lower_bound or product.price > upper_bound:\n continue\n res_products.append({\"name\":product.name, \"category\":product.category, \"price\":product.price,\n \"availability_number\":product.availability_number,\n \"sold_number\":product.sold_number, \"image\":product.image})\n \n res = make_response(jsonify({\"message\": res_products}), 200)\n return res\n \n if req['command'] == 'get_categories':\n categories = Category.query.filter_by().all()\n res_categories = []\n for cat in categories:\n res_categories.append({\"name\":cat.name})\n \n res = make_response(jsonify({\"message\": res_categories}), 200)\n return res\n \n return render_template(\"main.html\",user=current_user)\n\n\n\n\n\n\n@views.route('/admin', methods=['GET', 'POST'])\n@login_required\ndef admin():\n if current_user.is_admin == 0:\n return redirect(url_for('views.user'))\n if request.method == 'POST':\n req = request.get_json()\n if req['command'] == 'get_products':\n products = Product.query.filter_by().all()\n names, categories, prices, available_numbers, sold_numbers, images = [],[],[],[],[],[]\n res_products = []\n for product in products:\n res_products.append({\"name\":product.name, \"category\":product.category, \"price\":product.price,\n \"availability_number\":product.availability_number,\n \"sold_number\":product.sold_number, \"image\":product.image})\n \n res = make_response(jsonify({\"message\": res_products}), 200)\n return res\n \n if req['command'] == 'get_categories':\n categories = Category.query.filter_by().all()\n res_categories = []\n for cat in categories:\n res_categories.append({\"name\":cat.name})\n \n res = make_response(jsonify({\"message\": res_categories}), 200)\n return res\n \n \n if req['command'] == 'get_receipts':\n receipts = Receipt.query.order_by(desc(Receipt.date)).all()\n res_categories = []\n for rec in receipts:\n json_rec = {\"id\":rec.id, \"product_name\":rec.product_name,\n \"purchase_number\":rec.purchase_number,\t\"customer_first_name\":rec.customer_first_name,\n \"customer_last_name\":rec.customer_last_name,\t\"customer_address\":rec.customer_address,\n \"total_price\":rec.total_price,\t\"date\":rec.date,\t\"state\":rec.state,\n \"customer_id\":rec.customer_id}\n res_categories.append(json_rec)\n \n res = 
make_response(jsonify({\"message\": res_categories}), 200)\n return res\n \n if req['command'] == 'get_filtered_receipts':\n if req['rec_id'] not in (' ', ''):\n receipts = Receipt.query.filter_by(id = req['rec_id']).all()\n else:\n receipts = Receipt.query.order_by(desc(Receipt.date)).all()\n res_categories = []\n for rec in receipts:\n json_rec = {\"id\":rec.id, \"product_name\":rec.product_name,\n \"purchase_number\":rec.purchase_number,\t\"customer_first_name\":rec.customer_first_name,\n \"customer_last_name\":rec.customer_last_name,\t\"customer_address\":rec.customer_address,\n \"total_price\":rec.total_price,\t\"date\":rec.date,\t\"state\":rec.state,\n \"customer_id\":rec.customer_id}\n res_categories.append(json_rec)\n \n res = make_response(jsonify({\"message\": res_categories}), 200)\n return res\n \n \n return render_template(\"admin.html\")\n\n\n@views.route('/user', methods = ['POST', 'GET'])\n@login_required\ndef user():\n if current_user.is_admin == 1:\n return redirect(url_for('views.admin'))\n if request.method == 'POST':\n req = request.get_json()\n if req['command'] == 'get_receipts':\n receipts = Receipt.query.filter_by(customer_id = current_user.id).order_by(desc(Receipt.date)).all()\n res_categories = []\n for rec in receipts:\n json_rec = {\"id\":rec.id, \"product_name\":rec.product_name,\n \"purchase_number\":rec.purchase_number,\t\"customer_first_name\":rec.customer_first_name,\n \"customer_last_name\":rec.customer_last_name,\t\"customer_address\":rec.customer_address,\n \"total_price\":rec.total_price,\t\"date\":rec.date,\t\"state\":rec.state,\n \"customer_id\":rec.customer_id}\n res_categories.append(json_rec)\n \n res = make_response(jsonify({\"message\": res_categories}), 200)\n return res\n \n elif req['command'] == 'get_current_user':\n res = make_response(jsonify({\"message\": [{\"user_id\":current_user.id, \"user_first_name\":current_user.first_name,\n \"user_credit\":current_user.charge}]}), 200)\n return res\n \n \n return render_template(\"user.html\",user=current_user)\n\n@views.route('/signin')\ndef signin():\n return render_template(\"signin.html\",user=current_user)\n\n@views.route('/admin/create_product')\n@login_required\ndef create_product():\n if current_user.is_admin == 0:\n return redirect(url_for('views.user'))\n return render_template(\"create_product.html\")\n\n\n@views.route('/admin/edit_product', methods = ['GET', 'POST'])\n@login_required\ndef edit_product():\n if current_user.is_admin == 0:\n return redirect(url_for('views.user'))\n global current_product\n if request.method == 'POST':\n req = json.loads(request.get_data())\n current_product = req['product_name']\n render_template(\"edit_product.html\", product_name = create_product)\n return redirect(url_for(\"views.edit_product\"))\n print(request.method, request.get_json())\n return render_template(\"edit_product.html\", product = current_product)\n\n\n@views.route('/user/shop_basket', methods = ['GET'])\n@login_required\ndef get_shop_basket():\n if current_user.is_admin == 1:\n return redirect(url_for('views.admin'))\n return render_template(\"shop_basket.html\", user = current_user)\n\n\ncurrent_product = ''\n ","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"235462306","text":"#!/usr/bin/env python3 -m pytest\nfrom plex_trakt_sync.walker import Walker\nfrom tests.conftest import factory\n\nplex = factory.plex_api()\nmf = 
factory.media_factory()\n\n\ndef test_walker():\n w = Walker(plex, mf)\n assert type(w) == Walker\n\n w.add_library(\"TV Shows\")\n w.add_library(\"Movies (Tuti)\")\n w.add_show(\"Breaking Bad\")\n w.add_movie(\"Batman Begins\")\n\n episodes = list(w.find_episodes())\n movies = list(w.find_movies())\n\n assert len(episodes) == 0\n assert len(movies) == 0\n","sub_path":"tests/test_walker.py","file_name":"test_walker.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"288082723","text":"\"\"\"\nOriginal Developer: Jonathan Ward\n\"\"\"\n\n# Standard Modules:\nimport numpy as np\nimport scipy.signal\n\n# Custom Modules:\nimport parameters\nimport smoothing_interpolate\nimport util\nimport curvature\n\n\nclass TubePoint(object):\n\n def compute_pylon_cost(self):\n if self.is_underground:\n pylon_cost = 0\n else:\n height_cost = (self.pylon_height *\n parameters.PYLON_COST_PER_METER + 5*self.pylon_height**2)\n base_cost = parameters.PYLON_BASE_COST\n pylon_cost = height_cost + base_cost\n return pylon_cost\n\n def build_pylon(self):\n pylon = {\"height\" : self.pylon_height, \n \"cost\" : self.pylon_cost,\n \"latlng\" : self.latlng.tolist(),\n \"elevation\" : self.land_elevation,\n \"arcLength\" : self.arc_length,\n \"index\" : self.index}\n return pylon\n\n def __init__(self, arc_length, land_elevation, tube_elevation, latlng, index):\n self.arc_length = arc_length\n if land_elevation == None:\n print(\"bad land elevation\")\n print(land_elevation)\n raise ValueError\n self.land_elevation = land_elevation\n self.index = index\n self.tube_elevation = tube_elevation\n self.latlng = latlng \n self.pylon_height = tube_elevation - land_elevation\n self.is_underground = (self.pylon_height < 0)\n self.pylon_cost = self.compute_pylon_cost()\n\n\nclass TubeEdge(object):\n \n def compute_tunnel_cost(self, edge_length, tube_point_a, tube_point_b):\n if tube_point_a.is_underground and tube_point_b.is_underground:\n tunneling_cost = (edge_length *\n parameters.TUNNELING_COST_PER_METER)\n if tube_point_a.is_underground and not tube_point_b.is_underground:\n tunneling_cost = (0.5 * edge_length *\n parameters.TUNNELING_COST_PER_METER)\n if not tube_point_a.is_underground and tube_point_b.is_underground:\n tunneling_cost = (0.5 * edge_length *\n parameters.TUNNELING_COST_PER_METER)\n if not tube_point_a.is_underground and not tube_point_b.is_underground:\n tunneling_cost = 0.0 \n return tunneling_cost\n\n def compute_tube_cost(self, edge_length):\n tube_cost = edge_length * parameters.TUBE_COST_PER_METER\n return tube_cost\n\n def compute_edge_length(self, tube_point_a, tube_point_b):\n tube_coords_a = [tube_point_a.arc_length, tube_point_a.tube_elevation]\n tube_coords_b = [tube_point_b.arc_length, tube_point_b.tube_elevation]\n edge_vector = np.subtract(tube_coords_b, tube_coords_a)\n edge_length = np.linalg.norm(edge_vector)\n return edge_length\n\n def __init__(self, tube_point_a, tube_point_b):\n edge_length = self.compute_edge_length(tube_point_a, tube_point_b)\n self.start_arc_length = tube_point_a.arc_length\n self.start_pylon_cost = tube_point_a.pylon_cost\n self.tube_cost = self.compute_tube_cost(edge_length)\n self.tunnel_cost = self.compute_tunnel_cost(edge_length, tube_point_a, \n tube_point_b)\n\nclass NaiveTubeProfile(object):\n\n def compute_tube_elevations(self, arc_lengths, land_elevations, latlngs,\n peak_resolution=None):\n try: \n np.sum(land_elevations)\n except TypeError: \n 
print(land_elevations)\n if peak_resolution == None:\n peak_resolution = 1\n num_land_elevations = len(land_elevations)\n if num_land_elevations < 5:\n tube_elevations = [max(land_elevations)] * num_land_elevations\n tube_curvature_array = [0] * num_land_elevations\n return [tube_elevations, tube_curvature_array]\n else:\n interior_land_elevations = land_elevations[peak_resolution:\n -peak_resolution]\n interior_land_elevation_peaks_indices_tuple = \\\n scipy.signal.argrelmax(interior_land_elevations,\n order=peak_resolution)\n land_elevation_peaks_indices = (\n interior_land_elevation_peaks_indices_tuple[0] + peak_resolution)\n land_elevation_peaks_indices = land_elevation_peaks_indices.tolist()\n land_elevation_peaks_indices.insert(0,0)\n land_elevation_peaks_indices.append(land_elevations.shape[0] - 1)\n\n tube_elevation_function = scipy.interpolate.PchipInterpolator(\n [arc_lengths[i] for i in land_elevation_peaks_indices], \n [land_elevations[i] for i in land_elevation_peaks_indices])\n tube_elevations = tube_elevation_function(arc_lengths)\n pylon_elevations = np.array(tube_elevations)-np.array(land_elevations)\n\n try: \n np.sum(pylon_elevations)\n except TypeError: \n print(pylon_elevations)\n\n peak_resolution = 200\n num_pylon_elevations = len(pylon_elevations)\n if num_land_elevations < 5:\n tube_elevations = [max(pylon_elevations)] * num_pylon_elevations\n tube_curvature_array = [0] * num_pylon_elevations\n return [tube_elevations, tube_curvature_array]\n\n else:\n interior_pylon_elevations = pylon_elevations[peak_resolution:\n -peak_resolution]\n interior_pylon_elevation_peaks_indices_tuple = \\\n scipy.signal.argrelmax(interior_pylon_elevations,\n order=peak_resolution)\n pylon_elevation_peaks_indices = (\n interior_pylon_elevation_peaks_indices_tuple[0] + peak_resolution)\n pylon_elevation_peaks_indices = pylon_elevation_peaks_indices.tolist()\n pylon_elevation_peaks_indices.insert(0,0)\n pylon_elevation_peaks_indices.append(pylon_elevations.shape[0] - 1)\n adjusted_land_elevations = np.copy(land_elevations)\n maxdiff = (max(land_elevations)-min(land_elevations))/15.0\n n = 1\n for i in sorted(land_elevation_peaks_indices, key=lambda i: land_elevations[i], reverse=True):\n adjusted_land_elevations[i] = land_elevations[i]-(maxdiff*n**(-2))\n n = n + 1\n n = 1\n for i in sorted(pylon_elevation_peaks_indices, key=lambda i: pylon_elevations[i], reverse=True):\n adjusted_land_elevations[i] = land_elevations[i]+(maxdiff*n**(-2))\n n = n + 1\n land_elevation_peaks_indices = sorted(set(land_elevation_peaks_indices + pylon_elevation_peaks_indices))\n tube_elevation_function = scipy.interpolate.PchipInterpolator([arc_lengths[i] for i in land_elevation_peaks_indices], \n [adjusted_land_elevations[i] for i in land_elevation_peaks_indices])\n tube_elevations = tube_elevation_function(arc_lengths)\n\n # peaklist_byheight = sorted(land_elevation_peaks_indices, key=lambda i: land_elevations[i], reverse=True)\n\n # def lower(elt, proportion):\n # land_elevations[peaklist_byheight[elt]] = (land_elevations[peaklist_byheight[elt]] - \n # proportion*(land_elevations[peaklist_byheight[elt]] - \n # max(land_elevations[land_elevation_peaks_indices.index(peaklist_byheight[elt])+1],\n # land_elevations[land_elevation_peaks_indices.index(peaklist_byheight[elt])-1])))\n\n # def compute_cost(tube_elevations):\n # tube_points = []\n # for i in range(len(arc_lengths)): \n # tube_point = TubePoint(arc_lengths[i], land_elevations[i],\n # tube_elevations[i], latlngs[i])\n # tube_points.append(tube_point)\n # 
tube_edges = [TubeEdge(tube_points[i], tube_points[i + 1])\n # for i in range(len(tube_points) - 1)]\n # tube_costs = [tube_edge.tube_cost for tube_edge in tube_edges]\n # tunneling_costs = [tube_edge.tunneling_cost for tube_edge in tube_edges]\n # pylons_costs = [tube_point.pylon_cost for tube_point in tube_points]\n # total_pylon_cost = sum(pylons_costs)\n # tube_cost = sum(tube_costs)\n # tunneling_cost = sum(tunneling_costs)\n # return total_pylon_cost + tube_cost + tunneling_cost\n\n # def lowering_is_good(i):\n # tube_elevation_function = scipy.interpolate.PchipInterpolator([arc_lengths[i] for i in land_elevation_peaks_indices], [land_elevations[i] for i in land_elevation_peaks_indices])\n # tube_elevations = tube_elevation_function(arc_lengths)\n # old_cost = compute_cost(tube_elevations)\n # lower(i, .333)\n # tube_elevation_function = scipy.interpolate.PchipInterpolator([arc_lengths[i] for i in land_elevation_peaks_indices], [land_elevations[i] for i in land_elevation_peaks_indices])\n # tube_elevations = tube_elevation_function(arc_lengths)\n # new_cost = compute_cost(tube_elevations)\n # lower(i, -.5)\n # return new_cost < old_cost\n\n # for i in range(0,min(6,len(peaklist_byheight)-1)):\n # print (\"peak number is:\")\n # print (i)\n # attempts = 0\n # while (lowering_is_good(i) and attempts < 4):\n # lower(i, .333)\n # attempts +=1\n\n # print (\" \")\n\n yprime = tube_elevation_function.derivative(1)\n ydoubleprime = tube_elevation_function.derivative(2)\n tube_curvature_array = np.array([np.absolute(ydoubleprime(s)/(1 + yprime(s)**2)**1.5) for s in arc_lengths])\n return [tube_elevations, tube_curvature_array]\n\n def build_pylons(self):\n pylons = [tube_point.build_pylon() for tube_point in self.tube_points]\n return pylons\n\n def build_cost_segments(self, tube_edges):\n cost_segment_step_size = 1000 #1 Kilometer\n tube_arc_length_counter = cost_segment_step_size \n cost_segments = [0]\n for tube_edge in tube_edges:\n if tube_edge.start_arc_length < tube_arc_length_counter:\n cost_segments[-1] += (tube_edge.tube_cost + \n tube_edge.tunnel_cost + tube_edge.start_pylon_cost)\n else:\n cost_segments.append(0)\n cost_segments[-1] += (tube_edge.tube_cost + \n tube_edge.tunnel_cost + tube_edge.start_pylon_cost)\n tube_arc_length_counter += cost_segment_step_size\n #if len(cost_segments) > 20:\n # print(\"cost_segments\")\n # print(cost_segments)\n return cost_segments\n\n def __init__(self, elevation_profile, peak_resolution=None):\n arc_lengths = elevation_profile.arc_lengths \n land_elevations = elevation_profile.land_elevations\n latlngs = elevation_profile.latlngs\n tube_elevations, tube_curvature_array = self.compute_tube_elevations(\n arc_lengths, land_elevations, latlngs, peak_resolution)\n tube_points = []\n for i in range(len(arc_lengths)): \n index = i + 1\n tube_point = TubePoint(arc_lengths[i], land_elevations[i],\n tube_elevations[i], latlngs[i], index)\n tube_points.append(tube_point)\n tube_edges = [TubeEdge(tube_points[i], tube_points[i + 1])\n for i in range(len(tube_points) - 1)]\n cost_segments = self.build_cost_segments(tube_edges)\n tube_costs = [tube_edge.tube_cost for tube_edge in tube_edges]\n tunnel_costs = [tube_edge.tunnel_cost for tube_edge in tube_edges]\n pylons_costs = [tube_point.pylon_cost for tube_point in tube_points] \n self.arc_lengths = arc_lengths\n self.tube_points = tube_points\n self.cost_segments = cost_segments\n self.land_elevations = land_elevations\n self.tube_elevations = tube_elevations\n self.tube_curvature_array = 
tube_curvature_array\n self.total_pylon_cost = sum(pylons_costs)\n self.tube_cost = sum(tube_costs)\n self.tunneling_cost = sum(tunnel_costs)\n\n","sub_path":"test/tube_naive.py","file_name":"tube_naive.py","file_ext":"py","file_size_in_byte":12755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"650194025","text":"import pygame\nfrom pygame.locals import *\nfrom Controller import *\nimport MainMenu\n\nclass Menu:\n \n class Option:\n\n hovered = False\n font = None\n \n def __init__(self, text, pos, font, surface):\n self.text = text\n self.pos = pos\n self.font = font\n self.surface = surface\n self.set_rect()\n self.draw()\n \n def draw(self):\n self.set_rend()\n self.surface.blit(self.rend, self.rect)\n \n def set_rend(self):\n self.rend = self.font.render(self.text, True, self.get_color())\n \n def get_color(self):\n if self.hovered:\n return (255, 255, 255)\n else:\n return (100, 100, 100)\n \n def set_rect(self):\n self.set_rend()\n self.rect = self.rend.get_rect()\n self.rect.topleft = self.pos\n\n def pauseMenuStart(self, controller):\n self.c = controller\n global pause\n while True:\n pygame.event.pump()\n pygame.mouse.set_visible(1)\n for option in self.options:\n if option.rect.collidepoint(pygame.mouse.get_pos()):\n option.hovered = True\n else:\n option.hovered = False\n option.draw()\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == QUIT:\n return \"Quit\"\n if self.options[0].hovered == True:\n if event.type == MOUSEBUTTONDOWN:\n pygame.mouse.set_visible(False)\n return \"Continue\"\n \n if self.options[1].hovered == True:\n if event.type == MOUSEBUTTONDOWN:\n pygame.mouse.set_visible(True)\n return \"Exit\"\n \n def __init__(self, controller):\n pauseMenuFont = pygame.font.Font(None, 40)\n\n self.c = controller\n self.surface = controller.screen\n\n cont = self.Option(\"CONTINUE\", (178, 330), pauseMenuFont, self.surface)\n quit = self.Option(\"QUIT\", (222, 370), pauseMenuFont, self.surface)\n \n self.options = [cont, quit]\n \n def pause(self, controller):\n return self.pauseMenuStart(controller)\n \n","sub_path":"PauseMenu.py","file_name":"PauseMenu.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"540392571","text":"#!/usr/bin/python3\n\nimport os\nimport time\n\ndebug = 0\nif debug == 0:\n f = os.system\nelse:\n f = print\n\n\nvoterList = []\n\nfor line in open('vo_keys'):\n accName = line[:line.find(',')]\n if accName[0:2] == 'vo':\n voterList.append(accName)\n\n\n#all voter stake\n\nn = 0\nfor voter in voterList:\n amount = str(4500000+n)\n f('claac system delegatebw '+ voter +' ' + voter+''' \"'''+amount + ''' AAC\" \"'''+amount + ''' AAC\"''')\n n+=10000\n print(\"voter: \"+voter+ \" stake token ok!\")\n time.sleep(0.02)\n\n","sub_path":"tests/ck_test/0-vote-test-scripts/8-voter_stake.py","file_name":"8-voter_stake.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"58909406","text":"# Script to pull down all the australian tools in ToosAU into a JSON file\n# readable by ToolsAU.\n# Madison Flannery, 2016.\n\nimport urllib2\nimport json\n\n# The URL for the bio.tools API.\nBIO_TOOLS_URL=\"https://bio.tools/api/tool?page=\"\n\n# Search terms.\n# We search for country, email, and capital cities.\nAUSTRALIA = \"Australia\"\nEMAIL=\"*.edu.au\"\nCAPITAL_CITIES=[\"Brisbane\", \"Sydney\", 
\"Melbourne\", \"Hobart\", \"Adelaide\", \"Perth\", \"Darwin\"]\n\n# API request params, appended to URL.\nCRITERIA_CONTACT = \"contact=\"\nCRITERIA_EMAIL = \"contactEmail=\"\nCRITERIA_INSTITUTION = \"creditsInstitution=\"\n\n# Build the search URL's.\nqueries = [CRITERIA_INSTITUTION + city for city in CAPITAL_CITIES]\nqueries.append(CRITERIA_EMAIL + EMAIL)\nqueries.append(CRITERIA_CONTACT + AUSTRALIA)\n\nresults = []\n\n# Do a query for each URL.\nfor query in queries:\n page_num = 1\n\n # Make sure we get all pages of the query results.\n # API will return 25 results at a time.\n while True:\n # Query and load JSON response.\n response = urllib2.urlopen(BIO_TOOLS_URL + str(page_num) + \"&\" + query)\n data = json.load(response)\n\n for item in data['list']:\n # Ignore duplicates.\n if item not in results:\n results.append(item)\n # Break if we have no more pages of the query to do.\n if data['next'] == None:\n break\n page_num += 1\n\n# Some output.\nprint('Number of Query Results: ' + str(len(results)))\n\n# If we actually have some results, i.e. things went well.\nif(len(results) > 0):\n # Sort the results alphabetically, ignore case.\n results = sorted(results,key=lambda x:x['name'].lower())\n # Dump to file.\n with open('au_tools.json', 'w') as outfile:\n json.dump(results, outfile)\n","sub_path":"toolsAU.py","file_name":"toolsAU.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"50915085","text":"ipadd1=input(\"IP 1 : \")\nipadd2=input(\"IP 2 : \")\n\n#converting IP address in list\nip1 = ipadd1.split(\".\")\nip2 = ipadd2.split(\".\")\n\n#Converting IP address in binary numbers\nipx1=('{0:08b}.{1:08b}.{2:08b}.{3:08b}'.format(int(ip1[0]),int(ip1[1]),int(ip1[2]),int(ip1[3])))\nipx2=('{0:08b}.{1:08b}.{2:08b}.{3:08b}'.format(int(ip2[0]),int(ip2[1]),int(ip2[2]),int(ip2[3])))\n\n#printing the binary converted IP addresses\nprint(ipx1)\nprint(ipx2)\n\n#Taking netmask as an input\nnetmask=int(input(\"Netmask : \"))\n\n#Using the input netmask and performing associated convertion using the following function:\ndef concatenate_list_data(list):\n result= ''\n for element in list:\n result += str(element)\n for x in range(32-int(len(result))):\n result+=str(0)\n #print(result)\n return(result)\n\nnmf = concatenate_list_data([int(1) for x in range(netmask)])\n#print(nmf)\n\n#Splitting the netmask into a list of 8 bits each\nl=[nmf[i:i+8] for i in range(0,len(nmf),8)]\n#print(l)\n\n#Using join() to concatenate the list seperated by a \".\"\nh=\".\"\nh=h.join(l)\nprint(h)\n\n#Splitting up the IP addrresses and joining them to perforn the required AND operation\ne=\"\"\nf=\"\"\nui = ipx1.split(\".\")\n#print(ui)\ne=e.join(ui)\n\n#e=int(e)\n#print(e)\n\nux =ipx2.split(\".\")\nf=f.join(ux)\n#f=int(f)\n\nnmf=int(nmf)\n#print(nmf)\n\ninputA=int(e,2)\ninputB=int(f,2)\n\n\n#Checking for the given condition using the AND operator\nif(inputA & nmf!=inputB & nmf):\n print(\"Belongs to different subnet\")\nelse:\n print(\"Belongs to same subnet\")\n","sub_path":"a02.py","file_name":"a02.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"210913934","text":"##############################################################################\n# Import some libraries\n##############################################################################\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as 
plt\nfrom scipy.optimize import curve_fit\n\n##############################################################################\n# Import some extra special libraries from my own repo and do some other stuff\n##############################################################################\nsys.path.insert(0, r\"D:\\Python\\Local Repo\\library\")\nnp.set_printoptions(suppress=True)\nimport prd_plots\nimport prd_file_import\nimport prd_data_proc\nimport prd_maths\ncs = prd_plots.palette()\n\n##############################################################################\n# Do some stuff\n##############################################################################\n# Specify results directory and change working directory to this location\np0 = (r\"D:\\Experimental Data\\F5 L10 Spectrometer\\Spec data 20190516\")\n# p0 = (r\"D:\\Experimental Data\\Internet Thorlabs optics data\"))\nos.chdir(p0)\n# Generate list of relevant data files and sort them chronologically\nroi = 80\n\nλs, ctss, lbs = prd_file_import.load_spec_dir(p0)\nxs0 = λs[1]\nys0 = ctss[1]\n\n# Use gin to get first approximation for peak location\npts = prd_plots.gin(λs[0], ctss[0], 0,\n 'politely click peak locations and smash enter')\n\npk_λs = []\npk_idxs = []\nfit_data = []\n# Loop over data in directory and perform fits on each spec, for each peak\nfor i0, val0 in enumerate(pts):\n pk_λ = str(int(np.round(pts[i0][0])))\n pk_lb = 'peak ' + str(i0) + ' (' + pk_λ + ' nm)'\n λ_pk, idx_pk = prd_maths.find_nearest(xs0, pts[i0, 0])\n pk_λs.append(λ_pk)\n pk_idxs.append(idx_pk)\n # Restrict data set to roi of interest\n x_roi = xs0[int(idx_pk - roi / 2):int(idx_pk + roi / 2)]\n y_roi = ys0[int(idx_pk - roi / 2):int(idx_pk + roi / 2)]\n # Extract first guess values for fitting\n μ = λ_pk\n σ = 0.1\n bkg = np.mean(y_roi)\n # Set up higher resolution x axis for fit\n x_fit = np.linspace(min(x_roi), max(x_roi), 1000)\n # Perform fit\n popt, pcov = curve_fit(prd_maths.Gaussian_1D,\n x_roi, y_roi, p0=[1, μ, σ, bkg])\n\n As, μs, σs, Ps = prd_data_proc.spec_seq_Gauss_fit_20190516(p0,\n popt,\n idx_pk,\n roi,\n pk_lb)\n fit_data.append([As, μs, σs, Ps])\n data_name = pk_lb + '.dat'\n data = np.column_stack((Ps, As, μs, σs))\n header = \"Powers, Gaussian Amplitudes, Gaussian centres, Gaussian widths\"\n np.savetxt(data_name, data, header=header)\nprint(len(fit_data))\nprd_plots.ggplot()\nsize = 4\nfig1 = plt.figure('fig1', figsize=(size * np.sqrt(2), size))\nax1 = fig1.add_subplot(1, 1, 1)\nfig1.patch.set_facecolor(cs['mnk_dgrey'])\nax1.set_xlabel('Wavelength (λ - nm)')\nax1.set_ylabel('Counts')\nax1.set_title('Labelled spectrum')\nax1.plot(xs0, ys0, '-.', markersize=2, lw=0.5,\n alpha=1, color=cs['gglred'], label='')\n\npk_ys = [ys0[i] for i in pk_idxs]\nfor i0, val0 in enumerate(fit_data):\n pk_x = fit_data[i0][1][1]\n pk_y = prd_maths.Gaussian_1D(pk_x,\n fit_data[i0][0][1],\n fit_data[i0][1][1],\n fit_data[i0][2][1])\n ax1.plot(pk_x, pk_y, '.',\n mfc=cs['ggblue'],\n mec=cs['ggblue'],\n label='peak ' + str(i0))\n ax1.text(pk_x, pk_y, ' peak ' + str(i0))\n\nfig1.tight_layout()\nplt.show()\nprd_plots.PPT_save_2d(fig1, ax1, 'peak labels.png')\n","sub_path":"Experimental analysis/Quantum dots/spectral gaussian fits 20190516.py","file_name":"spectral gaussian fits 20190516.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"426733005","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, 
migrations\nimport datetime\nimport image_cropping.fields\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('website', '0024_pressclipping'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='FenaconMidia',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(help_text=b'Titulo da Noticia', max_length=90, verbose_name=b'T\\xc3\\xadtulo')),\n ('subtitle', models.CharField(help_text=b'Para Manchetes', max_length=100, null=True, verbose_name=b'Sub-t\\xc3\\xadtulo', blank=True)),\n ('text', models.TextField(verbose_name=b'Texto')),\n ('slug', models.SlugField(unique=True, max_length=200, blank=True)),\n ('credit', models.CharField(help_text=b'Creditos da foto.', max_length=50, verbose_name=b'Cr\\xc3\\xa9dito', blank=True)),\n ('font', models.CharField(help_text=b'Fonte da not\\xc3\\xadcia', max_length=50, verbose_name=b'Fonte', blank=True)),\n ('author', models.CharField(help_text=b'Autor da not\\xc3\\xadcia', max_length=50, verbose_name=b'Autor', blank=True)),\n ('featured_image', models.ImageField(help_text=b'Dimens\\xc3\\xb5es 600x335px ou maior - JPEG', upload_to=b'uploads/noticias/%Y/%m/', verbose_name=b'Imagem Destaque', blank=True)),\n (b'featured_big', image_cropping.fields.ImageRatioField(b'featured_image', '600x335', hide_image_field=False, size_warning=False, allow_fullsize=False, free_crop=False, adapt_rotation=False, help_text=b'Destaque princial na home.', verbose_name=b'Destaque Grande')),\n (b'featured_medium', image_cropping.fields.ImageRatioField(b'featured_image', '470x180', hide_image_field=False, size_warning=False, allow_fullsize=False, free_crop=False, adapt_rotation=False, help_text=b'Destaque secund\\xc3\\xa1rio home.', verbose_name=b'Destaque M\\xc3\\xa9dio')),\n (b'featured_small', image_cropping.fields.ImageRatioField(b'featured_image', '170x170', hide_image_field=False, size_warning=False, allow_fullsize=False, free_crop=False, adapt_rotation=False, help_text=b'Imagem para listagem de not\\xc3\\xadcias e \\xc3\\xbaltimas no\\xc3\\xadticas.', verbose_name=b'Destaque Pequeno')),\n ('published_at', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Data de Publicacao')),\n ('status', models.BooleanField(default=True, help_text=b'Se esta op\\xc3\\xa7\\xc3\\xa3o estiver desmarcada os usu\\xc3\\xa1rios do portal n\\xc3\\xa3o ir\\xc3\\xa3o mais ver esta not\\xc3\\xadcia', verbose_name=b'Noticia Ativa?')),\n ('featured', models.BooleanField(default=False, help_text=b'Coloca a not\\xc3\\xadcia em destaque. Deve possuir imagem destaque.', verbose_name=b'Destaque secund\\xc3\\xa1rio')),\n ('user', models.ForeignKey(verbose_name=b'Usu\\xc3\\xa1rio', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ('-published_at',),\n 'get_latest_by': 'published_at',\n 'verbose_name': 'FENACON na M\\xeddia',\n 'verbose_name_plural': 'FENACON na M\\xeddia',\n },\n ),\n ]\n","sub_path":"apps/website/migrations/0025_fenaconmidia.py","file_name":"0025_fenaconmidia.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"140794724","text":"\"\"\" This is magic glue for integrating the frontend and backend.\n\n This is NOT the place for backend customizations. 
Go to\n api/historic_hebrew_dates_ui/settings.py instead.\n\"\"\"\n\nimport os.path as op\n\nhere = op.dirname(op.abspath(__file__))\n\n# First, import the standard backend settings. This requires some\n# magic because the backend directory itself is not a Python package.\n# Imitated from https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n# or\n# https://stackoverflow.com/a/29855240\n# (respectively for Python >= 3.5 and Python 3.4)\n\nimport sys\nfrom importlib import util, machinery\n\nsettings_name = 'settings'\nsettings_path = op.join(here, 'api', 'historic_hebrew_dates_ui', 'settings.py')\n\nif sys.version_info >= (3, 5):\n spec = util.spec_from_file_location(settings_name, settings_path)\n settings = util.module_from_spec(spec)\n spec.loader.exec_module(settings)\nelse:\n settings = machinery.SourceFileLoader(settings_name, settings_path).load_module()\n\nsys.modules[settings_name] = settings\n\nfrom settings import *\n\n# Next, augment the settings to make the backend aware of the frontend.\n\nSTATICFILES_DIRS += [\n op.join(here, 'web-ui', 'dist'),\n op.join(here, 'web-ui', 'node_modules'),\n]\n\n\nPROXY_FRONTEND = \"http://localhost:4200\"\n\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"481993993","text":"from os import getenv\nfrom subprocess import Popen, PIPE\n\n\n# ---------------------------------------------------------------------------- #\n# API key:\napi_key = getenv(\"CONSUMER_KEY\")\n# API secret key:\napi_secret = getenv(\"CONSUMER_SECRET\")\n# Access token: \naccess_token = getenv(\"API_KEY\")\n# Access token secret: \naccess_token_secret = getenv(\"API_SECRET\")\n\n\n# ---------------------------------------------------------------------------- #\ndef create_auth_json():\n #Create auth.json file for twitter-to-sqlite\n p = Popen(['twitter-to-sqlite', 'auth'], stdin=PIPE)\n p.stdin.write(f\"{api_key}\\n\".encode())\n p.stdin.write(f\"{api_secret}\\n\".encode())\n p.stdin.write(f\"{access_token}\\n\".encode())\n p.stdin.write(f\"{access_token_secret}\\n\".encode())\n p.stdin.flush()\n return\n\n\n# ---------------------------------------------------------------------------- #\nif __name__ == \"__main__\":\n create_auth_json()","sub_path":"bot/create_auth_json.py","file_name":"create_auth_json.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"335749710","text":"variable = [\"I0\",\"I1\",\"I2\",\"I3\",\"1\",\"0\"]\nstack = []\ntree_list = [\"\"]*32\n#max_position_of_or = 0\nstack_or = []\nimport filter\n\ndef Tree(left, right, root, test, q):\n # ******************************preorder********************************\n stack_or = []\n stack_and = []\n RUN_OR = True\n test = obj.check_last(test)\n for i in range(len(test)):\n # check \"(\" and \")\"\n if( test[i] == \"(\" ):\n RUN_OR = False\n stack.append(i)\n elif( test[i] == \")\"):\n position = stack.pop()\n\n # if stack is empty\n if(len(stack) == 0):\n RUN_OR = True\n\n # filter \"!\" sinario !(I0&I1), (!I1&I2) and !(I0)\n if( test[i] == \"!\" and (test[i+1] in variable or test[i+1] == \"(\") ):\n if(RUN_OR == False):\n pass\n elif(RUN_OR == True):\n Is_joke = obj.check_last(test[i+1:])\n #print(Is_joke, test[i], right)\n root = test[i]\n left = Is_joke\n right = right\n\n #print(RUN_OR)\n #print(f\"test:{test} \\nleft {left} \\nroot 
{root} \\nright {right}\\n-----------------------\")\n \n if(RUN_OR):\n # find the max position of + for split to left root right\n if(test[i] == \"+\"):\n max_position_of_or = i\n root = test[max_position_of_or]\n left = test[:max_position_of_or]\n right = test[max_position_of_or+1:]\n stack_or.append(max_position_of_or)\n\n if(test[i] == \"&\"):\n max_position_of_and = i\n root = test[max_position_of_and]\n left = test[:max_position_of_and]\n right = test[max_position_of_and+1:]\n stack_and.append(max_position_of_and)\n\n if(len(stack_or) > 0):\n root = test[max_position_of_or]\n left = test[:max_position_of_or]\n right = test[max_position_of_or+1:]\n \n if(len(stack_and) > 0):\n root = test[max_position_of_and]\n left = test[:max_position_of_and]\n right = test[max_position_of_and+1:]\n\n pl = (2*q)+1\n pr = (2*q)+2\n tree_list[q] = root\n tree_list[pl] = left\n tree_list[pr] = right\n # *** (2*q)+1 = left index\n # *** (2*q)+2 = right index\n # *** q = root index\n\n # meter checking value recursive value\n #print(f\"test:{test} \\nleft {left} \\nroot {root} \\nright {right}\\n-----------------------\")\n if(len(left) == 1):\n tree_list[pl] = left[0]\n if( len(right) > 1):\n Tree(\"\", \"\", \"\", right, pr)\n else:\n if(right == \"\"):\n tree_list[pr] = right\n else:\n tree_list[pr] = right[0]\n\n return tree_list\n\n Tree(\"\", \"\", \"\", left, pl)\n\n if( len(right) > 1 ):\n Tree(\"\", \"\", \"\", right, pr)\n else:\n #print(pl,q,pr)\n if(right == \"\"):\n tree_list[pr] = right\n else:\n tree_list[pr] = right[0]\n\n return tree_list\n\n#((I2&I1)+(I0&I1))\n# data set for testing my algorithm\ntest1 = [\"!\",\"(\",\"1\",\"+\",\"0\",\")\"] # OK\ntest2 = [\"!\",\"(\",\"!\",\"(\",\"0\",\"+\",\"I0\",\"&\",\"1\",\")\",\")\"] # OK\n\n#(I0+!I1+!(I2))&(!I0+I1+I2)\ntest3 = [\"(\",\"I0\",\"+\",\"!\",\"I1\",\"+\",\"!\",\"(\",\"I2\",\")\",\")\",\"&\",\"(\",\"!\"\n ,\"I0\",\"+\",\"I1\",\"+\",\"I2\",\")\"] # OK\n\n#\"!(I0&I1)+!(I1+I2)\"\ntest4 = [\"!\",\"(\",\"I0\",\"&\",\"I1\",\")\",\"+\",\"!\",\"(\",\"I1\",\"+\",\"I2\",\")\"]\n\n#\"(((I0&I1&!I2)+!I1)+I3)\"\ntest5 = [\"(\",\"(\",\"(\",\"I0\",\"&\",\"I1\",\"&\",\"!\",\"I2\",\")\",\"+\",\"!\",\"I1\",\")\",\"+\",\"I3\",\")\"]\n\n#((I2&I1)+(I0&I1))\ntest6 = [\"(\",\"(\",\"I2\",\"&\",\"I1\",\")\",\"+\",\"(\",\"I0\",\"&\",\"I1\",\")\",\")\"]\n\n#I2&I1+I0&I1\ntest7 = [\"I2\",\"&\",\"I1\",\"+\",\"I0\",\"&\",\"I1\"] # OK\n\nq = 0\nobj = filter.check_bracket()\n\nprint(\"#\",1,\"*\"*50)\ntree_list = [\"\"]*32\nprint(Tree('','','',test1, q))\n\nprint(\"#\",2,\"*\"*50)\ntree_list = [\"\"]*32\nprint(Tree('','','',test2, q))\n\nprint(\"#\",3,\"*\"*50)\ntree_list = [\"\"]*32\nprint(Tree('','','',test3, q))\n\nprint(\"#\",4,\"*\"*50)\ntree_list = [\"\"]*32\nprint(Tree('','','',test4, q))\n\nprint(\"#\",5,\"*\"*50)\ntree_list = [\"\"]*32\nprint(Tree('','','',test5, q))\n\nprint(\"#\",6,\"*\"*50)\ntree_list = [\"\"]*32\nprint(Tree('','','',test6, q))\n\nprint(\"#\",7,\"*\"*50)\ntree_list = [\"\"]*32\nprint(Tree('','','',test7, q))","sub_path":"Boolean expression string/Demo_version/Boolean_tree_recursive_left_and_right.py","file_name":"Boolean_tree_recursive_left_and_right.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"270443821","text":"from graph import Graph, Node\nimport struct\nimport re\n\n#Парсер заполняющий Граф.\nclass Reader:\n def read(file_path):\n g = Graph()\n\n with open(file_path, \"r\") as f:\n lines = [line for line in f]\n u, v = map(int, 
lines[0].strip().split(' '))\n for i in range(1, u + 1):\n #Записываем узел\n g.add_node(Node(i))\n for i in range(1, v + 1):\n (edge1, edge2) = map(int, lines[i].strip().split(' '))\n #Записываем ребро\n g.add_edge(Node(edge1), Node(edge2))\n g.start, g.finish = map(int, lines[len(lines) - 1].strip().split(' '))\n return g\n","sub_path":"reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"572123382","text":"print(\"██╗ ██╗███╗ ██╗ ██████╗\")\nprint(\"██║ ██║████╗ ██║██╔═══██╗\")\nprint(\"██║ ██║██╔██╗ ██║██║ ██║\")\nprint(\"██║ ██║██║╚██╗██║██║ ██║\")\nprint(\"╚██████╔╝██║ ╚████║╚██████╔╝\")\nprint( \"╚═════╝ ╚═╝ ╚═══╝ ╚═════╝ \")\n\nprint(\" \")\nprint(\" Es un juego que se juega de 2 a 10 jugadores\")\nprint(\" \")\nprint(\" Donde cada jugador recibe 7 cartas al empezar\")\nprint(\" \")\nprint(\" Tu objetivo es llegar a los 500 puntos para derrotar a tus compañeros\")\nprint(\" \")\nprint(\" ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄\")\nprint(\"▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌\")\nprint(\"▐░█▀▀▀▀▀▀▀▀▀ ▀▀▀▀█░█▀▀▀▀ ▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀▀▀▀█░▌ ▀▀▀▀█░█▀▀▀▀ \")\nprint(\"▐░▌ ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌ \")\nprint(\"▐░█▄▄▄▄▄▄▄▄▄ ▐░▌ ▐░█▄▄▄▄▄▄▄█░▌▐░█▄▄▄▄▄▄▄█░▌ ▐░▌ \")\nprint(\"▐░░░░░░░░░░░▌ ▐░▌ ▐░░░░░░░░░░░▌▐░░░░░░���░░░░▌ ▐░▌ \")\nprint(\" ▀▀▀▀▀▀▀▀▀█░▌ ▐░▌ ▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀█░█▀▀ ▐░▌ \")\nprint(\" ▐░▌ ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌ \")\nprint(\" ▄▄▄▄▄▄▄▄▄█░▌ ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌ \")\nprint(\"▐░░░░░░░░░░░▌ ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌ \")\nprint(\" ▀▀▀▀▀▀▀▀▀▀▀ ▀ ▀ ▀ ▀ ▀ ▀ \")\nprint( \" \")\n\nprint( ) \nprint(\"________ ________ ________ ________ ________ ________ ________ ________ ________\")\nprint('\"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\" \"\"\"\"\"\"\"\"')\nprint( ) \nfrom mesa import *\n\n\n\nwhile True:\n for s in range(0,len(jugadores)):\n os.system(\"cls\")\n tablero.accion(jugadores)\n if rondaIniciada:\n carta = tablero.inicial(barajas)\n rondaIniciada = False\n \n else:\n tablero.repCartas(carta)\n \n print(\" \")\n print(\"{} tu mano es :\".format(jugadores[s].nombre))\n print(\" \")\n jugadores[s].mostrarMano()\n print(\" \")\n jugadores[s].mostrarOpciones()\n desicion = input(\"¿Que deseas hacer?: \")\n while desicion not in [\"q\",\"r\",\"w\"]:\n os.system(\"cls\")\n tablero.accion(jugadores)\n if rondaIniciada:\n carta = tablero.inicial(barajas)\n rondaIniciada = False\n \n else:\n tablero.repCartas(carta)\n \n print(\" \")\n print(\"{} tu mano es :\".format(jugadores[s].nombre))\n print(\" \")\n jugadores[s].mostrarMano()\n print(\" \")\n\n jugadores[s].mostrarOpciones()\n desicion = input(\"¿Que deseas hacer?: \")\n\n if desicion == \"q\":\n try:\n opcion = int(input(\" Que carta deseas jugar? 
: \"))\n jugada = jugadores[s].jugarCarta(opcion)\n if jugada[0] in barajas.valorCartas:\n if tablero.validarCarta:\n if jugada[0] == \"Retorno\":\n jugadores.reverse()\n\n elif jugada[0] == \"Elegir color\":\n especiales.mostrarColores()\n color = input(\"Elige un color: \")\n especiales.cambiarColor(jugada,especiales.opcionColor(color))\n\n\n carta = jugada\n tablero.repCartas(carta)\n \n elif tablero.validarCarta(jugada,carta):\n carta = jugada\n tablero.repCartas(carta)\n \n else:\n jugadores[s].mano.append(jugada)\n print(\"Haz sido penalizado por jugada incorrecta\")\n barajas.robar(jugadores[s])\n time.sleep(2)\n \n except:\n print(\"Mira bien la longitud de tu mano\")\n print(\"Penalizado por no atencionar bien tu mano\")\n time.sleep(2)\n\n elif desicion == \"r\":\n barajas.robar(jugadores[s])\n time.sleep(2)\n \n else:\n try:\n jugadores[s].Uno()\n opcion = int(input(\" Que carta deseas jugar? : \"))\n jugada = jugadores[s].jugarCarta(opcion)\n if jugada[0] in barajas.valorCartas:\n if tablero.validarCarta:\n if jugada[0] == \"Retorno\":\n jugadores.reverse()\n\n\n if tablero.validarCarta(jugada,carta):\n carta = jugada\n tablero.repCartas(carta)\n \n else:\n jugadores[s].mano.append(jugada)\n print(\"Haz sido penalizado por jugada incorrecta\")\n barajas.robar(jugadores[s])\n time.sleep(1)\n time.sleep(2)\n\n except:\n print(\"Mira bien la longitud de tu mano\")\n print(\"Penalizado por no atencionar bien tu mano\")\n time.sleep(2)\n\n\n if len(jugadores[s].mano) >= 2 and jugadores[s].estado == \"Uno\":\n jugadores[s].estado = \"\"\n\n if len(jugadores[s].mano) == 1 and jugadores[s].estado == \"\":\n print(\"Haz sido penalizado por no decir 'Uno' \")\n barajas.robar(jugadores[s])\n time.sleep(2)\n \n\n if len(jugadores[s].mano) == 0:\n print(\"{} ha ganado esta ronda\".format(jugadores[s].nombre))\n jugadores[s].sumarPuntos(jugadores,jugadores[s],barajas)\n tablero = Mesa()\n tablero.accion(jugadores)\n barajas.repartir(jugadores)\n rondaIniciada = True\n jugadores[s].reiniciarMano(jugadores)\n jugadores[s].reiniciarEstado(jugadores)\n print(\"Nueva ronda\")\n time.sleep(2)\n os.system(\"cls\")\n\n\n\n if jugadores[s].verificarPuntos(jugadores):\n break\n","sub_path":"principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":7370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"489744961","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport sys\n\nclass Mlp():\n \n\n def __init__(self, size_layers, act_funct='sigmoid', bias_flag=True,learning_rate=1,mean_weights=1):\n self.learning_rate=learning_rate\n self.size_layers = size_layers\n self.n_layers = len(size_layers)\n self.act_f = act_funct\n self.bias_flag = bias_flag\n self.mean_weights=mean_weights\n self.theta_weights = []\n self.initialize_theta_weights()\n \n \n \n\n def train(self, X, Y, iterations=400):\n for iteration in range(iterations):\n self.gradients = self.backpropagation(X, Y)\n for ii in range( self.n_layers-1):\n self.theta_weights[ii]=self.theta_weights[ii]-((self.learning_rate)*self.gradients[ii])#0.000009 for sum\n \n\n def predict(self, X):\n\n A , Z = self.feedforward(X)\n \n Y_hat = A[-1]\n return Y_hat\n\n def initialize_theta_weights(self):\n size_next_layers = self.size_layers.copy()\n size_next_layers.pop(0)\n i=0\n for i in range(len(size_next_layers)):\n if self.bias_flag:\n theta_tmp = self.mean_weights*(np.random.randn(size_next_layers[i], self.size_layers[i] + 1 ))\n else:\n theta_tmp = 
self.mean_weights*(np.random.randn(size_next_layers[i], self.size_layers[i])) \n self.theta_weights.append(theta_tmp)\n \n return self.theta_weights\n\n def backpropagation(self, X, Y):\n \n if self.act_f == 'sigmoid':\n g_dz = lambda x: self.sigmoid_derivative(x)\n elif self.act_f == 'relu':\n g_dz = lambda x: self.relu_derivative(x)\n\n n_examples = X.shape[0]\n \n A, Z = self.feedforward(X)\n\n # Backpropagation\n deltas = [None] * self.n_layers\n deltas[-1] = A[-1] - Y\n #deltas[-1]=abs(deltas[-1])\n #deltas[-1] = deltas[-1]* g_dz(Z[-1])\n \n #print(Z[-1])\n \n for ix_layer in np.arange(self.n_layers - 2 , 0 , -1):\n #print(ix_layer)\n theta_tmp = self.theta_weights[ix_layer]\n if self.bias_flag:\n \n theta_tmp = np.delete(theta_tmp, np.s_[0], 1)\n deltas[ix_layer] = (np.matmul(theta_tmp.transpose(), deltas[ix_layer + 1].transpose() ) ).transpose() * g_dz(Z[ix_layer])\n #print(Z) \n #print(deltas[-1])\n \n gradients = [None] * (self.n_layers - 1)\n for ix_layer in range(self.n_layers - 1):\n grads_tmp = np.matmul(deltas[ix_layer + 1].transpose() , A[ix_layer])\n grads_tmp = grads_tmp / n_examples\n \n gradients[ix_layer] = grads_tmp;\n #print(gradients)\n\n #print(gradients) \n return gradients\n\n def feedforward(self, X):\n \n if self.act_f == 'sigmoid':\n g = lambda x: self.sigmoid(x)\n elif self.act_f == 'relu':\n g = lambda x: self.relu(x)\n\n A = [None] * self.n_layers\n Z = [None] * self.n_layers\n input_layer = X\n \n\n for ix_layer in range(self.n_layers - 1):\n n_examples = input_layer.shape[0]\n if self.bias_flag:\n \n input_layer = np.concatenate((np.ones([n_examples ,1]) ,input_layer), axis=1)\n A[ix_layer] = input_layer\n Z[ix_layer + 1] = np.matmul(input_layer, self.theta_weights[ix_layer].transpose() )\n #print(Z[ix_layer+1])\n \n output_layer = g(Z[ix_layer + 1])\n \n input_layer = output_layer\n #print(Z)\n A[self.n_layers - 1] = output_layer\n return A, Z\n\n\n def sigmoid(self, z):\n \n result = 1.0 / (1.0 + np.exp(-z))\n return result\n\n def relu(self, z):\n \n if np.isscalar(z):\n result = np.max((z, 0))\n else:\n zero_aux = np.zeros(z.shape)\n meta_z = np.stack((z , zero_aux), axis = -1)\n result = np.max(meta_z, axis = -1)\n return result\n\n def sigmoid_derivative(self, z):\n \n result = self.sigmoid(z) * (1 - self.sigmoid(z))\n return result\n\n def relu_derivative(self, z):\n \n result = 1 * (z > 0)\n return result","sub_path":"homeworkMLP/mlp_kri.py","file_name":"mlp_kri.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"260923031","text":"from pathlib import Path\nfrom pathlib import PurePosixPath\n\n# le nom de notre fichier jouet \nnom = 'fichier-temoin'\n# on crée un objet de la classe Path, associé au nom de fichier\npath = Path(nom)\nprint(path)\n# si j'écris dedans je le crée\nwith open(nom, 'w', encoding='utf-8') as output:\n output.write('0123456789\\n')\nprint(path.stat())\n\nmtime = path.stat().st_mtime\nfrom datetime import datetime\nmtime_datetime = datetime.fromtimestamp(mtime)\nprint(mtime_datetime)\nprint(f\"{mtime_datetime:%H:%M}\")\n\n# ou encore mieux, si je veux détruire \n# seulement dans le cas où il existe je peux aussi faire\ntry: \n path.unlink()\nexcept FileNotFoundError:\n print(\"no need to remove\")\n\nprint(\"*\"*100) \ndirpath = Path('F:\\Python3\\S3')\n# tous les fichiers *.json dans le répertoire data/\nfor json in dirpath.glob(\"*.*\"):\n 
print(json)","sub_path":"S3/fichiers3.py","file_name":"fichiers3.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"153962698","text":"#!/usr/bin/python\nimport os\nimport subprocess\n\naptPackages = [\"nmap\", \"python-pip\"]\n\nos.system(\"apt install -y \" + \" \".join(aptPackages))\n\nrequirements = open(\"requirements.txt\", \"r\").read().split(\"\\n\")\nfor module in requirements:\n if module != \"\":\n os.system(\"pip install '\" + module + \"'\")\nos.system(\"pip install cryptography==2.4.2\")","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"124527797","text":"from django import forms\nfrom proyectoferreteria.apps.gestionadmin.models import Categoria\n\nclass CategoriaForm(forms.ModelForm):\n\n class Meta:\n model = Categoria\n \n fields = [\n 'Id_Categoria',\n 'Descripcion_Categoria'\n ]\n\n labels = {\n 'Id_Categoria':'Id de la categoría',\n 'Descripcion_Categoria':'Descripción de la categoría'\n }\n\n widgets = {\n 'Id_Categoria':forms.TextInput(attrs={'class':'form-control'}),\n 'Descripcion_Categoria':forms.TextInput(attrs={'class':'form-control'}),\n }","sub_path":"proyectoferreteria/apps/gestionadmin/formularios/categoria_form.py","file_name":"categoria_form.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"168554830","text":"import numpy as np\r\nfrom collections import Counter\r\nfrom itertools import product, chain\r\nimport math\r\nfrom scipy.signal import argrelextrema\r\nimport pickle\r\nimport networkx as nx\r\n\r\n\r\nclass Graph:\r\n def __init__(self, parameter, lyap_exp, transition_graph, pattern_label, clustering_coeff, out_strength, time_apparence):\r\n print('Processing...')\r\n self.parameter = parameter #the parameter of the rossler oscillator\r\n \r\n self.lyap_exp = lyap_exp #the lyapunov exponent of the oscillator\r\n \r\n self.transition_graph = transition_graph #the adjacency matrix of the graph\r\n \r\n self.pattern_label = pattern_label #the labels of the patterns/nodes\r\n \r\n self.clustering_coeff = clustering_coeff #the clustering coefficient of each node\r\n \r\n self.out_strength = out_strength #the out_strength of each node\r\n \r\n self.time_apparence = time_apparence #the matrix of shape = (node, time) with \r\n #entries the tells you if that node \r\n #appears in ceartain time (1) or does not (0)\r\n\r\ndef calc_MI(X,Y,bins):\r\n\r\n c_XY = np.histogram2d(X,Y,bins)[0]\r\n c_X = np.histogram(X,bins)[0]\r\n c_Y = np.histogram(Y,bins)[0]\r\n\r\n H_X = shan_entropy(c_X)\r\n H_Y = shan_entropy(c_Y)\r\n H_XY = shan_entropy(c_XY)\r\n\r\n MI = H_X + H_Y - H_XY\r\n return MI\r\n\r\ndef shan_entropy(c):\r\n c_normalized = c / float(np.sum(c))\r\n c_normalized = c_normalized[np.nonzero(c_normalized)]\r\n H = -sum(c_normalized* np.log2(c_normalized))\r\n return H\r\n\r\ndef find_lag(data, stepSize, tau_max = 200, bins =200, plot = False ):\r\n #find usable time delay via mutual information\r\n mis = []\r\n\r\n N = len(data)\r\n for tau in range(1, tau_max):\r\n M = N - tau\r\n unlagged = data[0:M]\r\n lagged = data[tau:N]\r\n mis.append(calc_MI(unlagged, lagged, bins = bins))\r\n best_tau = 0\r\n # mis.append(mutual_information_2d(unlagged, lagged, normalized=True))\r\n for i in mis:\r\n if i < 1/math.e:\r\n best_tau 
= mis.index(i)\r\n            break\r\n    if best_tau != 0:\r\n        print('criterio e')\r\n        pass\r\n    else:\r\n        #print('criterio minimo')\r\n        mis = np.array(mis)\r\n\r\n        minimun = argrelextrema(mis, np.less, order = int(1/stepSize)) \r\n\r\n        #print(minimun)\r\n        best_tau = minimun[0][0] + 1\r\n\r\n    if plot == False:\r\n        return best_tau\r\n\r\n    elif plot == True:\r\n        import matplotlib.pyplot as plt  # local import so the no-plot path has no matplotlib dependency\r\n\r\n        tau_points = np.arange(1, tau_max)\r\n        #plot time delay embedding\r\n        fig = plt.plot(tau_points, mis), plt.xlabel('tau'), plt.ylabel('Mutual Information')\r\n        return best_tau, fig\r\n\r\ndef mean_derivative(data, stepSize):\r\n    num_points = len(data)\r\n    p = []\r\n    for point in range(num_points - 1):\r\n        p.append((data[point + 1] - data[point])/stepSize)\r\n\r\n    return p\r\n\r\ndef M_p(p):\r\n    \r\n    \"\"\"M_p is the threshold for p\"\"\"\r\n    \r\n    p = np.array(p)\r\n    return np.average(np.abs(p))\r\n\r\ndef symbolize_point(p, M_p):\r\n    \r\n    \"\"\"It symbolizes the time series with the criterion shown below \"\"\"\r\n    \r\n    p = np.array(p)\r\n    symb = []\r\n    for val in p:\r\n        if val >= M_p:\r\n            symb.append('R')\r\n        elif val > 0 and val < M_p:\r\n            symb.append('r')\r\n        elif val == 0:\r\n            symb.append('e')\r\n        elif val < 0 and val > - M_p:\r\n            symb.append('d')\r\n        elif val <= -M_p:\r\n            symb.append('D')\r\n\r\n    return symb\r\n\r\ndef delay_embedding(data, emb_dim, delay):\r\n    \"\"\"It creates the embedding phase space using the delay \r\n    \r\n    and the embedding dimension passed\"\"\"\r\n    \r\n    N = len(data)\r\n    M = N - (emb_dim - 1)*delay\r\n    delay_vec = [[0]*emb_dim for _ in range(M)]  # preallocate so the indexed assignment below cannot raise IndexError\r\n    for i in range(emb_dim):\r\n        for time in range(M):\r\n            delay_vec[time][i] = data[time + i*delay]\r\n\r\n    return delay_vec\r\n\r\n\r\ndef symbolize_vector(symb_points, emb_dim, delay):\r\n    N = len(symb_points)\r\n    M = N - (emb_dim - 1)*delay\r\n    symb_vec = []\r\n    time_points = dict()\r\n    for time in range(M):\r\n        temp_vector = []\r\n        for i in range(emb_dim):\r\n            temp_vector.append(symb_points[time + i*delay])\r\n        symb_vec.append(''.join(temp_vector))\r\n        if not symb_vec[time] in time_points:\r\n            time_points[symb_vec[time]] = [time]\r\n        else:\r\n            time_points[symb_vec[time]].append(time)\r\n\r\n    return symb_vec, time_points, M\r\n\r\ndef trasitional_graph(data_all, stepSize, emb_dim):\r\n\r\n    data = data_all\r\n    lag = find_lag(data, stepSize, tau_max = 50, bins = 200, plot = False)\r\n    delay = lag\r\n    print('delay = ', delay)\r\n\r\n    histogram = []\r\n\r\n    p = mean_derivative(data, stepSize)\r\n    M = M_p(p)\r\n\r\n    symb = symbolize_point(p, M)\r\n\r\n\r\n    symb_vector, time_points, num_timePoints = symbolize_vector(symb, emb_dim,delay)\r\n\r\n\r\n    all_edges = [(symb_vector[i], symb_vector[i + 1] ) for i in range(len(symb_vector)-1)]\r\n\r\n    numEdges = Counter(all_edges)\r\n\r\n    u, indices = np.unique(np.array(symb_vector),return_index=True)\r\n\r\n    num_upatters = len(u)\r\n\r\n\r\n    #possible edges with the unique patterns\r\n    possible_edges = list(product(u ,repeat = 2))\r\n\r\n    possible_edges_dict = dict()\r\n    for key in possible_edges:\r\n        if key in numEdges:\r\n            possible_edges_dict[key] = numEdges[key]\r\n        else:\r\n            possible_edges_dict[key] = 0\r\n\r\n\r\n    matrix = np.zeros(shape=(num_upatters, num_upatters),dtype= int)\r\n    time_apparence = np.zeros(shape= (num_upatters, num_timePoints ), dtype=int)\r\n\r\n    count_coulumn = 0\r\n    count_raw = 0\r\n    patterns =[]\r\n    for key in possible_edges_dict:\r\n        if count_coulumn < num_upatters - 1:\r\n            matrix[count_raw, count_coulumn] = possible_edges_dict[key]\r\n            count_coulumn += 1\r\n            if count_raw == 0:\r\n                patterns.append(key[1])\r\n\r\n            else:\r\n                pass\r\n\r\n        
elif count_coulumn == num_upatters - 1:\r\n matrix[count_raw, count_coulumn] = possible_edges_dict[key]\r\n if count_raw == 0:\r\n patterns.append(key[1])\r\n else:\r\n pass\r\n count_coulumn = 0\r\n count_raw += 1\r\n clustering_coeff = []\r\n G = nx.from_numpy_matrix(matrix)\r\n clustering = nx.clustering(G, weight= 'weight')\r\n\r\n for key in clustering:\r\n clustering_coeff.append(clustering[key])\r\n\r\n count_pattern = 0\r\n for pattern in patterns:\r\n time = time_points[pattern]\r\n for t in time:\r\n\r\n time_apparence[count_pattern, t ] = 1\r\n count_pattern += 1\r\n\r\n out_strength = np.sum(matrix, axis = 1, dtype= int)\r\n\r\n return matrix, patterns, clustering_coeff, out_strength, time_apparence\r\n\r\ndef degree_freq(data_all, stepSize, emb_dim):\r\n # delays = []\r\n # for node in range(90):\r\n # data = data_all[:, node]\r\n # lag = find_lag(data, stepSize= downsample, tau_max = 20, bins = 200, plot = False)\r\n # delays.append(lag)\r\n \r\n # delay = int(np.average(np.array(delays)))\r\n delay = 3\r\n #print('delay = ', delay)\r\n \r\n histogram = []\r\n for node in range(90):\r\n data = data_all[:, node]\r\n #lag = find_lag(data, stepSize= downsample, tau_max = 20, bins = 200, plot = False)\r\n \r\n \r\n p = mean_derivative(data, stepSize)\r\n M = M_p(p)\r\n \r\n symb = symbolize_point(p, M)\r\n\r\n \r\n symb_vector = symbolize_vector(symb, emb_dim, delay = delay)\r\n \r\n \r\n #the last element has not an out-edge\r\n symb_vector.remove(symb_vector[-1])\r\n \r\n freq = dict()\r\n \r\n u, indices = np.unique(np.array(symb_vector),return_index=True)\r\n for pattern in u:\r\n freq[pattern] = symb_vector.count(pattern)\r\n \r\n \r\n for key in freq:\r\n histogram.append(freq[key])\r\n\r\n # all_edges = [(symb_vector[i], symb_vector[i + 1] ) for i in range(len(symb_vector)-1)]\r\n \r\n # numEdges = Counter(all_edges)\r\n \r\n # u, indices = np.unique(np.array(symb_vector),return_index=True)\r\n \r\n # num_upatters = len(u)\r\n \r\n \r\n # #possible edges with the unique patters\r\n # possible_edges = list(product(u ,repeat = 2))\r\n \r\n # possible_edges_dict = dict()\r\n # for key in possible_edges:\r\n # if key in numEdges:\r\n # possible_edges_dict[key] = numEdges[key]\r\n # else:\r\n # possible_edges_dict[key] = 0\r\n \r\n \r\n # matrix = np.zeros(shape=(num_upatters, num_upatters))\r\n \r\n \r\n # count_coulumn = 0\r\n # count_raw = 0\r\n # for key in possible_edges_dict:\r\n # if count_coulumn < num_upatters - 1:\r\n # matrix[count_raw, count_coulumn] = possible_edges_dict[key]\r\n # count_coulumn += 1\r\n \r\n # elif count_coulumn == num_upatters - 1:\r\n # matrix[count_raw, count_coulumn] = possible_edges_dict[key]\r\n # count_coulumn = 0\r\n # count_raw += 1\r\n \r\n # np.fill_diagonal(matrix, 0)\r\n # weight = np.sum(matrix, axis=0)\r\n # weight = weight/np.max(weight)\r\n\r\n # for i in range(len(weight)):\r\n # frequencies.append(weight[i])\r\n \r\n return np.array(histogram)/max(histogram)\r\n\r\ndef positive_values(data):\r\n \"\"\"It transforms the points from the time series in\r\n positive values neccesary to create the visibility graph\"\"\"\r\n\r\n minimun = np.min(data)\r\n data_positive = np.array(data) + abs(minimun) + 1\r\n\r\n return data_positive\r\n\r\ndef horizontal_vg(data):\r\n\r\n numPoints = len(data)\r\n hvg = np.zeros(shape= (numPoints, numPoints))\r\n\r\n data = positive_values(data)\r\n\r\n for i in range(numPoints - 1):\r\n neighbor = []\r\n\r\n hvg[i, i + 1], hvg[i + 1, i] = 1, 1\r\n neighbor.append(data[i + 1 ])\r\n for j in range(i + 
2, numPoints):\r\n if data[i] > max(neighbor):\r\n if data[j] > max(neighbor):\r\n hvg[i, j], hvg[j, i] = 1, 1\r\n neighbor.append(data[j])\r\n else:\r\n neighbor.append(data[j])\r\n pass\r\n else:\r\n break\r\n return hvg\r\n\r\n\r\ndef hvg_extended(t_series, time_step):\r\n \r\n downsample = time_step\r\n num_nodes = len(t_series[0])\r\n \r\n histogram = []\r\n for node in range(num_nodes):\r\n data = t_series[:, node]\r\n hvg = horizontal_vg(data)\r\n \r\n weight = np.sum(hvg, axis = 0)\r\n\r\n histogram.append(weight)\r\n \r\n histogram = np.array(list(chain.from_iterable(histogram)), dtype=int)\r\n \r\n \r\n \r\n return histogram/max(histogram)","sub_path":"graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":10830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"367332103","text":"import sys\nsys.path.append(\"/srv/scratch/oursu/3Dgenome/src/kCCA/ChIAPET/pyrcca/\")\nimport rcca\nimport numpy as np\n\ndef main():\n parser=OptionParser()\n parser.add_option('--out',dest='out')\n parser.add_option('--matrices',dest='ms',default='',help='Comma delimited, .npy files, nodes should be aligned')\n opts,args=parser.parse_args()\n\n matrices=opts.ms.split(',')\n m1=np.load(matrices[0])\n m2=np.load(matrices[1])\n\n # Set up Pyrcca\n cca = rcca.CCA(kernelcca=False, numCC=2, reg=0.5)\n # Find canonical components\n training=cca.train([m1,m2])\n\n\nmain()\n","sub_path":"3Dutils/cca_2_matrices.py","file_name":"cca_2_matrices.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"292187226","text":"\n\nfrom xai.brain.wordbase.verbs._guarantee import _GUARANTEE\n\n#calss header\nclass _GUARANTEEING(_GUARANTEE, ):\n\tdef __init__(self,): \n\t\t_GUARANTEE.__init__(self)\n\t\tself.name = \"GUARANTEEING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"guarantee\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_guaranteeing.py","file_name":"_guaranteeing.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"323234078","text":"'''Server module.\n\nHandle and response challenge requests from the frontend server.\n\n'''\n\nimport sys\nimport json\nimport traceback\nfrom collections import deque\nfrom tornado import gen\nfrom tornado.ioloop import IOLoop, PollIOLoop\nfrom tornado.web import Application\nfrom tornado.websocket import WebSocketHandler\nimport PyExt\nimport Privilege\nimport Config\nfrom StdChal import StdChal\n\n\nclass EvIOLoop(PollIOLoop):\n '''Tornado compatible ioloop interface.'''\n\n def initialize(self, **kwargs):\n '''Initialize.'''\n\n super().initialize(impl=PyExt.EvPoll(), **kwargs)\n\n\nclass JudgeHandler(WebSocketHandler):\n '''Judge request handler.\n\n Static attributes:\n chal_running_count (int): Number of current running challenges.\n chal_queue (deque): Pending challenges.\n\n '''\n\n chal_running_count = 0\n chal_queue = deque()\n\n @staticmethod\n @gen.coroutine\n def start_chal(obj, websk):\n '''Start a challenge.\n\n Check the challenge config, issue judge tasks, then report the result.\n\n Args:\n obj (dict): Challenge config.\n websk (WebSocketHandler): Websocket object.\n\n Returns:\n None\n\n '''\n\n # The worst exception, there is no chal_id in the obj.\n chal_id = None\n try:\n chal_id = obj['chal_id']\n code_path = obj['code_path']\n res_path = obj['res_path']\n test_list = obj['test']\n 
metadata = obj['metadata']\n comp_type = obj['comp_type']\n check_type = obj['check_type']\n\n test_paramlist = list()\n assert comp_type in ['g++', 'clang++', 'makefile', 'python3']\n assert check_type in ['diff', 'ioredir']\n\n for test in test_list:\n test_idx = test['test_idx']\n memlimit = test['memlimit']\n timelimit = test['timelimit']\n data_ids = test['metadata']['data']\n for data_id in data_ids:\n test_paramlist.append({\n 'in': res_path + '/testdata/%d.in'%data_id,\n 'ans': res_path + '/testdata/%d.out'%data_id,\n 'timelimit': timelimit,\n 'memlimit': memlimit,\n })\n\n chal = StdChal(chal_id, code_path, comp_type, check_type, \\\n res_path, test_paramlist, metadata)\n result_list, verdict = yield chal.start()\n\n result = []\n idx = 0\n for test in test_list:\n test_idx = test['test_idx']\n data_ids = test['metadata']['data']\n total_runtime = 0\n total_mem = 0\n total_status = 0\n for data_id in data_ids:\n runtime, peakmem, status = result_list[idx]\n total_runtime += runtime\n total_mem += peakmem\n total_status = max(total_status, status)\n idx += 1\n\n result.append({\n 'test_idx': test_idx,\n 'state': total_status,\n 'runtime': total_runtime,\n 'peakmem': total_mem,\n 'verdict': ''\n })\n\n websk.write_message(json.dumps({\n 'chal_id': chal_id,\n 'verdict': verdict,\n 'result': result,\n }))\n\n except Exception:\n traceback.print_exception(*sys.exc_info())\n websk.write_message(json.dumps({\n 'chal_id': chal_id,\n 'verdict': None,\n 'result': None,\n }))\n\n finally:\n JudgeHandler.chal_running_count -= 1\n JudgeHandler.emit_chal()\n\n @staticmethod\n def emit_chal(obj=None, websk=None):\n '''Emit a challenge to the queue and trigger the start_chal.\n\n Args:\n obj (dict, optional): Challenge config.\n websk (WebSocketHandler): Websocket object.\n\n Returns:\n None\n\n '''\n\n if obj is not None:\n JudgeHandler.chal_queue.append((obj, websk))\n\n while len(JudgeHandler.chal_queue) > 0 \\\n and JudgeHandler.chal_running_count < Config.TASK_MAXCONCURRENT:\n chal = JudgeHandler.chal_queue.popleft()\n JudgeHandler.chal_running_count += 1\n IOLoop.instance().add_callback(JudgeHandler.start_chal, *chal)\n\n def open(self):\n '''Handle open event'''\n\n print('Frontend connected')\n\n def on_message(self, msg):\n '''Handle message event'''\n\n obj = json.loads(msg, 'utf-8')\n JudgeHandler.emit_chal(obj, self)\n\n def on_close(self):\n '''Handle close event'''\n\n print('Frontend disconnected')\n\n\ndef init_websocket_server():\n '''Initialize websocket server.'''\n\n app = Application([\n (r'/judge', JudgeHandler),\n ])\n app.listen(2501)\n\n\ndef main():\n '''Main function.'''\n\n Privilege.init()\n PyExt.init()\n StdChal.init()\n IOLoop.configure(EvIOLoop)\n\n init_websocket_server()\n\n IOLoop.instance().start()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"583742562","text":"from django import forms\nfrom .widgets import ChainedSelectWidget\nfrom .models import Child\n\n\nclass SponsorForm(forms.Form):\n child = forms.IntegerField()\n\nclass FilterForm(forms.Form):\n gender = forms.ChoiceField(choices=[(x, x) for x in ('---------', 'MALE', 'FEMALE')])\n age = forms.ChoiceField(choices=[(x, x) for x in range(1, 18)], required=False)\n\n def __init__(self, *args, **kwargs):\n super(FilterForm, self).__init__(*args, **kwargs)\n\n if 0 == len(self.data):\n self.fields['age'].queryset = 
Child.objects.none()\n\n # assign a widget to second select field\n self.fields['age'].widget = ChainedSelectWidget(\n parent_name='gender', # the name of parent field\n app_name='sponsorship', # the name of model's application\n model_name='child', # the name of a model with the method\n method_name='get_children', # the name of queryset method\n )","sub_path":"sponsorship/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"63717431","text":"import fingerprint\nimport os\nimport librosa\nimport glob\nfrom hashlib import sha1\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom collections import Counter\nimport time\n\nclass Database:\n def __init__(self):\n self.song_table = pd.DataFrame({'song_id':[], 'name':[]}) #song table has two columns\n self.fingerprint_table = pd.DataFrame({'song_id':[], 'hash':[], 'offset':[]}) #fingerprint table has three columbs\n \n def addRow(self, table, row):\n table.loc[len(table)] = row\n \n def parse_file_hash(self, filename):\n '''\n Generate a song_id using \n sha1 hashing algorithm. \n '''\n s = sha1()\n with open(filename , \"rb\") as f:\n while True:\n buf = f.read(2**20)\n if not buf: \n break\n s.update(buf)\n\n return s.hexdigest().upper()\n \n def add(self, f): # f is a wav file\n x, fs = librosa.load(f)\n song_id = self.parse_file_hash(f)\n self.addRow(self.song_table, [song_id, f])\n hashes = set(fingerprint.fingerprint(x, fs))\n for hash_, offset in hashes:\n self.addRow(self.fingerprint_table, [song_id, hash_, int(offset)])\n print(f'{f} is added')\n \n def get_song_by_id(self, song_id):\n return self.song_table[self.song_table['song_id'] == song_id].values[0][1]\n \n def get_song_hashes_count(self, song_id):\n return len(self.fingerprint_table.loc[self.fingerprint_table['song_id'] == song_id])\n \n def train(self, training_dir): # add all the wavs in a directory to the database\n t0 = time.process_time()\n i = 0\n for f in glob.iglob(training_dir+'/*.wav'):\n self.add(f)\n i += 1\n t = time.process_time()\n print(f'training time: {t-t0}; number of files added: {i}')\n \n def save(self, s, f): #s, f are both csv files. 
\n self.song_table.to_csv(s)\n self.fingerprint_table.to_csv(f)\n \n def load(self, s, f):\n self.song_table = pd.read_csv(s, index_col=0)\n self.fingerprint_table = pd.read_csv(f, index_col=0)\n \n def find_matches(self, f): # f is a wav file\n x, fs = librosa.load(f)\n hashes = set(fingerprint.fingerprint(x, fs))\n return self.return_matches(hashes)\n \n def return_matches(self, hashes): # return all the (id, offset_difference) tuple\n # based on finding the rows that match the hashes the target has\n mapper = {}\n for hash_, offset in hashes:\n mapper[hash_] = offset\n values = mapper.keys() #hashe values\n results = []\n for unique_value in set(values):\n matched = self.fingerprint_table.loc[self.fingerprint_table['hash'] == unique_value]\n if len(matched) == 0:\n continue\n else:\n for id_,offset in matched[['song_id', 'offset']].values:\n results.append((id_, int(offset-mapper[unique_value])))\n return results\n \n def align_matches(self, matches):\n if len(matches) == 0:\n return {}\n cnt = Counter(matches)\n # for match in cnt:\n # cnt[match] /= self.get_song_hashes_count(match[0])\n best_guess = cnt.most_common(1)[0] # find the one that has the most matches with the target audio\n largest_count = best_guess[1]\n song_id = best_guess[0][0]\n offset_difference = best_guess[0][1]\n song_name = self.get_song_by_id(song_id)\n nsec = round(float(largest_count)/44100*2048, 5)\n return {\n \"SONG_ID\" : song_id,\n \"SONG_NAME\" : song_name,\n \"SIMILAR_FEATURES_COUNT\" : largest_count,\n \"OFFSET_DIFFERENCE\" : offset_difference,\n \"OFFSET_DIFFERENCE_IN_SEC\": nsec\n }\n \n def query(self, f, log=True):\n t0 = time.process_time()\n output = self.align_matches(self.find_matches(f))\n t = time.process_time()\n if log:\n print(\"Query time:\", t-t0)\n return output","sub_path":"core/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"202937403","text":"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A module that maps channels to its respective bitrate and resolutions to its\nrespective height, bitrate and profile.\"\"\"\n\nclass ChannelData():\n\n def __init__(self, aac_bitrate, opus_bitrate):\n self.aac_bitrate = aac_bitrate\n self.opus_bitrate = opus_bitrate\n\nclass ResolutionData():\n\n def __init__(self, width, height, h264_bitrate, vp9_bitrate, h264_profile):\n self.width = width\n self.height = height\n self.h264_bitrate = h264_bitrate\n self.vp9_bitrate = vp9_bitrate\n self.h264_profile = h264_profile\n\n def __eq__(self, other):\n return self.height == other.height\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __ge__(self, other):\n return self.height >= other.height\n\n# A map of channels to ChannelData objects which contains the AAC and Opus\n# bitrate information of a given channel.\nCHANNEL_MAP = {\n 2: ChannelData(128, 64),\n 6: ChannelData(192, 96),\n}\n\n# A map of resolutions to ResolutionData 
objects which contain\n# the height and H264 bitrate of a given resolution.\nRESOLUTION_MAP = {\n '144p': ResolutionData(256, 144, '108k', '95k', 'baseline'),\n '240p': ResolutionData(426, 240, '242k', '150k', 'main'),\n '360p': ResolutionData(640, 360, '400k', '276k', 'main'),\n '480p': ResolutionData(854, 480, '2M', '750k', 'main'),\n '576p': ResolutionData(1024, 576, '2.5M', '1M', 'main'),\n '720p': ResolutionData(1280, 720, '3M', '2M', 'main'),\n '720p-hfr': ResolutionData(1280, 720, '4M', '4M', 'main'),\n '1080p': ResolutionData(1920, 1080, '5M', '4M', 'high'),\n '1080p-hfr': ResolutionData(1920, 1080, '6M', '6M', 'high'),\n '2k': ResolutionData(2560, 1440, '9M', '6M', 'high'),\n '2k-hfr': ResolutionData(2560, 1440, '14M', '9M', 'high'),\n '4k': ResolutionData(3840, 2160, '17M', '12M', 'uhd'),\n '4k-hfr': ResolutionData(3840, 2160, '25M', '18M', 'uhd'),\n}\n\nclass Metadata():\n\n def __init__(self, pipe, channels = None, res_string = None,\n audio_codec = None, video_codec = None, lang=None,\n hardware=None):\n self.pipe = pipe\n if channels:\n self.channels = channels\n self.audio_codec = audio_codec\n self.channel_data = CHANNEL_MAP[channels]\n self.lang = lang\n if res_string:\n self.res = res_string\n self.video_codec = video_codec\n self.resolution_data = RESOLUTION_MAP[res_string]\n self.hardware = hardware\n","sub_path":"streamer/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"431401350","text":"def possible(lawn):\r\n # Get the maximum heights in each row or column\r\n row_max = [max(row) for row in lawn]\r\n col_max = [max([row[col_num] for row in lawn]) for col_num in range(len(lawn[0]))]\r\n # For each square, check if it could have been reached\r\n for row_num in range(len(lawn)):\r\n for col_num in range(len(lawn[row_num])):\r\n if (lawn[row_num][col_num] < row_max[row_num] and \r\n lawn[row_num][col_num] < col_max[col_num]):\r\n return \"NO\"\r\n # If we got this far then the layout is possible\r\n return \"YES\"\r\n\r\n\r\nfin = open(\"input.txt\", \"r\")\r\nfout = open(\"output.txt\", \"w\")\r\n\r\nt = int(fin.readline())\r\n\r\nfor i in range(t):\r\n dimensions = list(map(int, fin.readline().split(\" \")))\r\n lawn = []\r\n for row in range(dimensions[0]):\r\n lawn += [list(map(int, fin.readline().split(\" \")))]\r\n fout.write(\"Case #\" + str(i + 1) + \": \" + possible(lawn) + \"\\n\")\r\n\r\nfin.close()\r\nfout.close()\r\n","sub_path":"solutions_2449486_0/Python/eliotball/lawnmower.py","file_name":"lawnmower.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"633408612","text":"# Definition for a binary tree node.\n#class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object): #recursive DFS \n def constructMaximumBinaryTree(self, nums):\n R = TreeNode(None)\n self.helper(R, nums)\n return R\n \n \n def helper(self, root, nums):\n if nums == []:\n return None\n M = max(nums)\n mid = nums.index(M)\n root.val = M\n if nums[:mid] != []:\n root.left = TreeNode(None) #create a new leaf node\n self.helper(root.left, nums[:mid])\n if nums[mid+1:] != []:\n root.right = TreeNode(None)\n self.helper(root.right, nums[mid+1:])\n \n \n \n \n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n \"\"\"\n # Given an integer array with no duplicates. 
A maximum tree building on this array is defined as follow:\n # The root is the maximum number in the array.\n # The left subtree is the maximum tree constructed from left part subarray divided by the maximum number.\n # The right subtree is the maximum tree constructed from right part subarray divided by the maximum number.\n # Construct the maximum tree by the given array and output the root node of this tree.\n\n","sub_path":"Maximum Binary Tree.py","file_name":"Maximum Binary Tree.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"79133994","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport cv2\nimport numpy as np\n\ndef find_rect_of_target_color(image):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV_FULL)\n h = hsv[:, :, 0]\n s = hsv[:, :, 1]\n mask = np.zeros(h.shape, dtype=np.uint8)\n mask[((h < 20) | (h > 200)) & (s > 128)] = 255\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n rects = []\n # rects : [x,y,width,height]\n for contour in contours:\n approx = cv2.convexHull(contour)\n rect = cv2.boundingRect(approx)\n rects.append(np.array(rect))\n return rects\n\ndef camera_system():\n capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n # while cv2.waitKey(30) < 0:\n _, frame = capture.read()\n rects = find_rect_of_target_color(frame)\n capture.release()\n cv2.destroyAllWindows()\n return rects\n\nif __name__ == \"__main__\":\n capture = cv2.VideoCapture(0)\n while cv2.waitKey(30) < 0:\n _, frame = capture.read()\n rects = find_rect_of_target_color(frame)\n if len(rects) > 0:\n rect = max(rects, key=(lambda x: x[2] * x[3]))\n cv2.rectangle(frame, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]), (0, 0, 255), thickness=2)\n circle_point = np.array(rect[0:2] + (rect[2:4])/2, dtype=int)\n circle_point = np.round(circle_point)\n print(circle_point)\n cv2.circle(frame,tuple(circle_point),10,(0,0,255),-1)\n cv2.imshow('red', frame)\n print(rect)\n capture.release()\n cv2.destroyAllWindows()\n","sub_path":"inRaspberryPi/CameraSystem.py","file_name":"CameraSystem.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"130064133","text":"import pymysql\nfrom analog.bin.exception.Exceptions import *\nfrom pymysql.cursors import Cursor\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\n\nclass db:\n\n def __init__(self, config, controller=None):\n\n self.section_name = 'Database'\n self.database_name = 'WebLog_Analysis'\n self.connect = None\n self.config = config\n self.controller = controller\n\n\n def connect_db(self):\n\n self.connect = pymysql.connect(host=self.config.get(self.section_name, 'host'),\n port=int(self.config.get(self.section_name, 'port')),\n user=self.config.get(self.section_name, 'user'),\n password=self.config.get(self.section_name, 'password'),\n database=self.config.get(self.section_name, 'database'),\n charset=self.config.get(self.section_name, 'charset'))\n return self.connect\n\n\n def close(self):\n if self.connect:\n self.connect.close()\n\n\n def execute_many(self, sql, args) -> Cursor:\n\n try:\n cursor = self.connect.cursor()\n cursor.executemany(sql, args)\n except Exception as e:\n self.connect_db()\n cursor = self.connect.cursor()\n cursor.executemany(sql, args)\n return cursor\n\n\n def execute(self, sql: str, args: object = None) -> Cursor:\n try:\n cursor = self.connect.cursor()\n cursor.execute(sql, 
args)\n except Exception as e:\n self.connect_db()\n cursor = self.connect.cursor()\n cursor.execute(sql, args)\n return cursor\n\n\n def commit(self):\n self.connect.commit()\n\n\n def update(self, *args):\n cursor = self.connect.cursor()\n try:\n arguments = {\"table_name\": args[0], \"values\": \"\",\n \"conditions\": \"WHERE %s\" % args[2] if len(args) > 2 else \"\"}\n flag = False\n string = \"\"\n if isinstance(args[1], dict):\n for item in args[1].items():\n if flag:\n string += \",\"\n\n string += \"{0} = {1}\".format(item[0], item[1])\n\n if flag is False:\n flag = True\n\n arguments['values'] = string\n # Identifiers cannot be bound as query parameters in pymysql, so the statement text is assembled directly.\n cursor.execute(\"UPDATE {table_name} SET {values} {conditions}\".format(**arguments))\n self.connect.commit()\n except Exception:\n return False\n\n\n def create_db(self):\n _connection = None\n try:\n _connection = pymysql.connect(host=self.config.get(self.section_name, 'host'),\n user=self.config.get(self.section_name, 'user'),\n password=self.config.get(self.section_name, 'password'),\n charset=self.config.get(self.section_name, 'charset'))\n cursor = _connection.cursor()\n cursor.execute('create database if not exists {}'.format(self.database_name))\n _connection.commit()\n except Exception as e:\n raise DatabaseException(\"Can't create database, make sure your config is correct!\")\n finally:\n if _connection:\n _connection.close()\n return True\n\n\nif __name__ == \"__main__\":\n a = db(None)\n","sub_path":"analog/bin/lib/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"233764296","text":"from sklearn.manifold import TSNE\nfrom scipy.cluster.hierarchy import dendrogram\nfrom pytransform3d.rotations import *\nfrom varname import nameof\n\nimport sys\nimport os\n\nsys.path.append(\"..\")\nimport cv2\nfrom os import listdir\nfrom os.path import isfile, join\n\n# matplotlib.use('tkagg')\n# matplotlib.use('WebAgg')\n\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport phenograph\nimport io\n\n\ndef getLastDirectory(inputDir):\n if inputDir.endswith('/'):\n inputDir = inputDir[:-1]\n return os.path.split(inputDir)[-1]\n\n\nday3WT = '/alder/home/soobink/rotarod_ML10/output/Day3_WT'\nday3YAC = '/alder/home/soobink/rotarod_ML10/output/Day3_YAC'\nday4WT = '/alder/home/soobink/rotarod_ML10/output/Day4_WT'\nday4YAC = '/alder/home/soobink/rotarod_ML10/output/Day4_YAC'\nday3and4WT = '/alder/home/soobink/rotarod_ML10/output/Day3and4_WT'\nday3and4YAC = '/alder/home/soobink/rotarod_ML10/output/Day3and4_YAC'\n\npaths = [day3WT, day4WT, day3YAC, day4YAC, day3and4WT, day3and4YAC]\nperplexities = [20, 30, 100]\n\nfor perplexity in perplexities:\n for path in paths:\n print('--- Running \'%s\' with perplexity = %i. 
---' % (path, perplexity))\n data_2d = [f for f in listdir(path) if (isfile(join(path, f)) and (not f.startswith('.')))]\n\n # data_3d = ['LD1_1580415036_3d.csv']\n coords_all_2d = []\n coords_all_3d = []\n dataset_name_2d = []\n dataset_name_3d = []\n\n # for f_2d, f_3d in zip(data_2d, data_3d):\n for f_2d in data_2d:\n coords_file = os.path.join(path, f_2d)\n dataset_name_2d = coords_file\n # coords_2d = pd.read_csv(coords_file, dtype=np.float, header=2, index_col=0)\n coords_2d = pd.read_csv(coords_file, dtype=float, header=0, index_col=0)\n coords_2d.dropna(axis=0, inplace=True)\n coords_2d = coords_2d.iloc[:90]\n coords_2d = coords_2d.values[:, 4:] # exclude first column\n coords_2d = np.delete(coords_2d, list(range(2, coords_2d.shape[1], 3)),\n axis=1) # delete every 3rd column of prediction score\n coords_all_2d.append(coords_2d)\n\n # coords_file = data_root + os.sep + f_3d\n # dataset_name_3d = coords_file.split('/')[-1].split('.')[0]\n # coords_3d = pd.read_csv(coords_file, header=2)\n # coords_3d = coords_3d.values[:, 1:] # exclude the index column\n # coords_3d = np.around(coords_3d.astype('float'), 2) # round to two decimal places\n # coords_3d = gaussian_filter1d(coords_3d, 5, axis=0) # smooth the data, the points were oscillating\n # coords_all_3d.append(coords_3d)\n\n\n coords_all_2d = np.vstack(coords_all_2d)\n # x_3d = coords_all_3d[:, ::3];\n # y_3d = coords_all_3d[:, 1::3];\n # z_3d = coords_all_3d[:, 2::3];\n x_2d = coords_all_2d[:, ::2];\n y_2d = coords_all_2d[:, 1::2];\n z_2d = np.zeros(x_2d.shape);\n coords_all_3d_trans = []\n # for i in np.arange(x_3d.shape[0]):\n\n k = 30 # K for k-means step of phenograph\n communities_2d, graph, Q = phenograph.cluster(coords_all_2d, k=k)\n n_clus_2d = np.unique(communities_2d).shape[0]\n\n # --end of phenograph\n\n # tsne_model = TSNE(n_components=2, random_state=2,perplexity=100,angle=0.1,init='pca',n_jobs= mp.cpu_count()-1)\n tsne_model = TSNE(n_components=2, random_state=2, perplexity=perplexity, angle=0.1, init='pca', n_jobs=-1)\n Y_2d = tsne_model.fit_transform(coords_all_2d)\n cmap = plt.cm.colors.ListedColormap(plt.cm.jet(np.linspace(0, 1, n_clus_2d)))\n plt.figure()\n plt.scatter(Y_2d[:, 0], Y_2d[:, 1],\n c=communities_2d,\n cmap=cmap,\n alpha=1.0)\n plt.colorbar(ticks=np.unique(communities_2d), label='Cluster#')\n plt.xlabel('TSNE1');\n plt.ylabel('TSNE2')\n\n name = getLastDirectory(path)\n plt.title(' 2D Body coordinate clusters: total frames %s\\n%s, perplexity = %i' % (\n str(len(communities_2d)), name, perplexity))\n\n plt.savefig(os.path.join('plots', name + 'p' + str(perplexity) + '.png'), format='png')\n plt.text(1, 0, path, ha='right', va='bottom', fontsize=7)\n plt.show()\n","sub_path":"featureImportance/tsne/test_ellenDataPhenograph.py","file_name":"test_ellenDataPhenograph.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"423075372","text":"import webapp2\nimport csv\nimport time\n\nfrom google.appengine.api import logservice\nfrom google.appengine.api import files\n\nimport config\n\nclass Level2Handler(webapp2.RequestHandler):\n def get(self):\n # Create a new Google Storage file\n filename = 'request.csv'\n gs_file = files.gs.create('/gs/%s/%s' % (config.gs_bucket_name, filename),\n mime_type='text/csv')\n with files.open(gs_file, 'a') as f:\n # Create a csv writer that outputs to the Google Storage file\n w = csv.writer(f)\n for r in logservice.fetch(start_time=time.time()-5*60):\n 
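# Added note: each RequestLog fetched above (the last five minutes) becomes one CSV row;\n # user_agent falls back to \"NULL\" when the record carries none.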
w.writerow([r.start_time,r.method,r.resource,\n r.status,r.latency,r.response_size,\n r.user_agent if r.user_agent else \"NULL\"])\n # Finalize the file\n files.finalize(gs_file)\n\n # Render a HTML link to the file in the reponse body\n link_format = 'https://storage.cloud.google.com/{gs_bucket}/{filename}'\n link = link_format.format(gs_bucket=config.gs_bucket_name, filename=filename)\n self.response.write('{link}'.format(link=link))\n\napp = webapp2.WSGIApplication([('/solution/level2', Level2Handler)], debug=True)\n","sub_path":"solution/level2.py","file_name":"level2.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"349160123","text":"from django.conf import settings\nfrom movie.models import Genre\n\nmovies_df = settings.MOVIE_GENRE_SEP_DF.copy()\n\nweights = {}\n\ndef dot_product(vector_1, vector_2): \n\treturn sum([ i*j for i,j in zip(vector_1, vector_2)])\n\ndef get_movie_score(movie_features, p): \n\treturn dot_product(movie_features, p)\n\t\ndef get_movie_recommendations(genresQ, n_recommendations):\n\tfor i,weight in enumerate([5,3,2]):\n\t\ttry:\n\t\t\tweights[genresQ[i].name] = weight\n\t\texcept IndexError:\n\t\t\tpass\n\n\tmovie_categories = movies_df.columns[1:]\n\tall_weights = []\n\tfor category in movie_categories:\n\t\tgenre = genresQ.filter(name__exact=category)\n\t\tif genre.exists():\n\t\t\tall_weights.append(weights[genre[0].name])\n\t\telse:\n\t\t\tall_weights.append(0)\n\n #we add a column to the movies_df dataset with the calculated score for each movie for the given user\n\tmovies_df['score'] = movies_df[movie_categories].apply(get_movie_score, args=([all_weights]), axis=1)\n\treturn movies_df.sort_values(by=['score'], ascending=False)['movieId'].values[:n_recommendations]\n","sub_path":"recommender/genre_recommender.py","file_name":"genre_recommender.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"26147972","text":"import itertools\ndef fun1(checked,cc):\n for ch in checked:\n q = 0\n for b in ch:\n if b in cc: q = q + 1\n if q == len(ch): return False\n \n return True\n\ndef solution(relation):\n answer,count = 0, 1\n kate = len(relation[0])\n kate_list = [i for i in range(kate)]\n checked = []\n while( count < kate+1 ):\n ccc = list(itertools.combinations(kate_list,count))\n for cc in ccc:\n tmp = []\n if fun1(checked,cc) == False:\n continue\n\n for c in cc:\n semi = []\n for r in relation: semi.append(r[c])\n tmp.append(semi)\n\n final = list(zip(*tmp))\n\n if len(final) == len(set(final)):\n checked.append(cc)\n answer += 1\n\n count = count + 1\n\n return answer\n \n\nprint(solution([[\"100\",\"ryan\",\"music\",\"2\"],[\"200\",\"apeach\",\"math\",\"2\"],[\"300\",\"tube\",\"computer\",\"3\"],[\"400\",\"con\",\"computer\",\"4\"],[\"500\",\"muzi\",\"music\",\"3\"],[\"600\",\"apeach\",\"music\",\"2\"]]) )","sub_path":"Programmers/Programmers_2020 Kakao Blind_후보키.py","file_name":"Programmers_2020 Kakao Blind_후보키.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"487422641","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom utils import plot_classification_dataset, 
plot_2d_decisionboundary\n\n\nif __name__ == \"__main__\":\n # Load data\n data = np.load('data_3_logreg_b.npz')\n X, y = data['X'], data['y']\n print(X.shape)\n print(y.shape)\n \n Xtrain, Xtest, ytrain, ytest = train_test_split(X,y,test_size=0.33)\n\n model = LogisticRegression(multi_class='ovr', solver='liblinear') \n # Fit and evaluate (compute test error) logistic regression on this 1d data set\n model.fit(Xtrain, ytrain) \n print(\"Test-Accuracy: {0}\".format(model.score(Xtest, ytest)))\n print(\"Train-Accuracy: {0}\".format(model.score(Xtrain, ytrain))) \n # DOC: Accuracy is pretty bad (around 0.5) as the 1 dimensional data with points of class 0\n # surrounding the points of class 1 is not possible to split in half! \n \n\n # Inspect the data set\n plot_classification_dataset(X, y)\n\n # Feature transformation (1d -> 2d)\n X2 = np.copy(X)\n X2.resize((X.shape[0], 2))\n print(X2.shape)\n for i in range(X2.shape[0]):\n X2[i][1] = X2[i][0] * X2[i][0]\n\n # split new 2d data set\n X2train, X2test, y2train, y2test = train_test_split(X2,y,test_size=0.33)\n\n # Fit logistic regression to new 2d data set \n # Evaluate the model (compute test error)\n model2 = LogisticRegression(multi_class='ovr', solver='liblinear') \n # Fit and evaluate (compute test error) logistic regression on this 1d data set\n model2.fit(X2train, y2train) \n print(\"Test-Accuracy: {0}\".format(model2.score(X2test, y2test)))\n print(\"Train-Accuracy: {0}\".format(model2.score(X2train, y2train)))\n\n # Visualize the decision boundary of the final model\n plot_2d_decisionboundary(model2, X2, y)\n\n #DOC: Performance for the 2-dim data is significantly better as the data can easier be split\n # by a linear curve as they are positioned in 2-dim space","sub_path":"logistic_regression_b_skeleton.py","file_name":"logistic_regression_b_skeleton.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"533030946","text":"# Note: no changes need to be made to this file...\n\n# IMPORTS:\n\nimport matplotlib.pyplot as plt\nimport warnings, random\nfrom math import pi, cos, sin, sqrt, floor, ceil, atan2\nfrom cmath import phase\n\n\n# FUNCTIONS:\n\ndef scatter(xs, ys, model=None):\n \"\"\"Plots data according to true and modeled outcomes.\n\n Keyword arguments:\n xs -- the values of the attributes\n ys -- the values of the true outcomes\n model -- the classification/regression model (default None)\n\n Return values:\n None\n \"\"\"\n # Wrap all y in lists, if provided as scalars\n scalar_y = type(ys[0]) is not list\n if scalar_y:\n ys = [[yi] for yi in ys]\n # Determine the x-range of the data\n x0s = [xi[0] for xi in xs]\n x1s = [xi[1] for xi in xs]\n range_x = ceil(1.1*max(-min(x0s), max(x0s), -min(x1s), max(x1s)))\n paint_x = [(xi/64.0-1.0)*range_x for xi in range(129)]\n # Generate subplots\n axes = len(ys[0])\n fig, axs = plt.subplots(1, axes, figsize=(6.4*axes, 4.8), squeeze=False)\n for n, ax in enumerate(axs[0]):\n # Determine the y-range of the data\n yns = [yi[n] for yi in ys]\n range_y = max(-min(yns), max(yns))\n # Plot the data\n data = ax.scatter(x0s, x1s, c=yns, edgecolors='w', cmap=plt.cm.RdYlBu, vmin=-range_y, vmax=range_y)\n # Paint background colors denoting the model predictions\n if hasattr(model, 'predict'):\n if scalar_y:\n paint_y = [[model.predict([xi, yi]) for xi in paint_x] for yi in paint_x]\n else:\n paint_y = [[model.predict([xi, yi])[n] for xi in paint_x] for yi in paint_x]\n ax.imshow(paint_y, origin='lower', 
extent=(-range_x, range_x, -range_x, range_x), vmin=-range_y, vmax=range_y, interpolation='bilinear', cmap=plt.cm.RdYlBu)\n # Draw dashed line at contour zero\n with warnings.catch_warnings(): # Ignore warning that zero-contour is absent\n warnings.simplefilter('ignore')\n ax.contour(paint_x, paint_x, paint_y, levels=[0.0], colors='k', linestyles='--', linewidths=1.0)\n else:\n ax.set_facecolor('#F8F8F8')\n # Finish the layout and display the figure\n ax.set_aspect('equal', 'box')\n ax.axis([-range_x, range_x, -range_x, range_x])\n ax.grid(True, color='k', linestyle=':', linewidth=0.5)\n ax.axhline(y=0, color='k', linestyle='-', linewidth=1.0)\n ax.axvline(x=0, color='k', linestyle='-', linewidth=1.0)\n ax.set_axisbelow(True)\n ax.set_xlabel(r'$x_1$')\n ax.set_ylabel(r'$x_2$')\n cbar = plt.colorbar(data, ax=ax).ax\n cbar.axhline(y=0.5, color='k', linestyle='--', linewidth=1.0)\n cbar.set_title(r'$y$' if axes == 1 else r'$y_{}$'.format(n+1))\n if hasattr(model, 'loss'):\n if scalar_y:\n loss = sum(model.loss(x, y[0]) for x, y in zip(xs, ys))\n else:\n loss = sum(model.loss(x, y) for x, y in zip(xs, ys))\n plt.suptitle('Total loss: {:.3f}'.format(loss))\n plt.show()\n\n\ndef graph(funcs, *args, xmin=-3.0, xmax=3.0):\n \"\"\"Plots the graph of a given function.\n\n Keyword arguments:\n funcs -- one or more functions to be plotted\n *args -- extra arguments that should be passed to the function(s) (optional)\n xmin -- the lowest x-value (default -4.0)\n xmax -- the highest x-value (default +4.0)\n\n Return values:\n None\n \"\"\"\n # Wrap the function in a list, if only one is provided\n if type(funcs) is not list:\n funcs = [funcs]\n # Plot the figures and keep track of their y-range\n xs = [xmin+xi*(xmax-xmin)/256.0 for xi in range(257)]\n ymin = -1.0\n ymax = +1.0\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n plt.subplot(1, 1, 1, facecolor='#F8F8F8')\n for n, func in enumerate(funcs):\n ys = [func(x, *args) for x in xs]\n ymin = min(ymin, floor(min(ys)))\n ymax = max(ymax, ceil(max(ys)))\n plt.plot(xs, ys, color=colors[n % len(colors)], linewidth=3.0, label=func.__code__.co_name)\n # Finish the layout and display the figure\n plt.axis([xmin, xmax, ymin, ymax])\n plt.legend()\n plt.grid(True, color='k', linestyle=':', linewidth=0.5)\n plt.axhline(y=0, color='k', linestyle='-', linewidth=1.0)\n plt.axvline(x=0, color='k', linestyle='-', linewidth=1.0)\n plt.xlabel(r'$x$')\n plt.ylabel(r'$f(x)$')\n plt.show()\n\n\ndef generate(nominal, num=64, dim=2, bias=None, weights=None, noise=0.0, seed=None):\n \"\"\"Generate a suitable dataset with attributes and outcomes.\n\n Keyword arguments:\n nominal -- flag indicates nominal classes or continuous values\n num -- number of instances (default 64)\n dim -- dimensionality of the attributes (default 2)\n bias -- bias of the generating model equation (default random)\n weights -- weights of the generating model equation (default random)\n noise -- the amount of noise to add (default 0.0)\n seed -- a seed to initialise the random number generator (default random)\n\n Return values:\n xs -- values of the attributes\n ys -- values of the outcomes\n \"\"\"\n # Seed the random number generator\n random.seed(seed)\n # Generate random bias if none provided\n if bias == None:\n bias = random.gauss(0.0, 4.0)\n # Generate randomly directed weight vector if none provided\n if weights == None:\n weights = [random.gauss(0.0, 1.0) for d in range(dim)]\n length = sqrt(sum(wi**2 for wi in weights))\n weights = [wi/length for wi in weights]\n # Generate 
attribute data\n xs = [[random.gauss(0.0, 8.0) for d in range(dim)] for n in range(num)]\n # Generate outcomes\n if nominal:\n ys = [-1 if bias+sum(wi*xi for wi, xi in zip(weights, x)) < 0 else 1 for x in xs]\n else:\n ys = [bias+sum(wi*xi for wi, xi in zip(weights, x)) for x in xs]\n # Add noise to the attributes\n xs = [[xs[n][d]+random.gauss(0.0, noise) for d in range(dim)] for n in range(num)]\n # Return values\n return xs, ys\n\n\ndef multinomial(classes, num=512, seed=None):\n \"\"\"Generate a dataset based on Newton's method applied to 1+(-z)^c=0.\n\n Keyword arguments:\n classes -- number of classes to generate\n num -- number of instances (default 512)\n seed -- a seed to initialise the random number generator (default random)\n\n Return values:\n xs -- values of the attributes x1 and x2\n ys -- class labels in one-hot encoding\n \"\"\"\n # Seed the random number generator\n random.seed(seed)\n # Generate attribute data\n rs = [sqrt(0.75*random.random()) for n in range(num)]\n fs = [2.0*pi*random.random() for n in range(num)]\n xs = [[r*cos(f), r*sin(f)] for r, f in zip(rs, fs)]\n # Initialize outcomes\n ys = [[0.0 for c in range(classes)] for n in range(num)]\n # Perform Newton's method\n for n in range(num):\n z_old = -complex(xs[n][0], xs[n][1])\n z_new = (z_old*(classes-1)-z_old**(1-classes))/classes\n while abs(z_new-z_old) > 1e-9:\n z_old = z_new\n z_new = (z_old*(classes-1)-z_old**(1-classes))/classes\n c = int(((phase(-z_new)/pi+1.0)*classes-1.0)/2.0)\n ys[n][c] = 1.0\n # Return values\n return xs, ys\n","sub_path":"les4/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":7271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"574794203","text":"t = int(input())\nresult = []\nfor i in range(1, t + 1):\n k, c, s = [int(x) for x in input().split(\" \")] # read a list of integers, 2 in this case\n result = []\n toSum = k**(c-1)\n \n for j in range(s):\n result += [1 + j*toSum]\n \n print(\"Case #{}: {}\".format(i, \" \".join([str(x) for x in result])))\n ","sub_path":"codes/CodeJamCrawler/16_0_4/lidiamcfreitas/problemD.py","file_name":"problemD.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"4359775","text":"from board import Board\nimport numpy as np\n\nCOUNTERS = {'s':'p', 'p':'r', 'r':'s'}\nCOUNTERED = {'p':'s', 'r':'p', 's':'r'}\n\ndef distance(coord1, coord2):\n (r1, c1) = coord1\n (r2, c2) = coord2\n\n dr = r1 - r2\n dc = c1 - c2\n if (dr < 0 and dc < 0) or (dr > 0 and dc > 0):\n return abs(dr + dc)\n else:\n return max(abs(dr), abs(dc))\n\ndef unthrown_diff(board):\n return board.unthrown_uppers - board.unthrown_lowers\n\ndef thrown_diff(board):\n return board.remaining_tokens(\"UPPER\") - board.remaining_tokens(\"LOWER\") - unthrown_diff(board)\n\ndef dominance_diff(board):\n up_rps = (len(board.thrown_uppers[\"r\"]), len(board.thrown_uppers[\"p\"]), len(board.thrown_uppers[\"s\"]))\n low_rps = (len(board.thrown_lowers[\"r\"]), len(board.thrown_lowers[\"p\"]), len(board.thrown_lowers[\"s\"]))\n\n up_r = up_rps[0]/(low_rps[1] + up_rps[0] + 1)\n up_p = up_rps[1]/(low_rps[2] + up_rps[1] + 1)\n up_s = up_rps[2]/(low_rps[0] + up_rps[2] + 1)\n\n low_r = low_rps[0]/(up_rps[1] + low_rps[0] + 1)\n low_p = low_rps[1]/(up_rps[2] + low_rps[1] + 1)\n low_s = low_rps[2]/(up_rps[0] + low_rps[2] + 1)\n return (up_r + up_p + up_s) - (low_r + low_p + low_s)\n\ndef spread(ps):\n if not ps:\n return 0\n mean = 
np.mean(ps, axis = 0)\n centroid = (round(mean[0]), round(mean[1]))\n return sum([distance(p, centroid) for p in ps])/len(ps)\n\ndef spread_diff(board):\n u_ps = board.chain(board.thrown_uppers)\n l_ps = board.chain(board.thrown_lowers)\n return spread(u_ps) - spread(l_ps)\n\ndef min_circuit(ps, qs):\n # Number of tiles on the board\n min_circuit_dist = 9\n for p in ps:\n dists = [distance(p, q) for q in qs]\n\n if not dists:\n return 0\n\n avg_dist = np.sum(dists)/len(dists)\n if avg_dist < min_circuit_dist:\n min_circuit_dist = avg_dist\n return min_circuit_dist\n\ndef sum_min_dists(player_thrown, opponent_thrown):\n sum_dists = 0\n for key, value in player_thrown.items():\n countered = opponent_thrown[COUNTERS[key]]\n sum_dists += min_circuit(value, countered)\n return sum_dists\n\ndef capture_dist_difference(board):\n return sum_min_dists(board.thrown_lowers, board.thrown_uppers) - sum_min_dists(board.thrown_uppers, board.thrown_lowers)\n\n# evaluates a board state with the option of evaluating after input move from input\ndef evaluate(board):\n if board.is_win(\"UPPER\"):\n return 1\n if board.is_win(\"LOWER\"):\n return -1\n if board.is_draw():\n return 0\n\n # Features: throw_diff, scissor_diff, paper_diff, rock_diff, median row\n ut_diff = unthrown_diff(board)\n t_diff = thrown_diff(board)\n dom_diff = dominance_diff(board)\n s_diff = spread_diff(board)\n c_diff = capture_dist_difference(board)\n #print(unthrown_diff, dom_diff, thrown_diff)\n value = 0.2* ut_diff + 0.2 *t_diff + 0.25*dom_diff + 0.05*s_diff + 0.05*c_diff\n capped_val = min(max(value, -1), 1)\n\n return capped_val\n\ndef evaluate_move(board, move, player):\n return evaluate(apply_move(board, move, player))\n\ndef apply_move(board, move, player):\n player_thrown, player_unthrown = board.player_pieces(player)\n opponent_thrown, opponent_unthrown = board.opponent_pieces(player)\n player_thrown = board.copy_dict(player_thrown)\n opponent_thrown = board.copy_dict(opponent_thrown)\n\n if move[0] == \"THROW\":\n t = board.update_throw(player_thrown, move[1], move[2])\n player_unthrown -= 1\n else:\n t = board.update_slide_swing(player_thrown, move[1], move[2])\n\n counters_t = COUNTERED[t]\n countered_t = COUNTERS[t]\n\n if move[2] in player_thrown[counters_t] or move[2] in opponent_thrown[counters_t]:\n player_thrown[t] = [p for p in player_thrown[t] if p != move[2]]\n\n player_thrown[countered_t] = [p for p in player_thrown[countered_t] if p != move[2]]\n opponent_thrown[countered_t] = [p for p in opponent_thrown[countered_t] if p != move[2]]\n\n if player == \"UPPER\":\n return Board(player_thrown, opponent_thrown, player_unthrown, opponent_unthrown, board.turn + 1, None)\n else:\n return Board(opponent_thrown, player_thrown, opponent_unthrown, player_unthrown, board.turn + 1, None)\n","sub_path":"skeleton-code-B/grandMasters/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"89021109","text":"from django.shortcuts import render\r\nfrom django.http import HttpRequest, HttpResponse\r\nfrom django.http import JsonResponse, HttpResponse\r\nfrom django.http import StreamingHttpResponse\r\nfrom django.views.decorators.csrf import csrf_exempt, csrf_protect\r\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\r\nfrom AO_Prj.common.results.ao_results import AOJsonResult\r\nimport json\r\nfrom datetime import datetime,date\r\nfrom django.core import serializers\r\nfrom 
AO_Prj.models import TEngineAddr\r\n\r\n# Create your views here\r\n\r\n''' Initialize all machine room address info '''\r\n@csrf_exempt\r\ndef get_init_engineaddr(request):\r\n result = TEngineAddr.objects.all()\r\n return render(request, 'app/engineaddr/engineaddr.html', {'addrlist': result})\r\n\r\n''' Query all machine room address info '''\r\n@csrf_exempt\r\ndef get_engineaddr(request):\r\n if request.method == 'POST':\r\n # Get request parameters\r\n logobj = json.loads(request.body.decode())\r\n result = TEngineAddr.objects.all()\r\n return AOJsonResult(result)\r\n\r\n''' Add a new machine room address '''\r\n@csrf_exempt\r\ndef add_engineaddr(request):\r\n if request.method == 'POST':\r\n # Get filter parameters\r\n request_data = json.loads(request.body.decode())\r\n #addTime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n # Perform the insert operation\r\n try:\r\n #engineaddr = TEngineAddr(addrname=addrname, remark=remake, addtime=addTime)\r\n #engineaddr.save()\r\n TEngineAddr.objects.create(**request_data)\r\n except Exception as e:\r\n print(e)\r\n pass\r\n else:\r\n return HttpResponse(json.dumps({'data': 1}), content_type=\"application/json\")\r\n\r\n\r\n''' Delete a machine room address '''\r\n@csrf_exempt\r\ndef del_engineaddr(request):\r\n if request.method == 'POST':\r\n # Get request parameters\r\n request_data = json.loads(request.body.decode())\r\n addrid = request_data['addrid']\r\n\r\n # Perform the delete\r\n try:\r\n TEngineAddr.objects.filter(id=addrid).delete()\r\n except:\r\n pass\r\n else:\r\n return HttpResponse(json.dumps({'data': 1}), content_type=\"application/json\")\r\n\r\n''' Update machine room address info '''\r\n@csrf_exempt\r\ndef update_engineaddr(request):\r\n if request.method == 'POST':\r\n # Get request parameters\r\n request_data = json.loads(request.body.decode())\r\n addrid = request_data.pop('addrid')\r\n # Perform the update\r\n try:\r\n TEngineAddr.objects.filter(id=addrid).update(**request_data)\r\n except Exception as e:\r\n print(e)\r\n pass\r\n else:\r\n return HttpResponse(json.dumps({'data': 1}), content_type=\"application/json\")\r\n\r\n\r\n''' Get all machine room address names and their corresponding ids '''\r\n@csrf_exempt\r\ndef get_all_sites(request):\r\n queryAll = TEngineAddr.objects.all()\r\n result = [{'id':engine.id,'addrname':'%s-%s' % (engine.addrsite,engine.addrname)} for engine in queryAll]\r\n return AOJsonResult(result)\r\n\r\n\r\n","sub_path":"apps/engineaddr/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"270905449","text":"# -*- coding: utf-8 -*-\n\n#--------------------------------------------------------------------#\n# #\n# Copyright (C) 2018 HOLOEYE Photonics AG. All rights reserved. #\n# Contact: https://holoeye.com/contact/ #\n# #\n# This file is part of HOLOEYE SLM Display SDK. #\n# #\n# You may use this file under the terms and conditions of the #\n# \"HOLOEYE SLM Display SDK Standard License v1.0\" license agreement. 
#\n# #\n#--------------------------------------------------------------------#\n\n\n\n\n# Import the SLM Display SDK:\nimport detect_heds_module_path\nfrom holoeye import slmdisplaysdk\n\n\n# Function to print some statistics:\ndef printStat(stat, dataHandles):\n sum = 0.0\n count = 0\n min = 10000\n max = -10000\n\n for handle in dataHandles:\n # get the stat from the handle\n v = getattr(handle, stat)\n\n # check if this action did happen at all\n if v == slmdisplaysdk.Datahandle.NotDone:\n continue\n\n # process value\n sum += float(v)\n count += 1\n\n if v < min:\n min = v\n\n if v > max:\n max = v\n\n # check if any handle did this action\n if count > 0:\n avg = sum / count\n\n print(\"{0:<16} -> min: {1:<3} - avg: {2:<3} - max: {3:<3}\".format(stat, min, avg, max))\n else:\n print(\"{0:<16} -> min: {1} - avg: {1} - max: {1}\".format(stat, \"n/a\"))","sub_path":"SLM/slideshow_preload_print_stats.py","file_name":"slideshow_preload_print_stats.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"492774337","text":"# Notes: The output should collection of a CSV file, with 2000 sentences (or less) for #each file. Output csv file must have three columns: sentenceID as the #original corpus, sentences as original corpus and NER_extracted_Bern (output #of BERN)\nimport pandas as pd\nimport requests\n\ndef bern_ent_extraction(query):\n try:\n ent_info = query_raw(query)\n extracted_ents = extract_ents(ent_info)\n return extracted_ents\n except:\n return None\n\n\ndef query_raw(text, url=\"https://bern.korea.ac.kr/plain\"):\n return requests.post(url, data={'sample_text': text}).json()\n\n\n# define functions to extract ENTs from Bern model\n\n\n# find_ent_index(ents)\ndef extract_ents(ents_info):\n index = find_ent_index(ents_info)\n extracted_ents = find_ent(index)\n return extracted_ents\n\n\ndef find_ent_index(ents_info):\n ent_index = {}\n for k, v in ents_info['logits'].items():\n if v:\n l = [v[0][0][\"start\"], v[0][0][\"end\"]]\n tup = tuple(l)\n ent_index[k] = tup\n return ent_index\n\n\ndef find_ent(ent_index):\n extracted_ent = {}\n for k, v in ent_index.items():\n extracted_ent[k] = query[v[0]:v[1]]\n return extracted_ent\n\n\nwith open (r\"E:\\Helen\\FinalProject_INFO5731\\COVID_19_relatedWorking\\All_COVID_related_body_sentSplited\\COV_RelatedBody_sentSplit_DS1.csv\", 'r', newline='', encoding='utf-8') as file:\n df = pd.read_csv(file)\n#for sent in df.sentence[:1]:'\nquery = \"Autophagy captures intracellular components and delivers them to lysosomes, where they are degraded and recycled to sustain metabolism and to enable survival during starvation1-5\"\nif __name__ == '__main__':\n query_raw(query)\n\nbern_ents = []\nsents_list=[]\nfor sent in df.sentence[-1000:]:\n sents_list.append(sent)\n query = sent\n bern_ents.append(bern_ent_extraction(query))\n\nprint(bern_ents)\nsentID = [id for id in df.sentenceID[-1000:]]\ndf_ent = pd.DataFrame(list(zip(sentID, sents_list,bern_ents)), columns=[\"sentenceID\",\"sentences\", \"BERN_entities\"])\n\nwith open (\"E:\\Helen\\FinalProject_INFO5731\\COVID_19_relatedWorking\\All_COVID_ENTS_extracted\\BERN\\DS1\\Entities_extracted_DS1_1000_-1.csv\", 'w', newline='', encoding='utf-8') as file:\n 
df_ent.to_csv(file)\n","sub_path":"Deep_Content_Analysis/NER_Extracting/Entities/BERN/DS1/BERN_COVID_DS1_1000.py","file_name":"BERN_COVID_DS1_1000.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"479230137","text":"import os\nimport sys\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\ndef read(fname):\n with open(os.path.join(os.path.dirname(__file__), fname)) as f:\n return f.read()\n\nsetup(\n name='provider',\n version=\"0.0.2\",\n author='schwarzlicht',\n author_email='schwarzlicht@riseup.net',\n description=('Content crawler for Twitter'),\n long_description=read('readme.md'),\n license='mit',\n include_package_data=False,\n packages=find_packages(),\n entry_points={'console_scripts': [\n 'content-provider=provider.script:main',\n ]},\n install_requires=['argparse', 'tweepy', 'python-decouple', 'apscheduler',\n 'pyyml'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"483203890","text":"import base_grid\n\n# Inherit a Grid and give it an update instruction\nclass life_grid(base_grid.grid):\n def __init__(self, block, screen, colour):\n base_grid.grid.__init__(self, block, screen, colour)\n self.mouse_clicks = []\n\n # Propogate mouse_clicks to the cells\n def add_mouse_clicks(self, mouse_click):\n x, y = mouse_click\n for coordinate in self.cells:\n xo, yo = coordinate\n if (xo < x and x <= xo + self.width) and (yo < y and y <= yo + self.height):\n self.cells[coordinate].pop()\n self.cells[coordinate].append(\"white\")\n\n # Propogate the life\n def propogate_life(self):\n if len(self.cells[self.margin, self.margin]) == 1: \n self.add_neighbors()\n return\n if len(self.cells[self.margin, self.margin]) == 9: \n for coordinate in self.cells: \n live_neighbors = 0\n old_state = self.cells[coordinate].pop(0)\n new_state = old_state\n\n while self.cells[coordinate]:\n if not self.cells[coordinate].pop() == \"black\":\n live_neighbors = live_neighbors + 1\n\n if old_state == \"black\":\n if live_neighbors == 3:\n new_state = \"white\"\n elif live_neighbors < 2 or live_neighbors > 3:\n new_state = \"black\"\n elif live_neighbors == 3:\n if old_state == \"green\":\n new_state = \"red\"\n if old_state == \"blue\":\n new_state = \"green\"\n if old_state == \"red\":\n new_state = \"blue\"\n if old_state == \"white\":\n new_state = \"blue\"\n\n self.cells[coordinate].append(new_state)\n self.add_neighbors()\n\n #Get the neighbor cell states\n def add_neighbors(self):\n if len(self.cells[self.margin, self.margin]) == 1:\n for coordinate in self.cells:\n xo, yo = coordinate\n for i in range(3):\n x = xo - (i - 1) * (self.width + self.margin)\n for j in range(3):\n y = yo - (j - 1) * (self.height + self.margin)\n if not (x == xo and y == yo): \n if x < self.margin:\n x = self.xmax\n if x > self.screen_width - self.width:\n x = self.margin\n if y < self.margin: \n y = self.ymax\n if y > self.screen_width - self.height:\n y = self.margin\n self.cells[(xo, yo)].append(self.cells[(x, y)][0])\n","sub_path":"GameOfLife/grid/life_grid.py","file_name":"life_grid.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"513710583","text":"from django import forms\nfrom users.models import Profile\n\n\nclass 
UserCreationProfileForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields = ['country', 'phone']\n widgets = {'phone': forms.NumberInput(attrs={'aria-label': 'Sizing example input', 'aria-describedby': 'nputGroup-sizing-default',\n 'class': 'form-control'})}\n","sub_path":"main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"387303894","text":"estatura = float(input('Digite su estatura\\n'))\nacumuladorestatura = 0\ncontadorpersonas = 0\npromedio = 0\n\nwhile estatura >= 1.0 and estatura <= 3.0:\n\n\testatura = float(input('Digite su estatura\\n'))\n\n\tcontadorpersonas = contadorpersonas + 1\n\tacumuladorestatura = acumuladorestatura + estatura\n\tpromedio = acumuladorestatura / contadorpersonas\n\nelse:\n\tprint('El promedio de las estaturas es:', promedio)","sub_path":"EjerciciosPython2/estatura_promedio3.py","file_name":"estatura_promedio3.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"587996027","text":"s = int(input())\n\nans = [s]\nidx = 1\ntmp = s\nwhile True:\n if tmp % 2 == 0:\n tmp = tmp//2\n else:\n tmp = (3*tmp) + 1\n idx +=1\n\n if tmp in ans:\n print(idx)\n exit()\n else:\n ans.append(tmp)","sub_path":"Python_codes/p03146/s629482993.py","file_name":"s629482993.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"334472017","text":"#\n# Python: 3.9.5\n#\n# Chris Windsor\n#\n# Purpose: The Tech Academy Python Course\n# Demonstrating how to pass variables from function to \n# while producing a functional game. \n#\n\n\n\n\ndef start():\n f_name = \"Sarah\"\n l_name = \"Connor\"\n age = 28\n gender = \"Female\"\n get_info(f_name,l_name,age,gender)\n\n\n\ndef get_info(f_name,l_name,age,gender):\n print(\"My name is {} {}. I am a {} year old {}.\".format(f_name,l_name,age,gender))\n\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n start()\n","sub_path":"python_tutorial.py","file_name":"python_tutorial.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"410751218","text":"def computador_escolhe_jogada(n, m):\n resto = n % (m + 1)\n if n == m:\n return n\n elif resto == 0 or resto > m:\n return m\n else:\n return resto\n\ndef usuario_escolhe_jogada(n, m):\n pecas = 0\n while pecas < 1 or pecas > m or pecas > n:\n pecas = int(input(\"Digite o número de peças a serem retirados: \"))\n if pecas < 1 or pecas > m:\n print(\"Jogada invalida, tente novamente.\")\n return pecas\n\n\ndef menu ():\n user = 0\n comp = 0\n print(\"Bem-vindo ao jogo do NIM! 
Escolha:\\n\")\n print(\"1 - para jogar uma partida isolada\\n2 - para jogar um campeonato\")\n escolha = int(input(\"\"))\n if escolha == 1:\n print(\"Você escolheu uma partida isolada\")\n print(\"-=\" * 40)\n print(\"\\t\\t\\t\\t JOGO NIM\")\n print(\"-=\" *40)\n partida()\n elif escolha == 2:\n print(\"Voce escolheu um campeonato!\\n\")\n print(\"\\n**********Rodada 1************\")\n result = partida()\n if result == \"user\":\n user += 1\n elif result == \"comp\":\n comp += 1\n print(\"Placar: Você {} X {} Computador\".format(user, comp))\n print(\"\\n**********Rodada 2************\")\n result = partida()\n if result == \"user\":\n user += 1\n elif result == \"comp\":\n comp += 1\n print(\"Placar: Você {} X {} Computador\".format(user, comp))\n print(\"\\n**********Rodada 3************\")\n result = partida()\n if result == \"user\":\n user += 1\n elif result == \"comp\":\n comp += 1\n print(\"Placar: Você {} X {} Computador\".format(user, comp))\n\ndef partida():\n n = int(input(\"Digite o número de peças do jogo: \"))\n m = int(input(\"Digite o número de peças máximos que é possível retirar em uma rodada: \"))\n nInicial = n\n pecas = 0\n print(\"\\n\")\n if nInicial % (m + 1) == 0:\n print(\"Você começa!\\n\")\n while n > 0:\n pecas1 = usuario_escolhe_jogada(n, m)\n print(\"\\nVocê tirou {} peças\".format(pecas1))\n n -= pecas1\n print(\"Restam {} peças\\n\".format(n))\n if n <= 0:\n print(\"Você ganhou!\")\n return \"user\"\n pecas2 = computador_escolhe_jogada(n, m)\n print(\"\\nO computador tirou tirou {} peças\".format(pecas2))\n n -= pecas2\n print(\"Restam {} peças\".format(n))\n if n <= 0:\n print(\"O computador ganhou!\")\n return \"comp\"\n pecas = pecas1 + pecas2\n else:\n print(\"Computador começa!\\n\")\n while n > 0:\n pecas1 = computador_escolhe_jogada(n, m)\n print(\"\\nO computador tirou {} peças\".format(pecas1))\n n -= pecas1\n print(\"Restam {} peças\".format(n))\n if n <= 0:\n print(\"O computador ganhou!\")\n return \"comp\"\n pecas2 = usuario_escolhe_jogada(n, m)\n print(\"\\nVocê tirou {} peças\".format(pecas2))\n n -= pecas2\n print(\"Restam {} peças\\n\".format(n))\n if n <= 0:\n print(\"Você ganhou!\")\n return \"user\"\n pecas = pecas1 + pecas2\n\nmenu()\n","sub_path":"Curso Coursera - python/jogo_nim.py","file_name":"jogo_nim.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"144716061","text":"import math\n\n\ndef lse():\n \"\"\"\n https://www.cnblogs.com/BlogOfMr-Leo/p/8627311.html\n :return:\n \"\"\"\n import numpy as np\n from scipy import optimize\n\n x = np.array([0.9, 2.5, 3.3, 4.5, 5.7, 6.9])\n y = np.array([1.1, 1.6, 2.6, 3.2, 4.0, 6.0])\n\n def reds(p):\n # 计算以p为参数的直线和数据之间的误差\n k, b = p\n return y - (k * x + b)\n # return math.pow((y - (k * x + b)),2)\n\n # leastsq 使得reds()输出最小,参数的初始值是【1,0】\n r = optimize.leastsq(reds, [1, 0])\n k, b = r[0]\n print(\"k=\", k, \"\\n b=\", b)\n y1 = x * k + b\n a = np.array([y1[0] - y[0], y1[1] - y[1], y1[2] - y[2], y1[3] - y[3], y1[4] - y[4], y1[5] - y[5]])\n print(\"\\n\", y, \"\\n\", y1, a)\n print(\"灵敏度计算\", k)\n\nif __name__ == '__main__':\n print(2^2)\n print(math.pow(2,3))\n lse()","sub_path":"math_basic/optimize_method.py","file_name":"optimize_method.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"73762828","text":"from django.conf.urls import url\n\nfrom . 
import views\n\napp_name = 'posts'\n\nurlpatterns = [\n url(r'^(?P\\d+)/comment/$', views.comment_add, name='comment_add'),\n url(r'^(?P\\d+)/comment/(?P\\d+)/approve/$', views.comment_approve, name='comment_approve'),\n url(r'^(?P\\d+)/comment/(?P\\d+)/edit/$', views.comment_edit, name='comment_edit'),\n url(r'^(?P\\d+)/comment/(?P\\d+)/remove/$', views.comment_remove, name='comment_remove'),\n]\n","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"268735704","text":"import argparse\nfrom pprint import pprint\n\nfrom gevent.pywsgi import WSGIServer\nfrom lib.applib_updater import update\n\nclass Application(object):\n\n def __init__(self, applib_repo_dir, website_repo_dir):\n self.__applib_repo_dir = applib_repo_dir\n self.__website_repo_dir = website_repo_dir\n\n def __call__(self, env, start_response):\n if env['REQUEST_METHOD'] == 'POST':\n if env['PATH_INFO'] == '/' and update(self.__applib_repo_dir, self.__website_repo_dir):\n start_response('200 OK', [('Content-Type', 'text/html')])\n return [\"Updated\"]\n else:\n start_response('404 Not Found', [('Content-Type', 'text/html')])\n return ['Not Found']\n \n start_response('405 Method Not Allowed', [('Content-Type', 'text/html')])\n return ['Method Not Allowed
Try POST']\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(description='Webapp for testing the applib_updater.update function')\n parser.add_argument('-a', '--applib-repo-dir', required=True)\n parser.add_argument('-w', '--website-repo-dir', required=True)\n parser.add_argument('-p', '--port', type=int, required=True)\n args = parser.parse_args()\n #pprint(args)\n print('Serving on {0}...'.format(args.port))\n \n WSGIServer(\n ('', args.port),\n Application(\n args.applib_repo_dir,\n args.website_repo_dir\n )\n ).serve_forever()\n \n","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"402654051","text":"#Тестовая программа по греческой статье\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#Исходные данные\r\n#Модуль сдвига\r\nG = 2039000/(2*(1 + 0.25))\r\n#Коэффициент Пуассона\r\nnu = 0.3\r\n#Константы\r\nl = 0.0\r\na = 5.0\r\n#Массив координат точек пластины\r\npc = np.array([[0, 0],\r\n [1, 0],\r\n [0, 1]])\r\npc2 = np.array([[0, 0],\r\n [0.5, 0],\r\n [1, 0],\r\n [0.5, 0.5],\r\n [0, 1],\r\n [0, 0.5]])\r\n#Массив элементов - в нём номера точек, на которых он строится\r\nelm1 = np.array([0, 1, 2])\r\n#Для обычных пластин\r\n#Нижняя левая, нижняя правая, верхняя, центральная\r\nelm2 = np.array([[0, 1, 5],\r\n [1, 2, 3],\r\n [3, 4, 5],\r\n [1, 3, 5]])\r\n#Вектор узловых нагрузок и узловых моментов\r\n\"\"\"fu = np.array([0, 0, 0, 0, 0, 0])\r\nffi = np.array([1e-12, 0, 0])\"\"\"\r\nP1 = np.array([0, 0, 0, 0, 1000, 0])\r\nP2 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1000, 0, 0, 0])\r\n\r\n#Функция получения матрицы жёсткости элемента\r\n#pn - кортеж с номерами точек ��ластины\r\n#te - тип матрицы:\r\n#1 - 6x6\r\n#2 - 12x12\r\ndef matrix_K(pc, pn, te):\r\n #1. Определение длин сторон пластины\r\n #Номер стороны = номер противоположного узла пластины\r\n l1 = math.sqrt((pc[pn[1], 0] - pc[pn[2], 0])**2 + (pc[pn[1], 1] - pc[pn[2], 1])**2)\r\n l2 = math.sqrt((pc[pn[0], 0] - pc[pn[2], 0])**2 + (pc[pn[0], 1] - pc[pn[2], 1])**2)\r\n l3 = math.sqrt((pc[pn[1], 0] - pc[pn[0], 0])**2 + (pc[pn[1], 1] - pc[pn[0], 1])**2)\r\n #2. Определение дополнительных параметров, связанных с размером пластины\r\n l12 = l1**2 + l2**2 - l3**2\r\n l23 = l2**2 + l3**2 - l1**2\r\n l31 = l3**2 + l1**2 - l2**2\r\n #3. Определение метрического тензора (что это?)\r\n gab = np.array([[l2**2, l12/2],\r\n [l12/2, l1**2]])\r\n #4. Вычисление определителя метрического тензора\r\n g = np.linalg.det(gab)\r\n #5. Вычисление площади пластины через определитель метрического тензора\r\n A = math.sqrt(g/4)\r\n #6. Задаём известные матрицы\r\n B = np.array([[1, 0, 0, 0, -1, 0],\r\n [0, 0, 0, 1, 0, -1],\r\n [0, 1, 0, 0, 0, -1],\r\n [0, 0, 1, 0, -1, 0]])\r\n D = G*np.array([[(2*(1 - nu))/(1 - 2*nu), (2*nu)/(1 - 2*nu), 0, 0],\r\n [(2*nu)/(1 - 2*nu), (2*(1 - nu))/(1 - 2*nu), 0, 0],\r\n [0, 0, 1 + a, 1 - a],\r\n [0, 0, 1 - a, 1 + a]])\r\n #7. Задаём константу\r\n C = 2*a\r\n #8. Вычисляем подматрицы\r\n Kuu = A*(B.transpose() @ D @ B)\r\n #9. 
Составляем матрицу жесткости элемента\r\n Ke = Kuu\r\n if te == 2:\r\n E1 = D[0][0] + D[0][3]\r\n E2 = D[0][1] + D[1][3]\r\n E3 = D[0][2] + D[2][3]\r\n E4 = D[0][3] + D[3][3]\r\n \r\n M1 = D[0][1] + D[0][2]\r\n M2 = D[1][1] + D[1][2]\r\n M3 = D[1][2] + D[2][2]\r\n M4 = D[1][3] + D[2][3]\r\n \r\n R1 = D[0][1] + D[2][3]\r\n R2 = D[0][2] + D[1][3]\r\n \r\n T1 = 8*(D[0][0] + D[0][3] + D[3][3])\r\n T2 = 8*(D[1][1] + D[1][2] + D[2][2])\r\n \r\n #Заполняем одну половину, другую отзеркалим\r\n Ke = np.array([[3*D[0][0], 3*D[0][2], -4*D[0][3], -D[0][1], E1, M1, 0, 0, -4*E1, -4*M1, 4*D[0][3], 4*D[0][1]],\r\n [0, 3*D[2][2], -D[2][3], -D[1][2], E3, M3, 0, 0, -4*E3, -4*M3, 4*D[2][3], 4*D[1][2]],\r\n [0, 0, 3*D[3][3], 3*D[1][3], E4, M4, -4*E4, -4*M4, 0, 0, 4*D[0][3], 4*D[2][3]],\r\n [0, 0, 0, 3*D[1][1], E2, M2, -4*E2, -4*M2, 0, 0, 4*D[0][1], 4*D[1][2]],\r\n [0, 0, 0, 0, 3*(E1 + E4), 3*(M1 + M4), -4*E4, -4*E2, -4*E1, -4*E3, 0, 0],\r\n [0, 0, 0, 0, 0, 3*(M2 + M3), -4*M4, -4*M2, -4*M1, -4*M3, 0, 0],\r\n [0, 0, 0, 0, 0, 0, T1, 4*R1 + 8*R2, 8*D[0][3], 4*R1, -8*E1, -4*(E3 + M1)],\r\n [0, 0, 0, 0, 0, 0, 0, T2, 4*R1, 8*D[1][2], -4*(E3 + M1), -8*M3],\r\n [0, 0, 0, 0, 0, 0, 0, 0, T1, 4*R1 + 8*R2, -8*E4, -4*(E2 + M4)],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0, T2, -4*(E2 + M4), -8*M2],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, T1, 4*R1 + 8*R2],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, T2]])\r\n \r\n #Зеркалим незаполненную половину матрицы\r\n for i in range(12):\r\n for j in range(i):\r\n Ke[i][j] = Ke[j][i]\r\n #Секция вывода данных\r\n \"\"\"print('1:\\nl1 =', l1, '\\nl2 =', l2, '\\nl3 =', l3)\r\n print('2:\\nl12 =', l12, '\\nl23 =', l23, '\\nl31 =', l31)\r\n print('3:\\ngab =\\n', gab)\r\n print('4:\\ng =', g)\r\n print('5:\\nA =', A)\r\n print('6:\\nB =\\n', B, '\\nD =\\n', D, '\\nBcap =\\n', Bcap, '\\nDcap =\\n', Dcap)\r\n print('7:\\nC =', C)\r\n print('8:\\nKuu =\\n', Kuu, '\\nKufi =\\n', Kufi, '\\nKfifi =\\n', Kfifi)\r\n print('9:\\nKe =\\n', Ke.astype(int))\"\"\"\r\n #И возвращаем Ke\r\n return Ke\r\n\r\n#Функция установки граничного условия\r\n#KG - глобальная матрица жёсткости\r\n#n - номер столбца и строки, который мы закрепляем (начинается с 0)\r\ndef set_BC(KG, n):\r\n #Размер матрицы\r\n size = KG.shape[0]\r\n KG[0:size, n:n + 1] = np.zeros((size, 1))\r\n KG[n:n + 1, 0:size] = np.zeros(size).transpose()\r\n KG[n, n] = 1\r\n \r\n return KG\r\n\r\n#Функция добавления матрицы к глобальной матрице жёсткости\r\n#KG - глобальная матрица жёсткости\r\n#Kelm - матрица жёсткости элемента\r\n#n - номер элемента\r\ndef matrix_Add(KG, Kelm, elm, n):\r\n for iy in range(3):\r\n for ix in range(3):\r\n KG[2*elm[n, iy]:2*elm[n, iy] + 2, 2*elm[n, ix]:2*elm[n, ix] + 2] += Kelm[iy*2:iy*2 + 2, ix*2:ix*2 + 2]\r\n \r\n return KG\r\n\r\n#Количество точек\r\npn1 = pc2.shape[0]\r\n#И количество элементов\r\nen1 = elm2.shape[0]\r\n#Получаем глобальную матрицу жёсткости\r\nK1 = np.zeros((pn1*2, pn1*2))\r\n#Заносим в матрицу жёсткости элементы\r\nfor i in range(en1):\r\n K1 = matrix_Add(K1, matrix_K(pc2, elm2[i], 1), elm2, i)\r\n print('KElm of elm #', i, ':', matrix_K(pc2, elm2[i], 1), '\\nK1 step', i, ':\\n', K1)\r\n\r\n#Получаем матрицы жёсткости элементов\r\n#K1 = matrix_K(elm, 1)\r\nK2 = matrix_K(pc, elm1, 2)\r\n#Заносим в матрицу жёсткости элементы\r\n#И присваиваем граничное условие\r\nK1 = set_BC(K1, 0)\r\nK1 = set_BC(K1, 1)\r\n#K1 = set_BC(K1, 2)\r\nK1 = set_BC(K1, 3)\r\n#K1 = set_BC(K1, 4)\r\nK1 = set_BC(K1, 5)\r\n\r\nK2 = set_BC(K2, 0)\r\nK2 = set_BC(K2, 1)\r\n#K2 = set_BC(K2, 2)\r\nK2 = set_BC(K2, 3)\r\n#K2 = set_BC(K2, 4)\r\nK2 = 
set_BC(K2, 5)\r\n#K2 = set_BC(K2, 6)\r\n#print('10:\\nBC =\\n', K.astype(int))\r\n#10. Составляем вектор нагрузок\r\n\"\"\"P = np.zeros(9)\r\nP[0:6] = fu\r\nP[6:9] = ffi\"\"\"\r\n#print('10:\\nP =\\n', P)\r\n#11. Получаем вектор узловых перемещений U, решив систему линейных уравнений\r\nU1 = np.linalg.solve(K1, P2.transpose())\r\nU2 = np.linalg.solve(K2, P2.transpose())\r\nU1 = U1.reshape((int(U1.shape[0]/2), 2)).transpose()\r\nU2 = U2.reshape((int(U2.shape[0]/2), 2)).transpose()\r\n#print('11:\\nU =\\n', U1)\r\nprint('U1:\\n', U1, '\\n\\nU2:\\n', U2)\r\n\r\n#Графический вывод\r\nk = 100\r\npcd = pc.transpose()\r\npc2d = pc2.transpose()\r\nprint('pcd:\\n', pcd, '\\n\\npc2d:\\n', pc2d)\r\n\r\nplt.figure(figsize = (16, 9))\r\n\r\nplt.plot(pc2d[0], pc2d[1], 'bo-')\r\nplt.plot(pc2d[0] + U1[0]*k, pc2d[1] + U1[1]*k, 'ro-')\r\nplt.plot(pc2d[0] + U2[0]*k, pc2d[1] + U2[1]*k, 'go-')\r\n#Вывод исходной схемы\r\n#Проходимся по элементам\r\n\"\"\"for i in range(elm.shape[0]):\r\n #Массив точек треугольника\r\n #Четвёртая точка - чтобы замкнуть\r\n #Сразу транспонирован\r\n tp = np.zeros((2, 4))\r\n #Заполняем по точкам\r\n for j in range(3):\r\n for ii in range(2):\r\n tp[ii, j] = pc[elm[i][j]][ii]\r\n #Дублируем последнюю точку\r\n tp[0:2, 3:4] = tp[0:2, 0:1]\r\n #И рисуем\r\n plt.plot(tp[0], tp[1], 'bo-')\r\n#Вывод схемы с перемещениями\r\n#Проходимся по элементам\r\nfor i in range(elm.shape[0]):\r\n #Массив точек треугольника\r\n #Четвёртая точка - чтобы замкнуть\r\n #Сразу транспонирован\r\n tp = np.zeros((2, 4))\r\n #Заполняем по точкам\r\n for j in range(3):\r\n for ii in range(2):\r\n tp[ii, j] = pc[elm[i][j]][ii] + U[elm[i][j]*3 + ii]\r\n #Дублируем последнюю точку\r\n tp[0:2, 3:4] = tp[0:2, 0:1]\r\n #И рисуем\r\n plt.plot(tp[0], tp[1], 'ro-')\"\"\"\r\nplt.axis('equal')\r\nplt.title('Общий вид системы')\r\nplt.show()","sub_path":"test_n2.py","file_name":"test_n2.py","file_ext":"py","file_size_in_byte":9665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"480000547","text":"\"\"\"\n9. Среди натуральных чисел, которые были введены, найти\nнаибольшее по сумме цифр. Вывести на экран это число и сумму его цифр.\n\"\"\"\n\nmax_n = 0\nfor i in range(10):\n num = input(\"Введите натуральное число: \")\n a = 0\n for j in num:\n a += int(j)\n if a > max_n:\n max_n = a\n max_num = num\nprint(f\"Наибольшая сумма чисел у числа {max_num}\")","sub_path":"Lesson_2/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"644039799","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom dbase_create import Prova, Questao, Assertiva, Oj, Sumula, Enunciado, Lei, LeiAlt\n\nBase = declarative_base()\n \nengine = create_engine('sqlite:///smartlegis.db')\n# Bind the engine to the metadata of the Base class so that the\n# declaratives can be accessed through a DBSession instance\nBase.metadata.bind = engine\n \nDBSession = sessionmaker(bind=engine)\n# A DBSession() instance establishes all conversations with the database\n# and represents a \"staging zone\" for all the objects loaded into the\n# database session object. Any change made against the objects in the\n# session won't be persisted into the database until you call\n# session.commit(). 
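# (Editor's sketch with a hypothetical pending object: after p2 = Prova(ano=2019) and\n# session.add(p2), calling session.rollback() would discard the pending insert, while\n# session.commit() persists it.)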
If you're not happy about the changes, you can\n# revert all of them back to the last commit by calling\n# session.rollback()\nsession = DBSession()\n \n# begin inserts\np = Prova(\n        ano=2019,\n        esfera='Estadual',\n        banca='CESPE',\n        tipo='CE',\n        escolaridade='superior',\n        area='direito',\n        instituto='PGE',\n        instituto_uf='AP',\n        instituto_municipio='macapá',\n        supercargo='Procurador',\n        cargo='Procurador do Estado',\n        ninscritos=3000,\n        nota_max=70.00,\n        corte=65.00\n        )\n\nsession.add(p)\nsession.commit()","sub_path":"dbase_inserts.py","file_name":"dbase_inserts.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"114335222","text":"# COMPONENT CREATOR\nfrom CMGTools.RootTools.samples.ComponentCreator import ComponentCreator\nkreator = ComponentCreator()\n\n# ----------------------------- 2017 pp run ----------------------------------------\n\njson = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions17/5TeV/ReReco/Cert_306546-306826_5TeV_EOY2017ReReco_Collisions17_JSON.txt'\n\n# ----------------------------- Run2017G 17Nov2017 ----------------------------------------\nSingleMuon_Run2017G_17Nov2017 = kreator.makeMyPrivateDataComponent(\"SingleMuon_Run2017G\",\"/SingleMuon/jrgonzal-data5TeV_22nov2019_SingleMuon_Run2017G-17Nov2017-v1-e6071589c1d4feaedf45b2e5392eb06a/USER\", \"PRIVATE\", \".*root\", \"phys03\", json, None, [], [],True)\nDoubleMuon_Run2017G_17Nov2017 = kreator.makeMyPrivateDataComponent(\"DoubleMuon_Run2017G\",\"/DoubleMuon/jrgonzal-data5TeV_22nov2019_DoubleMuon_Run2017G-17Nov2017-v1-e6071589c1d4feaedf45b2e5392eb06a/USER\", \"PRIVATE\", \".*root\", \"phys03\", json, None, [], [],True)\nHighEGJet_Run2017G_17Nov2017 = kreator.makeMyPrivateDataComponent(\"HighEGJet_Run2017G\",\"/HighEGJet/jrgonzal-data5TeV_22nov2019_HighEGJet_Run2017G-17Nov2017-v2-e6071589c1d4feaedf45b2e5392eb06a/USER\", \"PRIVATE\", \".*root\", \"phys03\", json, None, [], [],True)\n\ndataSamples_Run2017G = [SingleMuon_Run2017G_17Nov2017, DoubleMuon_Run2017G_17Nov2017, HighEGJet_Run2017G_17Nov2017]\n\n\n# ---------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n    from CMGTools.RootTools.samples.tools import runMain\n    runMain(dataSamples_Run2017G, localobjs=locals())\n","sub_path":"RootTools/python/samples/samples_5TeV_DATA2017_NanoAOD.py","file_name":"samples_5TeV_DATA2017_NanoAOD.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"279224837","text":"import os.path\n\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\n\nimport db_api\nimport image_api\n\nfrom tornado.options import define, options\ndefine('port', default=80, help='tripod report server runs on the given port', type=int)\n\nclass IndexHandler(tornado.web.RequestHandler):\n    def get(self):\n        bizes = db_api.conndb.GetBizes()\n        caches = db_api.conndb.GetCaches()\n        self.render('index.tpl',biz_items = bizes, machine_items= caches)\n    \nclass BizReportHandler(tornado.web.RequestHandler):\n    def post(self):\n#        try:\n            begin = self.get_argument('begin_time_biz')\n            end = self.get_argument('end_time_biz')    \n            biz = self.get_argument('biz_id')\n            data = db_api.conndb.ReadData(biz,['get','set','load','remove'], begin ,end)\n            image_src = image_api.PlotImage(biz,['get','set','load','remove'], begin,end,data)\n            bizes = db_api.conndb.GetBizes()\n            self.render('biz_query.tpl', selected_biz_id 
=biz, biz_items = bizes, begin_time = begin, end_time= end, items = data,pic_path= image_src)\n# except Exception:\n# self.render('none.tpl')\n\nclass CacheReportHandler(tornado.web.RequestHandler):\n def post(self):\n machine = self.get_argument('machine')\n begin = self.get_argument('begin_time_cache')\n end = self.get_argument('end_time_cache') \n data = db_api.conndb.ReadData(machine,['used_memory_rss','total','hit','miss'], begin ,end)\n image_src_memory = image_api.PlotImage(machine ,['used_memory_rss'], begin,end,data)\n image_src_command = image_api.PlotImage(machine ,['total','hit','miss'], begin,end,data)\n caches = db_api.conndb.GetCaches()\n self.render('cache_query.tpl', selected_cc =machine, machine_items= caches, begin_time = begin, end_time= end, items = data, pic_path_memory= image_src_memory, pic_path_command= image_src_command)\n \nif __name__ == '__main__':\n tornado.options.parse_command_line()\n app = tornado.web.Application(\n handlers=[(r'/', IndexHandler), \n (r'/cache_query', CacheReportHandler),\n (r'/biz_query', BizReportHandler)],\n template_path=os.path.join(os.path.dirname(__file__), './templates'),\n static_path=os.path.join(os.path.dirname(__file__), './static'),\n )\n http_server = tornado.httpserver.HTTPServer(app)\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n\n","sub_path":"main_project/tripod2/py/report/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"437480318","text":"from functools import wraps\n\nfrom flask import Flask, jsonify, make_response, redirect, request\nfrom flask_restful import Api, reqparse, Resource\nfrom pony import orm\nfrom werkzeug.exceptions import BadRequest\n\n# Configuration\nPROVIDER = 'sqlite'\nCREATE_DB = True\nDATABASE = 'database.db'\nDEBUG = True\nSECRET_KEY = 'development key'\nUSERNAME = 'admin'\nPASSWORD = 'default'\n\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\n# database and models\ndb = orm.Database()\n\n\nclass ServiceRegistry(db.Entity):\n _table_ = 'services'\n\n id = orm.PrimaryKey(int, auto=True)\n service = orm.Required(str)\n version = orm.Optional(str)\n change = orm.Required(str)\n\n\n# helper methods\ndef sortkeypicker(keynames):\n # https://stackoverflow.com/a/1143719/311829\n negate = set()\n for i, k in enumerate(keynames):\n if k[:1] == '-':\n keynames[i] = k[1:]\n negate.add(k[1:])\n\n def getit(adict):\n composite = [adict[k] for k in keynames]\n for i, (k, v) in enumerate(zip(keynames, composite)):\n if k in negate:\n composite[i] = -v\n return composite\n return getit\n\n\ndef errors_handler(view):\n @wraps(view)\n def wrapped(self, *f_args, **f_kwargs):\n try:\n return view(self, *f_args, **f_kwargs)\n except orm.ObjectNotFound as e:\n return make_response(jsonify({\n 'status': False,\n 'message': 'Resource Not Found'\n }), 404)\n except Exception as e:\n return make_response(jsonify({\n 'status': False,\n 'message': 'Internal Server Error: ' + str(e)\n }), 505)\n return wrapped\n\n\n# requires both service and version params\nservice_parser = reqparse.RequestParser(bundle_errors=True)\nservice_parser.add_argument(\n 'service', type=str,\n help='Service name has to be provided',\n required=True\n)\nservice_parser.add_argument(\n 'version', type=str,\n help='Version value has to be provided',\n required=True\n)\n\n\n# rest resources\nclass ServiceResourceList(Resource):\n\n # @errors_handler\n @orm.db_session\n def 
get(self):\n data = request.values\n if len(list(data.keys())) == 0:\n # no params to query\n query = ServiceRegistry.select()\n elif ('service' in data and data['service']) and ('version' in data and data['version']):\n # filtering by service and version\n query = orm.select(\n c for c in ServiceRegistry\n if c.service == data['service'] and c.version == data['version'])\n elif ('service' in data and data['service']):\n # filtering by only service\n query = orm.select(\n c for c in ServiceRegistry\n if c.service == data['service'])\n return {\n 'status': True,\n 'items': sorted([\n item.to_dict()\n for item in query],\n key=sortkeypicker(('service', 'version', 'change'))\n )}\n\n @errors_handler\n @orm.db_session\n def put(self):\n try:\n values = service_parser.parse_args()\n except BadRequest as e:\n err_response = {'status': False}\n err_response.update(e.data)\n return make_response(jsonify(err_response), 400)\n\n service = ServiceRegistry(\n service=values['service'],\n version=values['version'],\n change='created'\n )\n db.commit()\n return redirect('/services/' + str(service.id))\n\n\nclass ServiceResource(Resource):\n @errors_handler\n @orm.db_session\n def get(self, service_id):\n service = ServiceRegistry[service_id]\n\n if service:\n return make_response(jsonify({\n 'status': True,\n 'message': 'Resource Found',\n 'item': service.to_dict()\n }), 200)\n\n @errors_handler\n @orm.db_session\n def delete(self, service_id):\n service = ServiceRegistry[service_id]\n\n if service:\n service.change = 'removed'\n db.commit()\n return make_response(jsonify({\n 'status': True,\n 'message': 'Successfully deleted. ID [{}]'.format(service_id),\n 'item': service.to_dict()\n }), 200)\n\n @errors_handler\n @orm.db_session\n def put(self, service_id):\n if not service_id: # insert\n try:\n values = service_parser.parse_args()\n except BadRequest as e:\n err_response = {'status': False}\n err_response.update(e.data)\n return make_response(jsonify(err_response), 400)\n\n service = ServiceRegistry(\n service=values['service'],\n version=values['version'],\n change='created'\n )\n db.commit()\n return redirect('/services/' + str(service.id))\n else: # update\n service = ServiceRegistry[service_id]\n\n if service:\n if request.form.get('service') is not None:\n service.service = request.form['service']\n if request.form.get('version') is not None:\n service.version = request.form['version']\n service.change = 'changed'\n db.commit()\n return make_response(jsonify({\n 'status': True,\n 'message': 'Resource updated',\n 'item': service.to_dict()\n }), 200)\n\n\napi = Api(app)\napi.add_resource(ServiceResourceList, '/services', strict_slashes=False)\napi.add_resource(ServiceResource, '/services/', strict_slashes=False)\n\n\ndef init_db(app):\n db.bind(\n provider=app.config['PROVIDER'],\n filename=app.config['DATABASE'],\n create_db=app.config['CREATE_DB']\n )\n db.generate_mapping(create_tables=True)\n return db\n\n\nif __name__ == '__main__':\n db = init_db(app)\n app.run(debug=app.config['DEBUG'])\n","sub_path":"service_server.py","file_name":"service_server.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"514552677","text":"import math\nn, k = map(int, input().split())\na = list(map(int, input().split()))\n\nlimit = math.log(n)\n\nif k >= n:\n for i in range(n-1):\n print(n, end=\" \")\n print(n)\n exit()\n\nfor _ in range(k):\n list = [1]*n\n for i, d in enumerate(a):\n if d == 0:\n continue\n for j 
in range(-d, d+1):\n if j == 0:\n continue\n if i+j >= 0:\n try:\n list[i+j] += 1\n # print(i, j)\n except:\n pass\n if _ > limit:\n break\n a = list.copy()\nfor i in range(n-1):\n print(a[i], end=\" \")\nprint(a[-1])\n","sub_path":"ABC/tokyokaijo2020/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"137525352","text":"### makeTrainTest.py ###\n# \n#\n#\n#\n\nimport sys\n\n\nfirstLine = True\nfor line in sys.stdin:\n if firstLine:\n firstLine = False\n continue\n data = line.split(\",\")\n\n output = \"\"\n for i in range(5, len(data)):\n x = data[i].strip()\n if i == 13:\n x = x.strip(\"\\\"\")\n output += x\n \n if i < len(data) - 1:\n output += \"\\t\"\n\n print(output)\n\n \n","sub_path":"MULTINOM_PREDICT/makeTrainTest.py","file_name":"makeTrainTest.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"42765493","text":"from Node import Node\n\nclass CircularLinkedList():\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n\n def push(self, val):\n if self.head == None:\n \n self.head = Node(val)\n self.tail = self.head\n self.head.next = self.tail\n self.tail.next = self.head\n\n else:\n new_node = Node(val)\n self.tail.next = new_node\n self.tail = new_node\n self.tail.next = self.head\n\n def print(self):\n curr = self.head\n print(curr.val)\n\n while curr.next != self.head:\n curr = curr.next\n print(curr.val)\n\n def get_size(self):\n curr = self.head\n count = 1\n\n while curr.next != self.head:\n curr = curr.next\n count += 1\n \n return count\n\n def even_odd(self):\n size = self.get_size()\n\n if size % 2 == 0:\n return [size // 2, size // 2]\n else:\n return [(size // 2) + 1, size - (size // 2 + 1)] \n\n def split(self): \n kind = self.even_odd()\n first = CircularLinkedList()\n second = CircularLinkedList()\n curr = self.head\n\n for i in range(kind[0]):\n first.push(curr.val)\n curr = curr.next\n \n for i in range(kind[1]):\n second.push(curr.val)\n curr = curr.next\n \n return [first, second]\n \n\n\n ","sub_path":"CircularLinkedList.py","file_name":"CircularLinkedList.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"301660595","text":"#!/usr/bin/env python3\n\n# This file is Copyright (c) 2019 Antti Lukats \n# This file is Copyright (c) 2015-2019 Florent Kermarrec \n# License: BSD\n\nimport argparse\n\nfrom migen import *\n\nfrom litex_boards.partner.platforms import mega65\n\nfrom litex.soc.interconnect import wishbone\n\nfrom litex.soc.cores.clock import *\nfrom litex.soc.integration.soc_core import *\nfrom litex.soc.integration.builder import *\n\nfrom litex.soc.cores.hyperbus import HyperRAM\n\nfrom liteeth.phy.rmii import LiteEthPHYRMII\nfrom liteeth.mac import LiteEthMAC\n\n\n#from hyper_memory import *\n#self.submodules.hyperram = HyperRAM(platform.request(\"hyperram\"))\n#self.add_wb_slave(mem_decoder(self.mem_map[\"hyperram\"]), self.hyperram.bus)\n#self.add_memory_region(\"hyperram\", self.mem_map[\"hyperram\"], 8*1024*1024)\n\n\n# CRG ----------------------------------------------------------------------------------------------\n\nclass _CRG(Module):\n def __init__(self, platform, sys_clk_freq):\n self.clock_domains.cd_sys = ClockDomain()\n self.clock_domains.cd_eth = ClockDomain()\n\n # # #\n self.cd_sys.clk.attr.add(\"keep\")\n 
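# S7PLL, LiteX's wrapper around the Xilinx 7-series PLL, generates the clocks\n        # used below: the sys clock at sys_clk_freq and a 50 MHz Ethernet clock,\n        # both derived from the board's 100 MHz input, with reset tied to cpu_reset.\n        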
self.submodules.pll = pll = S7PLL(speedgrade=-1)\n self.comb += pll.reset.eq(platform.request(\"cpu_reset\"))\n\n pll.register_clkin(platform.request(\"clk100\"), 100e6)\n pll.create_clkout(self.cd_sys, sys_clk_freq)\n pll.create_clkout(self.cd_eth, 50e6)\n\n\n# BaseSoC ------------------------------------------------------------------------------------------\n\nclass BaseSoC(SoCCore):\n mem_map = {\n# \"spiflash\": 0x20000000,\n \"hyperram\": 0x20000000,\n }\n mem_map.update(SoCCore.mem_map)\n\n def __init__(self, sys_clk_freq=int(100e6), **kwargs):\n platform = mega65.Platform()\n\n SoCCore.__init__(self, platform, clk_freq=sys_clk_freq,\n ident=\"MEGA65\", ident_version=True,\n integrated_rom_size=0x8000,\n integrated_main_ram_size=0x10000,\n **kwargs)\n\n\t# can we just use the clock without PLL ?\n\n self.submodules.crg = _CRG(platform, sys_clk_freq)\n self.counter = counter = Signal(32)\n self.sync += counter.eq(counter + 1)\n \n\t#\n led_red = platform.request(\"user_led\", 0)\n self.comb += led_red.eq(counter[23])\n\n# led_green = platform.request(\"user_led_green\")\n# self.comb += led_green.eq(counter[25])\n\n\n# hyperram_pads = platform.request(\"hyperram\")\n# self.submodules.hyperram = HyperRAM(hyperram_pads)\n# self.add_wb_slave(mem_decoder(self.mem_map[\"hyperram\"]), self.hyperram.bus)\n# self.add_memory_region(\"hyperram\", self.mem_map[\"hyperram\"] | self.shadow_base, 8*1024*1024)\n\n self.submodules.hyperram = HyperRAM(platform.request(\"hyperram\"))\n self.add_wb_slave(mem_decoder(self.mem_map[\"hyperram\"]), self.hyperram.bus)\n self.add_memory_region(\"hyperram\", self.mem_map[\"hyperram\"], 8*1024*1024)\n\n\n\nclass EthernetSoC(BaseSoC):\n mem_map = {\n \"ethmac\": 0x30000000, # (shadow @0xb0000000)\n }\n mem_map.update(BaseSoC.mem_map)\n\n def __init__(self, **kwargs):\n BaseSoC.__init__(self, **kwargs)\n\n self.submodules.ethphy = LiteEthPHYRMII(self.platform.request(\"eth_clocks\"),\n self.platform.request(\"eth\"))\n self.add_csr(\"ethphy\")\n self.submodules.ethmac = LiteEthMAC(phy=self.ethphy, dw=32,\n interface=\"wishbone\", endianness=self.cpu.endianness)\n self.add_wb_slave(self.mem_map[\"ethmac\"], self.ethmac.bus, 0x2000)\n self.add_memory_region(\"ethmac\", self.mem_map[\"ethmac\"] | self.shadow_base, 0x2000)\n self.add_csr(\"ethmac\")\n self.add_interrupt(\"ethmac\")\n\n self.ethphy.crg.cd_eth_rx.clk.attr.add(\"keep\")\n self.ethphy.crg.cd_eth_tx.clk.attr.add(\"keep\")\n self.platform.add_period_constraint(self.ethphy.crg.cd_eth_rx.clk, 1e9/12.5e6)\n self.platform.add_period_constraint(self.ethphy.crg.cd_eth_tx.clk, 1e9/12.5e6)\n self.platform.add_false_path_constraints(\n self.crg.cd_sys.clk,\n self.ethphy.crg.cd_eth_rx.clk,\n self.ethphy.crg.cd_eth_tx.clk)\n\n\n# Build --------------------------------------------------------------------------------------------\n\ndef main():\n parser = argparse.ArgumentParser(description=\"LiteX on MEGA65\")\n builder_args(parser)\n# soc_sdram_args(parser)\n soc_core_args(parser)\n\n parser.add_argument(\"--with-ethernet\", action=\"store_true\",\n help=\"enable Ethernet support\")\n\n args = parser.parse_args()\n\n cls = EthernetSoC if args.with_ethernet else BaseSoC\n soc = cls(**soc_core_argdict(args))\n\n builder = Builder(soc, **builder_argdict(args))\n builder.build()\n\n\nif __name__ == \"__main__\":\n 
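# Entry point: parse the command-line options and run the LiteX builder.\n    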
main()\n","sub_path":"litex_boards/partner/targets/mega65.py","file_name":"mega65.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"183807457","text":"import itertools\n\nimport allel\nimport msprime # type: ignore\nimport numpy as np\nimport pytest\nimport xarray as xr\nfrom allel import hudson_fst\n\nfrom sgkit import (\n Fst,\n Garud_H,\n Tajimas_D,\n count_cohort_alleles,\n count_variant_alleles,\n create_genotype_call_dataset,\n divergence,\n diversity,\n pbs,\n simulate_genotype_call_dataset,\n variables,\n)\nfrom sgkit.window import window\n\nfrom .test_aggregation import get_dataset\n\n\ndef ts_to_dataset(ts, chunks=None, samples=None):\n \"\"\"\n Convert the specified tskit tree sequence into an sgkit dataset.\n Note this just generates haploids for now. With msprime 1.0, we'll be\n able to generate diploid/whatever-ploid individuals easily.\n \"\"\"\n if samples is None:\n samples = ts.samples()\n tables = ts.dump_tables()\n alleles = []\n genotypes = []\n for var in ts.variants(samples=samples):\n alleles.append(var.alleles)\n genotypes.append(var.genotypes)\n alleles = np.array(alleles).astype(\"S\")\n genotypes = np.expand_dims(genotypes, axis=2)\n\n ds = create_genotype_call_dataset(\n variant_contig_names=[\"1\"],\n variant_contig=np.zeros(len(tables.sites), dtype=int),\n variant_position=tables.sites.position.astype(int),\n variant_allele=alleles,\n sample_id=np.array([f\"tsk_{u}\" for u in samples]).astype(\"U\"),\n call_genotype=genotypes,\n )\n if chunks is not None:\n ds = ds.chunk(dict(zip([\"variants\", \"samples\"], chunks)))\n return ds\n\n\ndef add_cohorts(ds, ts, n_cohorts=1, cohort_key_names=[\"cohorts_0\", \"cohorts_1\"]):\n subsets = np.array_split(ts.samples(), n_cohorts)\n sample_cohorts = np.concatenate(\n [np.full_like(subset, i) for i, subset in enumerate(subsets)]\n )\n ds[\"sample_cohort\"] = xr.DataArray(sample_cohorts, dims=\"samples\")\n if cohort_key_names is not None:\n cohort_names = [f\"co_{i}\" for i in range(n_cohorts)]\n coords = {k: cohort_names for k in cohort_key_names}\n ds = ds.assign_coords(coords)\n return ds, subsets\n\n\n@pytest.mark.parametrize(\"sample_size\", [2, 3, 10, 100])\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (10, -1)])\n@pytest.mark.parametrize(\n \"cohort_allele_count\",\n [None, variables.cohort_allele_count, \"cohort_allele_count_non_default\"],\n)\ndef test_diversity(sample_size, chunks, cohort_allele_count):\n ts = msprime.simulate(sample_size, length=100, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, cohort_key_names=[\"cohorts\"]) # type: ignore[no-untyped-call]\n if cohort_allele_count is not None:\n ds = count_cohort_alleles(ds, merge=False).rename(\n {variables.cohort_allele_count: cohort_allele_count}\n )\n ds = ds.assign_coords({\"cohorts\": [\"co_0\"]})\n ds = diversity(ds, cohort_allele_count=cohort_allele_count)\n else:\n ds = ds.assign_coords({\"cohorts\": [\"co_0\"]})\n ds = diversity(ds)\n\n div = ds.stat_diversity.sum(axis=0, skipna=False).sel(cohorts=\"co_0\").values\n ts_div = ts.diversity(span_normalise=False)\n np.testing.assert_allclose(div, ts_div)\n\n\n@pytest.mark.parametrize(\"sample_size\", [10])\ndef test_diversity__windowed(sample_size):\n ts = msprime.simulate(sample_size, length=200, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]\n ds, subsets = 
add_cohorts(ds, ts, cohort_key_names=[\"cohorts\"]) # type: ignore[no-untyped-call]\n ds = window(ds, size=25)\n ds = diversity(ds)\n div = ds[\"stat_diversity\"].sel(cohorts=\"co_0\").compute()\n\n # Calculate diversity using tskit windows\n # Find the variant positions so we can have windows with a fixed number of variants\n positions = ts.tables.sites.position\n windows = np.concatenate(([0], positions[::25][1:], [ts.sequence_length]))\n ts_div = ts.diversity(windows=windows, span_normalise=False)\n np.testing.assert_allclose(div, ts_div)\n\n # Calculate diversity using scikit-allel moving_statistic\n # (Don't use windowed_diversity, since it treats the last window differently)\n ds = count_variant_alleles(ts_to_dataset(ts)) # type: ignore[no-untyped-call]\n ac = ds[\"variant_allele_count\"].values\n mpd = allel.mean_pairwise_difference(ac, fill=0)\n ska_div = allel.moving_statistic(mpd, np.sum, size=25)\n np.testing.assert_allclose(\n div[:-1], ska_div\n ) # scikit-allel has final window missing\n\n\ndef test_diversity__missing_call_genotype():\n ds = xr.Dataset()\n with pytest.raises(ValueError, match=\"call_genotype not present\"):\n diversity(ds)\n\n\n@pytest.mark.parametrize(\n \"sample_size, n_cohorts\",\n [(2, 2), (3, 2), (3, 3), (10, 2), (10, 3), (10, 4), (100, 2), (100, 3), (100, 4)],\n)\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (10, -1)])\ndef test_divergence(sample_size, n_cohorts, chunks):\n ts = msprime.simulate(sample_size, length=100, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts) # type: ignore[no-untyped-call]\n ds = divergence(ds)\n div = ds.stat_divergence.sum(axis=0, skipna=False).values\n\n # entries on the diagonal are diversity values\n for i in range(n_cohorts):\n ts_div = ts.diversity([subsets[i]], span_normalise=False)\n np.testing.assert_allclose(div[i, i], ts_div)\n\n # test off-diagonal entries, by replacing diagonal with NaNs\n np.fill_diagonal(div, np.nan)\n ts_div = np.full([n_cohorts, n_cohorts], np.nan)\n for i, j in itertools.combinations(range(n_cohorts), 2):\n ts_div[i, j] = ts.divergence([subsets[i], subsets[j]], span_normalise=False)\n ts_div[j, i] = ts.divergence([subsets[j], subsets[i]], span_normalise=False)\n np.testing.assert_allclose(div, ts_div)\n\n\n@pytest.mark.parametrize(\"sample_size, n_cohorts\", [(10, 2)])\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (50, -1)])\ndef test_divergence__windowed(sample_size, n_cohorts, chunks):\n ts = msprime.simulate(sample_size, length=200, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts) # type: ignore[no-untyped-call]\n ds = window(ds, size=25)\n ds = divergence(ds)\n div = ds[\"stat_divergence\"].values\n # test off-diagonal entries, by replacing diagonal with NaNs\n div[:, np.arange(2), np.arange(2)] = np.nan\n\n # Calculate diversity using tskit windows\n # Find the variant positions so we can have windows with a fixed number of variants\n positions = ts.tables.sites.position\n windows = np.concatenate(([0], positions[::25][1:], [ts.sequence_length]))\n n_windows = len(windows) - 1\n ts_div = np.full([n_windows, n_cohorts, n_cohorts], np.nan)\n for i, j in itertools.combinations(range(n_cohorts), 2):\n ts_div[:, i, j] = ts.divergence(\n [subsets[i], subsets[j]], windows=windows, span_normalise=False\n )\n ts_div[:, j, i] = ts_div[:, i, j]\n np.testing.assert_allclose(div, 
ts_div)\n\n\n@pytest.mark.parametrize(\"sample_size, n_cohorts\", [(10, 2)])\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (50, -1)])\n@pytest.mark.xfail() # combine with test_divergence__windowed when this is passing\ndef test_divergence__windowed_scikit_allel_comparison(sample_size, n_cohorts, chunks):\n ts = msprime.simulate(sample_size, length=200, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts) # type: ignore[no-untyped-call]\n ds = window(ds, size=25)\n ds = divergence(ds)\n div = ds[\"stat_divergence\"].values\n # test off-diagonal entries, by replacing diagonal with NaNs\n div[:, np.arange(2), np.arange(2)] = np.nan\n\n # Calculate divergence using scikit-allel moving_statistic\n # (Don't use windowed_divergence, since it treats the last window differently)\n ds1 = count_variant_alleles(ts_to_dataset(ts, samples=ts.samples()[:1])) # type: ignore[no-untyped-call]\n ds2 = count_variant_alleles(ts_to_dataset(ts, samples=ts.samples()[1:])) # type: ignore[no-untyped-call]\n ac1 = ds1[\"variant_allele_count\"].values\n ac2 = ds2[\"variant_allele_count\"].values\n mpd = allel.mean_pairwise_difference_between(ac1, ac2, fill=0)\n ska_div = allel.moving_statistic(mpd, np.sum, size=25) # noqa: F841\n # TODO: investigate why numbers are different\n np.testing.assert_allclose(\n div[:-1], ska_div\n ) # scikit-allel has final window missing\n\n\ndef test_divergence__missing_calls():\n ds = get_dataset(\n [\n [[0, 0], [-1, -1], [-1, -1]], # all of cohort 1 calls are missing\n ]\n )\n ds[\"sample_cohort\"] = xr.DataArray(np.array([0, 1, 1]), dims=\"samples\")\n ds = divergence(ds)\n np.testing.assert_equal(ds[\"stat_divergence\"].values[0, 1], np.nan)\n\n\n@pytest.mark.parametrize(\"sample_size\", [2, 3, 10, 100])\ndef test_Fst__Hudson(sample_size):\n # scikit-allel can only calculate Fst for pairs of cohorts (populations)\n n_cohorts = 2\n ts = msprime.simulate(sample_size, length=100, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts) # type: ignore[no-untyped-call]\n n_variants = ds.dims[\"variants\"]\n ds = window(ds, size=n_variants) # single window\n ds = Fst(ds, estimator=\"Hudson\")\n fst = ds.stat_Fst.sel(cohorts_0=\"co_0\", cohorts_1=\"co_1\").values\n\n # scikit-allel\n ac1 = ds.cohort_allele_count.values[:, 0, :]\n ac2 = ds.cohort_allele_count.values[:, 1, :]\n num, den = hudson_fst(ac1, ac2)\n ska_fst = np.sum(num) / np.sum(den)\n\n np.testing.assert_allclose(fst, ska_fst)\n\n\n@pytest.mark.parametrize(\n \"sample_size, n_cohorts\",\n [(2, 2), (3, 2), (3, 3), (10, 2), (10, 3), (10, 4), (100, 2), (100, 3), (100, 4)],\n)\ndef test_Fst__Nei(sample_size, n_cohorts):\n ts = msprime.simulate(sample_size, length=100, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts) # type: ignore[no-untyped-call]\n n_variants = ds.dims[\"variants\"]\n ds = window(ds, size=n_variants) # single window\n ds = Fst(ds, estimator=\"Nei\")\n fst = ds.stat_Fst.values\n\n ts_fst = np.full([1, n_cohorts, n_cohorts], np.nan)\n for i, j in itertools.combinations(range(n_cohorts), 2):\n ts_fst[0, i, j] = ts.Fst([subsets[i], subsets[j]])\n ts_fst[0, j, i] = ts_fst[0, i, j]\n np.testing.assert_allclose(fst, ts_fst)\n\n\ndef test_Fst__unknown_estimator():\n ts = msprime.simulate(2, length=100, mutation_rate=0.05, random_seed=42)\n ds = 
ts_to_dataset(ts) # type: ignore[no-untyped-call]\n with pytest.raises(\n ValueError, match=\"Estimator 'Unknown' is not a known estimator\"\n ):\n Fst(ds, estimator=\"Unknown\")\n\n\n@pytest.mark.parametrize(\n \"sample_size, n_cohorts\",\n [(10, 2), (10, 3)],\n)\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (50, -1)])\ndef test_Fst__windowed(sample_size, n_cohorts, chunks):\n ts = msprime.simulate(sample_size, length=200, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts) # type: ignore[no-untyped-call]\n ds = window(ds, size=25)\n fst_ds = Fst(ds, estimator=\"Nei\")\n fst = fst_ds[\"stat_Fst\"].values\n\n # Calculate Fst using tskit windows\n # Find the variant positions so we can have windows with a fixed number of variants\n positions = ts.tables.sites.position\n windows = np.concatenate(([0], positions[::25][1:], [ts.sequence_length]))\n n_windows = len(windows) - 1\n ts_fst = np.full([n_windows, n_cohorts, n_cohorts], np.nan)\n for i, j in itertools.combinations(range(n_cohorts), 2):\n ts_fst[:, i, j] = ts.Fst(\n [subsets[i], subsets[j]], windows=windows, span_normalise=False\n )\n ts_fst[:, j, i] = ts_fst[:, i, j]\n\n np.testing.assert_allclose(fst, ts_fst)\n\n # scikit-allel\n fst_ds = Fst(ds, estimator=\"Hudson\")\n for i, j in itertools.combinations(range(n_cohorts), 2):\n fst = fst_ds[\"stat_Fst\"].sel(cohorts_0=f\"co_{i}\", cohorts_1=f\"co_{j}\").values\n\n ac_i = fst_ds.cohort_allele_count.values[:, i, :]\n ac_j = fst_ds.cohort_allele_count.values[:, j, :]\n ska_fst = allel.moving_hudson_fst(ac_i, ac_j, size=25)\n\n np.testing.assert_allclose(\n fst[:-1], ska_fst\n ) # scikit-allel has final window missing\n\n\n@pytest.mark.parametrize(\"sample_size\", [2, 3, 10, 100])\ndef test_Tajimas_D(sample_size):\n ts = msprime.simulate(sample_size, length=100, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, cohort_key_names=None) # type: ignore[no-untyped-call]\n n_variants = ds.dims[\"variants\"]\n ds = window(ds, size=n_variants) # single window\n ds = Tajimas_D(ds)\n d = ds.stat_Tajimas_D.compute()\n ts_d = ts.Tajimas_D()\n np.testing.assert_allclose(d, ts_d)\n\n\n@pytest.mark.parametrize(\n \"sample_size, n_cohorts\",\n [(10, 3), (20, 4)],\n)\ndef test_pbs(sample_size, n_cohorts):\n ts = msprime.simulate(sample_size, length=100, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts, cohort_key_names=[\"cohorts_0\", \"cohorts_1\", \"cohorts_2\"]) # type: ignore[no-untyped-call]\n n_variants = ds.dims[\"variants\"]\n ds = window(ds, size=n_variants) # single window\n\n ds = pbs(ds)\n\n # scikit-allel\n for i, j, k in itertools.combinations(range(n_cohorts), 3):\n stat_pbs = (\n ds[\"stat_pbs\"]\n .sel(cohorts_0=f\"co_{i}\", cohorts_1=f\"co_{j}\", cohorts_2=f\"co_{k}\")\n .values\n )\n\n ac_i = ds.cohort_allele_count.values[:, i, :]\n ac_j = ds.cohort_allele_count.values[:, j, :]\n ac_k = ds.cohort_allele_count.values[:, k, :]\n\n ska_pbs_value = allel.pbs(ac_i, ac_j, ac_k, window_size=n_variants)\n\n np.testing.assert_allclose(stat_pbs, ska_pbs_value)\n\n\n@pytest.mark.parametrize(\n \"sample_size, n_cohorts, cohorts, cohort_indexes\",\n [\n (10, 3, None, None),\n (20, 4, None, None),\n (20, 4, [(0, 1, 2), (3, 1, 2)], [(0, 1, 2), (3, 1, 2)]),\n ],\n)\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (50, -1)])\ndef 
test_pbs__windowed(sample_size, n_cohorts, cohorts, cohort_indexes, chunks):\n ts = msprime.simulate(sample_size, length=200, mutation_rate=0.05, random_seed=42)\n ds = ts_to_dataset(ts, chunks) # type: ignore[no-untyped-call]\n ds, subsets = add_cohorts(ds, ts, n_cohorts, cohort_key_names=[\"cohorts_0\", \"cohorts_1\", \"cohorts_2\"]) # type: ignore[no-untyped-call]\n ds = window(ds, size=25)\n\n ds = pbs(ds, cohorts=cohorts)\n\n # scikit-allel\n for i, j, k in itertools.combinations(range(n_cohorts), 3):\n stat_pbs = (\n ds[\"stat_pbs\"]\n .sel(cohorts_0=f\"co_{i}\", cohorts_1=f\"co_{j}\", cohorts_2=f\"co_{k}\")\n .values\n )\n\n if cohort_indexes is not None and (i, j, k) not in cohort_indexes:\n np.testing.assert_array_equal(stat_pbs, np.full_like(stat_pbs, np.nan))\n else:\n ac_i = ds.cohort_allele_count.values[:, i, :]\n ac_j = ds.cohort_allele_count.values[:, j, :]\n ac_k = ds.cohort_allele_count.values[:, k, :]\n\n ska_pbs_value = allel.pbs(ac_i, ac_j, ac_k, window_size=25)\n\n # scikit-allel has final window missing\n np.testing.assert_allclose(stat_pbs[:-1], ska_pbs_value)\n\n\n@pytest.mark.parametrize(\n \"n_variants, n_samples, n_contigs, n_cohorts, cohorts, cohort_indexes\",\n [\n (9, 5, 1, 1, None, None),\n (9, 5, 1, 2, None, None),\n (9, 5, 1, 2, [1], [1]),\n (9, 5, 1, 2, [\"co_1\"], [1]),\n ],\n)\n@pytest.mark.parametrize(\"chunks\", [(-1, -1), (5, -1)])\ndef test_Garud_h(\n n_variants, n_samples, n_contigs, n_cohorts, cohorts, cohort_indexes, chunks\n):\n ds = simulate_genotype_call_dataset(\n n_variant=n_variants, n_sample=n_samples, n_contig=n_contigs\n )\n ds = ds.chunk(dict(zip([\"variants\", \"samples\"], chunks)))\n subsets = np.array_split(ds.samples.values, n_cohorts)\n sample_cohorts = np.concatenate(\n [np.full_like(subset, i) for i, subset in enumerate(subsets)]\n )\n ds[\"sample_cohort\"] = xr.DataArray(sample_cohorts, dims=\"samples\")\n cohort_names = [f\"co_{i}\" for i in range(n_cohorts)]\n coords = {k: cohort_names for k in [\"cohorts\"]}\n ds = ds.assign_coords(coords) # type: ignore[no-untyped-call]\n ds = window(ds, size=3)\n\n gh = Garud_H(ds, cohorts=cohorts)\n h1 = gh.stat_Garud_h1.values\n h12 = gh.stat_Garud_h12.values\n h123 = gh.stat_Garud_h123.values\n h2_h1 = gh.stat_Garud_h2_h1.values\n\n # scikit-allel\n for c in range(n_cohorts):\n if cohort_indexes is not None and c not in cohort_indexes:\n # cohorts that were not computed should be nan\n np.testing.assert_array_equal(h1[:, c], np.full_like(h1[:, c], np.nan))\n np.testing.assert_array_equal(h12[:, c], np.full_like(h12[:, c], np.nan))\n np.testing.assert_array_equal(h123[:, c], np.full_like(h123[:, c], np.nan))\n np.testing.assert_array_equal(\n h2_h1[:, c], np.full_like(h2_h1[:, c], np.nan)\n )\n else:\n gt = ds.call_genotype.values[:, sample_cohorts == c, :]\n ska_gt = allel.GenotypeArray(gt)\n ska_ha = ska_gt.to_haplotypes()\n ska_h = allel.moving_garud_h(ska_ha, size=3)\n\n np.testing.assert_allclose(h1[:, c], ska_h[0])\n np.testing.assert_allclose(h12[:, c], ska_h[1])\n np.testing.assert_allclose(h123[:, c], ska_h[2])\n np.testing.assert_allclose(h2_h1[:, c], ska_h[3])\n\n\ndef test_Garud_h__raise_on_non_diploid():\n ds = simulate_genotype_call_dataset(n_variant=10, n_sample=10, n_ploidy=3)\n with pytest.raises(\n NotImplementedError, match=\"Garud H only implemented for diploid genotypes\"\n ):\n Garud_H(ds)\n\n\ndef test_Garud_h__raise_on_no_windows():\n ds = simulate_genotype_call_dataset(n_variant=10, n_sample=10)\n\n with pytest.raises(ValueError, match=\"Dataset must be windowed 
for Garud_H\"):\n Garud_H(ds)\n","sub_path":"sgkit/tests/test_popgen.py","file_name":"test_popgen.py","file_ext":"py","file_size_in_byte":18434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"332573186","text":"import pkg_resources\npkg_resources.require('mock')\npkg_resources.require('fa-archiver')\npkg_resources.require('pml')\n\nimport numpy\nimport unittest\nimport jump_bba\nimport pml\nimport aphla as ap\nimport mock\n\n\nclass SelectDataTest(unittest.TestCase):\n\n def setUp(self):\n self.data = numpy.zeros((2000,2,2))\n self.data[:,0,0] = numpy.arange(2000)\n self.data[:,0,1] = numpy.arange(2000)\n self.data[100,1,0] = 3\n self.data[1100,1,1] = 4\n self.exc_high = mock.MagicMock(count=1000, start_time=100)\n self.exc_low = mock.MagicMock(count=1000, start_time=1100)\n\n def test_select_data_throws_AssertionError_if_exc_high_low_different_counts(self):\n self.exc_low.count = 101\n self.assertRaises(AssertionError, jump_bba.select_data, self.data,\n pml.X, self.exc_high, self.exc_low)\n\n def test_select_data_returns_correct_shape(self):\n high_data, low_data = jump_bba.select_data(self.data, pml.X,\n self.exc_high, self.exc_low)\n expected_shape = (100, 1)\n self.assertEqual(high_data.shape, expected_shape)\n self.assertEqual(low_data.shape, expected_shape)\n\n def test_select_data_selects_first_timestamp(self):\n high_data_x, _ = jump_bba.select_data(self.data, pml.X,\n self.exc_high, self.exc_low)\n _, low_data_y = jump_bba.select_data(self.data, pml.Y,\n self.exc_high, self.exc_low)\n self.assertEqual(high_data_x[0,0], 3)\n self.assertEqual(low_data_y[0,0], 4)\n\n\nclass TestJumpBba(unittest.TestCase):\n\n def setUp(self):\n pml.initialise()\n\n @mock.patch('pml.excite.caput')\n @mock.patch('jump_bba.caget')\n @mock.patch('jump_bba.caput')\n def test_jump_bba_sets_expected_pvs(self, jump_caput, jump_caget, excite_caput):\n jump_caget.return_value = 10\n quad = ap.getElements('QUAD')[0]\n print(quad.pv())\n # one 1Hz cycle\n osc = pml.excite.Oscillation(1, 0, 1, 1)\n jump_bba.jump_bba(quad, 1, osc)\n\n jump_caput.assert_has_calls([mock.call('SR01A-PC-Q1D-01:SETI', 10.5),\n mock.call('SR01A-PC-Q1D-01:SETI', 9.5),\n mock.call('SR01A-PC-Q1D-01:SETI', 10)])\n\n # Note you can assert excite_caput's calls to be [] and it will tell\n # what they actually were.\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_jump_bba.py","file_name":"test_jump_bba.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"192043664","text":"import urllib\ndef read_textProfanity():\n\tquotes = open(\"./example_text.txt\")\n\tcontent_file = quotes.read()\n\tprint(content_file)\n\tquotes.close()\n\tcheck_profanity(content_file)\n\n\ndef check_profanity(text_check):\n\tconnection = urllib.urlopen(\"http://www.wdyl.com/profanity?q=\"+text_check)\n\toutput = connection.read()\n\tprint(output)\n\tconnection.close()\nread_textProfanity()\n","sub_path":"check_profanity.py","file_name":"check_profanity.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"71169823","text":"import os\nimport sys\nimport six\nfrom bayesdend.autoencoder import *\nfrom bayesdend.utils.data_util import load_real_data, read_config, load_results\n\n\"\"\"Only change this\"\"\"\nname_of_exp = 'dend36_soma'\n\n# replicate several important parameters\nconfig = 
read_config(name_of_exp)\ndata_file = '{}/{}/{}'.format(config['save_path'], name_of_exp, 'data_clean.pkl')\nos.environ['CUDA_VISIBLE_DEVICES'] = str(config['GPU'])\ndata_dict = load_real_data(data_file)\nT_total = data_dict['traces'].shape[0]\n\nif config['model_mode'] == 'soma_only':\n\n data_feed = {'traces': data_dict['traces'][:, :10],\n 'noises': data_dict['noises'][:, :10],\n 'masks': data_dict['masks'][:, :10]}\n\nelse:\n spine_ind = data_dict['n_soma'] + data_dict['n_dend'] + 1\n inf_result_path = '{}/{}/{}'.format(config['save_path'], 'dend36_soma', 'results.pkl')\n inf_result = load_results(inf_result_path)\n inf_spikes = np.expand_dims(inf_result['spikes'], axis=1)\n data_feed = {'traces': data_dict['traces'][:, spine_ind:spine_ind + 1],\n 'noises': data_dict['noises'][:, spine_ind:spine_ind + 1],\n 'masks': data_dict['masks'][:, spine_ind:spine_ind + 1],\n 'soma_spikes': inf_spikes}\n\ndata_feed_batch = {}\nfor key, value in six.iteritems(data_feed):\n data_feed_batch[key] = np.expand_dims(data_feed[key], axis=0)\n\nNc = data_feed['traces'].shape[1]\nupsample = config['upsample']\nfr = config['firing_rate']\nnoise_flag = config['noise_flag']\n\n# rebuild the model\nmodel = AutoEncoder(config['model_mode'], 1, T_total, Nc, fr, noise_flag, upsample, False)\n\n# restore model\nsess = tf.InteractiveSession()\nrestorer = tf.train.Saver()\nmodel_filename = '{}/{}/{}'.format(config['save_path'], name_of_exp, 'model')\nrestorer.restore(sess, model_filename)\nprint(\"Model resotred.\")\n\ntest_fd = {model.data_ph[key]: value\n for key, value in six.iteritems(data_feed_batch)}\n\n# inference and reconstruction\nspike_inf, r_e_d, r_v_d, r_e, r_v, log_noise_est, loss = sess.run([\n model.Q.mean(), model.Erecon_d, model.Vrecon_d, model.Erecon, model.Vrecon,\n model.log_noise, model.obj], test_fd)\n\n# generative filters\nif config['model_mode'] == 'soma_only':\n filters, baseline, scaling = sess.run([model.spike_filters,\n model.b, model.A], test_fd)\nelse:\n filters, baseline, scaling, prop_factor = sess.run([model.spike_filters,\n model.b, model.A, model.prop_factor], test_fd)\n\nspike_inf = np.squeeze(spike_inf)\nr_e_d = np.squeeze(r_e_d)\nr_v_d = np.squeeze(r_v_d)\nr_e = np.squeeze(r_e)\nr_v = np.squeeze(r_v)\nfilters = np.squeeze(filters)\nlog_noise_est = np.squeeze(log_noise_est)\n\nfinal_dict = {'spikes': spike_inf,\n 'rec_mean': r_e,\n 'rec_var': r_v,\n 'rec_mean_d': r_e_d,\n 'rec_var_d': r_v_d,\n 'log_noise_est': log_noise_est,\n 'filters': filters,\n 'baseline': baseline,\n 'scaling': scaling,\n 'loss': loss}\n\nif config['model_mode'] == 'spine_only':\n final_dict['prop_factor'] = prop_factor\n\nto_save = '{}/{}/{}'.format(config['save_path'], name_of_exp, 'results.pkl')\nwith open(to_save, 'wb') as f:\n pickle.dump(final_dict, f)\nprint('Results saved.')\n","sub_path":"bayesdend/data_post.py","file_name":"data_post.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"288257725","text":"'''\n\n intro to TensorFlow variables\n\n'''\n\nimport tensorflow as tf\n\n# another y = Wx + b example\n\nW = tf.Variable([2.50, 4.0], tf.float32, name='var_W')\nx = tf.placeholder(tf.float32, name='x')\nb = tf.Variable([5.0, 10.0], tf.float32, name='var_b')\n\ny = W * x + b\n\n# because we have variables in the mix now, we must initialize them before\n# starting the Session\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n # this is a required line to initialize the global 
init object\n sess.run(init)\n print('y = {}'.format(\n sess.run(\n y,\n feed_dict={\n x:[10, 100]\n }\n )\n ))\n\n\n# new session\nnumber = tf.Variable(2)\nmultiplier = tf.Variable(1)\n\ninit = tf.global_variables_initializer()\n\n# in this line of code, result is the computation node (tensor) while number\n# will contain the value\nresult = number.assign(tf.multiply(number, multiplier))\n\nwith tf.Session() as sess:\n sess.run(init)\n for i in range(10):\n # sess.run(result) will execute: number *= multiplier\n print('result = number {} * multiplier {} = {}'.format(\n sess.run(number),\n sess.run(multiplier),\n sess.run(\n result\n )))\n # in this loop the multiplier is incremented\n print('Increment multiplier, new value = {}'.format(\n sess.run(\n multiplier.assign_add(1)\n )))\n","sub_path":"foundations/fundamentals/tf_variables.py","file_name":"tf_variables.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"140613387","text":"import click\nfrom model.db_model import Region\nfrom .spider import get_projects, get_rooms\n\n\n@click.group(chain=True)\n@click.pass_context\ndef cli(ctx):\n if ctx.invoked_subcommand is None:\n click.echo('please use a command!')\n\n\n@cli.command()\n@click.argument('region', nargs=1)\ndef sync_project(region):\n if not Region.region_map.get(region):\n click.echo(f'region({region}) code not found!')\n return\n get_projects(region)\n click.echo(f'sync {Region.region_map.get(region)}({region}) successful!')\n\n\n@cli.command()\n@click.argument('building_id', nargs=1)\ndef sync_rooms(building_id):\n get_rooms(building_id)\n click.echo(f'sync {building_id} successful!')\n\n\nif __name__ == \"__main__\":\n cli() # pylint: disable=no-value-for-parameter\n","sub_path":"task/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"263163248","text":"from utils.LoadStructuralData import load_struct_data\nfrom utils.LoadFunctionalData import load_funct_data\nfrom gat_impl.ExecuteGAT import GATModel\nfrom keras.activations import relu\nimport numpy as np\nimport os\nimport pickle as pkl\nimport itertools\nimport math\nimport random\nimport pprint\n\ncached_data = {}\ngat_result_dir = os.path.join(os.path.dirname(os.path.join(os.path.dirname(__file__))), 'Results', 'GAT_results')\nif not os.path.exists(gat_result_dir):\n os.makedirs(gat_result_dir)\n\n\n# utility class for storing together the hyper-parameters of a GAT model into an object\nclass ConfigGAT(object):\n\n def __init__(self, updated_params=None):\n '''\n Initialize the object embodying the configuration of a GAT model.\n :param updated_params: specific hyper-parameters used by current GAT configuration\n '''\n self.params = {\n # architecture hyper-parameters\n 'name': 'GAT',\n 'hidden_units': [30, 20, 15],\n 'attention_heads': [3, 3, 2],\n 'include_ew': False,\n 'readout_aggregator': GATModel.master_node_aggregator,\n 'use_batch_norm': True,\n 'non_linearity': relu,\n # training hyper.\n 'load_specific_data': load_struct_data,\n 'pers_traits_selection': ['NEO.NEOFAC_A', 'NEO.NEOFAC_C', 'NEO.NEOFAC_E', 'NEO.NEOFAC_N', 'NEO.NEOFAC_O'],\n 'learning_rate': 0.0001,\n 'decay_rate': 0.0005,\n 'attn_drop': 0.6,\n 'batch_size': 32,\n 'functional_dim': 50,\n 'scan_session': 1,\n 'num_epochs': 250,\n 'pq_threshold': np.inf,\n 'train_prog_threshold': 0.1,\n 'k_strip_epochs': 5,\n # nested CV 
hyper.\n 'nested_CV_level': 'outer',\n 'k_outer': 5,\n 'k_inner': 5,\n 'eval_fold_out': 4,\n 'eval_fold_in': 1}\n\n # update the default hyper-parameters\n self.update(update_hyper=updated_params)\n if type(self.params['pers_traits_selection']) is not list:\n raise ValueError('The trait selection should be list, not %s' % type(self.params['pers_traits_selection']))\n if self.params['nested_CV_level'] not in {'inner', 'outer'}:\n raise ValueError('Possible CV levels: inner, outer')\n if self.params['name'] != 'GAT':\n raise ValueError('Name of the Graph Attention Network is GAT, not %s' % self.params['name'])\n if len(self.params['attention_heads']) != len(self.params['hidden_units']):\n raise ValueError('Attention heads and hidden units are not specified for the same nr. of layers')\n # values for the PQ threshold:\n pq_thresholds = {GATModel.master_node_aggregator: 0.05,\n GATModel.concat_feature_aggregator: 0.25,\n GATModel.average_feature_aggregator: 0.5}\n self.params['pq_threshold'] = pq_thresholds[self.params['readout_aggregator']]\n # keep a fixed order on the personality traits so we can decode when predicting them all at once\n self.params['pers_traits_selection'] = sorted(self.params['pers_traits_selection'])\n self.params['target_score_type'] = len(self.params['pers_traits_selection'])\n\n def __str__(self):\n '''\n Produces a unique string identifier of the current GAT model.\n :return: str of the name of the model, including the nested CV parameters\n '''\n str_dataset = 'GAT_%s' % self.params['load_specific_data'].__name__.split('_')[1]\n str_dim_sess = 'DIM_%d_SESS_%d' % (self.params['functional_dim'], self.params['scan_session'])\n str_attn_heads = 'AH_%s' % \",\".join(map(str, self.params['attention_heads']))\n str_hid_units = 'HU_%s' % \",\".join(map(str, self.params['hidden_units']))\n str_traits = 'PT_%s' % self.get_summarized_traits()\n str_aggregator = 'AGR_%s' % self.params['readout_aggregator'].__name__.split('_')[0]\n str_include_ew = 'IW_%r' % self.params['include_ew']\n str_batch_sz = 'BS_%d' % self.params['batch_size']\n str_dropout = 'DROP_%s' % str(self.params['attn_drop'])\n str_learn_rate = 'LR_%s' % str(self.params['learning_rate'])\n str_decay_rate = 'DR_%s' % str(self.params['decay_rate'])\n str_cross_val = 'CV_%d%d%s' % (self.params['eval_fold_in'], self.params['eval_fold_out'], self.params[\n 'nested_CV_level'])\n\n str_params = [str_dataset, str_dim_sess, str_attn_heads, str_hid_units, str_traits, str_aggregator,\n str_include_ew, str_batch_sz, str_dropout, str_learn_rate, str_decay_rate, str_cross_val]\n if self.params['load_specific_data'] is load_struct_data:\n str_params.remove(str_dim_sess)\n return '_'.join(str_params)\n\n def print_model_details(self):\n '''\n Prints the details of the current GAT model as hyper-parameters of architecture, training process and nested CV\n :return: void\n '''\n params = self.params\n print('Name of the current GAT model is %s' % self)\n if params['load_specific_data'] == load_struct_data:\n print('Dataset: structural HCP graphs')\n else:\n print('Dataset: functional HCP graphs')\n print('Dimension of graphs: %d and session: %d' % (params['functional_dim'], params['scan_session']))\n print('----- Opt. 
hyperparams -----')\n print('batch size: ' + str(params['batch_size']))\n print('number of training epochs: ' + str(params['num_epochs']))\n print('lr: ' + str(params['learning_rate']))\n print('l2_coef: ' + str(params['decay_rate']))\n print('droput rate ' + str(params['attn_drop']))\n print('using batch normalization ' + str(params['use_batch_norm']))\n print('----- Archi. hyperparams -----')\n print('nb. layers: ' + str(len(params['hidden_units'])))\n print('nb. units per layer: ' + str(params['hidden_units']))\n print('nb. attention heads: ' + str(params['attention_heads']))\n print('aggregation strategy: ' + str(params['readout_aggregator']))\n print('including edge weights: ' + str(params['include_ew']))\n print('nonlinearity: ' + str(params['non_linearity']))\n print('----- Cross-Validation params. -----')\n print('Nested-CV level: ' + self.params['nested_CV_level'])\n print('Inner split: ' + str(self.params['k_inner']))\n print('Outer split: ' + str(self.params['k_outer']))\n print('Outer evaluation fold id: ' + str(self.params['eval_fold_out']))\n print('Inner evaluation fold id: ' + str(self.params['eval_fold_in']))\n\n def get_name(self):\n '''\n Get the name of the GAT model discarding the hyper-parameters of the Nested Cross Validation.\n :return: str of the base name of the model\n '''\n import re\n return re.compile(re.escape('_CV') + '.*').sub('', re.sub(r\"PT_[A-Z]{1,5}_\", \"\", str(self)))\n\n def get_summarized_traits(self):\n '''\n Summarize the names of the traits targeted at once.\n :return: str of the concatenation of the trait names without the common prefixes\n '''\n return ''.join(self.params['pers_traits_selection']).replace('NEO.NEOFAC_', '')\n\n def update(self, update_hyper):\n '''\n Updates the default hyper-parameters of the GAT configuration object\n :param update_hyper: dict of new hyper-parameters\n :return: void, it's changing the internal state of the object\n '''\n if update_hyper is not None:\n self.params.update(update_hyper)\n\n def load_data(self):\n '''\n Load the entire dataset specified by the load_specific_dataset parameter of the configuration. 
Keep it\n in main memory in a global variable in case future models are trained/evaluated on it during the same run.\n :return:\n '''\n global cached_data\n loader_data = self.params['load_specific_data']\n trait_choice = self.get_summarized_traits()\n if loader_data in cached_data.keys():\n if trait_choice in cached_data[loader_data].keys():\n return cached_data[loader_data][trait_choice]\n else:\n uncached_data = loader_data(self.params)\n cached_data[loader_data][trait_choice] = uncached_data\n return uncached_data\n else:\n cached_data[loader_data] = {}\n uncached_data = loader_data(self.params)\n cached_data[loader_data][trait_choice] = uncached_data\n return uncached_data\n\n def checkpoint_file(self):\n '''\n Retrieves the path to the checkpoint file where the model (its parameters) is/should be saved\n :return: str path\n '''\n return os.path.join(gat_result_dir, 'checkpoint_' + str(self) + '.h5')\n\n def logs_file(self):\n '''\n Retrieves the path to the logs file where the training history of the model is/should be saved\n :return: str path\n '''\n return os.path.join(gat_result_dir, 'logs_' + str(self) + '.pck')\n\n def results_file(self):\n '''\n Retrieves the path to the results file where the evaluation data: test loss, predictions is/should be saved\n :return: str path\n '''\n return os.path.join(gat_result_dir, 'predictions_' + str(self))\n\n def get_results(self):\n '''\n Retrieve the results of the model.\n :return: dict with evaluation results: losses, metrics, predictions\n '''\n results = None\n if os.path.exists(self.results_file()):\n with open(self.results_file(), 'rb') as result_fp:\n results = pkl.load(result_fp)\n return results\n\n @staticmethod\n def get_sampled_models(max_samples=19200, no_layers=3, **kwargs):\n '''\n Samples a pre-defined number of GAT configurations for the inner CV of the nested CV phase\n :param max_samples: maximum number of sampled models\n :param kwargs: compatibility with the sampling function of baseline models\n :param no_layers: number of layers of the sampled configurations\n :return: dict of hyper-parameters choices to be converted to a Grid Search\n '''\n samples_file = os.path.join(os.path.dirname(os.path.join(os.path.dirname(__file__))), 'Results',\n 'gat_sampled_models.pck')\n if os.path.exists(samples_file):\n with open(samples_file, 'rb') as handle:\n choices = pkl.load(handle)\n return choices\n choices = {\n 'learning_rate': [0.005, 0.001, 0.0005, 0.0001],\n 'decay_rate': [0.0005],\n 'attn_drop': [0.2, 0.4, 0.6, 0.8],\n 'readout_aggregator': [GATModel.average_feature_aggregator,\n GATModel.master_node_aggregator,\n GATModel.concat_feature_aggregator],\n 'load_specific_data': [load_struct_data, load_funct_data],\n 'include_ew': [True, False],\n 'batch_size': [32]}\n models_so_far = np.prod(np.array([len(choices[x]) for x in choices.keys()])) * 5 * 5\n sampling_left = math.floor(max_samples / models_so_far)\n sample_ah = list(itertools.product(range(3, 7), repeat=no_layers))\n sample_hu = list(itertools.product(range(12, 48), repeat=no_layers))\n\n def check_feat_expansion(ah_hu_choice):\n '''\n Checks if the particular choice of attention heads and units per GAT layer follows an expansion approach\n of the node features' dimensionality\n :param ah_hu_choice: tuple of two lists of the choices of attention heads and hidden units\n :return: bool, the validity of the choice\n '''\n for i in range(1, no_layers - 1):\n if ah_hu_choice[0][i] * ah_hu_choice[1][i] > ah_hu_choice[0][i - 1] * ah_hu_choice[1][i - 1]:\n return False\n # 
the last GAT layer averages node features (no multiplication with no of attention heads)\n if ah_hu_choice[1][-1] > ah_hu_choice[0][-2] * ah_hu_choice[1][-2]:\n return False\n return True\n\n valid_ah_hu = set(filter(lambda ah_hu_choice: check_feat_expansion(ah_hu_choice),\n list(itertools.product(sample_ah, sample_hu))))\n choices['arch_width'] = list(map(lambda x: [list(x[0]), list(x[1])], random.sample(valid_ah_hu, sampling_left)))\n with open(samples_file, 'wb') as handle:\n pkl.dump(choices, handle)\n\n return choices\n\n\nif __name__ == \"__main__\":\n pprint.pprint(ConfigGAT.get_sampled_models())\n","sub_path":"gat_impl/ConfigGAT.py","file_name":"ConfigGAT.py","file_ext":"py","file_size_in_byte":12808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"126215532","text":"#!/usr/bin/env python\n#\n# radiosonde_auto_rx - Configuration File Reader\n#\n# Copyright (C) 2018 Mark Jessop \n# Released under GNU GPL v3 or later\n#\n\nimport copy\nimport logging\nimport traceback\nimport json\nfrom .utils import rtlsdr_test\n\n# Dummy initial config with some parameters we need to make the web interface happy.\nglobal_config = {'min_freq':400.0,'max_freq':403.0,'snr_threshold':10,'station_lat':0.0,'station_lon':0.0}\n\ntry:\n # Python 2\n from ConfigParser import RawConfigParser\nexcept ImportError:\n # Python 3\n from configparser import RawConfigParser\n\ndef read_auto_rx_config(filename):\n\t\"\"\" Read an Auto-RX v2 Station Configuration File.\n\n\tThis function will attempt to parse a configuration file.\n\tIt will also confirm the accessibility of any SDRs specified in the config file.\n\n\tArgs:\n\t\tfilename (str): Filename of the configuration file to read.\n\n\tReturns:\n\t\tauto_rx_config (dict): The configuration dictionary.\n\t\tsdr_config (dict): A dictionary with SDR parameters.\n\t\"\"\"\n\tglobal global_config\n\t# Configuration Defaults:\n\tauto_rx_config = {\n\t\t# Log Settings\n\t\t'per_sonde_log' : True,\n # Email Settings\n 'email_enabled': False,\n 'email_smtp_server': 'localhost',\n 'email_from': 'sonde@localhost',\n 'email_to': None,\n\t\t# SDR Settings\n\t\t'sdr_fm': 'rtl_fm',\n\t\t'sdr_power': 'rtl_power',\n\t\t'sdr_quantity': 1,\n\t\t# Search Parameters\n\t\t'min_freq'\t\t: 400.4,\n\t\t'max_freq'\t\t: 404.0,\n\t\t'rx_timeout'\t: 120,\n\t\t'whitelist'\t: [],\n\t\t'blacklist'\t: [],\n\t\t'greylist'\t: [],\n\t\t# Location Settings\n\t\t'station_lat'\t: 0.0,\n\t\t'station_lon'\t: 0.0,\n\t\t'station_alt'\t: 0.0,\n\t\t# Position Filter Settings\n\t\t'max_altitude'\t: 50000,\n\t\t'max_radius_km'\t: 1000,\n\t\t# Habitat Settings\n\t\t'habitat_enabled': False,\n\t\t'habitat_upload_rate': 30,\n\t\t'habitat_uploader_callsign': 'SONDE_AUTO_RX',\n\t\t'habitat_uploader_antenna': '1/4-wave',\n\t\t'habitat_upload_listener_position': False,\n\t\t'habitat_payload_callsign': '',\n\t\t# APRS Settings\n\t\t'aprs_enabled'\t: False,\n\t\t'aprs_upload_rate': 30,\n\t\t'aprs_user'\t\t: 'N0CALL',\n\t\t'aprs_pass'\t\t: '00000',\n\t\t'aprs_server'\t: 'rotate.aprs2.net',\n\t\t'aprs_object_id': '',\n\t\t'aprs_custom_comment': 'Radiosonde Auto-RX ',\n\t\t# Web Settings,\n\t\t'web_port'\t\t: 5000,\n\t\t'web_archive_age': 120,\n\t\t# Advanced Parameters\n\t\t'search_step'\t: 800,\n\t\t'snr_threshold'\t\t: 10,\n\t\t'min_distance'\t: 1000,\n\t\t'dwell_time'\t: 10,\n\t\t'max_peaks'\t\t: 10,\n\t\t'quantization'\t: 10000,\n\t\t'synchronous_upload' : False,\n\t\t'scan_dwell_time' : 20,\n\t\t'detect_dwell_time' : 5,\n\t\t'scan_delay' : 
10,\n\t\t'payload_id_valid' : 5, \n\t\t# Rotator Settings\n\t\t'rotator_enabled': False,\n\t\t'rotator_update_rate': 30,\t# assumed default; the [rotator] update_rate option is read below\n\t\t'rotator_hostname': '127.0.0.1',\n\t\t'rotator_port'\t: 4533,\n\t\t'rotator_homing_enabled': False,\n\t\t'rotator_home_azimuth': 0,\n\t\t'rotator_home_elevation': 0,\n\t\t# OziExplorer Settings\n\t\t'ozi_enabled'\t: False,\n\t\t'ozi_update_rate': 5,\n\t\t'ozi_port'\t\t: 55681,\n\t\t'payload_summary_enabled': False,\n\t\t'payload_summary_port' : 55672\n\t}\n\n\tsdr_settings = {} # e.g. {'0': {'ppm': 0, 'gain': -1, 'bias': False}}\n\n\ttry:\n\t\tconfig = RawConfigParser(auto_rx_config)\n\t\tconfig.read(filename)\n\n\t\t# Log Settings\n\t\tauto_rx_config['per_sonde_log'] = config.getboolean('logging', 'per_sonde_log')\n\n\t\t# Email Settings\n\t\tif config.has_option('email', 'email_enabled'):\n\t\t\ttry:\n\t\t\t\tauto_rx_config['email_enabled'] = config.getboolean('email', 'email_enabled')\n\t\t\t\tauto_rx_config['email_smtp_server'] = config.get('email', 'smtp_server')\n\t\t\t\tauto_rx_config['email_from'] = config.get('email', 'from')\n\t\t\t\tauto_rx_config['email_to'] = config.get('email', 'to')\n\t\t\texcept:\n\t\t\t\tlogging.error(\"Config - Invalid email settings. Disabling.\")\n\t\t\t\tauto_rx_config['email_enabled'] = False\n\n\t\t# SDR Settings\n\t\tauto_rx_config['sdr_fm'] = config.get('advanced', 'sdr_fm_path')\n\t\tauto_rx_config['sdr_power'] = config.get('advanced', 'sdr_power_path')\n\t\tauto_rx_config['sdr_quantity'] = config.getint('sdr', 'sdr_quantity')\n\n\t\t# Search Parameters\n\t\tauto_rx_config['min_freq'] = config.getfloat('search_params', 'min_freq')\n\t\tauto_rx_config['max_freq'] = config.getfloat('search_params', 'max_freq')\n\t\tauto_rx_config['rx_timeout'] = config.getint('search_params', 'rx_timeout')\n\t\tauto_rx_config['whitelist'] = json.loads(config.get('search_params', 'whitelist'))\n\t\tauto_rx_config['blacklist'] = json.loads(config.get('search_params', 'blacklist'))\n\t\tauto_rx_config['greylist'] = json.loads(config.get('search_params', 'greylist'))\n\n\t\t# Location Settings\n\t\tauto_rx_config['station_lat'] = config.getfloat('location', 'station_lat')\n\t\tauto_rx_config['station_lon'] = config.getfloat('location', 'station_lon')\n\t\tauto_rx_config['station_alt'] = config.getfloat('location', 'station_alt')\n\n\t\t# Position Filtering\n\t\tauto_rx_config['max_altitude'] = config.getint('filtering', 'max_altitude')\n\t\tauto_rx_config['max_radius_km'] = config.getint('filtering', 'max_radius_km')\n\n\t\t# Habitat Settings\n\t\tauto_rx_config['habitat_enabled'] = config.getboolean('habitat', 'habitat_enabled')\n\t\tauto_rx_config['habitat_upload_rate'] = config.getint('habitat', 'upload_rate')\n\t\tauto_rx_config['habitat_payload_callsign'] = config.get('habitat', 'payload_callsign')\n\t\tauto_rx_config['habitat_uploader_callsign'] = config.get('habitat', 'uploader_callsign')\n\t\tauto_rx_config['habitat_upload_listener_position'] = config.getboolean('habitat','upload_listener_position')\n\n\t\t# APRS Settings\n\t\tauto_rx_config['aprs_enabled'] = config.getboolean('aprs', 'aprs_enabled')\n\t\tauto_rx_config['aprs_upload_rate'] = config.getint('aprs', 'upload_rate')\n\t\tauto_rx_config['aprs_user'] = config.get('aprs', 'aprs_user')\n\t\tauto_rx_config['aprs_pass'] = config.get('aprs', 'aprs_pass')\n\t\tauto_rx_config['aprs_server'] = config.get('aprs', 'aprs_server')\n\t\tauto_rx_config['aprs_object_id'] = config.get('aprs', 'aprs_object_id')\n\t\tauto_rx_config['aprs_custom_comment'] = config.get('aprs', 'aprs_custom_comment')\n\n\t\t# OziPlotter 
Settings\n\t\tauto_rx_config['ozi_enabled'] = config.getboolean('oziplotter', 'ozi_enabled')\n\t\tauto_rx_config['ozi_update_rate'] = config.getint('oziplotter', 'ozi_update_rate')\n\t\tauto_rx_config['ozi_port'] = config.getint('oziplotter', 'ozi_port')\n\t\tauto_rx_config['payload_summary_enabled'] = config.getboolean('oziplotter', 'payload_summary_enabled')\n\t\tauto_rx_config['payload_summary_port'] = config.getint('oziplotter', 'payload_summary_port')\n\n\t\t# Advanced Settings\n\t\tauto_rx_config['search_step'] = config.getfloat('advanced', 'search_step')\n\t\tauto_rx_config['snr_threshold'] = config.getfloat('advanced', 'snr_threshold')\n\t\tauto_rx_config['min_distance'] = config.getfloat('advanced', 'min_distance')\n\t\tauto_rx_config['dwell_time'] = config.getint('advanced', 'dwell_time')\n\t\tauto_rx_config['quantization'] = config.getint('advanced', 'quantization')\n\t\tauto_rx_config['max_peaks'] = config.getint('advanced', 'max_peaks')\n\t\tauto_rx_config['scan_dwell_time'] = config.getint('advanced', 'scan_dwell_time')\n\t\tauto_rx_config['detect_dwell_time'] = config.getint('advanced', 'detect_dwell_time')\n\t\tauto_rx_config['scan_delay'] = config.getint('advanced', 'scan_delay')\n\t\tauto_rx_config['payload_id_valid'] = config.getint('advanced', 'payload_id_valid')\n\t\tauto_rx_config['synchronous_upload'] = config.getboolean('advanced', 'synchronous_upload')\n\n\t\t# Rotator Settings (TBC)\n\t\tauto_rx_config['rotator_enabled'] = config.getboolean('rotator','rotator_enabled')\n\t\tauto_rx_config['rotator_update_rate'] = config.getint('rotator', 'update_rate')\n\t\tauto_rx_config['rotator_hostname'] = config.get('rotator', 'rotator_hostname')\n\t\tauto_rx_config['rotator_port'] = config.getint('rotator', 'rotator_port')\n\t\tauto_rx_config['rotator_homing_enabled'] = config.getboolean('rotator', 'rotator_homing_enabled')\n\t\tauto_rx_config['rotator_home_azimuth'] = config.getfloat('rotator', 'rotator_home_azimuth')\n\t\tauto_rx_config['rotator_home_elevation'] = config.getfloat('rotator', 'rotator_home_elevation')\n\n\n\t\t# New setting in this version (20180616). Keep it in a try-catch to avoid bombing out if the new setting isn't present.\n\t\ttry:\n\t\t\tauto_rx_config['habitat_uploader_antenna'] = config.get('habitat', 'uploader_antenna').strip()\n\t\texcept:\n\t\t\tlogging.error(\"Config - Missing uploader_antenna setting. Using default.\")\n\t\t\tauto_rx_config['habitat_uploader_antenna'] = '1/4-wave'\n\n\t\t# New settings added in 20180624.\n\t\ttry:\n\t\t\tauto_rx_config['web_port'] = config.getint('web', 'web_port')\n\t\t\tauto_rx_config['web_archive_age'] = config.getint('web', 'archive_age')\n\t\texcept:\n\t\t\tlogging.error(\"Config - Missing Web Server settings. Using defaults.\")\n\t\t\tauto_rx_config['web_port'] = 5000\n\t\t\tauto_rx_config['web_archive_age'] = 120\n\n\n\n\t\t# Now we attempt to read in the individual SDR parameters.\n\t\tauto_rx_config['sdr_settings'] = {}\n\n\t\tfor _n in range(1,auto_rx_config['sdr_quantity']+1):\n\t\t\t_section = \"sdr_%d\" % _n\n\t\t\ttry:\n\t\t\t\t_device_idx = config.get(_section,'device_idx')\n\t\t\t\t_ppm = config.getint(_section, 'ppm')\n\t\t\t\t_gain = config.getfloat(_section, 'gain')\n\t\t\t\t_bias = config.getboolean(_section, 'bias')\n\n\t\t\t\tif (auto_rx_config['sdr_quantity'] > 1) and (_device_idx == '0'):\n\t\t\t\t\tlogging.critical(\"Config - SDR Device ID of 0 used with a multi-SDR configuration. 
Go read the warning in the config file!\")\n\t\t\t\t\treturn None\n\n\t\t\t\t# See if the SDR exists.\n\t\t\t\t_sdr_valid = rtlsdr_test(_device_idx)\n\t\t\t\tif _sdr_valid:\n\t\t\t\t\tauto_rx_config['sdr_settings'][_device_idx] = {'ppm':_ppm, 'gain':_gain, 'bias':_bias, 'in_use': False, 'task': None}\n\t\t\t\t\tlogging.info('Config - Tested SDR #%s OK' % _device_idx)\n\t\t\t\telse:\n\t\t\t\t\tlogging.warning(\"Config - SDR #%s invalid.\" % _device_idx)\n\t\t\texcept Exception as e:\n\t\t\t\tlogging.error(\"Config - Error parsing SDR %d config - %s\" % (_n,str(e)))\n\t\t\t\tcontinue\n\n\t\t# Sanity checks when using more than one SDR\n\t\tif (len(auto_rx_config['sdr_settings'].keys()) > 1) and (auto_rx_config['habitat_payload_callsign'] != \"\"):\n\t\t\tlogging.critical(\"Fixed Habitat Payload callsign used in a multi-SDR configuration. Go read the warnings in the config file!\")\n\t\t\treturn None\n\n\t\tif (len(auto_rx_config['sdr_settings'].keys()) > 1) and (auto_rx_config['aprs_object_id'] != \"\"):\n\t\t\tlogging.critical(\"Fixed APRS object ID used in a multi-SDR configuration. Go read the warnings in the config file!\")\n\t\t\treturn None\n\n\t\tif (len(auto_rx_config['sdr_settings'].keys()) > 1) and (auto_rx_config['rotator_enabled']):\n\t\t\tlogging.critical(\"Rotator enabled in a multi-SDR configuration. Go read the warnings in the config file!\")\n\t\t\treturn None\n\n\t\t# TODO: Revisit this limitation once the OziPlotter output sub-module is complete.\n\t\tif (len(auto_rx_config['sdr_settings'].keys()) > 1) and (auto_rx_config['ozi_enabled'] or auto_rx_config['payload_summary_enabled']):\n\t\t\tlogging.critical(\"Chase car outputs (OziPlotter/Payload Summary) enabled in a multi-SDR configuration.\")\n\t\t\treturn None\n\n\n\t\tif len(auto_rx_config['sdr_settings'].keys()) == 0:\n\t\t\t# We have no SDRs to use!!\n\t\t\tlogging.error(\"Config - No working SDRs! Cannot run...\")\n\t\t\treturn None\n\t\telse:\n\t\t\t# Create a global copy of the configuration file at this point\n\t\t\tglobal_config = copy.deepcopy(auto_rx_config)\n\t\t\treturn auto_rx_config\n\n\n\texcept:\n\t\ttraceback.print_exc()\n\t\tlogging.error(\"Could not parse config file.\")\n\t\treturn None\n\n\nif __name__ == '__main__':\n\t''' Quick test script to attempt to read in a config file. '''\n\timport sys, pprint\n\tlogging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG)\n\n\tconfig = read_auto_rx_config(sys.argv[1])\n\n\tpprint.pprint(global_config)","sub_path":"auto_rx/autorx/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":11484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
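The config reader in the record above follows a defaults-dict-plus-RawConfigParser pattern: every option carries a hard-coded default, values parsed from the file override it, and any fatal inconsistency makes the reader return None. A minimal, self-contained sketch of that pattern (the section and option names here are illustrative, not the auto_rx schema):

import configparser

DEFAULTS = {'min_freq': 400.0, 'max_freq': 403.0, 'sdr_quantity': 1}

def read_config(text):
    # Defaults are applied first; the file only overrides what it defines.
    parser = configparser.RawConfigParser(DEFAULTS)
    parser.read_string(text)
    cfg = dict(DEFAULTS)
    try:
        cfg['min_freq'] = parser.getfloat('search', 'min_freq')
        cfg['max_freq'] = parser.getfloat('search', 'max_freq')
    except (configparser.Error, ValueError):
        return None  # mirror the "return None on a bad config" convention
    if cfg['min_freq'] >= cfg['max_freq']:
        return None  # fatal inconsistency
    return cfg

print(read_config("[search]\nmin_freq = 400.4\nmax_freq = 404.0\n"))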
{"seq_id":"296153206","text":"import re\nfrom main_modules.settings import PRIORITY, TYPE\n__classificationtype__=TYPE.BackEND_SPECIFIED\n__priority__ = PRIORITY.LOW\n\ndef tamper(payload, **kwargs):\n    \"\"\"\n    Convert keywords to character-code calls, e.g. =char(100,118,119,97), or\n    char(0x##)+char(0x##)+... if we can use only one character at a time.\n\n    >>> tamper(\"selet\")\n    'CHR(115) || CHR(101) || CHR(108) || CHR(101) || CHR(116)'\n\n    CHAR(83, 101, 76, 101, 84)                                          mysql\n    CHAR(83) + CHAR(101) + CHAR(76) + CHAR(101) + CHAR(84)              mssql\n    CHR(115) || CHR(101) || CHR(108) || CHR(101) || CHR(99) || CHR(116) oracle\n    \"\"\"\n    # \\w+ (not \\w*) keeps the callback from being invoked on empty matches\n    string = re.sub(r\"\\w+\", convert_this, str(payload))\n\n    return string if payload else payload\n\ndef convert_this(match):\n    word = match.group()\n    # Oracle expects decimal character codes joined with ||, e.g. CHR(115) || CHR(101)\n    return \" || \".join(\"CHR(\" + str(ord(c)) + \")\" for c in word)","sub_path":"Tampers/sqli/CharacterEncoding_charOracle.py","file_name":"CharacterEncoding_charOracle.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"589026547","text":"import timeit\nfrom random import shuffle\n\nimport matplotlib.pyplot as plt\n\n\ndef geraLista(tam):\n    lista = list(range(1, tam + 1))\n    shuffle(lista)\n    return lista\n\n\ndef geraListaInvertida(tam):\n    lista = list(range(1, tam + 1))\n    return lista[::-1]\n\n\ndef desenhaGrafico(x, y, xl=\"Entradas\", yl=\"Saídas\", z='Tempo'):\n    fig = plt.figure(figsize=(10, 8))\n    ax = fig.add_subplot(111)\n    ax.plot(x, y, label=\"Lista aleatória - {} \".format(z))\n    ax.legend(bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)\n    plt.ylabel(yl)\n    plt.xlabel(xl)\n    # save before showing; after plt.show() the current figure is empty\n    plt.savefig(z + \".png\")\n    plt.show()\n\n\ndef shellSort(lista):\n    # gap sequence: halve the interval until it reaches zero\n    intervalo = len(lista) // 2\n\n    while intervalo > 0:\n        for index in range(intervalo, len(lista)):\n            pivo = lista[index]\n            aux_index = index\n            while aux_index >= intervalo and lista[aux_index - intervalo] > pivo:\n                lista[aux_index] = lista[aux_index - intervalo]\n                aux_index = aux_index - intervalo\n            lista[aux_index] = pivo\n\n        intervalo //= 2\n\n\nif __name__ == '__main__':\n    z = [100000, 200000, 300000, 400000, 500000, 1000000, 2000000]\n    x = []\n    for i in z:\n        x.append(geraLista(int(i)))\n    y = []\n\n    for i in range(len(x)):\n        print(len(x[i]))\n        y.append(\n            timeit.timeit(\"shellSort({})\".format(x[i]), setup=\"from __main__ import shellSort\",\n                          number=4))\n    desenhaGrafico(z, y)\n","sub_path":"shellsort/shellsort.py","file_name":"shellsort.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"51045983","text":"import pandas as pd\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport os\n\nurlList = [\n    '1', '500', '1000', '1500', '2000', '2500', '3000', '3500', '4000', '4500',\n    '5000'\n]\nseries_bool = [True, True, True, True]\nattri = 'onclick'\npattern = r'([0-9]+)'\n\nprint(os.getcwd())\n\nUserIdPath = 'data/UserID.txt'\nTablePath = 'data/table.csv'\n# start from clean output files (guard the first run, when they do not exist yet)\nif os.path.exists(UserIdPath):\n    os.remove(UserIdPath)\nif os.path.exists(TablePath):\n    os.remove(TablePath)\n\n# collect only the UserIDs from every page\nfor urlPage in urlList:\n    url = 'url'\n    html = urlopen(url).read()\n    soup = BeautifulSoup(html, 'html.parser')\n\n    UserID = ''\n    links = soup.find_all('tr')\n    for link in links:\n        if attri in link.attrs and link.attrs[attri].find('UserID=') != -1:\n            UserID += link.attrs[attri]\n\n    # join the extracted UserIDs with newlines\n    match = '\\n'.join(re.findall(pattern, UserID)) + '\\n'\n    # append the UserIDs to the text file\n    with open(UserIdPath, mode='a', encoding='utf-8') as fw:\n        fw.write(match)\n\n    # fetch the table (.loc replaces the deprecated .ix indexer)\n    table = pd.read_html(url, flavor='bs4')[9].loc[:, series_bool].dropna()\n    # append it to the CSV\n    with open(TablePath, 'a', encoding='utf-8') as f:\n        table.to_csv(f)","sub_path":"Python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
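The tamper record above rewrites keywords as character-code calls, and its docstring notes that each DBMS spells this differently: CHAR(a,b,c) for MySQL, CHAR()+CHAR() for MSSQL, CHR()||CHR() for Oracle. A small dialect-parameterized sketch of the same idea (the dialect table is illustrative, not part of the original module):

import re

DIALECTS = {
    'mysql':  lambda codes: 'CHAR({})'.format(','.join(codes)),
    'mssql':  lambda codes: '+'.join('CHAR({})'.format(c) for c in codes),
    'oracle': lambda codes: ' || '.join('CHR({})'.format(c) for c in codes),
}

def encode_word(match, dialect='oracle'):
    # decimal character codes for every letter of the matched word
    codes = [str(ord(c)) for c in match.group()]
    return DIALECTS[dialect](codes)

def tamper(payload, dialect='oracle'):
    # \w+ so the callback never sees an empty match
    return re.sub(r'\w+', lambda m: encode_word(m, dialect), payload)

print(tamper('select', 'mysql'))   # CHAR(115,101,108,101,99,116)
print(tamper('select', 'oracle'))  # CHR(115) || CHR(101) || ...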
{"seq_id":"337298292","text":"import torch\nimport torch.nn as nn\n\n__all__ = ['mymodel2']\n\n\ndef change_dir(grad):\n    # flip the gradient direction\n    return -1 * grad\n\ndef custom(grad, model_output, labels_):\n    # For each sample, reverse the gradient wherever the activation of the\n    # labelled class (normalised by its maximum) exceeds 0.85.\n    i = 0\n    for col in labels_:\n        label = col.data\n        max_output = torch.max(model_output[i, label, :, :])\n        norm_output = model_output[i, label, :, :] / max_output\n        mask = norm_output > 0.85\n        chd = change_dir(grad[i, label, 0, 0])\n        grad.select(0, i).select(0, label).copy_(grad[i, label, :, :].masked_fill_(mask, chd))\n        i += 1\n    return grad\n\n\nclass MyModel2(nn.Module):\n    def __init__(self):\n        super(MyModel2, self).__init__()\n        #self.attention = None\n\n    def forward(self, input_):\n        if not self.training:\n            return input_\n        else:\n            # channel-wise mean -> sigmoid gives a spatial importance map\n            attention = torch.mean(input_, dim=1, keepdim=True)\n            importance_map = torch.sigmoid(attention)\n            return input_.mul(importance_map)\n","sub_path":"wsol/method/mymodel2.py","file_name":"mymodel2.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"412034109","text":"# Write a program that receives 10 integers and stores them in a list.\n# Compute and show:\n# - the count of even numbers;\n# - the sum of the odd numbers;\n# - the count of numbers between 10 and 20 (inclusive);\n# - the average of the numbers in the list.\n\nfrom random import randint\nn = []\nfor i in range(10):\n    n.append(randint(10, 20))\nprint(n)  # print once, after the list is filled\npar = impar = qtd = soma = 0\nfor i in range(10):\n    if n[i] % 2 == 0:\n        par += 1\n    else:\n        impar += n[i]\n\n    if n[i] >= 10 and n[i] <= 20:\n        qtd += 1\n\n    soma += n[i]\n\nmedia = soma / len(n)\nprint('Quantidade de números pares: ', par)\nprint('A soma dos números ímpares: ', impar)\nprint('A quantidade de números entre 10 e 20: ', qtd)\nprint('A média dos números da lista: ', media)\n","sub_path":"Pacote para dowloand/Python/ex018(lista e for) Armazenamento em lista.py","file_name":"ex018(lista e for) Armazenamento em lista.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
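The exercise record above (ex018) computes its statistics with index loops and parallel counters; the same results fall out of comprehensions and the standard statistics module. A compact, runnable equivalent (variable names kept close to the original):

from random import randint
from statistics import mean

n = [randint(10, 20) for _ in range(10)]
print(n)

pares = sum(1 for x in n if x % 2 == 0)           # count of even numbers
soma_impares = sum(x for x in n if x % 2 != 0)    # sum of the odd numbers
entre_10_20 = sum(1 for x in n if 10 <= x <= 20)  # count in [10, 20]

print('evens:', pares)
print('odd sum:', soma_impares)
print('in [10, 20]:', entre_10_20)
print('mean:', mean(n))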
case[\"interface\"] == \"register\":\n ReplaceData.mobile_phone = self.random_phone()\n case[\"data\"] = ReplaceData.replace_data(case[\"data\"])\n data = eval(case[\"data\"])\n\n #判断不是注册、登录、项目列表接口就在请求头中添加token鉴权信息\n if case[\"interface\"] != \"register\" and case[\"interface\"] != \"login\" :\n headers[\"Authorization\"] = ReplaceData.Authorization\n # 添加时间戳和签名到json请求体\n sign = HandleSign.generate_sign(ReplaceData.token)\n data.update(sign)\n\n #第二步:发送请求,获取结果\n response = self.request.send(url=url,method=method,headers=headers,params=data,json=data)\n res = response.json()\n\n #判断是否是登录接口,提取用户的id,提取鉴权token值,保存为类属性\n if case[\"interface\"] == \"login\":\n ReplaceData.member_id = str(jsonpath.jsonpath(res,\"$..id\")[0])\n ReplaceData.token = jsonpath.jsonpath(res,\"$..token\")[0]\n token_type = jsonpath.jsonpath(res,\"$..token_type\")[0]\n ReplaceData.Authorization = token_type + \" \" + ReplaceData.token\n\n # 判断是否是添加项目接口,提取项目的id,保存为类属性\n if case[\"interface\"] == \"add\" and case[\"title\"] == \"管理员添加项目一\":\n ReplaceData.pass_loan_id = str(jsonpath.jsonpath(res,\"$..id\")[0])\n elif case[\"interface\"] == \"add\" and case[\"title\"] == \"管理员添加项目二\":\n ReplaceData.file_loan_id = str(jsonpath.jsonpath(res,\"$..id\")[0])\n\n #第三步:断言,比对预期结果与实际结果\n try:\n self.assertEqual(expected[\"code\"],res[\"code\"])\n self.assertEqual(expected[\"msg\"],res[\"msg\"])\n except AssertionError as e :\n print(\"预期结果:{}\".format(expected))\n print(\"实际结果:{}\".format(res))\n self.excel.write_data(row=row,column=8,value=\"未通过\")\n log.error(\"用例未通过:{},错误原因:{}\".format(title,e))\n raise e\n else:\n self.excel.write_data(row=row, column=8, value=\"通过\")\n log.debug(\"用例通过:{}\".format(title))\n\n\n\n def random_phone(self):\n \"\"\"\n 随机生成手机号的方法\n :return:\n \"\"\"\n while True:\n phone = \"155\"\n for i in range(0, 8):\n n = random.randint(0, 9)\n phone += str(n)\n sql = \"SELECT * FROM futureloan.member WHERE mobile_phone={}\".format(phone)\n res_phone = self.db.find_count(sql)\n if res_phone == 0:\n break\n\n return phone\n","sub_path":"testcase/test11mainstreaming.py","file_name":"test11mainstreaming.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"496600162","text":"from django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nimport pandas as pd\n\nfrom categories.models import Pool\n\nimport os\nimport logging\nimport re\n\n\ndef vendor_logger():\n return logging.getLogger('vendor')\n\n\ndef vehicle_info(vehicle):\n field_map = {\n 'oasis': {\n 'field_types': ('core', 'zones')\n },\n 'oasis_sb': {\n 'field_types': ('core', 'setasides', 'zones') \n },\n 'hcats': {\n 'field_types': ('core', 'zones') \n },\n 'hcats_sb': {\n 'field_types': ('core', 'setasides', 'zones') \n },\n 'bmo': {\n 'field_types': ('core', 'zones') \n },\n 'bmo_sb': {\n 'field_types': ('core', 'setasides', 'zones') \n }\n }\n return field_map[vehicle]\n\n\ndef vendor_field_type_core():\n return [\n 'ContractorName',\n 'ContractNumber',\n 'ContractEnd',\n 'DUNS',\n 'POC1',\n 'Phone1',\n 'Email1',\n 'POC2',\n 'Phone2',\n 'Email2'\n ]\n \ndef vendor_field_type_setasides():\n return [\n 'SB',\n '8(a)',\n '8(a)Date',\n 'HubZ',\n 'SDB',\n 'WO',\n 'VO',\n 'SDVOSB',\n 'VIP'\n ]\n \ndef vendor_field_type_zones():\n return [\n 'Zone1',\n 'Zone2',\n 'Zone3',\n 'Zone4',\n 'Zone5',\n 'Zone6'\n ]\n\nclass Command(BaseCommand):\n \n def check_pool(self, vehicle, pool, df):\n variables = globals()\n info = vehicle_info(vehicle)\n 
{"seq_id":"496600162","text":"from django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nimport pandas as pd\n\nfrom categories.models import Pool\n\nimport os\nimport logging\nimport re\n\n\ndef vendor_logger():\n    return logging.getLogger('vendor')\n\n\ndef vehicle_info(vehicle):\n    field_map = {\n        'oasis': {\n            'field_types': ('core', 'zones')\n        },\n        'oasis_sb': {\n            'field_types': ('core', 'setasides', 'zones')\n        },\n        'hcats': {\n            'field_types': ('core', 'zones')\n        },\n        'hcats_sb': {\n            'field_types': ('core', 'setasides', 'zones')\n        },\n        'bmo': {\n            'field_types': ('core', 'zones')\n        },\n        'bmo_sb': {\n            'field_types': ('core', 'setasides', 'zones')\n        }\n    }\n    return field_map[vehicle]\n\n\ndef vendor_field_type_core():\n    return [\n        'ContractorName',\n        'ContractNumber',\n        'ContractEnd',\n        'DUNS',\n        'POC1',\n        'Phone1',\n        'Email1',\n        'POC2',\n        'Phone2',\n        'Email2'\n    ]\n    \ndef vendor_field_type_setasides():\n    return [\n        'SB',\n        '8(a)',\n        '8(a)Date',\n        'HubZ',\n        'SDB',\n        'WO',\n        'VO',\n        'SDVOSB',\n        'VIP'\n    ]\n    \ndef vendor_field_type_zones():\n    return [\n        'Zone1',\n        'Zone2',\n        'Zone3',\n        'Zone4',\n        'Zone5',\n        'Zone6'\n    ]\n\nclass Command(BaseCommand):\n    \n    def check_pool(self, vehicle, pool, df):\n        # the vendor_field_type_* helpers are looked up by name from module globals\n        variables = globals()\n        info = vehicle_info(vehicle)\n        logger = vendor_logger()\n        columns = list(df.columns)\n        vendor_count = 0\n        \n        print(\" > Data:\")\n        for field_group in info['field_types']:\n            field_processor = \"vendor_field_type_{}\".format(field_group)\n            missing = 0\n            \n            print(\" - {}:\".format(field_group))\n            for column in variables[field_processor]():\n                if column not in columns:\n                    print(\" - Missing: {}\".format(column))\n                    missing += 1\n            \n            if missing == 0:\n                print(\" - No missing fields\")\n        \n        for index, record in df.iterrows():\n            vendor_count += 1\n        \n        print(\" > Vendors: {}\".format(vendor_count))\n\n\n    def check_vehicle(self, vehicle):\n        logger = vendor_logger()  # was missing: the except blocks below reference it\n        vehicle_file = os.path.join(settings.BASE_DIR, 'data/pools/{}.xlsx'.format(vehicle))\n        wb = pd.ExcelFile(vehicle_file)\n        sheets = wb.sheet_names\n        \n        print(\"\\nVehicle [ {} ]\".format(vehicle))\n        \n        for name in sheets:\n            try:\n                pool = re.search(r'\\(\\s*([0-9a-zA-Z]+)\\s*\\)', name, re.IGNORECASE).group(1)\n                pool_data = Pool.objects.get(number=pool, vehicle__id__iexact=vehicle)\n                \n                print(\"\\n > Pool [ {} ]\".format(pool))\n                self.check_pool(vehicle, pool, wb.parse(name))\n\n            except AttributeError as e:\n                pass # Not a pool sheet, skip...\n            \n            except Pool.DoesNotExist as e:\n                logger.debug(\" > Pool {} not found\".format(pool))\n                raise e\n\n            except Pool.MultipleObjectsReturned as e:\n                logger.debug(\" > More than one pool matched {}. Integrity error!\".format(pool))\n                raise e\n    \n\n    def handle(self, *args, **options):\n        for vehicle in settings.VEHICLES:\n            self.check_vehicle(vehicle)\n","sub_path":"app/vendors/management/commands/check_vendors.py","file_name":"check_vendors.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"370917694","text":"\"\"\"\r\n\tCopyright (c) 2016 Arttu Ylä-Sahra\r\n\r\n\tPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\r\n\r\n\tThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\r\n\r\n\tTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n\"\"\"\r\n\r\n\"\"\"\r\n\trandom-background.py - A simple generator script using the Pintograph library for generating images. 
Each time it is a run, an image with a random filename is generated\r\n\"\"\"\r\n\r\nfrom pintograph_engine import Pintograph\r\nfrom PIL import Image, ImageDraw\r\nimport random\r\nimport math\r\n\r\n#How large the picture shall be?\r\nBACKGROUND_SIZE = 4500\r\n\r\n#Base steps: this amount will be ran for autodetection of position, and 10x that for actual rendering\r\nBASE_STEPS = 1000\r\n\r\ndef realign_cords(xy):\r\n\t\"\"\"\r\n\t\tAs the Pintograph library generates coordinates around an arbitrary center point, we need to convert the coordinates to a type suitable for images - aka starting from zero\r\n\t\"\"\"\r\n\tglobal BACKGROUND_SIZE\r\n\r\n\treturn [int(xy[0] + (BACKGROUND_SIZE / 2)), int((BACKGROUND_SIZE-1) - (xy[1] + BACKGROUND_SIZE / 2))]\r\n\r\n#Initialize the image library\r\nbase_img = Image.new(\"RGBA\", (BACKGROUND_SIZE, BACKGROUND_SIZE), (255,255,255,255))\r\nbase_draw = ImageDraw.Draw(base_img)\r\n\r\n#Initialize the variables required for the Pintograph library\r\nrandom.seed()\r\n#How long the rods will be?\r\nrod_length = 50 + random.randint(-20,20)\r\n\r\n#One full phase is 2pi, and we want it to complete a full circle in 10 steps, so.. \r\nphases_per_step = (2*math.pi) / 10\r\n\r\n#Starting angle at approximately 173 to 287 degrees for both\r\ninitial_phase = (4.01 - 1) + (random.random() * 2)\r\n\r\n#The radii for the circles\r\nradii = 10\r\n\r\npinto = Pintograph(r1=radii,\r\n\t\t\t\t r1_phase_per_step = phases_per_step,\r\n\t\t\t\t r2=radii,\r\n\t\t\t\t r2_phase_per_step = phases_per_step - 0.02 + (random.random() * 0.04),\r\n\t\t\t\t circle_distance = ((rod_length * 2) * (2.0/7.0)),\r\n\t\t\t\t left_rod_lngth = rod_length,\r\n\t\t\t\t right_rod_lngth = rod_length + int((random.random() * 5)),\r\n\r\n\t\t\t\t lft_radii_minimum_factor = 0.01,\r\n\t\t\t\t lft_radii_degrade_percentage_per_step = 0.0010752 + (random.random() / 10000),\r\n\r\n\t\t\t\t rght_radii_minimum_factor = 0.01,\r\n\t\t\t\t rght_radii_degrade_percentage_per_step = 0.0010751 + (random.random() / 10000),\r\n\t\t\t\t )\r\n\r\n\r\npinto.set_initial_phases(initial_phase - 0.03 + (random.random() * 0.06), initial_phase)\r\npinto.set_x_swing((rod_length*2)/7, 0, phases_per_step, BASE_STEPS * 0.85)\r\n\r\n#Run a rough scan and scale\r\npinto.automatic_scale_and_center(BASE_STEPS, 0.75, BACKGROUND_SIZE, 2)\t\r\n\r\nbase_pos = realign_cords(pinto.calculate_adjusted_step(0))\r\n\r\nfor i in range(BASE_STEPS * 10):\r\n\tnew_pos = realign_cords(pinto.calculate_adjusted_step(i / 10.0))\r\n\t#Simulate opacity; as actually implementing it would be slightly complicated, let's assume the line is darker when farther in the steps\r\n\tc = int(200 - (i / (BASE_STEPS*10.0))*180)\r\n\tbase_draw.line([base_pos[0], base_pos[1], new_pos[0], new_pos[1]], (c,c,c,c), 1)\r\n\tbase_pos = new_pos\r\n\r\nbase_img.save(\"bg-\"+str(random.randint(0,10000000000))+\".png\")\r\n","sub_path":"random-background.py","file_name":"random-background.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"377764332","text":"\n# In[1]:\n\n\n##Implement Some Importatnt functions to be used in the project flow\n\n\n# In[2]:\n\n\n# Important imports\nimport numpy as np\nimport cv2\nimport pickle\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom skimage.feature import hog\n\nimport glob\nimport time\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\nfrom 
sklearn.model_selection import train_test_split\nfrom scipy.ndimage.measurements import label\n\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\nimport imageio\n#imageio.plugins.ffmpeg.download()\n\n\n# In[3]:\n\n\n# Important notes when reading images in this project\n\n'''\npng, mpimg -> 0 - 1 \npng, cv2 -> 0 - 255\njpg, mpimg -> 0 - 255\njpg, cv2 -> 0 - 255\n'''\n\n\n# ## 1- Draw Boxes Function\n\n# In[4]:\n\n\n# Draw boxes using cv2 library given 2 opposite points\ndef draw_boxes(img, bboxes, color, thick):\n    # make a copy from the input image\n    draw_img = np.copy(img)\n    # draw the bounding box which has the input opposite points in shape of ((x1,y1),(x2,y2))\n    for bbox in bboxes:\n        # draw the rectangle using cv2.rectangle with the input color of shape (R,G,B)\n        cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick)\n    \n    return draw_img\n\n\n# In[5]:\n\n\n# Test draw_boxes Function\ntest_image = mpimg.imread(\"test_images/test1.jpg\")\n\ntest_bboxes = [((800,500),(950,400))]\ntest_result = draw_boxes(test_image,test_bboxes, color=(255,0,0), thick=8)\nplt.imshow(test_result)\nplt.show()\n\n\n# \n# # 2- Features Extraction \n\n# ## 2a) Color Histograms Features\n\n# In[6]:\n\n\n# Extract features from the color histogram\ndef color_hist_features(img, nbins, bins_range):\n    # Calculate the histograms for each channel separately\n    chan1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range) \n    chan2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n    chan3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n    # Generate bins centers\n    bins_edges = chan1_hist[1]\n    bins_centers = (bins_edges[1:] + bins_edges[0:len(bins_edges)-1])/2\n    # Concatenate all features together; element [0] of each histogram holds the\n    # counts (element [1] is the bin edges, which are not features)\n    color_hist_features = np.concatenate((chan1_hist[0], chan2_hist[0], chan3_hist[0]))\n    \n    # return the histogram features which is the most important one from this function\n    # However, the other histograms and bins centers will be needed to be visualized in testing this function\n    return color_hist_features, chan1_hist, chan2_hist, chan3_hist, bins_centers\n\n\n# In[7]:\n\n\n# Test Color histogram features extraction\ntest_image = mpimg.imread(\"test_images/test1.jpg\")\n\ntest_features, test_ch1, test_ch2, test_ch3, test_centers = color_hist_features(test_image, \n                                                                                nbins=32, \n                                                                                bins_range=(0,256))\n\nfig = plt.figure(figsize=(12,3))\nplt.subplot(131)\nplt.bar(test_centers, test_ch1[0])\nplt.xlim(0, 256)\nplt.title('ch1 Histogram')\nplt.subplot(132)\nplt.bar(test_centers, test_ch2[0])\nplt.xlim(0, 256)\nplt.title('ch2 Histogram')\nplt.subplot(133)\nplt.bar(test_centers, test_ch3[0])\nplt.xlim(0, 256)\nplt.title('ch3 Histogram')\nfig.tight_layout()\nplt.show()\n\n\n# ## 2b) Color Spatial Binning Features\n\n# In[8]:\n\n\n# Extract features from the Color spatial binning\ndef bin_spatial(img, color_space, size):\n    # convert the image into the color space sent into the function\n    if color_space != \"RGB\":\n        if color_space == \"HSV\":\n            feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n        elif color_space == \"HLS\":\n            feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)  # was RGB2HSV by mistake\n        elif color_space == \"LUV\":\n            feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n        elif color_space == \"YUV\":\n            feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n        elif color_space == \"YCrCb\":\n            feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n        elif color_space == \"GRAY\":\n            feature_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n        else:\n            feature_img = np.copy(img)  # fall back for unknown colour spaces\n    else:\n        feature_img = np.copy(img)\n    \n    # flatten the features 
extarcted from the image after resizing\n bin_spatial_features = cv2.resize(feature_img, size).ravel()\n \n # return these features\n return bin_spatial_features, feature_img\n\n\n# In[9]:\n\n\n# Test Color spatial Bining \ntest_image = mpimg.imread(\"test_images/test1.jpg\")\nprint(test_image.shape)\n\ntest_features, test_img_converted = bin_spatial(test_image, color_space=\"YCrCb\", size=(8,8))\nplt.plot(test_features)\nplt.show()\n\n\n# ## 2c) Oriented Gradient Histogram features (Hog)\n\n# In[10]:\n\n\n# Extract features of the Histogram Oriented Gradient\ndef get_hog_features(img, orient, pix_per_cell, cell_per_block, transform_sqrt_flag, vis_flag, feature_vector_flag):\n # Note that the img here should be 2D (grayscale)\n # check the visualization flag if it is true or not to plot the output of hog functionality \n if vis_flag == True:\n # apply hog with visualizing the output of hog functionality\n hog_features, hog_image = hog(img, orientations=orient, \n pixels_per_cell=(pix_per_cell,pix_per_cell),\n cells_per_block=(cell_per_block,cell_per_block),\n transform_sqrt=transform_sqrt_flag, \n visualise=vis_flag, feature_vector=feature_vector_flag)\n \n return hog_features, hog_image\n \n if vis_flag == False:\n # apply hog without visualizing the output of hog functionality\n hog_features = hog(img, orientations=orient, \n pixels_per_cell=(pix_per_cell,pix_per_cell),\n cells_per_block=(cell_per_block,cell_per_block),\n transform_sqrt=transform_sqrt_flag, \n visualise=vis_flag, feature_vector=feature_vector_flag)\n \n return hog_features\n \n\n\n# In[11]:\n\n\n# Test extraction of hog features and visulaize\ntest_image = mpimg.imread(\"test_images/test1.jpg\")\n\n# Note that the image should be 2D (grayscale) \ntest_gray = cv2.cvtColor(test_image, cv2.COLOR_RGB2GRAY)\ntest_features, test_result_img = get_hog_features(test_gray, orient=12, pix_per_cell=4, \n cell_per_block=2, transform_sqrt_flag=True, \n vis_flag=True, feature_vector_flag=True)\n\nfig = plt.figure(figsize=(12,3))\nplt.subplot(121)\nplt.imshow(test_image, cmap='gray')\nplt.title('Example')\nplt.subplot(122)\nplt.imshow(test_result_img, cmap='gray')\nplt.title('HOG Visualization')\nplt.show()\n\n\n# # 3- Combine Features (Color_hist, bin_spatial) with (hog)\n\n# In[55]:\n\n\n# Extract all of the previous features from list of images \ndef extract_features(imgs, cspace, spatial_size, hist_nbins, hist_range, \n orient, pix_per_cell, cell_per_block, transform_sqrt_flag, vis_flag, feature_vector_flag,\n hog_channel, extract_spatial_flag, extract_color_hist_flag, extract_hog_flag, cv2read=False):\n # Create empty list for appending the extracted features\n features = []\n # make an iteration to apply the extraction over img by img\n\n for img in imgs:\n # create local features for every image to preserve them after finishing all images\n image_features = []\n # read the img\n if cv2read == True:\n image_read = cv2.imread(img)\n else:\n image_read = img\n \n #converted_image = image_read\n \n # Apply bin spatial features extraction\n bin_features, converted_image = bin_spatial(image_read, color_space=cspace, size=spatial_size)\n \n # Apply color hist features extraction\n col_features,_,_,_,_ = color_hist_features(converted_image, nbins=hist_nbins, bins_range=hist_range)\n \n # Apply hog features extraction\n if hog_channel == \"ALL\":\n hog_features = []\n # Apply hog features extraction over each channel in the image\n for channel in range(converted_image.shape[2]):\n 
hog_features.append(get_hog_features(img=converted_image[:,:,channel], orient=orient, \n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, \n transform_sqrt_flag=transform_sqrt_flag, \n vis_flag=vis_flag, feature_vector_flag=feature_vector_flag))\n hog_features = np.ravel(hog_features)\n else:\n # Apply hog features extraction over the given channel in the image\n hog_features = get_hog_features(img=converted_image[:,:,hog_channel], orient=orient, \n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, \n transform_sqrt_flag=transform_sqrt_flag, \n vis_flag=vis_flag, feature_vector_flag=feature_vector_flag)\n \n # Append all of the features in one list\n if extract_spatial_flag == True:\n image_features.append(bin_features)\n if extract_color_hist_flag == True:\n image_features.append(col_features)\n if extract_hog_flag == True:\n image_features.append(hog_features)\n \n #print(image_features)\n \n # Appned all of the features in (features) list after concatenate all of the previous features\n features.append(np.concatenate(image_features))\n \n # return all of these features in a feature vector\n return features\n\n\n# In[56]:\n\n\ndef extract_features_One_image(img, cspace, spatial_size, hist_nbins, hist_range, \n orient, pix_per_cell, cell_per_block, transform_sqrt_flag, vis_flag, feature_vector_flag,\n hog_channel, extract_spatial_flag, extract_color_hist_flag, extract_hog_flag, cv2read=False):\n \n # Create empty list for appending the extracted features\n features = []\n # create local features for every image to preserve them after finishing all images\n image_features = []\n # read the img\n if cv2read == True:\n image_read = cv2.imread(img)\n else:\n image_read = img\n \n #converted_image = image_read\n\n # Apply bin spatial features extraction\n bin_features, converted_image = bin_spatial(image_read, color_space=cspace, size=spatial_size)\n\n # Apply color hist features extraction\n col_features,_,_,_,_ = color_hist_features(converted_image, nbins=hist_nbins, bins_range=hist_range)\n\n # Apply hog features extraction\n if hog_channel == \"ALL\":\n hog_features = []\n # Apply hog features extraction over each channel in the image\n for channel in range(converted_image.shape[2]):\n hog_features.append(get_hog_features(img=converted_image[:,:,channel], orient=orient, \n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, \n transform_sqrt_flag=transform_sqrt_flag, \n vis_flag=vis_flag, feature_vector_flag=feature_vector_flag))\n hog_features = np.ravel(hog_features)\n else:\n # Apply hog features extraction over the given channel in the image\n hog_features = get_hog_features(img=converted_image[:,:,hog_channel], orient=orient, \n pix_per_cell=pix_per_cell, cell_per_block=cell_per_block, \n transform_sqrt_flag=transform_sqrt_flag, \n vis_flag=vis_flag, feature_vector_flag=feature_vector_flag)\n\n # Append all of the features in one list\n if extract_spatial_flag == True:\n image_features.append(bin_features)\n if extract_color_hist_flag == True:\n image_features.append(col_features)\n if extract_hog_flag == True:\n image_features.append(hog_features)\n\n # Appned all of the features in (features) list after concatenate all of the previous features\n features.append(np.concatenate(image_features))\n\n # return all of these features in a feature vector\n return features\n\n\n# # 4- HeatMap, apply threhold, draw labeled bboxes Functions\n\n# ## 4a) HeatMap Function\n\n# In[58]:\n\n\n# add heatmap using the bounding boxes list given as an input to the function\ndef 
add_heat(heatmap, bbox_list):\n # note that heamap input here is zeros of the shape of the image or one channel only in the image\n # iterate through the bboxlist\n for bbox in bbox_list:\n heatmap[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]] += 1\n \n # return the heatmap\n return heatmap\n\n\n# ## 4b) Apply threshold on the heatmap created\n\n# In[59]:\n\n\n# apply threshold value over the heatmap created\ndef apply_threshold(heatmap, threshold):\n # values below the given threshold will be equal to 0\n heatmap[heatmap <= threshold] = 0\n \n return heatmap\n\n\n# ## 4c) draw labeled bboxes \n\n# In[60]:\n\n\n# draw the bounding box rectangle on the image given the labels \ndef draw_labels_bboxes(img, labels):\n # note that labels will be come from scipy.ndimage.measurements\n #iterate through the whole detected cars\n for car_number in range(1, labels[1]+1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)\n # Return the image\n return img\n\n\n# # 5- Build Classifier (Normalize, Train, Test, Accuracy calculation)\n\n# ## 5a) Extract features of both Cars, NotCars data\n\n# In[17]:\n\n\n# Read all images paths for cars and notcars\nCars_images = glob.glob(\"Training_Data/vehicles/*/*.png\")\nnoCars_images = glob.glob(\"Training_Data/non-vehicles/*/*.png\") \n\n# save images in these lists\ncars = []\nnotcars = []\n\n\nfor car_image in Cars_images:\n cars.append(car_image)\n \nfor notcar_image in noCars_images:\n notcars.append(notcar_image)\n\n# sample_size = 1000\n# cars = cars[0:sample_size]\n# notcars = notcars[0:sample_size]\n\n# parameters need tweak \ncolor_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\norient = 8 # HOG orientations\npix_per_cell = 8 # HOG pixels per cell\ncell_per_block = 2 # HOG cells per block\nhog_channel = \"ALL\" # Can be 0, 1, 2, or \"ALL\"\nspatial_size = (16, 16) # Spatial binning dimensions\nspatial_range = (0, 200) # Spatial range\nhist_bins = 32 # Number of histogram bins\nspatial_transform = True # Spatial Transform sqrt\nspatial_feat = True # Spatial features on or off\nvisualize = False # Visualization flag\nfeature_vector = True # Feature Vector flag\nhist_feat = True # Histogram features on or off\nhog_feat = True # HOG features on or off\ny_start_stop = [400, 680] # Min and max in y to search in slide_window()\n\ncar_features = extract_features(imgs=cars, cspace=color_space, spatial_size=spatial_size, hist_nbins=hist_bins, \n hist_range=spatial_range, orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, transform_sqrt_flag=spatial_transform, \n vis_flag=visualize, feature_vector_flag=feature_vector, hog_channel=hog_channel, \n extract_spatial_flag=spatial_feat, extract_color_hist_flag=hist_feat, \n extract_hog_flag=hog_feat, cv2read=True)\n\nNotcar_features = extract_features(imgs=notcars, cspace=color_space, spatial_size=spatial_size, hist_nbins=hist_bins, \n hist_range=spatial_range, orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, transform_sqrt_flag=spatial_transform, \n vis_flag=visualize, feature_vector_flag=feature_vector, hog_channel=hog_channel, \n extract_spatial_flag=spatial_feat, 
extract_color_hist_flag=hist_feat, \n extract_hog_flag=hog_feat, cv2read=True)\n\n\n# ## 5b) Normalize, Labels, SVC-Classifier, Train, Accuracy Calculation\n\n# In[18]:\n\n\n# Combine features of cars and notcars together\nX = np.vstack((car_features, Notcar_features)).astype(np.float64)\n\n# Fit a per-column scaler\nX_scaler = StandardScaler().fit(X)\n\n# Apply the scaler to X\nscaled_X = X_scaler.transform(X)\n\n# Define the labels vector\ny = np.hstack((np.ones(len(car_features)), np.zeros(len(Notcar_features))))\n\n# Split up data into randomized training and test sets\nrand_state = np.random.randint(0, 100)\nX_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size=0.2, random_state=rand_state)\n\nprint('Using:', orient, 'orientations', pix_per_cell, 'pixels per cell and', cell_per_block, 'cells per block')\nprint('Feature vector length:', len(X_train[0]))\n\n# Use a linear SVC\nsvc = LinearSVC() # C=5.0, gamma='auto', kernel='rbf'\n#svc = SVC(C=5.0,kernel='rbf')\n\n# Check the training time for the SVC\nt = time.time()\nsvc.fit(X_train, y_train)\nt2 = time.time()\nprint(round(t2 - t, 2), 'Seconds to train SVC...')\n\n\n# Check the score of the SVC\nprint('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n# Check the prediction time for a single sample\nt = time.time()\n\n\n# ## 5c) Save the parameters needed after that in a pickle file\n\n# In[19]:\n\n#print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\ny_pred = svc.predict(X_test)\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\nprint(cm,'\\n')\nAcc = ((cm[0][0]+cm[1][1])/(cm[0][0]+cm[0][1]+cm[1][0]+cm[1][1]))\nprint('Acc is = ',Acc)\n\n# # 6- Sliding Windows method \n\n# In[48]:\n\n'''\n\n''' \n\n'''\n# Save the parameters in a pickle in order to be easily accessed\npickle_file = \"Classifier.p\"\nprint(\"Saving the data in a pickle file.....\")\n\nwith open(pickle_file, \"wb\") as p_file:\n pickle.dump({\"X_Scaler\": X_scaler,\n \"svc\":svc,\n \"cspace\": color_space,\n \"orient\": orient,\n \"pix_per_cell\": pix_per_cell,\n \"cell_per_block\": cell_per_block,\n \"hog_channel\":hog_channel,\n \"spatial_size\": spatial_size,\n \"hist_bins\":hist_bins,\n \"spatial_range\": spatial_range,\n \"spatial_transform\":spatial_transform,\n \"visualize\":visualize,\n \"feature_vector\": feature_vector,\n \"spatial_feat\": spatial_feat,\n \"hist_feat\": hist_feat,\n \"hog_feat\": hog_feat,\n \"y_start_stop\": y_start_stop }, p_file, pickle.HIGHEST_PROTOCOL)\n\n'''\n\n# Sliding window search to get the windows \ndef slide_window(img, x_start_stop, y_start_stop, xy_window, xy_overlap):\n # If x and/or y start/stop positions not defined, set to image size\n if x_start_stop[0] == None:\n x_start_stop[0] = 0\n if x_start_stop[1] == None:\n x_start_stop[1] = img.shape[1]\n if y_start_stop[0] == None:\n y_start_stop[0] = 0\n if y_start_stop[1] == None:\n y_start_stop[1] = img.shape[0]\n \n # Compute the span of the region to be searched \n xspan = x_start_stop[1] - x_start_stop[0]\n yspan = y_start_stop[1] - y_start_stop[0]\n \n # Compute the number of pixels per step in x/y\n nx_pix_per_step = np.int(xy_window[0]*(1 - xy_overlap[0]))\n ny_pix_per_step = np.int(xy_window[1]*(1 - xy_overlap[1]))\n \n # Compute the number of windows in x/y\n nx_buffer = np.int(xy_window[0]*(xy_overlap[0]))\n ny_buffer = np.int(xy_window[1]*(xy_overlap[1]))\n nx_windows = np.int((xspan-nx_buffer)/nx_pix_per_step) \n ny_windows = 
np.int((yspan-ny_buffer)/ny_pix_per_step) \n \n # Initialize a list to append window positions to\n window_list = []\n # Loop through finding x and y window positions\n for ys in range(ny_windows):\n for xs in range(nx_windows):\n # Calculate window position\n startx = xs * nx_pix_per_step + x_start_stop[0]\n endx = startx + xy_window[0]\n \n starty = ys * ny_pix_per_step + y_start_stop[0]\n endy = starty + xy_window[1]\n \n # Append window position to list\n window_list.append(((startx, starty), (endx, endy)))\n # Return the list of windows\n return window_list\n\n\n# In[49]:\n\n\n# test the Sliding window function\ntest_image = mpimg.imread(\"test_images/test1.jpg\")\n\nwindows = slide_window(test_image, x_start_stop=[0, 1300], y_start_stop=[400, 680], xy_window=(96, 96), xy_overlap=(0.5, 0.5))\ntest_result = draw_boxes(test_image, windows, (255,0,0), 8)\nplt.imshow(test_result)\nplt.show()\n\n\n# # 7- Search Windows\n\n# ## 7a) Search Window and prediction for the input image\n\n# In[53]:\n\n\n# This function will take an image as an input and list of windows to search in them \ndef search_windows(img, windows, classifier, scaler, cspace, spatial_size, hist_nbins, hist_range, \n orient, pix_per_cell, cell_per_block, transform_sqrt_flag, vis_flag, feature_vector_flag,\n hog_channel, extract_spatial_flag, extract_color_hist_flag, extract_hog_flag):\n \n \n # 1) Create an empty list to receive positive detection windows\n on_windows = []\n \n # 2) Iterate over all windows in the list\n for window in windows:\n # 3) Extract the test window from original image\n test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))\n \n # 4) Extract features for that window using single_img_features()\n features = extract_features_One_image(img=test_img, cspace=cspace, spatial_size=spatial_size, hist_nbins=hist_nbins, \n hist_range=hist_range, orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, transform_sqrt_flag=transform_sqrt_flag, \n vis_flag=vis_flag, feature_vector_flag=feature_vector_flag, hog_channel=hog_channel, \n extract_spatial_flag=extract_spatial_flag, \n extract_color_hist_flag=extract_color_hist_flag, extract_hog_flag=extract_hog_flag)\n \n #print(features.min())\n #print(features.max())\n # 5) Scale extracted features to be fed to classifier\n test_features = scaler.transform(np.array(features).reshape(1, -1))\n \n # 6) Predict using your classifier\n prediction = classifier.predict(test_features)\n #print(\"pred: \", prediction)\n \n # 7) If positive (prediction == 1) then save the window\n if prediction == 1:\n on_windows.append(window)\n # 8) Return windows for positive detections\n return on_windows\n\n\n# ## 7b) Load the pickle that conatins the needed parameters\n\n# In[23]:\n\n\n# read the data saved previously in the pickle file\npickle_file_name = \"Classifier.p\"\n\nwith open(pickle_file_name, \"rb\") as f:\n pickle_data = pickle.load(f)\n \n# # X_scaler\n# param_X_scaler = pickle_data[\"X_Scaler\"]\n# param_svc = pickle_data[\"svc\"]\n# param_color_space = pickle_data[\"cspace\"]\n# param_orient = pickle_data[\"orient\"]\n# param_pix_per_cell = pickle_data[\"pix_per_cell\"]\n# param_cell_per_block = pickle_data[\"cell_per_block\"]\n# param_hog_channel = pickle_data[\"hog_channel\"]\n# param_spatial_size = pickle_data[\"spatial_size\"]\n# param_hist_bins = pickle_data[\"hist_bins\"]\n# param_spatial_transform = pickle_data[\"spatial_transform\"]\n# param_visualize = pickle_data[\"visualize\"]\n# param_feature_vector = 
pickle_data[\"feature_vector\"]\n# param_hist_feat = pickle_data[\"hist_feat\"]\n# param_hog_feat = pickle_data[\"hog_feat\"]\n# param_y_start_stop = pickle_data[\"y_start_stop\"]\n \n\nprint(\"Saved parameters is loaded..\")\n \n\n\n# # 8) Apply the full pipeline\n\n# ## 8a) Find Cars in the image based on the loaded data\n\n# In[62]:\n\n\n# This function car find cars in an image based on the saved data in the pickle file\ndef find_cars(image):\n \n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n plt.imshow(image)\n plt.show()\n # Extract parameters from the pickle file\n pickle_file_name = \"Classifier.p\"\n with open(pickle_file_name, \"rb\") as f:\n parameters = pickle.load(f)\n \n # copy from the image passed to the function\n draw_image = np.copy(image)\n y_start_stop = parameters[\"y_start_stop\"]\n\n # get windows from slide_window function with multiscale sliding window\n windows = slide_window(image, x_start_stop=[None, 1300], y_start_stop=[400,600],\n xy_window=(64, 64), xy_overlap=(0.9, 0.9))\n \n windows += slide_window(image, x_start_stop=[None, 1300], y_start_stop=y_start_stop,\n xy_window=(96, 96), xy_overlap=(0.9, 0.9))\n\n windows += slide_window(image, x_start_stop=[None, 1300], y_start_stop=y_start_stop,\n xy_window=(128, 128), xy_overlap=(0.9, 0.9))\n \n\n # search in the windows we have to select the best windows \n hot_windows = search_windows(img=image, windows=windows, classifier=parameters[\"svc\"],\n scaler=parameters[\"X_Scaler\"], cspace=parameters[\"cspace\"], \n spatial_size=parameters[\"spatial_size\"], hist_nbins=parameters[\"hist_bins\"],\n hist_range=parameters[\"spatial_range\"],orient=parameters[\"orient\"],\n pix_per_cell=parameters[\"pix_per_cell\"],\n cell_per_block=parameters[\"cell_per_block\"],\n transform_sqrt_flag=parameters[\"spatial_transform\"],\n vis_flag=parameters[\"visualize\"],\n feature_vector_flag=parameters[\"feature_vector\"], \n hog_channel=parameters[\"hog_channel\"],\n extract_spatial_flag=parameters[\"spatial_feat\"], \n extract_color_hist_flag=parameters[\"hist_feat\"],\n extract_hog_flag=parameters[\"hog_feat\"])\n \n \n # draw boxes over the given image\n window_image = draw_boxes(draw_image, hot_windows, color=(255, 0, 0), thick=8)\n \n plt.imshow(window_image)\n plt.show()\n \n # Create a zeros_like the image given in order to be passed over the function of heatmap\n heat = np.zeros_like(image[:, :, 0]).astype(np.float)\n\n # Add heat to each box in box list\n heat = add_heat(heat, hot_windows)\n \n #print(heat.max())\n\n # Apply threshold to help remove false positives\n heat = apply_threshold(heat, 15)\n\n # Visualize the heatmap when displaying\n heatmap = np.clip(heat, 0, 255)\n \n plt.imshow(heatmap)\n plt.show()\n\n # Find final boxes from heatmap using label function\n labels = label(heatmap)\n draw_image = draw_labels_bboxes(np.copy(draw_image), labels)\n\n # plt.close(\"all\")\n #\n# fig = plt.figure()\n# plt.figure(figsize=(20,10))\n# #\n# plt.subplot(133)\n# plt.imshow(draw_image)\n# plt.title('Car Positions')\n# plt.subplot(132)\n# plt.imshow(heatmap, cmap='hot')\n# plt.title('Heat Map')\n# plt.subplot(131)\n# plt.imshow(window_image)\n# plt.title('Windows')\n # # fig.tight_layout()\n # # mng = plt.get_current_fig_manager()\n #\n # # mng.full_screen_toggle()\n # # plt.pause(0.05)\n #\n # # plt.imshow(window_img)\n #plt.show()\n \n draw_image = cv2.cvtColor(draw_image, cv2.COLOR_RGB2BGR)\n \n #plt.imshow(draw_image)\n #plt.show()\n \n return draw_image\n\n# ## 8b) Test the pieline using the test images in the 
folder we have\n\n# In[25]:\n\n\n#--------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------\n#--------------------------------------------------------------------------------\n'''\n\n#doing all the relevant imports\n#import matplotlib.pyplot as plt\n#import matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n\n\ncap = cv2.VideoCapture('project_video.mp4')\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\n# Very important output video size should equal input video size (960,540)\nout = cv2.VideoWriter('output.mp4',fourcc, 20.0, (1280,720))\n\nwhile(cap.isOpened()):\n ret ,frame = cap.read()\n if ret == True:\n \n test_result_image_from_pipeline = find_cars(frame,parameters)\n result=cv2.cvtColor(test_result_image_from_pipeline,cv2.COLOR_BGR2RGB)\n\n k = cv2.waitKey(60) & 0xff\n if k == 27:\n break\n else:\n # cv2.imshow('result',result)\n #cv2.imshow('frame',frame)\n out.write(result)\n else:\n print('Error Vedio')\n break\ncv2.destroyAllWindows()\ncap.release()\nout.release()\n'''\n\n#group_of_images = glob.glob(\"test_images/*.jpg\")\n\n#for image in group_of_images:\n# read_test_image_to_pipeline = cv2.imread(image)\n# print(pickle_data)\n# test_result_image_from_pipeline = find_cars(read_test_image_to_pipeline)\n# plt.imshow(cv2.cvtColor(test_result_image_from_pipeline, cv2.COLOR_BGR2RGB))\n# plt.show()\n\nread_test_image_to_pipeline = cv2.imread(\"test_images/test14.jpg\")\nprint(pickle_data)\ntest_result_image_from_pipeline = find_cars(read_test_image_to_pipeline)\nplt.imshow(cv2.cvtColor(test_result_image_from_pipeline, cv2.COLOR_BGR2RGB))\nplt.show()\n\n\n# # 9) Apply the Pipeline on the Project Video\n\n# In[26]:\n\n\n# # Extract frames from the test video\n\n# project_output_video = \"test_output_video.mp4\"\n# clip = VideoFileClip(\"test_video.mp4\")\n# output_video = clip.fl_image(find_cars) # NOTE: this function expects color images!!\n# get_ipython().magic('time output_video.write_videofile(project_output_video, audio=False)')\n\n\n# In[27]:\n\n\n# group_of_images = glob.glob(\"test_images/*.jpg\")\n\n# for image in group_of_images:\n# read_test_image_to_pipeline = cv2.imread(image)\n\n# print(pickle_data)\n# read_test_image_to_pipeline=cv2.cvtColor(read_test_image_to_pipeline, cv2.COLOR_RGB2BGR)\n# test_result_image_from_pipeline = find_cars(read_test_image_to_pipeline)\n\n# #plt.imshow(cv2.cvtColor(test_result_image_from_pipeline, cv2.COLOR_BGR2RGB))\n# plt.imshow(test_result_image_from_pipeline)\n# plt.show()\n\n\n# In[28]:\n\n\n# Extract frames from the Project video\n\n# project_output_video = \"project_output_video.mp4\"\n# clip = VideoFileClip(\"project_video.mp4\")\n# output_video = clip.fl_image(find_cars) # NOTE: this function expects color images!!\n# get_ipython().magic('time output_video.write_videofile(project_output_video, audio=False)')\n\n\n# In[29]:\n'''\ndef close_clip(vidya_clip):\n # noinspection PyBroadException\n try:\n vidya_clip.reader.close()\n del vidya_clip.reader\n if vidya_clip.audio is not None:\n vidya_clip.audio.reader.close_proc()\n del vidya_clip.audio\n del vidya_clip\n except Exception:\n # sys.exc_clear()\n pass\n \nproject_output_video = \"test_video_out.mp4\"\nclip = VideoFileClip(\"test_video.mp4\")\noutput_video = clip.fl_image(find_cars).subclip(0,5) # NOTE: this function expects color 
images!!\nclip.write_videofile(project_output_video)\nclip.write_videofile(project_output_video, audio=False)\nclip.close()\n'''\n\n# In[30]:\n\n'''\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\nimport imageio\nimageio.plugins.ffmpeg.download()\n'''\n\n# In[31]:\n\n# project_output_video = \"test.mp4\"\n#project_output_video = \"project_video_out.mp4\"\n#clip = VideoFileClip(\"project_video.mp4\")\n#output_video = clip.fl_image(find_cars) # NOTE: this function expects color images!!\n#clip.write_videofile(project_output_video)\n#clip.write_videofile(project_output_video, audio=False)\n#clip.close()\n\n# In[32]:\n\n'''\nproject_output_video = \"part1.mp4\"\nclip1 = VideoFileClip(\"project_video.mp4\")\noutput_video = clip1.fl_image(find_cars).subclip(0,5) # NOTE: this function expects color images!!\noutput_video.write_videofile(project_output_video, audio=False)\noutput_video.write_videofile(project_output_video, audio=False)\nclip1.close()\n'''\n# In[33]:\n'''\nproject_output_video_2 = \"Output.mp4\"\nclip2 = VideoFileClip(\"project_video.mp4\")\noutput_video_2 = clip2.fl_image(find_cars) # NOTE: this function expects color images!!\n#get_ipython().magic('time output_video_2.write_videofile(project_output_video_2, audio=False)')\n\n'''\n\n# # Extract images for the Report\n\n# In[46]:\n\n# Test extraction of hog features and visulaize\n#test_image = mpimg.imread(\"test_images/test1.jpg\")\n#test_image = cv2.imread(\"non-vehicles/Extras/extra2532.png\")\n\n# Note that the image should be 2D (grayscale) \n#test_gray = cv2.cvtColor(test_image, cv2.COLOR_RGB2GRAY)\n\n#pickle_file_name = \"Classifier.p\"\n\n#with open(pickle_file_name, \"rb\") as f:\n# parameters = pickle.load(f)\n\n#test_features, test_result_img = get_hog_features(test_gray, orient=parameters[\"orient\"], pix_per_cell=parameters[\"pix_per_cell\"], \n# cell_per_block=parameters['cell_per_block'], transform_sqrt_flag=True, \n# vis_flag=True, feature_vector_flag=True)\n\n#fig = plt.figure(figsize=(12,3))\n#plt.subplot(121)\n#plt.imshow(test_image, cmap='gray')\n#plt.title('Example')\n#plt.subplot(122)\n#plt.imshow(test_result_img, cmap='gray')\n#plt.title('HOG Visualization')\n#plt.show()\n\n\n# In[63]:\n\n'''\nextract_images = \"extract_images.mp4\"\nclip_extract_images = VideoFileClip(\"project_video.mp4\").reader.close()\n\nextract_sequence_images = clip_extract_images.fl_image(find_cars).subclip(30,33) # NOTE: this function expects color images!!\nget_ipython().magic('time extract_sequence_images.write_videofile(extract_images, audio=False)')\n\n#/////////////////////////////////////////////////////////////////////////////////////\n'''\n'''\nproject_video_output = './project_video_output.mp4'\nclip1 = VideoFileClip(\"./project_video.mp4\")\nlane_clip = clip1.fl_image(find_cars) #NOTE: this function expects color images!!\nlane_clip.write_videofile(project_video_output, audio=False)\n'''\nproject_video_output = './test_video_output.mp4'\nclip1 = VideoFileClip(\"./test_video.mp4\")\nlane_clip = clip1.fl_image(find_cars) #NOTE: this function expects color images!!\nlane_clip.write_videofile(project_video_output, audio=False)\n","sub_path":"Object_Detection Project.py","file_name":"Object_Detection Project.py","file_ext":"py","file_size_in_byte":35147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"119261001","text":"import pandas as pd\nimport numpy as np\nfrom coastlib.coreutils.design_tools import runup, overtopping, hudson\n\n\n# Input 
design parameters\nHs = 4 # Significant wave height [ft]\nTp = 8 # Wave period [s]\nrunup_limit = 2 # Runup limit [ft]\novertopping_limit = 0.1\n\n\ndef volume(slp, crel, toel, D):\n    # Placeholder section volume per unit length (an assumption, not a coastlib\n    # function): trapezoidal slope of height (crest - toe) with an armor layer 2*D.\n    height = crel - toel\n    return height * height / (2 * slp) + 2 * D * height\n\n\n# Set revetment parameter ranges\nslopes = np.arange(start=0.5, stop=0.25, step=-0.01)\ncrest_elevations = np.arange(start=10, stop=20, step=0.1)\ntoe_elevations = np.arange(start=-10, stop=0, step=0.1)\n\n\n# Convert parameters to metric units\ncrest_elevations *= 0.3048\ntoe_elevations *= 0.3048\n\n\n# Find optimal combination\nsummary_m = pd.DataFrame(\n    data=[\n        np.nan,\n        np.nan,\n        np.nan,\n        np.nan,\n        np.inf\n    ],\n    index=[\n        'Slope',\n        'Crest Elevation [m]',\n        'Toe elevation [m]',\n        'D50 [m]',\n        'Volume [m^3]'\n    ],\n    columns=['Value']\n)\nfor slp in slopes:\n    for crel in crest_elevations:\n        for toel in toe_elevations:\n            D = hudson(Hs=Hs, alfa=slp, rock_density=1)\n            if (\n                runup(Hm0=Hs, Tp=Tp, slp=slp) < runup_limit and\n                overtopping(Hm0=Hs, Rc=crel) < overtopping_limit and  # Rc: crest freeboard (assumed equal to crest elevation here)\n                volume(slp, crel, toel, D) < summary_m['Value']['Volume [m^3]']\n            ):\n                summary_m['Value']['Slope'] = slp\n                summary_m['Value']['Crest Elevation [m]'] = crel\n                summary_m['Value']['Toe elevation [m]'] = toel\n                summary_m['Value']['D50 [m]'] = D\n                summary_m['Value']['Volume [m^3]'] = volume(slp, crel, toel, D)\n\n\n# Convert summary to customary units\nsummary = pd.DataFrame(\n    data=[\n        summary_m['Value']['Slope'],\n        summary_m['Value']['Crest Elevation [m]'] / 0.3048,\n        summary_m['Value']['Toe elevation [m]'] / 0.3048,\n        summary_m['Value']['D50 [m]'] / 0.3048,\n        summary_m['Value']['Volume [m^3]'] / ((0.3048 * 3) ** 3),\n    ],\n    index=[\n        'Slope',\n        'Crest Elevation [ft]',\n        'Toe elevation [ft]',\n        'D50 [ft]',\n        'Volume [yd^3]'\n    ],\n    columns=['Value']\n)\n\n\n# Return optimal revetment parameters\nif np.isinf(summary_m['Value']['Volume [m^3]']):\n    print('No solution exists for the input provided')\nelse:\n    print(summary)\n    # plot optimal revetment over input profile\n","sub_path":"Hempstead/revetment_design.py","file_name":"revetment_design.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"566203147","text":"import paddle.fluid as fluid\n\n\ndef cnn_net(dict_dim=100,\n            max_len=10,\n            cnn_dim=32,\n            cnn_filter_size=128,\n            emb_dim=8,\n            hid_dim=128,\n            class_dim=2,\n            is_prediction=False):\n    \"\"\"\n    Conv net\n    \"\"\"\n    data = fluid.data(name=\"input\", shape=[None, max_len], dtype='int64')\n    label = fluid.data(name=\"label\", shape=[None, 1], dtype='int64')\n    seq_len = fluid.data(name=\"seq_len\", shape=[None], dtype='int64')\n    # embedding layer\n    emb = fluid.embedding(input=data, size=[dict_dim, emb_dim])\n    emb = fluid.layers.sequence_unpad(emb, length=seq_len)\n    # convolution layer\n    conv = fluid.nets.sequence_conv_pool(\n        input=emb,\n        num_filters=cnn_dim,\n        filter_size=cnn_filter_size,\n        act=\"tanh\",\n        pool_type=\"max\")\n\n    # fully connected layer\n    fc_1 = fluid.layers.fc(input=[fc_1], size=hid_dim) if False else fluid.layers.fc(input=[conv], size=hid_dim)\n    # softmax layer\n    prediction = fluid.layers.fc(input=[fc_1], size=class_dim, act=\"softmax\")\n    #if is_prediction:\n    #    return prediction\n    cost = fluid.layers.cross_entropy(input=prediction, label=label)\n    avg_cost = fluid.layers.mean(x=cost)\n    acc = fluid.layers.accuracy(input=prediction, label=label)\n\n    return avg_cost\n","sub_path":"PaddleRec/text_classification/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"233771355","text":"import tensorflow as tf\ntf.set_random_seed(66)\n\nx_train =[1,2,3]\ny_train = [3,5,7]\n\nW=tf.Variable(tf.random_normal([1]),name='weight') 
# draws one random value from a normal distribution\nb=tf.Variable(tf.random_normal([1]),name='bias')\n\nhypothesis = x_train * W +b\n\ncost = tf.reduce_mean(tf.square(hypothesis - y_train)) # mean of the squared (prediction - actual) -> cost (loss) => same as loss=mse!\n\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01) # AdamOptimizer tends to perform best\n\ntrain = optimizer.minimize(cost)\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nprint(sess.run(W),sess.run(b)) #[0.06524777] [1.4264158]\n\nfor step in range(3): # step -> epoch\n    sess.run(train)\n    if step % 1 ==0:\n        print(\"step: \",step,'sess.run(cost): ',sess.run(cost),'sess.run(W): ',sess.run(W),'sess.run(b): ',sess.run(b)) # converges to weight 2, bias 1\n    # sess.run(train)\n    # if step % 2 ==0:\n    #     print(\"step: \",step,'sess.run(cost): ',sess.run(cost),'sess.run(W): ',sess.run(W),'sess.run(b): ',sess.run(b)) # converges to weight 2, bias 1\n\n\n# gradient descent finds the point where the optimizer has minimized the cost\n# i.e. it finds where the loss is smallest\n# 1. compute the MSE between x*W + b and y_train\n# 2. the optimizer is what minimizes the computed cost\n# 3. one optimizer pass over train steps 1,2,3 is 1 epoch ","sub_path":"tf114/tf05_linear.py","file_name":"tf05_linear.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
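Since the toy data in the TensorFlow script above is exactly linear (y = 2x + 1), the minimum of the MSE cost is known in closed form; a quick NumPy least-squares check, independent of TensorFlow, confirms the values the gradient-descent loop converges toward.

```python
# Closed-form least-squares fit for the x=[1,2,3], y=[3,5,7] data used above.
import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.array([3.0, 5.0, 7.0])
A = np.stack([x, np.ones_like(x)], axis=1)      # design matrix [x, 1]
(w, b), *_ = np.linalg.lstsq(A, y, rcond=None)
print(w, b)  # -> 2.0 1.0, the point the TF loop approaches
```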
+{"seq_id":"497208801","text":"\"\"\"\nSupport for www service installation and management.\n\"\"\"\n\nimport os\n\nfrom fabric.api import run, settings, env, put, sudo, local\n\nfrom os import path\nfrom twisted.python.util import sibpath\n\nfrom braid import authbind, archive\nfrom braid.twisted import service\nfrom braid.debian import equivs\nfrom braid.tasks import addTasks\nfrom braid.utils import confirm\n\nfrom braid import config\n__all__ = ['config']\n\n\nclass TwistedWeb(service.Service):\n    def task_install(self):\n        \"\"\"\n        Install t-web, a Twisted Web based server.\n        \"\"\"\n        # Bootstrap a new service environment\n        self.bootstrap()\n\n        # Add to www-data group. Mailman depends on this.\n        sudo('/usr/sbin/usermod -a -g www-data -G t-web {}'.format(self.serviceUser))\n\n        # Setup authbind\n        authbind.allow(self.serviceUser, 80)\n        authbind.allow(self.serviceUser, 443)\n\n        # Install httpd equiv, so apt doesn't try to install apache ever\n        equivs.installEquiv(self.serviceName, 'httpd')\n\n        with settings(user=self.serviceUser):\n            run('/bin/ln -nsf {}/start {}/start'.format(self.configDir, self.binDir))\n            run('/bin/ln -nsf {}/start-maintenance {}/start-maintenance'.format(self.configDir, self.binDir))\n            self.update()\n            # cron.install(self.serviceUser, '{}/crontab'.format(self.configDir))\n\n        run('/bin/mkdir -p ~/data')\n        if env.get('installPrivateData'):\n            self.task_installTLSKeys()\n            self.task_makeProductionServer()\n        else:\n            self.task_makeStagingServer()\n\n\n    def task_makeProductionServer(self):\n        \"\"\"\n        Make the target server a production server.\n        \"\"\"\n        run('/usr/bin/touch {}/production'.format(self.configDir))\n\n\n    def task_makeStagingServer(self):\n        \"\"\"\n        Make the target server a staging server.\n        \"\"\"\n        run('/bin/rm -f {}/production'.format(self.configDir))\n\n\n    def task_makeTestTLSKeys(self):\n        \"\"\"\n        Make some test TLS certs.\n        \"\"\"\n        local(\"\"\"\n            openssl req -config {config} -batch -x509 -sha256 -nodes -days 365 -newkey rsa:2048 \\\\\n                -keyout {key} -out {cert}\n        \"\"\".strip().format(\n            key=sibpath(__file__, 'TEST.key'),\n            cert=sibpath(__file__, 'twistedmatrix.com.crt'),\n            config=sibpath(__file__, 'openssl.cnf')))\n        local(\"cat {key} {cert} > {pem}\".format(\n            key=sibpath(__file__, 'TEST.key'),\n            cert=sibpath(__file__, 'twistedmatrix.com.crt'),\n            pem=sibpath(__file__, 'www.twistedmatrix.com.pem')))\n\n\n    def task_installTLSKeys(self):\n        \"\"\"\n        Install TLS keys.\n        \"\"\"\n        with settings(user=self.serviceUser):\n            run('mkdir -p ~/ssl')\n            for cert in ['www.twistedmatrix.com.pem',\n                         'buildbot.twistedmatrix.com.pem']:\n                fullpath = sibpath(__file__, cert)\n                if path.exists(fullpath):\n                    put(fullpath, '~/ssl/' + cert, mode=0o600)\n            run('ln -s ~/ssl/www.twistedmatrix.com.pem '\n                '~/ssl/twistedmatrix.com.pem')\n            run('ln -s ~/ssl/www.twistedmatrix.com.pem ~/ssl/DEFAULT.pem')\n\n\n    def task_updateSoftware(self):\n        \"\"\"\n        Update just the PyPy and Twisted versions.\n        \"\"\"\n        self.task_stop()\n        self.bootstrap()\n        self.venv.install_twisted()\n        self.task_start()\n\n\n    def update(self):\n        \"\"\"\n        Update config.\n        \"\"\"\n        self.venv.install_twisted()\n\n        with settings(user=self.serviceUser):\n            run('mkdir -p ' + self.configDir)\n            put(os.path.dirname(__file__) + '/*', self.configDir,\n                mirror_local_mode=True)\n\n\n    def task_update(self):\n        \"\"\"\n        Update config and restart.\n        \"\"\"\n        self.update()\n        self.task_restart()\n\n\n    def task_updateData(self):\n        \"\"\"\n        Update config.\n        \"\"\"\n        self.update()\n\n\n    def task_dump(self, dump):\n        \"\"\"\n        Dump non-versioned resources.\n        \"\"\"\n        with settings(user=self.serviceUser):\n            archive.dump({\n                'data': 'data',\n            }, dump)\n\n\n    def task_restore(self, dump):\n        \"\"\"\n        Restore non-versioned resources.\n        \"\"\"\n        msg = 'All non-versioned web resources will be replaced with the backup.'\n        if confirm(msg):\n            with settings(user=self.serviceUser):\n                archive.restore({\n                    'data': 'data',\n                }, dump)\n\n    def task_startMaintenanceSite(self):\n        \"\"\"\n        Start maintenance site.\n        \"\"\"\n        with settings(user=self.serviceUser):\n            run('{}/start-maintenance'.format(self.binDir))\n\n
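To make the directory layout concrete before `task_uploadRelease` below, here is the mapping it builds for a hypothetical release string '19.2.0' (the version number is illustrative only, not taken from the source).

```python
# Path layout produced by task_uploadRelease below, for a hypothetical
# release '19.2.0' (illustrative version number).
release = '19.2.0'
apiVersion = '.'.join(release.split('.')[:2])   # '19.2'
for ext in ['.tar.bz2', '-cp27-cp27m-win_amd64.whl']:
    tarball = 'Twisted-{}{}'.format(release, ext)
    print('data/releases/Twisted/{}/{}'.format(apiVersion, tarball))
# data/releases/Twisted/19.2/Twisted-19.2.0.tar.bz2
# data/releases/Twisted/19.2/Twisted-19.2.0-cp27-cp27m-win_amd64.whl
```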
\n    def task_uploadRelease(self, release, releasesTarball):\n        \"\"\"\n        Upload a release.\n\n        It expects a tarball containing the following files:\n         - Twisted-<release>.tar.bz2\n         - Twisted-<release>.<ext> for all source/windows installers\n         - twisted-<release>-<md5sums|shasums>.txt for md5 and sha512\n         - doc - for narrative documentation\n         - api - for api documents\n\n        @param release: Release version.\n        @param releasesTarball: Tarball with release tarballs and documentation\n        \"\"\"\n        apiVersion = '.'.join(release.split('.')[:2])\n        distPaths = {}\n        for ext in ['.tar.bz2', '-cp27-cp27m-win_amd64.whl']:\n            tarball = 'Twisted-{}{}'.format(release, ext)\n            distPaths[tarball] = 'data/releases/Twisted/{}/{}'.format(apiVersion, tarball)\n\n        distPaths['doc'] = 'data/documentation/{}'.format(release)\n        distPaths['api'] = 'data/documentation/{}/api'.format(release)\n        for hash in ['md5sums', 'shasums']:\n            hashFile = 'twisted-{}-{}.txt'.format(release, hash)\n            distPaths[hashFile] = 'data/releases/{}'.format(hashFile)\n\n        directories = [path.dirname(file) for file in distPaths.values()]\n\n        with settings(user=self.serviceUser):\n            run('/bin/mkdir -p {}'.format(' '.join(set(directories))))\n            archive.restore(distPaths, releasesTarball)\n\n\n    def task_updateCurrentDocumentation(self, release):\n        \"\"\"\n        Update the current link for documentation\n        \"\"\"\n        with settings(user=self.serviceUser):\n            run('/bin/ln -nsf {} data/documentation/current'.format(release))\n\n\n\naddTasks(globals(), TwistedWeb('t-web').getTasks())\n","sub_path":"services/t-web/fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":6509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"133233967","text":"from secml.array import CArray\nfrom secml.figure import CFigure\n\nX = CArray.linspace(-3.14, 3.14, 256, endpoint=True)\nC, S = X.cos(), X.sin()\n\nfig = CFigure(fontsize=14)\n\nfig.sp.plot(X, C, color='red', alpha=0.5, linewidth=1.0, linestyle='-')\nfig.sp.plot(X, S)\n\nfig.sp.xlim(-3, 3)\n\nfig.show()\n","sub_path":"docs/source/pyplots/xlim.py","file_name":"xlim.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"592511199","text":"r\"\"\"\nThis module implements differential operators on spherical grids \n\n.. autosummary::\n    :nosignatures:\n\n    make_laplace\n    make_gradient\n    make_divergence\n    make_vector_gradient\n    make_tensor_divergence\n    \n    \n.. 
codeauthor:: David Zwicker \n\"\"\"\n\nfrom typing import Tuple\n\nimport numpy as np\n\nfrom ...tools.docstrings import fill_in_docstring\nfrom ...tools.numba import jit\nfrom ...tools.typing import OperatorType\nfrom ..boundaries import Boundaries\nfrom ..spherical import SphericalSymGrid\nfrom .common import make_general_poisson_solver\n\n\n@SphericalSymGrid.register_operator(\"laplace\", rank_in=0, rank_out=0)\n@fill_in_docstring\ndef make_laplace(grid: SphericalSymGrid, conservative: bool = True) -> OperatorType:\n \"\"\"make a discretized laplace operator for a spherical grid\n\n {DESCR_SPHERICAL_GRID}\n\n Args:\n grid (:class:`~pde.grids.spherical.SphericalSymGrid`):\n The polar grid for which this operator will be defined\n conservative (bool):\n Flag indicating whether the laplace operator should be conservative (which\n results in slightly slower computations).\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(grid, SphericalSymGrid)\n\n # calculate preliminary quantities\n dim_r = grid.shape[0]\n dr = grid.discretization[0]\n rs = grid.axes_coords[0]\n r_min, r_max = grid.axes_bounds[0]\n\n if conservative:\n # create a conservative spherical laplace operator\n rl = rs - dr / 2 # inner radii of spherical shells\n rh = rs + dr / 2 # outer radii\n assert np.isclose(rl[0], r_min) and np.isclose(rh[-1], r_max)\n volumes = (rh ** 3 - rl ** 3) / 3 # volume of the spherical shells\n factor_l = (rs - 0.5 * dr) ** 2 / (dr * volumes)\n factor_h = (rs + 0.5 * dr) ** 2 / (dr * volumes)\n\n @jit\n def laplace(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply laplace operator to array `arr`\"\"\"\n for i in range(1, dim_r + 1): # iterate inner radial points\n out[i - 1] = factor_h[i - 1] * (arr[i + 1] - arr[i])\n out[i - 1] -= factor_l[i - 1] * (arr[i] - arr[i - 1])\n\n else: # create an operator that is not conservative\n dr2 = 1 / dr ** 2\n\n @jit\n def laplace(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply laplace operator to array `arr`\"\"\"\n for i in range(1, dim_r + 1): # iterate inner radial points\n out[i - 1] = (arr[i + 1] - 2 * arr[i] + arr[i - 1]) * dr2\n out[i - 1] += (arr[i + 1] - arr[i - 1]) / (rs[i - 1] * dr)\n\n return laplace # type: ignore\n\n\n@SphericalSymGrid.register_operator(\"gradient\", rank_in=0, rank_out=1)\n@fill_in_docstring\ndef make_gradient(grid: SphericalSymGrid) -> OperatorType:\n \"\"\"make a discretized gradient operator for a spherical grid\n\n {DESCR_SPHERICAL_GRID}\n\n Args:\n grid (:class:`~pde.grids.spherical.SphericalSymGrid`):\n The polar grid for which this operator will be defined\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(grid, SphericalSymGrid)\n\n # calculate preliminary quantities\n dim_r = grid.shape[0]\n dr = grid.discretization[0]\n\n scale_r = 1 / (2 * dr)\n\n @jit\n def gradient(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply gradient operator to array `arr`\"\"\"\n for i in range(1, dim_r + 1): # iterate inner radial points\n out[0, i - 1] = (arr[i + 1] - arr[i - 1]) * scale_r\n out[1, i - 1] = out[2, i - 1] = 0 # no angular dependence by definition\n\n return gradient # type: ignore\n\n\n@SphericalSymGrid.register_operator(\"gradient_squared\", rank_in=0, rank_out=0)\n@fill_in_docstring\ndef make_gradient_squared(grid: SphericalSymGrid, central: bool = True) -> OperatorType:\n \"\"\"make a discretized gradient squared operator for a spherical grid\n\n {DESCR_SPHERICAL_GRID}\n\n Args:\n grid 
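The conservative branch of `make_laplace` above is a finite-volume scheme: it balances the fluxes through the shell faces at r_i ± Δr/2 against the shell volume, which is what the `factor_l` and `factor_h` arrays encode (the common factor 4π cancels between face area and volume). In LaTeX form:

```latex
% Finite-volume Laplacian on spherical shells, matching make_laplace above;
% r_l = r_i - \Delta r/2 and r_h = r_i + \Delta r/2 are the shell faces.
(\nabla^2 u)_i \approx \frac{1}{V_i\,\Delta r}
  \left[ r_h^2\,(u_{i+1} - u_i) - r_l^2\,(u_i - u_{i-1}) \right],
\qquad V_i = \frac{r_h^3 - r_l^3}{3}
```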
(:class:`~pde.grids.spherical.SphericalSymGrid`):\n The polar grid for which this operator will be defined\n central (bool):\n Whether a central difference approximation is used for the gradient\n operator. If this is False, the squared gradient is calculated as\n the mean of the squared values of the forward and backward\n derivatives.\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(grid, SphericalSymGrid)\n\n # calculate preliminary quantities\n dim_r = grid.shape[0]\n dr = grid.discretization[0]\n\n if central:\n # use central differences\n scale = 0.25 / dr ** 2\n\n @jit\n def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply squared gradient operator to array `arr`\"\"\"\n for i in range(1, dim_r + 1): # iterate inner radial points\n out[i - 1] = (arr[i + 1] - arr[i - 1]) ** 2 * scale\n\n else:\n # use forward and backward differences\n scale = 0.5 / dr ** 2\n\n @jit\n def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply squared gradient operator to array `arr`\"\"\"\n for i in range(1, dim_r + 1): # iterate inner radial points\n term = (arr[i + 1] - arr[i]) ** 2 + (arr[i] - arr[i - 1]) ** 2\n out[i - 1] = term * scale\n\n return gradient_squared # type: ignore\n\n\n@SphericalSymGrid.register_operator(\"divergence\", rank_in=1, rank_out=0)\n@fill_in_docstring\ndef make_divergence(grid: SphericalSymGrid, safe: bool = True) -> OperatorType:\n \"\"\"make a discretized divergence operator for a spherical grid\n\n {DESCR_SPHERICAL_GRID}\n\n Warning:\n This operator ignores the θ-component of the field when calculating the\n divergence. This is because the resulting scalar field could not be expressed\n on a :class:`~pde.grids.spherical_sym.SphericalSymGrid`.\n\n Args:\n grid (:class:`~pde.grids.spherical.SphericalSymGrid`):\n The polar grid for which this operator will be defined\n safe (bool):\n Add extra checks for the validity of the input\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(grid, SphericalSymGrid)\n\n # calculate preliminary quantities\n dim_r = grid.shape[0]\n dr = grid.discretization[0]\n rs = grid.axes_coords[0]\n\n scale_r = 1 / (2 * dr)\n fs = 2 / rs # factors that need to be multiplied below\n\n @jit\n def divergence(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply divergence operator to array `arr`\"\"\"\n if safe:\n assert np.all(arr[1, 1:-1] == 0)\n arr_r = arr[0, :]\n for i in range(1, dim_r + 1): # iterate radial points\n out[i - 1] = (arr_r[i + 1] - arr_r[i - 1]) * scale_r + fs[i - 1] * arr_r[i]\n\n return divergence # type: ignore\n\n\n@SphericalSymGrid.register_operator(\"vector_gradient\", rank_in=1, rank_out=2)\n@fill_in_docstring\ndef make_vector_gradient(grid: SphericalSymGrid, safe: bool = True) -> OperatorType:\n \"\"\"make a discretized vector gradient operator for a spherical grid\n\n Warning:\n This operator ignores the two angular components of the field when calculating\n the gradient. 
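`make_divergence` above keeps only the radial component of the field, so the discretization reduces to the radial part of the spherical divergence; in the code, `scale_r` is the 1/(2Δr) factor and `fs = 2/rs` the geometric term:

```latex
% Radial divergence used in make_divergence above (theta/phi parts dropped).
(\nabla \cdot \vec v)_i \approx \frac{v_{r,i+1} - v_{r,i-1}}{2\,\Delta r}
  + \frac{2\,v_{r,i}}{r_i}
```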
This is because the resulting field could not be expressed on a\n :class:`~pde.grids.spherical_sym.SphericalSymGrid`.\n\n {DESCR_SPHERICAL_GRID}\n\n Args:\n grid (:class:`~pde.grids.spherical.SphericalSymGrid`):\n The polar grid for which this operator will be defined\n safe (bool):\n Add extra checks for the validity of the input\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(grid, SphericalSymGrid)\n\n # calculate preliminary quantities\n dim_r = grid.shape[0]\n rs = grid.axes_coords[0]\n dr = grid.discretization[0]\n scale_r = 1 / (2 * dr)\n\n @jit\n def vector_gradient(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply vector gradient operator to array `arr`\"\"\"\n if safe:\n assert np.all(arr[1:, 1:-1] == 0)\n\n # assign aliases\n arr_r = arr[0, :]\n out_rr, out_rθ, out_rφ = out[0, 0, :], out[0, 1, :], out[0, 2, :]\n out_θr, out_θθ, out_θφ = out[1, 0, :], out[1, 1, :], out[1, 2, :]\n out_φr, out_φθ, out_φφ = out[2, 0, :], out[2, 1, :], out[2, 2, :]\n\n # set all components to zero that are not affected\n out_rθ[:] = 0\n out_rφ[:] = 0\n out_θr[:] = 0\n out_θφ[:] = 0\n out_φr[:] = 0\n out_φθ[:] = 0\n\n # inner radial boundary condition\n for i in range(1, dim_r + 1): # iterate radial points\n out_rr[i - 1] = (arr_r[i + 1] - arr_r[i - 1]) * scale_r\n out_θθ[i - 1] = arr_r[i] / rs[i - 1]\n out_φφ[i - 1] = arr_r[i] / rs[i - 1]\n\n return vector_gradient # type: ignore\n\n\n@SphericalSymGrid.register_operator(\"tensor_divergence\", rank_in=2, rank_out=1)\n@fill_in_docstring\ndef make_tensor_divergence(grid: SphericalSymGrid, safe: bool = True) -> OperatorType:\n \"\"\"make a discretized tensor divergence operator for a spherical grid\n\n {DESCR_SPHERICAL_GRID}\n\n Args:\n grid (:class:`~pde.grids.spherical.SphericalSymGrid`):\n The polar grid for which this operator will be defined\n safe (bool):\n Add extra checks for the validity of the input\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n assert isinstance(grid, SphericalSymGrid)\n\n # calculate preliminary quantities\n dim_r = grid.shape[0]\n rs = grid.axes_coords[0]\n dr = grid.discretization[0]\n scale_r = 1 / (2 * dr)\n\n @jit\n def tensor_divergence(arr: np.ndarray, out: np.ndarray) -> None:\n \"\"\"apply tensor divergence operator to array `arr`\"\"\"\n # assign aliases\n arr_rr, arr_rθ, arr_rφ = arr[0, 0, :], arr[0, 1, :], arr[0, 2, :]\n arr_θr, arr_θθ, arr_θφ = arr[1, 0, :], arr[1, 1, :], arr[1, 2, :]\n arr_φr, arr_φθ, arr_φφ = arr[2, 0, :], arr[2, 1, :], arr[2, 2, :]\n out_r, out_θ, out_φ = out[0, :], out[1, :], out[2, :]\n\n # check inputs\n if safe:\n assert np.all(arr_rθ[1:-1] == 0)\n assert np.all(arr_θθ[1:-1] == 0)\n assert np.all(arr_φφ[1:-1] == 0)\n assert np.all(arr_φθ[1:-1] == 0)\n assert np.all(arr_θφ[1:-1] == 0)\n\n # iterate over inner points\n for i in range(1, dim_r + 1):\n deriv_r = (arr_rr[i + 1] - arr_rr[i - 1]) * scale_r\n out_r[i - 1] = deriv_r + 2 * arr_rr[i] / rs[i - 1]\n\n deriv_r = (arr_θr[i + 1] - arr_θr[i - 1]) * scale_r\n out_θ[i - 1] = deriv_r + 2 * arr_θr[i] / rs[i - 1]\n\n deriv_r = (arr_φr[i + 1] - arr_φr[i - 1]) * scale_r\n out_φ[i - 1] = deriv_r + (2 * arr_φr[i] + arr_rφ[i]) / rs[i - 1]\n\n return tensor_divergence # type: ignore\n\n\n@fill_in_docstring\ndef _get_laplace_matrix(bcs: Boundaries) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"get sparse matrix for laplace operator on a polar grid\n\n Args:\n bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):\n {ARG_BOUNDARIES_INSTANCE}\n\n Returns:\n tuple: A 
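Because these functions are registered through `SphericalSymGrid.register_operator`, they are normally reached via py-pde's field interface rather than called directly. A small usage sketch, assuming a py-pde version that exports `SphericalSymGrid`; the radius range and boundary condition are arbitrary examples:

```python
# Sketch: applying the registered "laplace" operator via py-pde's field API
# (assumes a py-pde release that exports SphericalSymGrid).
from pde import ScalarField, SphericalSymGrid

grid = SphericalSymGrid(radius=(0.1, 1.0), shape=32)  # 32 shells, r in [0.1, 1]
field = ScalarField.from_expression(grid, "r**2")     # u(r) = r^2
result = field.laplace(bc={"derivative": 0})          # zero-flux boundaries
print(result.data[:3])  # interior values approach 6, since div(grad r^2) = 6
```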
sparse matrix and a sparse vector that can be used to evaluate\n the discretized laplacian\n \"\"\"\n from scipy import sparse\n\n assert isinstance(bcs.grid, SphericalSymGrid)\n bcs.check_value_rank(0)\n\n # calculate preliminary quantities\n dim_r = bcs.grid.shape[0]\n dr = bcs.grid.discretization[0]\n rs = bcs.grid.axes_coords[0]\n r_min, r_max = bcs.grid.axes_bounds[0]\n\n # create a conservative spherical laplace operator\n rl = r_min + dr * np.arange(dim_r) # inner radii of spherical shells\n rh = rl + dr # outer radii\n assert np.isclose(rh[-1], r_max)\n volumes = (rh ** 3 - rl ** 3) / 3 # volume of the spherical shells\n\n factor_l = (rs - 0.5 * dr) ** 2 / (dr * volumes)\n factor_h = (rs + 0.5 * dr) ** 2 / (dr * volumes)\n\n matrix = sparse.dok_matrix((dim_r, dim_r))\n vector = sparse.dok_matrix((dim_r, 1))\n\n for i in range(dim_r):\n matrix[i, i] += -factor_l[i] - factor_h[i]\n\n if i == 0:\n if r_min == 0:\n matrix[i, i + 1] = factor_l[i]\n else:\n const, entries = bcs[0].get_data((-1,))\n vector[i] += const * factor_l[i]\n for k, v in entries.items():\n matrix[i, k] += v * factor_l[i]\n\n else:\n matrix[i, i - 1] = factor_l[i]\n\n if i == dim_r - 1:\n const, entries = bcs[0].get_data((dim_r,))\n vector[i] += const * factor_h[i]\n for k, v in entries.items():\n matrix[i, k] += v * factor_h[i]\n\n else:\n matrix[i, i + 1] = factor_h[i]\n\n return matrix, vector\n\n\n@SphericalSymGrid.register_operator(\"poisson_solver\", rank_in=0, rank_out=0)\n@fill_in_docstring\ndef make_poisson_solver(bcs: Boundaries, method: str = \"auto\") -> OperatorType:\n \"\"\"make a operator that solves Poisson's equation\n\n {DESCR_POLAR_GRID}\n\n Args:\n bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):\n {ARG_BOUNDARIES_INSTANCE}\n\n Returns:\n A function that can be applied to an array of values\n \"\"\"\n matrix, vector = _get_laplace_matrix(bcs)\n return make_general_poisson_solver(matrix, vector, method)\n","sub_path":"pde/grids/operators/spherical_sym.py","file_name":"spherical_sym.py","file_ext":"py","file_size_in_byte":13575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"133233967","text":"from secml.array import CArray\nfrom secml.figure import CFigure\n\nX = CArray.linspace(-3.14, 3.14, 256, endpoint=True)\nC, S = X.cos(), X.sin()\n\nfig = CFigure(fontsize=14)\n\nfig.sp.plot(X, C, color='red', alpha=0.5, linewidth=1.0, linestyle='-')\nfig.sp.plot(X, S)\n\nfig.sp.xlim(-3, 3)\n\nfig.show()\n","sub_path":"docs/source/pyplots/xlim.py","file_name":"xlim.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"354867316","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response, render\nfrom django.template import RequestContext\n\nfrom eventus_backend.forms import RegistrationForm, LoginForm\nfrom .models import EventusUser\n\n\n# Main render for the login page\ndef eventus_login(request):\n if request.user.is_authenticated():\n ''' If facebook user, check that he is linked to an eventus profile.'''\n return HttpResponseRedirect('/home/')\n if request.method == 'POST':\n login_form = LoginForm(request.POST)\n if login_form.is_valid():\n username = login_form.cleaned_data['username']\n password = login_form.cleaned_data['password']\n eventus_user = authenticate(username=username, 
password=password)\n if eventus_user is not None:\n login(request, eventus_user)\n return HttpResponseRedirect('/home/')\n else:\n return render_to_response('eventus/login.html', {'form': login_form},\n context_instance=RequestContext(request))\n else:\n print('form not valid')\n return render_to_response('eventus/login.html', {'form': login_form},\n context_instance=RequestContext(request))\n else:\n ''' The user is not submitting any login form.'''\n login_form = LoginForm(request.POST)\n context = {'form': login_form}\n return render_to_response('eventus/login.html', context, context_instance=RequestContext(request))\n\n\n# Method managing login through facebook\ndef eventus_login_fb(request):\n print(\"BMR - running the view facebook login method\")\n if request.user.is_authenticated():\n ''' If facebook user, check that he is linked to an eventus profile.'''\n print(\"Loggging with facebook\")\n print(request.__dict__)\n return HttpResponseRedirect('/home/')\n else:\n return HttpResponseRedirect('/login/')\n\n\n# Main render for the registration page\ndef eventus_register(request):\n if request.user.is_authenticated():\n return HttpResponseRedirect('/home/')\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n user = User.objects.create_user(username=form.cleaned_data['username'], email=form.cleaned_data['email'],\n password=form.cleaned_data['password'])\n user.save()\n eventus_user = EventusUser(user=user, name=form.cleaned_data['name'],\n birthday=form.cleaned_data['birthday'])\n eventus_user.save()\n return HttpResponseRedirect('/home/')\n else:\n context = {'form': form}\n return render_to_response('eventus/register.html', context,\n context_instance=RequestContext(request))\n else:\n ''' user is not submitting the form, show them a blank registration form '''\n form = RegistrationForm()\n context = {'form': form}\n return render_to_response('eventus/register.html', context, context_instance=RequestContext(request))\n\n# Main renderer for the home page\ndef eventus_home(request):\n if request.user.is_authenticated():\n return render(request, 'eventus/home.html', {})\n else:\n return HttpResponseRedirect('/')\n\n\ndef eventus_logout(request):\n logout(request)\n return HttpResponseRedirect('/')\n","sub_path":"eventus_backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"247730855","text":"import pandas as pd\r\n\r\n#open data\r\ndf = pd.read_csv('Absenteeism_at_work.csv',\r\n header=0, sep=',',encoding='utf-8')\r\n\r\ndf.dropna(how=\"all\", inplace=True) # drops the empty line at file-end\r\n\r\n#### make a new date variable\r\ndef label_month_year(row):\r\n return(str(row['Month of absence'])+str(row['Year']))\r\ndf['month-year']=df.apply(lambda row: label_month_year (row),axis=1)\r\n\r\n#and sum all of the values for each month\r\ndf2=df.groupby('month-year').sum()[['Absenteeism time in hours']]\r\ndf2['Date']=pd.to_datetime(df2.ix[:,0].keys(), format='%m%Y')\r\ndf2=df2.sort_values(by='Date')\r\n\r\n# plots\r\nfrom matplotlib import pyplot\r\nfrom statsmodels.graphics.tsaplots import plot_acf\r\ntime1=pd.Series(df2['Absenteeism time in hours'].values, index=df2['Date'])\r\n\r\n# simple line plot\r\nplot=time1.plot() \r\n\r\n\r\n#autocorrelation plot - comment out the first plot and run the line below to get autocorrelation plot\r\n#plot2=plot_acf(time1) 
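The `autocor.py` script above does two things with the absenteeism data: a line plot of the monthly series and, when the first plot is commented out, its autocorrelation plot. The same pattern on synthetic monthly data, so it runs without the CSV file:

```python
# Minimal, self-contained version of the two plots above (synthetic data
# stands in for 'Absenteeism_at_work.csv').
import numpy as np
import pandas as pd
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf

idx = pd.date_range('2007-07-01', periods=36, freq='MS')  # month starts
ts = pd.Series(100 + 10 * np.sin(np.arange(36) / 6.0), index=idx)

ts.plot()                  # simple line plot
plot_acf(ts, lags=12)      # autocorrelation plot
print(ts.autocorr(lag=1))  # lag-1 autocorrelation as a single number
pyplot.show()
```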
\r\n\r\npyplot.show()\r\n","sub_path":"autocorrelations/autocor.py","file_name":"autocor.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"626505207","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import IncrementalPCA\nfrom beat_extraction_fcns import *\n\nclass The_Autoencoder:\n\n def __init__(self, chunks_list=None, group_list=None, encoded_dim=20,\n encoder_filename=None):\n self._encoded_dim = encoded_dim\n if chunks_list is not None:\n self._chunk_arr = np.array(chunks_list)\n self._all_groups = np.array(group_list)\n self.break_data()\n self.build_autoencoder()\n self.train_autoencoder()\n if encoder_filename is not None:\n with open(encoder_filename, 'rb') as handle:\n self._encoder = pickle.load(handle)\n handle.close()\n \n \n \n \n def expand_chunks(self, chunk_arr):\n break_list = []\n for chunk in chunk_arr:\n for i in range(chunk.shape[1]):\n this_piece = chunk[:,i]\n this_piece = (this_piece - np.min(this_piece))/((np.max(this_piece) - np.min(this_piece)) + 0.000001)\n break_list.append(this_piece)\n break_list = np.array(break_list)\n return break_list\n \n \n \n def break_data(self):\n self._X_train = self.expand_chunks(self._chunk_arr)\n self._X_train = extract_beats_from_many(self._X_train)\n \n \n def build_autoencoder(self):\n self._encoder = IncrementalPCA(n_components=self._encoded_dim, \n whiten=True)\n \n \n def train_autoencoder(self):\n self._encoder = self._encoder.fit(self._X_train)\n \n \n \n def encode(self, chunk_arr, num_beats_to_encode=10):\n chunk_encode_rows = []\n for chunk in chunk_arr:\n for col in range(chunk.shape[1]):\n peaks, beat_sigs = detect_peaks(chunk[:,col])\n if len(beat_sigs) > 0:\n beat_pca = self._encoder.transform(np.array(beat_sigs))\n flat_sig = np.zeros(num_beats_to_encode*self._encoded_dim)\n idx = 0\n count = 0\n if len(beat_sigs) > 0:\n for row in beat_pca:\n flat_sig[idx:idx+beat_pca.shape[1]] = row\n idx += beat_pca.shape[1]\n count += 1 \n if count == num_beats_to_encode:\n break\n if col == 0:\n flat_chunk = flat_sig\n else:\n flat_chunk = np.concatenate((flat_chunk, flat_sig))\n chunk_encode_rows.append(flat_chunk)\n chunk_encode_rows = np.array(chunk_encode_rows)\n return chunk_encode_rows\n \n \n def save(self, filename):\n with open(filename, 'wb') as handle:\n pickle.dump(self._encoder, handle)\n handle.close()\n \n ","sub_path":"Train_Autoencoder.py","file_name":"Train_Autoencoder.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"147254870","text":"from Data.DataAPI import DataAPI as DB\n\n\nclass Search_Manager:\n '''Searches database by term, catagory and file'''\n def __init__(self):\n self.DB = DB() #DataAPI\n self.current_data = None\n self.search_string = None\n self.search_field = None\n self.result = []\n\n\n def search(self, search_string, search_field, search_catagory):\n '''Recieves search specifications and delegates search, returning result after search'''\n self.search_field = search_field.lower()\n self.current_data = self.fetch(search_catagory)\n self.search_string = str(search_string.lower())\n self._search()\n \n results = self.result\n self.clear()\n\n return results\n\n\n def fetch(self, cat):\n '''returns database entries by catagory '''\n if cat == 'contract': return self.DB.read_all_contracts()\n if cat == 'customer': 
return self.DB.read_all_customers()\n if cat == 'destination': return self.DB.read_all_destinations()\n if cat == 'employee': return self.DB.read_all_employees()\n if cat == 'vehicle': return self.DB.read_all_vehicles()\n if cat == 'vehicle_type': return self.DB.read_all_vehicle_types()\n\n\n def _search(self):\n '''Searches loaded document for a match to loaded search string'''\n for el in self.current_data:\n obj = vars(el)\n if obj[self.search_field].lower() == self.search_string:\n self.result.append(el)\n \n\n def clear(self):\n '''Clears result'''\n self.result = []\n\n\n\n\n\n\n ","sub_path":"src/Logic/Search_Manager.py","file_name":"Search_Manager.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"7969968","text":"# Copyright (c) 2017 The Khronos Group Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division, print_function\n\nfrom collections import OrderedDict\n\nimport nnef\n\nfrom . import dog\nfrom . import utils\nfrom .nnef_dog_types import NnefDN, NnefOp, NnefGraph\n\n\ndef nnefgraph_to_nnefdog(nnefgraph, variables_dir=None):\n if variables_dir:\n variables_dir = utils.without_slash(variables_dir)\n\n properties, nnefops = nnefgraph\n dtype_by_nnefdn_name = properties[\"dtypes\"]\n shape_by_nnefdn_name = properties[\"shapes\"]\n graph_name = properties[\"graph\"].name\n graph_inputs = list(properties[\"graph\"].params.keys())\n graph_outputs = list(properties[\"graph\"].results.keys())\n\n ops = []\n dn_by_name = {}\n\n def transform_arg(arg, op):\n if isinstance(arg, nnef.Identifier):\n dn = dn_by_name.get(str(arg))\n if dn is None:\n utils.print_error(\"DataNode {} not defined before use\".format(str(arg)))\n return utils.REMOVE\n if op not in dn.consumers: # can be multiple times, eg: matmul(a, a)\n dn.consumers.append(op)\n return dn\n else:\n return arg\n\n def transform_result(result, op):\n\n if isinstance(result, nnef.Identifier):\n dn = NnefDN(str(result))\n dn.shape = list(shape_by_nnefdn_name[str(result)])\n dn.dtype = str(dtype_by_nnefdn_name.get(str(result)))\n dn.producer = op\n if dn.name in dn_by_name:\n utils.print_error(\"DataNode {} defined multiple times\".format(dn.name))\n return utils.REMOVE\n dn_by_name[dn.name] = dn\n return dn\n else:\n return result\n\n def transform_tensor_to_dn(tensor):\n dn = dn_by_name.get(str(tensor))\n if dn is None:\n utils.print_error(\"DataNode {} not defined before use\".format(str(tensor)))\n return utils.REMOVE\n return dn\n\n for prototype, values in nnefops:\n op = NnefOp(prototype.name)\n\n args = OrderedDict([(name, values[name]) for name in prototype.params.keys()])\n results = OrderedDict([(name, values[name]) for name in prototype.results.keys()])\n\n op.args = utils.recursive_transform(args, lambda arg: transform_arg(arg, op))\n op.results = utils.recursive_transform(results, lambda result: transform_result(result, op))\n ops.append(op)\n\n if variables_dir and op.name == 
\"variable\":\n            op.result_node.extra[dog.EXTRA_WEIGHTS] = utils.read_nnef_tensor(\n                \"{}/{}.dat\".format(variables_dir, op.args[\"label\"]))\n\n    input_dn_names = [dn.name for dn in utils.recursive_transform(graph_inputs, transform_tensor_to_dn)]\n    output_dn_names = [dn.name for dn in utils.recursive_transform(graph_outputs, transform_tensor_to_dn)]\n\n    return NnefGraph(graph_name, ops, dn_by_name, input_dn_names, output_dn_names)\n","sub_path":"converter/nnef_converters/common/nnef_to_dog.py","file_name":"nnef_to_dog.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"376471928","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport sys\nimport cv2\nimport numpy as np\nimport platform\nprint(platform.python_version())\nfrom cv2 import aruco\nfrom cv_bridge import CvBridge, CvBridgeError\n\nprint(cv2.__version__)\n\ndef getid(image_src):\n    aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)\n    parameters = aruco.DetectorParameters_create()\n    corners, ids, rejectedImgPoints = aruco.detectMarkers(image_src, aruco_dict, parameters=parameters)\n    if(ids is None):\n        print(\"None!!!!\")\n        result = []\n        result.append(0)\n    else:\n        print(\"cdcd\")\n        frame_markers = aruco.drawDetectedMarkers(image_src.copy(), corners, ids)\n        # cv2.imshow('frame_result', frame_markers), cv2.waitKey(1)\n        print(len(ids))\n        print(ids)\n\n        result = []\n        for i in range(len(ids)):\n            print(ids[i])\n            result.append(ids[i][0])\n        print(result)\n    return result\n","sub_path":"src/aruco_detector.py","file_name":"aruco_detector.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"129077401","text":"import binascii\nimport struct\nimport math\nimport collections\n\ndef dayTime(seconds):\n    \"\"\"\n    Convert seconds of day to a time string\n    Unit: seconds\n    :param seconds:\n    :return:\n    \"\"\"\n    m, s = divmod(seconds, 60)\n    h, m = divmod(m, 60)\n    if h<10:\n        return (\"%02d:%02d:%02d\" % (h, m, s))\n    else:\n        return (\"%d:%02d:%02d\" % (h, m, s))\n\n\ndef signeddata(val, nbits):\n    # sign-extend the low nbits of val (two's complement); the previous mask\n    # with 0x7FFFFFFF cleared the sign bit instead of extending it\n    val &= (1 << nbits) - 1\n    sign_bit = 1 << (nbits - 1)\n    return (val & (sign_bit - 1)) - (val & sign_bit)\n\n\nf = open(\"G:\\\\ts03\\\\bds_cd03\", \"rb\")\nf2 = open(\"G:\\\\ts03\\\\bds_data3.txt\",\"w+\")\nf2.write(\"datetime\".rjust(19, \" \") +\" \"+ \"prn\".rjust(5, \" \") + \"time\".rjust(10, \" \") +\n         \"type\".rjust(15, \" \") + \"A0_utc\".rjust(28, \" \") + \"A1_utc\".rjust(28, \" \") +\n         \"dT_ls\".rjust(10, \" \") + \"dT_lsf\".rjust(10, \" \") + \"DN_lsf\".rjust(10, \" \") +\n         \"WN_lsf\".rjust(10, \" \") + \"\\n\")\n\ndate1 = \"\"\ndatestr = \"\"\n\nwhile True:\n    data1 = f.read(1)\n    datad1 = binascii.b2a_hex(data1)\n    if not data1:\n        break\n    elif data1 == b'\\x0A':\n        data2 = f.read(2)\n        datad2 = binascii.b2a_hex(data2)\n        if data2 == b'RD':\n            data3 = f.read(3)\n            datad3 = binascii.b2a_hex(data3)\n            if data3 == b'\\x30\\x30\\x36':\n                y = struct.unpack('H', f.read(2))[0] # year\n                m = struct.unpack('B', f.read(1))[0] # month\n                d = struct.unpack('B', f.read(1))[0] # day\n                b = struct.unpack('B', f.read(1))[0]\n                if y == 65535 or m == 255 or d == 255:\n                    print(\"invalid RD value\")\n                    break\n                date1 = str(y) + \"-\" + str(m) + \"-\" + str(d)\n        elif data2 == b'~~':\n            data4 = f.read(3)\n            datad4 = binascii.b2a_hex(data4)\n            if data4 == b'\\x30\\x30\\x35':\n                data5 = f.read(4)\n                datad5 = binascii.b2a_hex(data5)\n                rt = struct.unpack('I', data5)[0] # milliseconds of day\n                if rt == 4294967295:\n                    continue # invalid value\n                time1 = dayTime(rt / 1000)\n                if date1 == \"\":\n                    
date1 = \" \"\n datestr = date1 +\" \"+ time1\n elif data2 == b'cd':\n data5 = f.read(3)\n datad5 = binascii.b2a_hex(data5)\n if data5 == b'\\x30\\x33\\x30':\n bin_data = \"\"\n prn_u = struct.unpack('B', f.read(1))[0]\n if prn_u > 5:\n if prn_u<10:\n prn_u = \"C0\" + str(prn_u)\n else:\n prn_u = \"C\" + str(prn_u)\n s1 = f.read(4)\n datas1 = binascii.b2a_hex(s1)\n time_u = struct.unpack('I', s1)[0]\n s2 = f.read(1)\n datas2 = binascii.b2a_hex(s2)\n type_u = struct.unpack('B', s2)[0]\n if type_u == 0:\n type_u = \"B1\"\n elif type_u == 1:\n type_u = \"B2\"\n elif type_u == 2:\n type_u = \"B3\"\n elif type_u == 3:\n type_u = \"B1 from CEO\"\n elif type_u == 4:\n type_u = \"B2 from CEO\"\n elif type_u == 5:\n type_u = \"B2 from CEO\"\n elif type_u == 6:\n type_u = \"B1C\"\n elif type_u == 7:\n type_u = \"B1-2\"\n\n s3 = f.read(1)\n datas3 = binascii.b2a_hex(s3)\n len_u = struct.unpack('B', s3)[0]\n\n pinput = []\n # pinput2 = []\n poutput = []\n for i in range(len_u):\n data6 = f.read(4)\n datad6 = binascii.b2a_hex(data6)\n data_u = struct.unpack('I', data6)[0]\n # pinput2.append(data6)\n pinput.append(data_u)\n poutput.append((pinput[0] >> 2) & 0x3FFFFFFF)\n poutput.append(((pinput[0] & 0x000003) << 28) | ((pinput[1] >> 4) & 0x0FFFFFFF))\n poutput.append(((pinput[1] & 0x00000F) << 26) | ((pinput[2] >> 6) & 0x03FFFFFF))\n poutput.append(((pinput[2] & 0x00003F) << 24) | ((pinput[3] >> 8) & 0x00FFFFFF))\n poutput.append(((pinput[3] & 0x0000FF) << 22) | ((pinput[4] >> 10) & 0x003FFFFF))\n poutput.append(((pinput[4] & 0x0003FF) << 20) | ((pinput[5] >> 12) & 0x000FFFFF))\n poutput.append(((pinput[5] & 0x000FFF) << 18) | ((pinput[6] >> 14) & 0x0003FFFF))\n poutput.append(((pinput[6] & 0x003FFF) << 16) | ((pinput[7] >> 16) & 0x0000FFFF))\n poutput.append(((pinput[7] & 0x00FFFF) << 14) | ((pinput[8] >> 18) & 0x00003FFF))\n poutput.append(((pinput[8] & 0x03FFFF) << 12) | ((pinput[9] >> 16) & 0x00000FFF))\n\n for y in range(len_u):\n poutput[y] <<= 2\n\n FraID = (poutput[0] & 0x0001C000) >> 14\n if FraID == 5:\n Pnum = (poutput[1] & 0x0007F000) >> 12\n if Pnum == 10:\n utcmap = ((poutput[1] >> 10) & 0x03 << 6) | (poutput[2] >> 26) & 0x3F\n tls = utcmap\n tlsf = signeddata(poutput[2] >> 18, 8)\n WNlsf = (poutput[2] >> 10) & 0xFF\n utc1 = poutput[3] & 0xFFFFFC00\n utch = hex(utc1)[2:]\n fmt = \"\"\n bts = bytearray()\n for u1 in range(0, len(utch), 2):\n bts.append(int(utch[u1:u1+2], 16))\n if len(bts) == 4:\n fmt = \"!i\"\n elif len(bts) == 1:\n fmt = \"!b\"\n utc1 = struct.unpack(fmt, bts)[0]\n utc2 = (poutput[4] >> 22) & 0x03FF\n A0utc = (utc1 | utc2) * 9.31322574615479e-10\n utcmap = ((poutput[4] & 0x003FFC00) << 2) | ((poutput[5] >> 20) & 0x0FFF)\n if utcmap & 0x800000:\n utcmap |=0xFF000000\n utcmapu = hex(utcmap)[2:]\n if len(utcmapu)==1:\n utcmapu = \"0\"+utcmapu\n bts2 = bytearray()\n for u2 in range(0, len(utcmapu) ,2):\n bts2.append(int(utcmapu[u2:u2+2], 16))\n if len(bts2) == 4:\n fmt = \"!i\"\n elif len(bts2) == 1:\n fmt = \"!b\"\n utcmap2 = struct.unpack(fmt, bts2)[0]\n A1utc = utcmap2 * 8.881784197e-16\n DN = (poutput[5] >> 12) & 0xFF\n print(\"datetime:\" + datestr + \"\\n\" + \"prn:\" + str(prn_u) + \"\\n\" + \"time:\" + str(time_u) + \"\\n\" +\n \"type:\" + str(type_u) + \"\\n\" + \"A0_utc:\" + str(A0utc) + \"\\n\" + \"A1_utc:\" + str(A1utc) + \"\\n\" +\n \"dT_ls:\" + str(tls) + \"\\n\" + \"dT_lsf:\" + str(tlsf) + \"\\n\" + \"DN_lsf:\" + str(DN) + \"\\n\" +\n \"WN_lsf:\" + str(WNlsf) + \"\\n\" + \"------------------------------\")\n f2.write(datestr+ \" \" + str(prn_u).rjust(5, \" \") 
+ str(time_u).rjust(10, \" \") +\n str(type_u).rjust(15, \" \") + str(A0utc).rjust(28, \" \") + str(A1utc).rjust(28, \" \") +\n str(tls).rjust(10, \" \") + str(tlsf).rjust(10, \" \") + str(DN).rjust(10, \" \") +\n str(WNlsf).rjust(10, \" \") + \"\\n\")\nf.close()\nf2.close()\n\n\n\n\n","sub_path":"test/read_BDS_File.py","file_name":"read_BDS_File.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"207193315","text":"# coding: utf-8\nimport unittest\n\nclass TestMyParser(unittest.TestCase):\n\n def setUp(self):\n self.parser = MyParser()\n fixture_path = 'tests/fixtures/sample.html'\n with open(fixture_path, 'r') as f:\n self.markup = f.read()\n\n def test_processing(self):\n data = self.parser.process(markup=self.markup)\n # Check that every value exists and is not empty\n for k,v in data:\n self.assertTrue(v)\n","sub_path":"tests/test_parsers.py","file_name":"test_parsers.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"143918989","text":"from abc import ABC, abstractmethod\nfrom typing import Dict, Union, List\n\nfrom ansible.cli import CLI\nfrom ansible.errors import AnsibleParserError, AnsibleUndefinedVariable, AnsibleError\nfrom ansible.parsing.yaml.objects import AnsibleUnicode\nfrom ansible.playbook import Playbook, Play\nfrom ansible.playbook.block import Block\nfrom ansible.playbook.helpers import load_list_of_blocks\nfrom ansible.playbook.role_include import IncludeRole\nfrom ansible.playbook.task import Task\nfrom ansible.playbook.task_include import TaskInclude\nfrom ansible.template import Templar\nfrom ansible.utils.display import Display\n\nfrom ansibleplaybookgrapher.graph import EdgeNode, TaskNode, PlaybookNode, RoleNode, PlayNode, CompositeNode, BlockNode\nfrom ansibleplaybookgrapher.utils import clean_name, handle_include_path, has_role_parent, generate_id, \\\n convert_when_to_str\n\n\nclass BaseParser(ABC):\n \"\"\"\n Base Parser of a playbook\n \"\"\"\n\n def __init__(self, tags: List[str] = None, skip_tags: List[str] = None, display: Display = None):\n \"\"\"\n\n :param tags: Only add plays and tasks tagged with these values\n :param skip_tags: Only add plays and tasks whose tags do not match these values\n :param display: Ansible display used to print some messages in the console\n \"\"\"\n loader, inventory, variable_manager = CLI._play_prereqs()\n self.data_loader = loader\n self.inventory_manager = inventory\n self.variable_manager = variable_manager\n\n self.tags = tags or [\"all\"]\n self.skip_tags = skip_tags or []\n self.display = display or Display()\n\n @abstractmethod\n def generate_graph(self, *args, **kwargs) -> PlaybookNode:\n pass\n\n def template(self, data: Union[str, AnsibleUnicode], variables: Dict,\n fail_on_undefined=False) -> Union[str, AnsibleUnicode]:\n \"\"\"\n Template the data using Jinja. 
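The `signeddata` helper in `read_BDS_File.py` above decodes two's-complement fields such as the 8-bit `dT_lsf`. A quick self-check of the sign-extension idiom it uses (0xFF must decode to -1, 0x7F to +127):

```python
# Self-check for the two's-complement sign extension used by signeddata above.
def sign_extend(value, nbits):
    value &= (1 << nbits) - 1            # keep only the low nbits
    sign_bit = 1 << (nbits - 1)
    return (value & (sign_bit - 1)) - (value & sign_bit)

assert sign_extend(0xFF, 8) == -1
assert sign_extend(0x7F, 8) == 127
assert sign_extend(0b100, 3) == -4
```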
Return data if an error occurs during the templating\n :param data:\n :param fail_on_undefined:\n :param variables:\n :return:\n \"\"\"\n try:\n templar = Templar(loader=self.data_loader, variables=variables)\n return templar.template(data, fail_on_undefined=fail_on_undefined)\n except AnsibleError as ansible_error:\n # Sometime we need to export\n if fail_on_undefined:\n raise\n self.display.warning(ansible_error)\n return data\n\n def _add_task(self, task: Task, task_vars: Dict, node_type: str, parent_node: CompositeNode) -> bool:\n \"\"\"\n Include the task in the graph.\n :return: True if the task has been included, false otherwise\n \"\"\"\n\n if not task.evaluate_tags(only_tags=self.tags, skip_tags=self.skip_tags, all_vars=task_vars):\n self.display.vv(f\"The task '{task.get_name()}' is skipped due to the tags.\")\n return False\n\n self.display.vv(f\"Adding {node_type} '{task.get_name()}' to the graph\")\n\n task_name = clean_name(f\"[{node_type}] \" + self.template(task.get_name(), task_vars))\n edge_label = convert_when_to_str(task.when)\n\n edge_node = EdgeNode(parent_node, TaskNode(task_name, generate_id(f\"{node_type}_\")), edge_label)\n parent_node.add_node(target_composition=f\"{node_type}s\", node=edge_node)\n\n return True\n\n\nclass PlaybookParser(BaseParser):\n \"\"\"\n The playbook parser. This is the main entrypoint responsible to parser the playbook into a graph structure\n \"\"\"\n\n def __init__(self, playbook_filename: str, include_role_tasks=False, tags: List[str] = None,\n skip_tags: List[str] = None, display: Display = None):\n \"\"\"\n :param playbook_filename: The filename of the playbook to parse\n :param display: Ansible display used to print some messages in the console\n :param include_role_tasks: If true, the tasks of the role will be included in the graph\n :param tags: Only add plays and tasks tagged with these values\n :param skip_tags: Only add plays and tasks whose tags do not match these values\n \"\"\"\n\n super().__init__(tags=tags, skip_tags=skip_tags, display=display)\n\n self.include_role_tasks = include_role_tasks\n self.playbook_filename = playbook_filename\n self.playbook = Playbook.load(playbook_filename, loader=self.data_loader,\n variable_manager=self.variable_manager)\n # the root node\n self.playbook_root_node = PlaybookNode(self.playbook_filename)\n\n def generate_graph(self, *args, **kwargs) -> PlaybookNode:\n \"\"\"\n Loop through the playbook and generate the graph.\n\n The graph is drawn following this order (https://docs.ansible.com/ansible/2.4/playbooks_reuse_roles.html#using-roles)\n for each play:\n add pre_tasks\n add roles\n if include_role_tasks\n add role_tasks\n add tasks\n add post_tasks\n :return:\n \"\"\"\n\n # loop through the plays\n for play in self.playbook.get_plays():\n\n # the load basedir is relative to the playbook path\n if play._included_path is not None:\n self.data_loader.set_basedir(play._included_path)\n else:\n self.data_loader.set_basedir(self.playbook._basedir)\n self.display.vvv(f\"Loader basedir set to {self.data_loader.get_basedir()}\")\n\n play_vars = self.variable_manager.get_vars(play)\n play_hosts = [h.get_name() for h in self.inventory_manager.get_hosts(self.template(play.hosts, play_vars))]\n play_name = \"Play: {} ({})\".format(clean_name(play.get_name()), len(play_hosts))\n play_name = self.template(play_name, play_vars)\n\n self.display.banner(\"Parsing \" + play_name)\n\n play_node = PlayNode(play_name, hosts=play_hosts)\n self.playbook_root_node.add_play(play_node, \"\")\n\n # loop through 
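The `PlaybookParser` above is the grapher's entry point; typical driving code constructs it with a playbook path and asks for the node tree. A sketch only, since an Ansible inventory and configuration must be resolvable for `CLI._play_prereqs()` to succeed; "site.yml" is a placeholder path:

```python
# Sketch of driving PlaybookParser defined above; "site.yml" is a placeholder
# playbook path, and include_role_tasks mirrors the corresponding CLI option.
parser = PlaybookParser("site.yml", include_role_tasks=True, tags=["all"])
playbook_node = parser.generate_graph()   # returns the root PlaybookNode
print(playbook_node)                      # root of the play/role/task tree
```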
the pre_tasks\n self.display.v(\"Parsing pre_tasks...\")\n for pre_task_block in play.pre_tasks:\n self._include_tasks_in_blocks(current_play=play, parent_nodes=[play_node], block=pre_task_block,\n play_vars=play_vars, node_type=\"pre_task\")\n\n # loop through the roles\n self.display.v(\"Parsing roles...\")\n\n for role in play.get_roles():\n # Don't insert tasks from ``import/include_role``, preventing duplicate graphing\n if role.from_include:\n continue\n\n # the role object doesn't inherit the tags from the play. So we add it manually.\n role.tags = role.tags + play.tags\n if not role.evaluate_tags(only_tags=self.tags, skip_tags=self.skip_tags, all_vars=play_vars):\n self.display.vv(f\"The role '{role.get_name()}' is skipped due to the tags.\")\n # Go to the next role\n continue\n\n role_node = RoleNode(clean_name(role.get_name()))\n # edge from play to role\n play_node.add_node(\"roles\", EdgeNode(play_node, role_node))\n\n if self.include_role_tasks:\n # loop through the tasks of the roles\n for block in role.compile(play):\n self._include_tasks_in_blocks(current_play=play, parent_nodes=[role_node], block=block,\n play_vars=play_vars, node_type=\"task\")\n # end of roles loop\n\n # loop through the tasks\n self.display.v(\"Parsing tasks...\")\n for task_block in play.tasks:\n self._include_tasks_in_blocks(current_play=play, parent_nodes=[play_node], block=task_block,\n play_vars=play_vars, node_type=\"task\")\n\n # loop through the post_tasks\n self.display.v(\"Parsing post_tasks...\")\n for post_task_block in play.post_tasks:\n self._include_tasks_in_blocks(current_play=play, parent_nodes=[play_node], block=post_task_block,\n play_vars=play_vars, node_type=\"post_task\")\n # Summary\n self.display.display(\"\") # just an empty line\n self.display.v(f\"{len(play_node.pre_tasks)} pre_task(s) added to the graph.\")\n self.display.v(f\"{len(play_node.roles)} role(s) added to the play\")\n self.display.v(f\"{len(play_node.tasks)} task(s) added to the play\")\n self.display.v(f\"{len(play_node.post_tasks)} post_task(s) added to the play\")\n\n self.display.banner(f\"Done parsing {play_name}\")\n self.display.display(\"\") # just an empty line\n # moving to the next play\n\n return self.playbook_root_node\n\n def _include_tasks_in_blocks(self, current_play: Play, parent_nodes: List[CompositeNode],\n block: Union[Block, TaskInclude], node_type: str, play_vars: Dict = None):\n \"\"\"\n Recursively read all the tasks of the block and add it to the graph\n :param parent_nodes: This a list of parent nodes. Each time, we see an include_role, the corresponding node is\n added to this list\n :param current_play:\n :param block:\n :param play_vars:\n :param node_type:\n :return:\n \"\"\"\n\n if not block._implicit and block._role is None:\n # Here we have an explicit block. 
Ansible internally converts all normal tasks to Block\n block_node = BlockNode(str(block.name))\n parent_nodes[-1].add_node(f\"{node_type}s\",\n EdgeNode(parent_nodes[-1], block_node, convert_when_to_str(block.when)))\n parent_nodes.append(block_node)\n\n # loop through the tasks\n for task_or_block in block.block:\n if isinstance(task_or_block, Block):\n self._include_tasks_in_blocks(current_play=current_play, parent_nodes=parent_nodes, block=task_or_block,\n node_type=node_type, play_vars=play_vars)\n elif isinstance(task_or_block, TaskInclude): # include, include_tasks, include_role are dynamic\n # So we need to process them explicitly because Ansible does it during the execution of the playbook\n\n task_vars = self.variable_manager.get_vars(play=current_play, task=task_or_block)\n\n if isinstance(task_or_block, IncludeRole):\n # Here we have an 'include_role'. The class IncludeRole is a subclass of TaskInclude.\n # We do this because the management of an 'include_role' is different.\n # See :func:`~ansible.playbook.included_file.IncludedFile.process_include_results` from line 155\n self.display.v(\n f\"An 'include_role' found. Including tasks from the role '{task_or_block.args['name']}'\")\n\n role_node = RoleNode(task_or_block.args['name'])\n parent_nodes[-1].add_node(f\"{node_type}s\", EdgeNode(parent_nodes[-1], role_node,\n convert_when_to_str(task_or_block.when)))\n\n if self.include_role_tasks:\n # If we have an include_role and we want to include role tasks, the parent node now becomes\n # the role.\n parent_nodes.append(role_node)\n\n block_list, _ = task_or_block.get_block_list(play=current_play, loader=self.data_loader,\n variable_manager=self.variable_manager)\n else:\n self.display.v(f\"An 'include_tasks' found. Including tasks from '{task_or_block.get_name()}'\")\n\n templar = Templar(loader=self.data_loader, variables=task_vars)\n try:\n include_file = handle_include_path(original_task=task_or_block, loader=self.data_loader,\n templar=templar)\n except AnsibleUndefinedVariable as e:\n # TODO: mark this task with some special shape or color\n self.display.warning(\n f\"Unable to translate the include task '{task_or_block.get_name()}' due to an undefined variable: {str(e)}. 
\"\n \"Some variables are available only during the execution of the playbook.\")\n self._add_task(task=task_or_block, task_vars=task_vars, node_type=node_type,\n parent_node=parent_nodes[-1])\n continue\n\n data = self.data_loader.load_from_file(include_file)\n if data is None:\n self.display.warning(f\"The file '{include_file}' is empty and has no tasks to include\")\n continue\n elif not isinstance(data, list):\n raise AnsibleParserError(\"Included task files must contain a list of tasks\", obj=data)\n\n # get the blocks from the include_tasks\n block_list = load_list_of_blocks(data, play=current_play, variable_manager=self.variable_manager,\n role=task_or_block._role, loader=self.data_loader,\n parent_block=task_or_block)\n\n for b in block_list: # loop through the blocks inside the included tasks or role\n self._include_tasks_in_blocks(current_play=current_play, parent_nodes=parent_nodes, block=b,\n play_vars=task_vars, node_type=node_type)\n else:\n if len(parent_nodes) > 1 and not has_role_parent(task_or_block) and task_or_block._parent._implicit:\n # We add a new parent node if:\n # - We found an include_role\n # - We found an explicit Block\n # If an include_role is not found and we have a task that is not from an include_role and not from\n # an explicit block => we remove the last CompositeNode we have added.\n parent_nodes.pop()\n\n # check if this task comes from a role, and we don't want to include tasks of the role\n if has_role_parent(task_or_block) and not self.include_role_tasks:\n # skip role's task\n self.display.vv(\n f\"The task '{task_or_block.get_name()}' has a role as parent and include_role_tasks is false. \"\n \"It will be skipped.\")\n # skipping\n continue\n\n self._add_task(task=task_or_block, task_vars=play_vars, node_type=node_type,\n parent_node=parent_nodes[-1])\n","sub_path":"ansibleplaybookgrapher/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":15183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"20584312","text":"import os\nimport pprint as ppr\nimport re\nfrom os.path import join as pj\n\n\ndef detect_xml_files(basedir):\n xml_files = []\n for root, dirs, files in os.walk(basedir):\n for file in files:\n if file.endswith('.xml') is True \\\n and file.startswith('_new') is False:\n xml_files.append(os.path.join(root, file))\n return sorted(xml_files)\n\n\ndef get_uris_from_file(fil, filter=None):\n uris = []\n content = read_xml(fil)\n for line in content:\n s = rxsearch(r'(?<=dc:uri=\").*?(?=\")', line)\n if s is not None:\n if filter is None:\n uris.append(s)\n else:\n if rxmatch(filter, s) is True:\n uris.append(s)\n return uris\n\n\ndef mkdir(folder):\n try:\n os.makedirs(folder)\n except FileExistsError:\n pass\n\n\ndef pprint(obj):\n pp = ppr.PrettyPrinter(indent=4)\n pp.pprint(obj)\n\n\ndef rxmatch(rxscheme, s):\n return bool(re.search(rxscheme, s))\n\n\ndef rxsearch(rxscheme, s, group=0):\n r = None\n m = re.search(rxscheme, s)\n if m is not None:\n r = m.group(group)\n return r\n\n\ndef read_xml(filename):\n print('Read file ' + filename)\n arr = []\n try:\n filecontent = open(filename, 'r')\n except Exception as e:\n print(e)\n else:\n for line in filecontent.read().splitlines():\n arr.append(line)\n return(arr)\n\n\ndef write_xml(data, folder, filename, debug=False):\n mkdir(folder)\n target = pj(folder, filename)\n print('Write file ' + target)\n if debug is False:\n with open(target, 'w') as fp:\n for line in data:\n fp.write(line + 
'\\n')\n","sub_path":"tools/sanitizer/lib/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"128199771","text":"import torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import precision_recall_fscore_support\nimport datetime\nimport gen_features_torch\nimport torch.utils.data\nfrom torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence\n\ndef read_data_files(file_path):\n data = pd.read_csv(file_path + 'data_long.csv')\n return data\n\ndef gen_batch_data(data):\n cols = list(data.columns)\n del_cols_index = [cols.index(col) for col in ['sid', 'pid', 'click_mode']]\n sel_cols_index = list(range(len(cols)))\n for item in del_cols_index:\n sel_cols_index.pop(item)\n\n grouped = data.groupby('sid')\n batch_feas_list = []\n batch_click_mode_list = []\n for i, (group_id, group) in tqdm(enumerate(grouped)):\n grouped_values = group.values\n batch_click_mode = grouped_values[:,del_cols_index[-1]][0]\n batch_feas = torch.tensor(grouped_values[:, sel_cols_index]).type(torch.FloatTensor)\n batch_feas_list.append(batch_feas)\n batch_click_mode_list.append(batch_click_mode)\n batch_click_mode_list = torch.tensor(batch_click_mode_list).type(torch.LongTensor)\n # first paddle\n # check why size does not add up to 10000???\n print('list_len:', len(batch_click_mode_list))\n return batch_feas_list, batch_click_mode_list\n\ndef split_train_test_12_class(file_path):\n # data = read_data_files(file_path)\n data = gen_features_torch.merge_data()\n train_data = data[data['click_mode'] != -1]\n #train-val split\n val_data = train_data.iloc[:int(0.2 * train_data.shape[0]), :]\n train_data = train_data.iloc[int(0.2 * train_data.shape[0]):, :]\n\n train_x, train_y = gen_batch_data(train_data)\n val_x, val_y = gen_batch_data(val_data)\n\n test_data = data.query('click_mode == -1')\n submit = test_data[['sid']].copy()\n\n test_x, _ = gen_batch_data(test_data)\n return train_x, train_y, val_x, val_y, test_x, submit\n\ndef eval_f(y_pred, train_data):\n y_true = train_data.label\n y_pred = y_pred.reshape((12, -1)).T\n y_pred = np.argmax(y_pred, axis=1)\n score = f1_score(y_true, y_pred, average='weighted')\n return 'weighted-f1-score', score, True\n\ndef f1_decomposition(val_y, val_pred):\n precision, recall, F1, support = precision_recall_fscore_support(val_y, val_pred)\n weighted_F1 = precision_recall_fscore_support(val_y, val_pred, average ='weighted')[2]\n df_eval = pd.DataFrame({'precision':precision, 'recall':recall,'F1':F1, 'support':support, 'weighted_F1':weighted_F1})\n return df_eval\n\ndef submit_result_12_class(submit, pred_test, model_name):\n now_time = str(datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M\"))\n submit['recommend_mode'] = pred_test\n submit.to_csv(\n '../submit/{}_result_{}.csv'.format(model_name, now_time), index=False)\n\nclass RNNModel(nn.Module):\n def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):\n super(RNNModel, self).__init__()\n # batch_first=True will affect the input shape to self.rnn()\n\n self.rnn = nn.RNN(input_dim, 64, layer_dim, batch_first=True,\n nonlinearity='relu')\n self.fc1 = nn.Linear(64, 32)\n self.fc2 = nn.Linear(32, output_dim)\n self.dropout = nn.Dropout(0.5)\n\n\n def forward(self, x):\n out, hn = self.rnn(x)\n #hn[-1] is of (batch_size, hidden_dim)\n # hn_ = self.dropout(hn[-1])\n out = 
self.fc1(hn[-1])\n out = self.fc2(out)\n # out = self.dropout(out)\n return out\n\nclass LSTM(nn.Module):\n def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):\n super(LSTM, self).__init__()\n self.lstm = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True, dropout = 0.5)\n self.fc = nn.Linear(hidden_dim, output_dim)\n self.layer_dim = layer_dim\n self.hidden_dim = hidden_dim\n\n def forward(self, x):\n # may want to initialize h0 and c0\n out, (hn,cn) = self.lstm(x)\n out = self.fc(hn[-1])\n return out\n\ndef train_test(train_x, train_y, val_x, val_y, test_x):\n '''train_x is a list of two-dim tensor\n train_y is an one-dim tensor\n '''\n input_dim = train_x[0].size()[1]\n hidden_dim = 10\n output_dim = 12\n layer_dim = 1 #used in RNN\n batch_size = 100\n\n model = RNNModel(input_dim, hidden_dim, layer_dim, output_dim)\n # model = LSTM(input_dim, hidden_dim, layer_dim, output_dim)\n\n # Cross Entropy Loss\n error = nn.CrossEntropyLoss()\n # error = nn.CrossEntropyLoss(weight = torch.tensor(np.array([1,1,6,1,1,1,1,1,1,1,1,1])).type(torch.FloatTensor))\n\n #learning_rate = 1 for LSTM\n learning_rate = 0.000001 # for RNN with num_epochs 500 seems to have 0.29 F1\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)\n\n\n num_epochs = 50\n pred_test_list = []\n score_list = []\n print('# of iters:',(len(train_x)/batch_size),'\\n')\n for epoch in range(num_epochs):\n for i in range(len(train_x)//batch_size+1):\n if i < len(train_x)//batch_size:\n feas = train_x[i*batch_size:(i+1)*batch_size]\n labels = train_y[i*batch_size:(i+1)*batch_size]\n else:\n feas = train_x[i*batch_size:]\n labels = train_y[i*batch_size:]\n # feas = feas.view(-1, 12, 1) #needed for RNN\n lens = list(map(len, feas))\n padded_feas = pad_sequence(feas, batch_first=True)\n packed_feas = pack_padded_sequence(padded_feas, lens, batch_first=True, enforce_sorted=False)\n optimizer.zero_grad()\n outputs = model(packed_feas)\n loss = error(outputs, labels)\n if i%100==0:\n print('loss at {0} is {1}'.format(i, loss.item()))\n loss.backward()\n optimizer.step()\n\n if i ==0:\n train_outputs = outputs\n else:\n train_outputs = torch.cat((train_outputs, outputs), dim = 0)\n i+=1\n\n with torch.no_grad():\n for i in range(len(val_x)//batch_size+1):\n if i < len(val_x)//batch_size:\n val_feas = val_x[i*batch_size:(i+1)*batch_size]\n val_labels = val_y[i*batch_size:(i+1)*batch_size]\n else:\n val_feas = val_x[i*batch_size:]\n val_labels = val_y[i*batch_size:]\n val_lens = list(map(len, val_feas))\n padded_val_feas = pad_sequence(val_feas, batch_first=True)\n packed_val_feas = pack_padded_sequence(padded_val_feas, val_lens, batch_first=True, enforce_sorted=False)\n val_outputs = model(packed_val_feas)\n predicted = torch.max(val_outputs.data, 1)[1]\n if i ==0:\n val_result = predicted\n #for evaluation purposes\n val_outputs_long = val_outputs\n else:\n val_result = torch.cat((val_result, predicted), dim = 0)\n val_outputs_long = torch.cat((val_outputs_long, val_outputs), dim = 0)\n # pred_test = predict(model, test_loader)\n # pred_test_list.append(pred_test)\n score = precision_recall_fscore_support(val_y, val_result, average ='weighted')[2]\n # print('epoch:', epoch, 'f1-score:', score)\n score_list.append(score)\n max_index = np.argmax(np.array(score_list))\n print('max_index', max_index)\n print(f1_decomposition(val_y, val_result))\n val_outputs_long = pd.DataFrame(val_outputs_long.numpy())\n val_outputs_long['click_mode']=val_y\n val_outputs_long['predicted']=val_result\n 
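# NOTE (editorial illustration, not part of the original flow): the batches\n    # above are padded and packed before hitting the RNN. pad_sequence stacks\n    # (len_i, feat) tensors into (batch, max_len, feat) and pack_padded_sequence\n    # records the true lengths so padded steps are skipped. Toy example:\n    _demo = [torch.ones(2, 3), torch.ones(4, 3)]  # hypothetical tensors\n    _padded = pad_sequence(_demo, batch_first=True)  # shape (2, 4, 3)\n    _packed = pack_padded_sequence(_padded, [2, 4], batch_first=True, enforce_sorted=False)\n    assert _padded.shape == (2, 4, 3)\n    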
val_outputs_long.to_csv('../output/val_outputs_long.csv')\n train_outputs_long = pd.DataFrame(train_outputs.detach().numpy())\n train_outputs_long['predicted']=np.argmax(train_outputs_long.values, axis=1)\n train_outputs_long['click_mode']=train_y\n train_outputs_long.to_csv('../output/train_outputs_long.csv')\n # pred_test = pred_test_list[max_index]\n # return pred_test\n\nif __name__ == '__main__':\n train_x, train_y, val_x, val_y, test_x, submit = split_train_test_12_class('../input_torch/')\n train_test(train_x, train_y, val_x, val_y, test_x)\n # submit_result_12_class(submit, pred_test, 'pytorch')\n","sub_path":"code/pytorch.py","file_name":"pytorch.py","file_ext":"py","file_size_in_byte":8369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"640337381","text":"# Author: Alex Gezerlis\n# Numerical Methods in Physics with Python (2nd ed., CUP, 2023)\n\nfrom triang import forsub, backsub, testcreate\nfrom ludec import ludec\nfrom jacobi import termcrit\nfrom power import mag, testeigone\nimport numpy as np\n\n#def invpowershift(A,shift=20,kmax=200,tol=1.e-2):\ndef invpowershift(A,shift=20,kmax=200,tol=1.e-8):\n n = A.shape[0]\n znews = np.ones(n)\n qnews = znews/mag(znews)\n Astar = A - np.identity(n)*shift\n L, U = ludec(Astar)\n\n for k in range(1,kmax):\n qs = np.copy(qnews)\n ys = forsub(L,qs)\n znews = backsub(U,ys)\n qnews = znews/mag(znews)\n\n if qs@qnews<0:\n qnews = -qnews\n\n err = termcrit(qs,qnews)\n #print(k, qnews, err)\n\n if err < tol:\n lam = qnews@A@qnews\n break\n else:\n lam = qnews = None\n\n return lam, qnews\n\nif __name__ == '__main__':\n A, _ = testcreate(4,21)\n testeigone(invpowershift,A)\n","sub_path":"second_edition/codes/invpowershift.py","file_name":"invpowershift.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"391737759","text":"from collections import OrderedDict, namedtuple\n\n\nclass _SearchRecipe(namedtuple(\"SearchRecipe\", \"ref\")):\n with_packages = True\n\n def to_dict(self):\n data = {\"id\": str(self.ref)}\n return data\n\n\nclass _SearchPackage(namedtuple(\"SearchPackage\",\n \"package_id, options, settings, requires, outdated\")):\n\n def to_dict(self):\n return {\"id\": self.package_id, \"options\": self.options, \"settings\": self.settings,\n \"requires\": self.requires, \"outdated\": self.outdated}\n\n\nclass SearchRecorder(object):\n\n def __init__(self):\n self.error = False\n self.keyword = \"results\"\n self._info = OrderedDict()\n\n def add_recipe(self, remote_name, ref, with_packages=True):\n recipe = _SearchRecipe(ref)\n recipe.with_packages = with_packages\n if remote_name not in self._info:\n self._info[remote_name] = OrderedDict()\n self._info[remote_name][ref.full_repr()] = {\"recipe\": recipe, \"packages\": []}\n\n def add_package(self, remote_name, ref, package_id, options, settings, requires, outdated):\n sp = _SearchPackage(package_id, options, settings, requires, outdated)\n self._info[remote_name][ref.full_repr()][\"packages\"].append(sp)\n\n def get_info(self):\n info = {\"error\": self.error, self.keyword: []}\n\n for remote_name, recipe_packages in sorted(self._info.items()):\n remote_info = {\"remote\": remote_name, \"items\": []}\n for item in recipe_packages.values():\n recipe_info = item[\"recipe\"].to_dict()\n if item[\"recipe\"].with_packages:\n packages_info = [package.to_dict() for package in item[\"packages\"]]\n remote_info[\"items\"].append({\"recipe\": 
recipe_info, \"packages\": packages_info})\n                else:\n                    remote_info[\"items\"].append({\"recipe\": recipe_info})\n            info[self.keyword].append(remote_info)\n        return info\n","sub_path":"conans/client/recorder/search_recorder.py","file_name":"search_recorder.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"63082380","text":"'''\n(1) Read a line of characters (English letters only) and write a function that counts how often each character occurs, ignoring case.\n(2) Read a line of characters and write a function that counts four categories of characters: English letters, spaces, digits and everything else.\nWrite a separate function for each of the two tasks.\n\nHint: let d be a dict object whose values are the occurrence counts of its keys (the characters).\n1. Return a dict object: d[keyvalue]\n2. If the current character is keyvalue, then d[keyvalue] += 1 increments its count.\n'''\n\n# Task 1\ndef count1(string):\n    garge = {}\n    for c in string:\n        if c in garge:\n            garge[c] += 1\n        else:\n            garge[c] = 1\n    return garge\n\n'''\ns = input('Input: ')\ngarge = count1(s.lower())\nprint(garge)\n'''\n\n# Task 2\ndef count2(string):\n    garge = {'Word': 0, 'Number': 0, 'Blank': 0, 'Other': 0}\n    for c in string:\n        if c.isalpha():\n            garge['Word'] += 1\n        elif c.isdigit():\n            garge['Number'] += 1\n        elif c.isspace():\n            garge['Blank'] += 1\n        else:\n            garge['Other'] += 1\n    return garge\n\ns = input('Input: ') #123123qweewerwe() (_=)\nprint(count2(s))","sub_path":"Visual Studio/Python/字符统计.py","file_name":"字符统计.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"631874363","text":"import json\nimport sys\nimport time\n\nimport requests\nfrom git_wrapper import git_wrapper\n\n\ndef common_update():\n    while is_running:\n        s.update()\n        s.add('.')\n        s.commit('syncing')\n        s.push()\n\n        time.sleep(5)\n\ndef update():\n    while is_running:\n        s.update()\n\n        time.sleep(1)\n\ndef git_commit():\n    while is_running:\n        s.add('.')\n        s.commit('syncing')\n        s.push()\n\n        time.sleep(5)\n\ndef svn_commit():\n    while is_running:\n        l, is_modified = s.getNewFiles()\n\n        for e in l:\n            s.add(e)\n\n        if is_modified:\n            s.commit('syncing')\n\n        time.sleep(1)\n\n\nassert len(sys.argv) == 2, \"usage: python3 SClient.py user.json\"\n\n# read the configuration file passed on the command line (the original\n# hard-coded 'user.json', which contradicted the assert above)\nwith open(sys.argv[1], 'r') as f:\n    user_data = f.read()\n\nuser_data = json.loads(user_data)\n\nresp = requests.get(user_data['auth-url'],\n                    auth=(user_data['id'], user_data['password']))\n\nprint(resp.text)\nprint(len(resp.text))\nret = resp.text.split('\\\\n ')[0]\nprint(ret)\nassert ret == \"success\", \"Invalid url\"\n\ns = git_wrapper(user_data['id'], user_data['password'])\n    # svn_wrapper(user_data['id'], user_data['password'])\nis_running = True\n\ns.checkout(url=user_data['repo-url'],\n           dest=user_data['repo-dir'])\n\ncommon_update()\n\n'''\nupdater = Thread(target=update)\nupdater.start()\n\ncommitter = Thread(target=git_commit)\ncommitter.start()\n\nupdater.join()\ncommitter.join()\n'''\n","sub_path":"SClient.py","file_name":"SClient.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"185683388","text":"import random\nfrom XOR_task import *\nfrom enum import Enum\nimport matplotlib.pyplot as plt\nimport time\n\nclass Actions(Enum):\n    Stand = 'S'\n    Hit = 'H'\n    Split = 'SP'\n    Double = 'DD'\n    \n\nclass BlackJack:\n    DECK = []\n    def __init__(self):\n        self.deckCount = 8\n        self.drawnCards = []\n        BlackJack.DECK = BlackJack.getStaticDeck()\n        self.bank = 200\n        self.deck = BlackJack.DECK * self.deckCount\n        random.shuffle(self.deck)\n        \n        plt.close()\n        plt.ion()\n        plt.show()\n        self.bankHistory = []\n        self.bet = 1\n\n    def run(self, agent, plot=True):\n        self.bank -= self.bet\n        \n        if 
len(self.deck) < (len(BlackJack.DECK) * self.deckCount)/2:\n self.deck = BlackJack.DECK * self.deckCount\n random.shuffle(self.deck)\n # print(\"New Deck . . .\")\n \n dealer_cards = []\n agent_cards = []\n\n # Initial Turn\n agent_cards.append(self.draw(self.deck))\n dealer_cards.append(self.draw(self.deck))\n agent_cards.append(self.draw(self.deck))\n dealer_cards.append(self.draw(self.deck))\n \n # print(f'Bank: {self.bank}')\n # print(f'Player hand: {agent_cards}')\n # print(f'Dealer hand: {dealer_cards}')\n\n # Player Dealing\n if BlackJack.countCards(agent_cards) == 21 and BlackJack.countCards(dealer_cards) == 21:\n self.bank += self.bet\n # print(\"Push Black Jack.\")\n return\n \n \n choice = agent.input(agent_cards, dealer_cards[0])\n # print(f'Player chose to: {choice}')\n \n hand1 = hand2 = None\n \n if choice == Actions.Hit:\n while (BlackJack.countCards(agent_cards) < 21):\n agent_cards.append(self.draw(self.deck))\n # print(f'Player hand: {agent_cards}')\n if BlackJack.countCards(agent_cards) > 21:\n break\n choice = agent.input(agent_cards, dealer_cards[0])\n # print(f'Player chose to: {choice}')\n if (choice != Actions.Hit):\n break\n elif choice == Actions.Double:\n agent_cards.append(self.draw(self.deck))\n self.bank -= self.bet\n # print(f'Player hand: {agent_cards}')\n elif choice == Actions.Split and agent_cards[0][0] == agent_cards[1][0]:\n hand1 = [agent_cards[0], self.draw(self.deck)]\n hand2 = [agent_cards[1], self.draw(self.deck)]\n self.bank -= self.bet\n \n # print(f'Player hand1: {hand1}')\n choice = agent.input(hand1, dealer_cards[0])\n # print(f'Player chose to: {choice}')\n if choice == Actions.Hit:\n while (BlackJack.countCards(hand1) < 21):\n hand1.append(self.draw(self.deck))\n # print(f'Player hand1: {hand1}')\n if BlackJack.countCards(hand1) > 21:\n break\n choice = agent.input(hand1, dealer_cards[0])\n # print(f'Player chose to: {choice}')\n if (choice != Actions.Hit):\n break\n \n # print(f'Player hand2: {hand2}')\n choice = agent.input(hand2, dealer_cards[0])\n # print(f'Player chose to: {choice}')\n if choice == Actions.Hit:\n while (BlackJack.countCards(hand2) < 21):\n hand2.append(self.draw(self.deck))\n # print(f'Player hand2: {hand2}')\n if BlackJack.countCards(hand2) > 21:\n break\n choice = agent.input(hand2, dealer_cards[0])\n # print(f'Player chose to: {choice}')\n if (choice != Actions.Hit):\n break\n \n \n # DealerDealing\n while BlackJack.countCards(dealer_cards) < 17:\n dealer_cards.append(self.draw(self.deck))\n # print(f'Dealer hand: {dealer_cards}')\n \n # Final Count\n for hand in [agent_cards, hand1, hand2]:\n if hand == None:\n continue\n if len(hand) == 2 and hand1 == None and BlackJack.countCards(hand) == 21:\n # print(\"BLACK JACK !!!!\")\n self.bank += self.bet*2.5\n elif BlackJack.countCards(hand) == BlackJack.countCards(dealer_cards):\n if choice == Actions.Double:\n self.bank += self.bet\n self.bank += self.bet\n elif BlackJack.countCards(hand) > 21:\n ()\n elif BlackJack.countCards(hand) > BlackJack.countCards(dealer_cards):\n if choice == Actions.Double:\n self.bank += self.bet*2\n self.bank += self.bet*2\n elif BlackJack.countCards(dealer_cards) > 21:\n if choice == Actions.Double:\n self.bank += self.bet*2\n self.bank += self.bet*2\n \n # print(f'Final bank: {self.bank}')\n self.bankHistory.append(self.bank)\n if plot and len(self.bankHistory)%10 == 0:\n plt.xlim(0, max(300, len(self.bankHistory)+(1-((len(self.bankHistory)%300)/300))*300))\n plt.ylim(0, 300)\n plt.grid(True)\n plt.xlabel(\"matches played\")\n 
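# NOTE (editorial assumption): redrawing the whole figure every hand makes\n            # matplotlib the bottleneck, so the bank history is only replotted on\n            # every 10th hand; plt.pause(0.001) below hands control to the GUI event\n            # loop so the interactive window opened by plt.ion() actually refreshes.\n            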
plt.ylabel(\"bank\")\n plt.plot(self.bankHistory, color='blue')\n plt.show()\n plt.pause(0.001)\n \n def countCards(cards):\n count = 0\n aces = 0\n for card in cards:\n if card[0] in \"KQJT\":\n count += 10\n elif card[0] in \"23456789\":\n count += int(card[0])\n elif card[0] == \"A\":\n aces += 1\n for i in range(aces):\n if count + 11 > 21:\n count += 1\n else:\n count += 11\n return count\n\n def draw(self, deck):\n card = deck.pop(0)\n self.drawnCards.append(card)\n return card\n\n def getStaticDeck():\n deck = []\n for card in \"A23456789TJQK\":\n for sign in \"SCDH\":\n deck.append(card + sign)\n return deck\n\nclass BasicAgent:\n def __init__(self) -> None:\n pass\n \n def input(self, agent_cards, dealer_card):\n # Table Strat\n for i in range(len(X)):\n c1 = BlackJack.countCards(agent_cards[0])\n c2 = BlackJack.countCards(agent_cards[1])\n row = BlackJack.countCards(dealer_card)\n \n if c1 == 11:\n c1 = 1\n if c2 == 11:\n c2 = 1\n if row == 11:\n row = 1\n\n if X[i][0] == c1 and X[i][1] == c2 and X[i][3] == row:\n if y[i] == [1, 0, 0, 0]:\n return Actions.Stand\n elif y[i] == [0, 1, 0, 0]:\n return Actions.Hit\n elif y[i] == [0, 0, 1, 0]:\n return Actions.Split\n elif y[i] == [0, 0, 0, 1]:\n return Actions.Double\n else:\n assert False\n # print(f'Agent Cards: {agent_cards}')\n # print(f'Dealer Card: {dealer_card}')\n assert False\n \n # Dealer Strat\n #\n # if BlackJack.countCards(agent_cards) < 17:\n # return Actions.Hit\n # else:\n # return Actions.Stand\n \n # Manual Mode\n #\n # choice = input('>')\n # if choice == 'H':\n # return Actions.Hit\n # elif choice == 'S':\n # return Actions.Stand\n # elif choice == 'SP':\n # return Actions.Split\n # elif choice == 'DD':\n # return Actions.Double\n # else:\n # print(f'Action \"{choice}\" not found.')\n # return Actions.Stand\n \n \nif __name__ == '__main__':\n bj = BlackJack()\n agent = BasicAgent()\n \n while bj.bank > 0:\n bj.run(agent)\n \n plt.pause(0)","sub_path":"BlackJack.py","file_name":"BlackJack.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"207778287","text":"# Copyright (c) 2020 Patrick Hart, Julian Bernhard,\n# Klemens Esterle, Tobias Kessler\n# \n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\nimport numpy as np\n# BARK\nfrom bark.core.world.evaluation import \\\n EvaluatorGoalReached, EvaluatorCollisionEgoAgent, \\\n EvaluatorStepCount, EvaluatorDrivableArea\nfrom bark.runtime.commons.parameters import ParameterServer\nfrom bark.core.geometry import *\n# BARK-ML\nfrom bark_ml.evaluators.evaluator import StateEvaluator\n\n\nclass GoalReachedGuiding(StateEvaluator):\n def __init__(self,\n params=ParameterServer(),\n eval_agent=None):\n StateEvaluator.__init__(self, params)\n self._goal_reward = \\\n self._params[\"ML\"][\"GoalReachedGuiding\"][\"GoalReward\",\n \"Reward for reaching the goal.\",\n 1.]\n self._col_penalty = \\\n self._params[\"ML\"][\"GoalReachedGuiding\"][\"CollisionPenalty\",\n \"Reward given for a collisions.\",\n -1.]\n self._max_steps = \\\n self._params[\"ML\"][\"GoalReachedGuiding\"][\"MaxSteps\",\n \"Maximum steps per episode.\",\n 50]\n self._act_penalty = \\\n self._params[\"ML\"][\"GoalReachedGuiding\"][\"ActionPenalty\",\n \"Weight factor for penalizing actions\",\n 0.01]\n self._goal_dist = \\\n self._params[\"ML\"][\"GoalReachedGuiding\"][\"GoalDistance\",\n \"Weight factor for distance to goal\",\n 0.01]\n self._eval_agent = eval_agent\n 
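# NOTE (editorial sketch, assuming the default weights above): the shaped\n    # reward assembled in _evaluate() works out to\n    #   reward = collision * (-1.0) + success * 1.0\n    #            - 0.01 * distance_to_goal - 0.01 * (acc**2 + delta**2)\n    # so the guiding terms shape every step, not just the terminal states.\n    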
self._goal_lane_corr = None\n\n  def _add_evaluators(self):\n    \"\"\"Evaluators that will be set in the BARK world\"\"\"\n    self._evaluators[\"goal_reached\"] = EvaluatorGoalReached()\n    self._evaluators[\"collision\"] = EvaluatorCollisionEgoAgent()\n    self._evaluators[\"step_count\"] = EvaluatorStepCount()\n    self._evaluators[\"drivable_area\"] = EvaluatorDrivableArea()\n\n  def GetGoalLaneCorridorForGoal(self, observed_world):\n    \"\"\"Returns the lanecorridor the goal is in\"\"\"\n    if self._goal_lane_corr is not None:\n      return self._goal_lane_corr\n    ego_agent = observed_world.ego_agent\n    goal_def = ego_agent.goal_definition\n    goal_shape = goal_def.goal_shape\n    rc = observed_world.ego_agent.road_corridor\n    lane_corr = None\n    for lc in rc.lane_corridors:\n      if Collide(lc.polygon, goal_shape):\n        lane_corr = lc\n    return lane_corr\n\n  def CalculateDistanceToGoal(self, observed_world, goal_lane_corr):\n    \"\"\"Calculates the distance to the goal of the ego_agent\"\"\"\n    goal_center_line = goal_lane_corr.center_line\n    ego_agent = observed_world.ego_agent\n    ego_agent_state = ego_agent.state\n    distance_to_goal = Distance(\n      goal_center_line,\n      Point2d(ego_agent_state[1], ego_agent_state[2]))\n    return distance_to_goal\n\n  def CalculateGuidingReward(self, observed_world, action):\n    \"\"\"Returns a guiding reward using the dist. to goal and penalized acts.\"\"\"\n    guiding_reward = 0.\n    goal_lane_corr = self.GetGoalLaneCorridorForGoal(observed_world)\n    distance_to_goal = self.CalculateDistanceToGoal(observed_world, goal_lane_corr)\n    guiding_reward -= self._goal_dist*distance_to_goal\n    # NOTE: this will only work for continuous actions\n    if action is not None and type(action) is not int:\n      accs = action[0]\n      delta = action[1]\n      guiding_reward -= self._act_penalty*(accs**2 + delta**2)\n    return guiding_reward\n\n  def _evaluate(self, observed_world, eval_results, action):\n    \"\"\"Returns information about the current world state\n    \"\"\"\n    done = False\n    success = eval_results[\"goal_reached\"]\n    collision = eval_results[\"collision\"] or eval_results[\"drivable_area\"]\n    step_count = eval_results[\"step_count\"]\n    # determine whether the simulation should terminate\n    if success or collision or step_count > self._max_steps:\n      done = True\n    guiding_reward = self.CalculateGuidingReward(observed_world, action)\n    # calculate reward\n    reward = collision * self._col_penalty + \\\n      success * self._goal_reward + guiding_reward\n    return reward, done, eval_results\n  \n  def Reset(self, world):\n    self._goal_lane_corr = None\n    return super(GoalReachedGuiding, self).Reset(world)","sub_path":"bark_ml/evaluators/goal_reached_guiding.py","file_name":"goal_reached_guiding.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"31533946","text":"# Kaggle Python Tutorial on Machine Learning\n# Chapter 1\n# Exercise 6 - First prediction\n\n# Create a copy of test: test_one\ntest_one = test.copy(deep=True)\n\n# Initialize a Survived column to 0\ntest_one[\"Survived\"] = 0\n\n# Set Survived to 1 if Sex equals \"female\" and print the `Survived` column from `test_one`\ntest_one[\"Survived\"][test_one[\"Sex\"] == 'female'] = 1\nprint(test_one[\"Survived\"])\n","sub_path":"chapter1/06-firstprediction.py","file_name":"06-firstprediction.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"49662885","text":"# -*- coding: utf-8 -*-\n\"\"\" Configuration 
handling.\n\n References:\n - http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html\n - http://freedesktop.org/wiki/Software/pyxdg/\n - https://github.com/ActiveState/appdirs\n\"\"\"\n# Copyright © 2013 1&1 Internet AG\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, with_statement\n\nimport sys\nimport logging\nimport ConfigParser\n\n\nclass Configuration(object):\n \"\"\" Reads and manages the configuation.\n \"\"\"\n\n # Singleton instance\n instance = None\n\n\n @classmethod\n def create(cls, config_file=None):\n \"\"\" Return the default configuration.\n \"\"\"\n if cls.instance is None:\n cls.instance = cls(config_file)\n\n # Load config file, possibly overwriting the defaults\n cls.instance.load_ini()\n\n if config_file and config_file != cls.instance.config_file:\n raise RuntimeError(\"Configuration initialized a second time with a different file!\")\n\n return cls.instance\n\n\n def __init__(self, config_file=None):\n \"\"\" Initialize configuration.\n \"\"\"\n self.config_file = config_file\n\n # Set defaults\n #self.default(\"apt\", \"repositories\", \"primary\", list)\n #self.default(\"apt\", \"repositories\", \"secondary\", list)\n self._validate()\n\n\n def _validate(self):\n \"\"\" Validate a loaded configuration.\n \"\"\"\n #if isinstance(self.foobar, basestring):\n # try:\n # self.foobar = int(self.foobar, 10)\n # except (ValueError, TypeError), exc:\n # raise ValueError(\"Bad foobar %r: %s\" % (self.foobar, exc))\n\n\n def load_ini(self):\n \"\"\" Load the given .INI file.\n \"\"\"\n if not self.config_file:\n return\n\n # Load INI file\n ini_file = ConfigParser.SafeConfigParser()\n if not ini_file.read(self.config_file):\n raise ConfigParser.ParsingError(\"Global configuration file %r not found!\" % (\n self.config_file,\n ))\n\n \"\"\"\n # Make sure there's our global settings section\n if not ini_file.has_section(self.SECTION):\n raise ConfigParser.ParsingError(\"%r needs to have a [%s] section!\" % (\n self.config_file, self.SECTION,\n ))\n\n # Get the given values\n for key, val in ini_file.items(self.SECTION):\n # Ensure that all names are known (to prevent uncaught typos)\n if key not in self.KEYS:\n raise ConfigParser.ParsingError(\"%r has an unknown key %s in the [%s] section!\" % (\n self.config_file, key, self.SECTION,\n ))\n\n # Do some shell-like path expansion\n val = os.path.expanduser(os.path.expandvars(val))\n\n # Set as attribute for easy access\n setattr(self, key, val)\n \"\"\"\n\n self._validate()\n","sub_path":"src/infrascope/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"123188730","text":"from modules.alarm.alarm_time_file_base import AlarmTimeFileBase\nfrom logging import Logger\nfrom setting import Setting\n\n\nclass BlockAlarmRise(AlarmTimeFileBase):\n \"\"\"description of class\"\"\"\n\n def __init__(self, logger: Logger, setting: Setting):\n 
\"\"\"Initializes (declare internal variables)\"\"\"\n super(BlockAlarmRise, self).__init__(logger, setting)\n self._start_r = None\n self._start_g = None\n self._start_b = None\n self._stop_r = None\n self._stop_g = None\n self._stop_b = None\n self._step_r = None\n self._step_g = None\n self._step_b = None\n self._current_r = None\n self._current_g = None\n self._current_b = None\n\n def update_display(self, screen, size, fore_color, back_color, blocks, current_time) -> None:\n try:\n if not self._is_alarm:\n return\n back_color = (self._current_r, self._current_g, self._current_b)\n screen.fill(back_color)\n for block in blocks:\n block.update_display(True, screen, size, self._fore_color, back_color, current_time)\n\n (self._current_r, self._step_r) = self._calculate_color_part(\n self._start_r,\n self._stop_r,\n self._step_r,\n self._current_r)\n (self._current_g, self._step_g) = self._calculate_color_part(\n self._start_g,\n self._stop_g,\n self._step_g,\n self._current_g)\n (self._current_b, self._step_b) = self._calculate_color_part(\n self._start_b,\n self._stop_b,\n self._step_b,\n self._current_b)\n\n except Exception as ex:\n self._logger.exception(ex)\n\n def init_draw(self):\n super(BlockAlarmRise, self).init_draw()\n (_, background_color, _, _) = self._setting.get_curret_setting()\n self._start_r = background_color[0]\n self._start_g = background_color[1]\n self._start_b = background_color[2]\n self._stop_r = self._back_color[0]\n self._stop_g = self._back_color[1]\n self._stop_b = self._back_color[2]\n self._step_r = (self._stop_r - self._start_r) / 20\n self._step_g = (self._stop_g - self._start_g) / 20\n self._step_b = (self._stop_b - self._start_b) / 20\n self._current_r = self._start_r\n self._current_g = self._start_g\n self._current_b = self._start_b\n\n def _calculate_color_part(self, start, stop, step, current):\n current += step\n if current > stop:\n step = -step\n current += step\n if current < start:\n step = -step\n current += step\n return (current, step)\n","sub_path":"modules/alarm/block_alarm_rise.py","file_name":"block_alarm_rise.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"52548546","text":"# -*- coding: utf-8 -*-\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'HOST': '{{ db_expo }}'.split(':')[0],\n 'PORT': '{{ db_expo }}'.split(':')[1],\n# 'HOST': '{{ db_hhservice_host }}',\n# 'PORT': '{{ db_hhservice_port }}',\n 'NAME': 'hhservice',\n 'USER': '{{ db_expo_u }}',\n 'PASSWORD': '{{ db_expo_p }}'\n }\n}\n\n\nVISITOR_LOG_CACHE_HOSTS = ({{ memcache_quotes }},)\nVISITOR_LOG_CACHE_PREFIX = 'expo.'\nVISITOR_LOG_CACHE_TTL = 300\nVISITOR_LOG_ENABLE = True\nVISITOR_LOG_ACTIVITY_TRESHOLD = 300\nVISITOR_LOG_STAND_ACTIVITY_TRESHOLD = 300\n\ngelf_handler = {'class': 'graypy.GELFHandler',\n 'host': '{{ graylog_host }}',\n 'port': 12201}\n\nhandler = {'class': 'logging.handlers.WatchedFileHandler',\n 'filename': '/var/log/hh-expo/hh-expo.log',\n 'level': 'DEBUG',\n 'formatter': 'verbose'}\n\nemail_handler = {'class': 'logging.handlers.WatchedFileHandler',\n 'filename': '/var/log/hh-expo/emails.log',\n 'level': 'DEBUG',\n 'formatter': 'verbose'}\n\nMEDIA_URL = 'http://hh.ru/i-expo/'\nUPLOAD_PUT_URL = 'http://{{ intbal_n_c1 }}:8024/i-expo/'\nUPLOAD_TIMEOUT = 500\nUPLOAD_GET_TIMEOUT = 500\n\nSTATIC_URL = 'http://i-expo.hh.ru/static/'\n\nMAX_UPLOAD_SIZE = ''\n\nMAILER_QUEUE_HOSTS = ({{ cf_mailer_queue_hosts }})\nMAILER_QUEUE_LOGIN = 
'{{ cf_mailer_queue_login }}'\nMAILER_QUEUE_PASSWORD = '{{ cf_mailer_queue_password }}'\nMAILER_QUEUE_ROUTING_KEYS = {\n 'LOW': 'mail.active.LOW',\n 'NORMAL': 'mail.active.NORMAL',\n 'HIGH': 'mail.active.HIGH',\n}\nMAILER_QUEUE_VIRTUAL_HOST = '/'\nMAILER_QUEUE_EXCHANGE = ''\nEMAIL_QUEUE_DELAY = 300\nCAREER_EMAIL_TEMPLATE = 'career_fair_base'\nMAILER_QUEUE_MAILING_TYPE = 'applicant_advertising_mailer'\nMAILER_QUEUE_MAILING_ID = 0\n\nDEFAULT_FROM_EMAIL = 'Ярмарка вакансий '\n\nAPPLICANT_FAIR_START = '13.02.2013 10:00'\nAPPLICANT_FAIR_FINISH = '14.02.2013 18:00'\nPRESENTATION_CONVERTATION_ATTEMPTS = 3\nIMAGE_RESIZE_QUALITY = 95\nPRESENTATION_DENSITY = '150' # quality for presentation file convertation( in dpi )\n\nBANNER_SERVICE_ENABLED = True\nBANNER_SERVICE_HOST = '//hhcdn.ru/pv'\n\nVACANCY_SEARCH_ENABLE = True\nVACANCY_SEARCH_PER_PAGE = 20\n\nTEMPLATE_LOADERS = (\n ('django.template.loaders.cached.Loader', (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n )),\n)\n\nTEXT_BLOCK_CACHE_TTL = 60 * 60 * 24 # 1 day\n\nSOCIAL_AUTH_VK_OAUTH2_KEY = '{{ advdream_social_auth_vk_oauth2_key }}'\nSOCIAL_AUTH_VK_OAUTH2_SECRET = '{{ advdream_social_auth_vk_oauth2_secret }}'\n\nSOCIAL_AUTH_FACEBOOK_KEY = '{{ advdream_social_auth_facebook_key }}'\nSOCIAL_AUTH_FACEBOOK_SECRET = '{{ advdream_social_auth_facebook_secret }}'\n\nSOCIAL_AUTH_HHRU_OAUTH2_KEY = '{{ advdream_social_auth_hhru_oauth2_key }}'\nSOCIAL_AUTH_HHRU_OAUTH2_SECRET = '{{ advdream_social_auth_hhru_oauth2_secret }}'\n\nSOCIAL_AUTH_MAILRU_OAUTH2_KEY = '{{ advdream_social_auth_mailru_oauth2_key }}'\nSOCIAL_AUTH_MAILRU_OAUTH2_SECRET = '{{ advdream_social_auth_mailru_oauth2_secret }}'\n\nBANNER_INDEX_MAIN_SITE = '146'\nBANNER_INDEX_MAIN_PLACE = '627'\nBANNER_INDEX_RIGHT_SITE = '146'\nBANNER_INDEX_RIGHT_PLACE = '623'\nBANNER_INDEX_FOOTER_SITE = '146'\nBANNER_INDEX_FOOTER_PLACE = '624'\nBANNER_INNER_RIGHT_SITE = '146'\nBANNER_INNER_RIGHT_PLACE = '623'\nBANNER_INNER_FOOTER_SITE = '146'\nBANNER_INNER_FOOTER_PLACE = '624'\n\nTEXT_BLOCK_CACHE_TTL = 60 * 60 * 24 # 1 day\nEXPO_REGION_CACHE_TTL = 60 * 60 * 24 # 1 day\n\nSESSION_COOKIE_DOMAIN = '.expo.hh.ru'\nFAQITEM_PER_PAGE = 10\n","sub_path":"public/playbooks/roles/expo/templates/etc/hh-expo/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"291588282","text":"\"\"\"\nBuilds the infrastructure for Hip Edit backing services.\n\"\"\"\nfrom __future__ import print_function\nimport logging\nfrom os import path\nfrom hip_edit import activemq\nfrom hip_edit import cli_arg_parser\nfrom hip_edit import cf_template_builder\nfrom hip_edit import cf_driver\nfrom hip_edit import log\nfrom hip_edit.build_context import BuildContext\n\nLOGGER = log.get_stream_logger(__name__)\n\ndef main():\n \"\"\"\n Entry point\n \"\"\"\n cli_options = cli_arg_parser.services_arg_parser().parse_args()\n logging.root.setLevel(logging.DEBUG if cli_options.verbose else logging.INFO)\n if not cli_options.stack_down():\n if cli_options.stack_halt():\n if confirm(\"\"\"You are going to stop the ActveMQ instance and release the EIP forever.\n Is this what you want?\"\"\") != 'yes':\n LOGGER.info('No changes made.')\n return\n template = cf_template_builder.build(cli_options)\n else:\n if confirm(\"\"\"You are going to destroy all stack resources and\n this operation can not be done. 
Is this what you want?\"\"\") != 'yes':\n LOGGER.info('No changes made.')\n return\n template = None\n outputs = cf_driver.execute(cli_options, template)\n if outputs is None or cli_options.stack_down():\n return\n build_ctx = BuildContext()\n build_ctx.add('services', outputs).save()\n activemq_instance_id = build_ctx.get('MessageServerInstanceId', group_key='services')\n if cli_options.stack_up():\n activemq.check_instance_status(instance_id=activemq_instance_id)\n hostname = build_ctx.get('npm_config_messaging_host')\n outputs = activemq.configure(cli_options, hostname,\n templates_path=path.abspath('./artifacts/activemq'),\n distribution_type='bitnami')\n build_ctx.add(('services', 'activemq', 'users'), outputs).save()\n else:\n activemq.halt_instance(instance_id=activemq_instance_id)\n\n\n\ndef confirm(message, prompt=' ([no]/yes) '):\n \"\"\"Prints a message and returns user input.\"\"\"\n print(\"\\n\".join((s.strip() for s in message.split(\"\\n\"))), end='')\n return raw_input(prompt)\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"hip-edit-infra/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"118243247","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n#path =\"../frictionCompensator/velocities/test3/\"\npath =\"../testTorqueEncs/torques/leftArm/joint22/\"\n\nfor file in os.listdir(path):\n if file.endswith(\".txt\"):\n with open(path+file, 'r') as f:\n data = f.readlines()\n time = []\n velocity = []\n for line in data:\n numbers = line.split()\n time.append(float(numbers[1]))\n velocity.append(float(numbers[2]))\n time[:]=[x - time[0] for x in time]\n plt.plot(time, velocity, lw=4)\n\n\n accel=[]\n for j in range(len(velocity)-1):\n accel.append( float((velocity[j+1] - velocity[j])/(time[j+1] - time[j])) )\n accel.append(float((velocity[-1] - velocity[-2])/(time[-1] - time[-2])))\n plt.plot(time, accel, lw=4)\n plt.show()\n\n\n#labels\nplt.xlabel('time [s]', size=30)\n#plt.ylabel('velocity ' u\"\\u03C9\" ' [rad/s]', size=25)\nplt.ylabel('velocity [degrees/s]', size=30)\n\n#limits tuneados a mano\n#plt.xlim((0, 1))\nplt.xlim((0, 3.5))\n\nplt.xticks(size=25)\n# plt.ylim((100, y))\nplt.yticks(size=25)\n\n#plt.title('Velocity vs. 
Time curves', size=30)\n\n#show\nplt.show()\n","sub_path":"friction-gravity-compensation/friction/accel_stability.py","file_name":"accel_stability.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"131616492","text":"\"\"\"The following file is released under the Apache 2 Licence, see LICENCE.txt.\"\"\"\n\nimport setuptools\n\n\nwith open(\"README.md\") as fp:\n long_description = fp.read()\n\n\nsetuptools.setup(\n name=\"vpn_example\",\n version=\"0.0.1\",\n\n description=\"A Cloud Gurus Transit Gateway\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n\n author=\"Phil Basford\",\n\n package_dir={\"\": \"vpn_example\"},\n packages=setuptools.find_packages(where=\"vpn_example\"),\n\n install_requires=[\n \"aws-cdk.core==1.109.0\",\n \"aws-cdk.aws_ec2\",\n \"aws-cdk.aws_ecs\",\n \"aws-cdk.aws_ecs_patterns\",\n \"aws-cdk.aws_rds\",\n \"aws_cdk.aws_secretsmanager\"\n ],\n\n python_requires=\">=3.6\",\n\n classifiers=[\n \"Development Status :: 4 - Beta\",\n\n \"Intended Audience :: Developers\",\n\n \"Programming Language :: JavaScript\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Utilities\",\n\n \"Typing :: Typed\",\n ],\n)\n","sub_path":"vpn-example/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"338623324","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Documentation source:\n# - https://gbdev.gg8.se/wiki/articles/Sound_Controller\n\nclass PolynomialCounter:\n\n def __init__(self):\n self.i = 0\n self.shifted_divisor = 0\n\n def set_nr43(self,value):\n clock_shifted = value >> 4\n divisor = 0\n divisor = {\n 0: 8,\n 1: 16,\n 2: 32,\n 3: 48,\n 4: 64,\n 5: 80,\n 6: 96,\n 7: 112\n }.get(value & 0b00000111)\n self.shifted_divisor = divisor << clock_shifted\n self.i = 1\n\n def step(self, ticks):\n self.i -= ticks\n if self.i <= 0:\n self.i = self.shifted_divisor\n return True\n return False\n","sub_path":"vsgb/audio/polynomial_counter.py","file_name":"polynomial_counter.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"339984011","text":"# Copyright 2021 Nokia\n# Licensed under the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport secrets\nimport json\nfrom flask import Blueprint, render_template, flash, redirect, request\n\nimport a10.structures.constants\nimport a10.structures.identity\n\nimport a10.asvr.elements\nimport a10.asvr.results\nimport a10.asvr.policies\n\nfrom . 
import formatting\nimport a10.asvr.expectedvalues  # needed by element() below; missing from the original imports\n\nelements_blueprint = Blueprint('elements', __name__, static_folder='../static', template_folder='../templates/')\n\nsecret = secrets.token_urlsafe(64)\nelements_blueprint.secret_key = secret\n\n\n@elements_blueprint.route(\"/elements\", methods=['GET'])\ndef elements():\n\tlrs=5 # default number of latest results if nothing else is specified\n\n\tif 'lrs' in request.args:\n\t\tlrs = int(request.args['lrs'])\n\n\tes = a10.asvr.elements.getElementsFull()\n\n\tfor e in es:\n\t\tprint(\"HERE\",e)\n\t\tres = a10.asvr.results.getLatestResults(e['itemid'], lrs)\n\t\tresultsummary = []\n\t\tfor r in res:\n\t\t\tsummarystr = {\n\t\t\t\t'verifiedAt': formatting.futc(r['verifiedAt']),\n\t\t\t\t'pid': r['policyID'],\n\t\t\t\t'pname': a10.asvr.policies.getPolicy(r['policyID']).msg()['name'],\n\t\t\t\t'res': r['result'],\n\t\t\t\t'rul': r['ruleName'],\n\t\t\t\t'rid': r['itemid']\n\t\t\t}\n\n\t\t\tresultsummary.append(summarystr)\n\n\t\te['summary'] = resultsummary\n\n\tes_sorted = sorted(es, key=lambda i: (i['name']))\n\n\treturn render_template('elements.html', elements=es_sorted)\n\n\n@elements_blueprint.route(\"/element/<item_id>\", methods=['GET'])\ndef element(item_id):\n\tlrs=50 # default number of latest results if nothing else is specified\n\n\tif 'lrs' in request.args:\n\t\tlrs = int(request.args['lrs'])\n\n\te = a10.asvr.elements.getElement(item_id)\n\tevs = a10.asvr.expectedvalues.getExpectedValuesForElement(item_id)\n\tfor i in evs:\n\t\tp = a10.asvr.policies.getPolicy(i['policyID'])\n\t\tif p.rc()==a10.structures.constants.SUCCESS:\n\t\t\ti['policyname'] = p.msg()['name']\n\t\telse:\n\t\t\ti['policyname'] = \"POLICY DELETED\"\n\n\tresultsummary = []\n\tres = a10.asvr.results.getLatestResults(item_id, lrs)\n\n\tfor r in res:\n\t\tresultsummary.append({\n\t\t\t\t'verifiedAt': formatting.futc(r['verifiedAt']),\n\t\t\t\t'pid': r['policyID'],\n\t\t\t\t'pname': a10.asvr.policies.getPolicy(r['policyID']).msg()['name'],\n\t\t\t\t'res': r['result'],\n\t\t\t\t'rul': r['ruleName'],\n\t\t\t\t'msg': r['message'],\n\t\t\t\t'rid': r['itemid']\n\t\t\t})\n\n\tpp = json.dumps(e.msg(), sort_keys=True, indent=4)\n\treturn render_template(\"element.html\", e=e.msg(), evs=evs, rs=resultsummary, pp=pp)\n","sub_path":"u10/blueprints/elements.py","file_name":"elements.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"4263545","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTraining controller for the model. Its core members are: model, reader and evaluate (TBD). Core methods:\n0. Runtime environment initialization\n1. Network initialization\n2. Reader initialization\n3. Model training\n4. Model evaluation\n5. Model saving: keep the meta information as complete as possible\n6. Visualization of model metrics and of the network structure\n7. Model selection strategy\n\n--------------------------\n\nThe core methods are called in the following order:\n1. Print meta and version logs to make debugging easier\n2. Authentication\n3. Initialize the required runtime environment\n\"\"\"\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport shutil\nimport paddle.distributed.fleet as fleet\nimport paddle.distributed.fleet.base.role_maker as role_maker\nimport paddle\nimport paddle.static as static\nfrom .. 
import version\nfrom ..common.rule import InstanceName\nfrom ..utils.util_helper import get_model_paths, save_meta_data, make_targz\n\n\nclass BaseStaticTrainer(object):\n def __init__(self, params, data_set_reader, model):\n \"\"\"\n :param params\n :param data_set_reader\n :param model\n \"\"\"\n self.params = params\n self.data_set_reader = data_set_reader\n self.model_class = model\n\n # 参数解析\n # 动态图or静态图\n self.enable_static = True\n self.is_recompute = self.params.get(\"is_recompute\", 0)\n if 'output_path' in self.params.keys() and self.params[\"output_path\"]:\n self.save_checkpoints_path = os.path.join(self.params[\"output_path\"], \"save_checkpoints\")\n self.save_inference_model_path = os.path.join(self.params[\"output_path\"], \"save_inference_model\")\n else:\n self.save_checkpoints_path = \"./output/save_checkpoints/\"\n self.save_inference_model_path = \"./output/save_inference_model/\"\n\n self.forward_train_output = {}\n self.fetch_list_train = []\n self.fetch_list_evaluate = []\n self.fetch_list_train_key = []\n self.fetch_list_evaluate_key = []\n\n self.parser_meta()\n self.use_fleet = False\n self.init_env_static()\n\n def do_train(self):\n \"\"\"\n 启动数据集循环,开始训练\n :return:\n \"\"\"\n raise NotImplementedError\n\n def do_evaluate(self, reader, phase, step):\n \"\"\"在当前的训练状态下,对某个测试集进行评估\n :param reader:待评估数据集\n :param phase:当前的运行阶段\n :param step:当前的运行步数\n \"\"\"\n raise NotImplementedError\n\n def do_visual(self):\n \"\"\"评估指标的可视化展示\n \"\"\"\n raise NotImplementedError\n\n def parser_meta(self):\n logging.info(\"parser meta ....\")\n model_meta_info = {}\n if self.params[\"load_checkpoint\"] or self.params[\"load_parameters\"]:\n model_meta_info = self.load_model_meta_info(\"net_model\")\n elif self.params[\"pre_train_model\"]:\n model_meta_info = self.load_model_meta_info(\"pre_train_model\")\n # 由外部json配置传入\n meta_param = {}\n extra_param = self.params.get(\"extra_param\", None)\n if extra_param:\n meta_param = extra_param.get(\"meta\", None)\n\n self.meta_dict = {\n \"framework_version\": version.full_version,\n \"model_type\": model_meta_info.get(\"model_type\", \"\"),\n \"pretrain_model_version\": model_meta_info.get(\"pretrain_model_version\", \"\"),\n \"pretrain_model_type\": model_meta_info.get(\"pretrain_model_type\", \"\"),\n \"job_type\": meta_param.get(\"job_type\", \"custom\"),\n \"net_type\": self.model_class.__class__.__name__,\n \"task_type\": \"train\",\n \"deploy_type\": 4,\n \"is_dynamic\": 0\n }\n return\n\n def init_env_static(self):\n \"\"\"\n 初始化静态图的运行时环境:包括:program、executor、fleet、cuda、place\n :return:\n \"\"\"\n logging.info(\"init environment on static mode......\")\n paddle.enable_static()\n\n # step1: init program\n self.startup_program = static.Program()\n self.train_program = static.Program()\n self.test_program = static.Program()\n self.evaluate_program = static.Program()\n self.save_inference_program = static.Program()\n\n random_seed = self.params.get(\"random_seed\", 0)\n if random_seed is not None:\n self.startup_program.random_seed = random_seed\n self.train_program.random_seed = random_seed\n self.test_program.random_seed = random_seed\n self.evaluate_program.random_seed = random_seed\n self.save_inference_program.random_seed = random_seed\n\n # step2: init run place、executor、fleet\n self.num_trainers = 1\n self.trainer_id = 0\n\n self.place_type = self.params.get(\"PADDLE_PLACE_TYPE\", os.getenv(\"PADDLE_PLACE_TYPE\", \"cpu\"))\n self.params[\"PADDLE_PLACE_TYPE\"] = self.place_type\n\n # executor执行器的一些参数设置\n self.use_fast_executor = 
self.params.get(\"use_fast_executor\", False)\n self.exe_strategy = paddle.static.ExecutionStrategy()\n self.exe_strategy.num_iteration_per_run = self.params.get(\"num_iteration_per_run\", 1)\n self.exe_strategy.num_iteration_per_drop_scope = self.params.get(\"num_iteration_per_drop_scope\", 10)\n\n self.build_strategy = paddle.static.BuildStrategy()\n\n if self.place_type == \"gpu\":\n logging.info(\"gpu place....\")\n gpus = os.getenv('FLAGS_selected_gpus', '0').split(\",\")\n self.gpu_id = int(gpus[0])\n self.run_place = paddle.CUDAPlace(int(gpus[0]))\n self.dev_count = len(gpus)\n self.exe_strategy.num_threads = self.dev_count\n self.use_cuda = True\n \"\"\"\n gpu fleet 使用三步骤:\n 1.导入依赖包:from paddle.distributed import fleet\n 2.初始化fleet环境:包括定义缺省的分布式策略,然后通过将参数is_collective设置为True,使训练架构设定为Collective架构。\n strategy = fleet.DistributedStrategy()\n fleet.init(is_collective=True, strategy=strategy)\n 3.使用distributed_optimizer设置分布式训练优化器\n optimizer = fleet.distributed_optimizer(optimizer)\n \"\"\"\n if self.params.get(\"PADDLE_IS_FLEET\", 0):\n fleet.init(is_collective=True)\n logging.info(\"fleet init ...\")\n self.use_fleet = True\n self.strategy = fleet.DistributedStrategy()\n self.strategy.execution_strategy = self.exe_strategy\n self.strategy.build_strategy = self.build_strategy\n # TODO nccl_comm_num 可以加快GPU之间的通信效率,建议单机设置为1,多机设置为2。\n # TODO 找个判断多机的方法,设置nccl_comm_num参数\n self.strategy.nccl_comm_num = 1\n self.strategy.sync_nccl_allreduce = True\n self.strategy.fuse_all_reduce_ops = True\n\n # amp设置\n self.use_amp = self.params.get(\"use_amp\", False)\n if self.use_amp:\n opt_params = self.model_class.model_params.get('optimization', None)\n init_loss_scaling = opt_params.get(\"init_loss_scaling\", 1.0)\n incr_every_n_steps = opt_params.get(\"incr_every_n_steps\", 1000)\n decr_every_n_nan_or_inf = opt_params.get(\"decr_every_n_nan_or_inf\", 2)\n incr_ratio = opt_params.get(\"incr_ratio\", 2.0)\n decr_ratio = opt_params.get(\"decr_ratio\", 0.8)\n\n self.strategy.amp = True\n self.strategy.amp_configs = {\n \"init_loss_scaling\": init_loss_scaling,\n \"decr_every_n_nan_or_inf\": decr_every_n_nan_or_inf,\n \"incr_every_n_steps\": incr_every_n_steps,\n \"incr_ratio\": incr_ratio,\n \"use_dynamic_loss_scaling\": True,\n \"decr_ratio\": decr_ratio,\n \"custom_white_list\": [],\n \"custom_black_list\": [],\n }\n\n fleet.init(is_collective=True, strategy=self.strategy)\n # 以下代码是为了打印日志,不影响训练\n trainer_id = fleet.worker_index()\n current_endpoint = os.getenv(\"PADDLE_CURRENT_ENDPOINT\")\n worker_endpoints = fleet.worker_endpoints()\n trainers_num = len(worker_endpoints)\n logging.debug(\"worker_endpoints:{} trainers_num:{} current_endpoint:{} trainer_id:{}\".format(\n worker_endpoints,\n trainers_num,\n current_endpoint,\n trainer_id))\n self.num_trainers = trainers_num\n self.trainer_id = trainer_id\n else:\n self.use_fleet = False\n self.num_trainers = 1\n self.trainer_id = 0\n\n elif self.place_type == \"xpu\":\n logging.info(\"xpu_place, support single device mode only\")\n xpus = os.getenv('FLAGS_selected_xpus', '0').split(\",\")\n # self.run_place = paddle.XPUPlace(int(xpus[0]))\n self.run_place = paddle.set_device(\"xpu:\" + xpus[0])\n self.dev_count = 1\n self.exe_strategy.num_threads = self.dev_count\n self.gpu_id = 0\n self.use_cuda = False\n logging.info(\"finish prepare xpu single deviece env\")\n self.use_fleet = False\n self.num_trainers = 1\n self.trainer_id = 0\n else:\n logging.info(\"cpu place....\")\n self.run_place = paddle.CPUPlace()\n self.dev_count = 
int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))\n self.use_cuda = False\n self.gpu_id = 0\n self.exe_strategy.num_threads = self.dev_count\n \"\"\"\n cpu fleet 使用步骤\n https://fleetx.readthedocs.io/en/latest/paddle_fleet_rst/parameter_server/ps_quick_start.html\n 1.导入依赖:\n import paddle.distributed.fleet as fleet\n import paddle.distributed.fleet.base.role_maker as role_maker\n \n 2.定义分布式模式并初始化分布式训练环境,当前参数服务器模式只支持静态图模式\n 通过fleet.init()接口,用户可以定义训练相关的环境,注意此环境是用户预先在环境变量中配置好的,\n 包括:训练节点个数,服务节点个数,当前节点的序号,服务节点完整的IP:PORT列表等。\n paddle.enable_static()\n role = role_maker.PaddleCloudRoleMaker()\n fleet.init(role)\n \n 3.组网,加载reader\n model = init_net()\n reader = init_dataset_reader()\n \n 4.定义同步训练 Strategy 及 Optimizer\n optimizer = paddle.optimizer.SGD(learning_rate=0.0001)\n strategy = fleet.DistributedStrategy()\n strategy.a_sync = True\n optimizer = fleet.distributed_optimizer(optimizer, strategy)\n optimizer.minimize(model.cost)\n \n 5.训练\n \n \"\"\"\n if self.params.get(\"PADDLE_IS_FLEET\", 0):\n logging.info(\"int fleet parameter server mode in multi cpus....\")\n role = role_maker.PaddleCloudRoleMaker(is_collective=False)\n fleet.init(role)\n self.use_fleet = True\n else:\n self.use_fleet = False\n self.num_trainers = 1\n self.trainer_id = 0\n\n # step3: init executor with run place\n self.executor = static.Executor(self.run_place)\n\n # step4: init model net\n self.init_static_model_net()\n\n # step5: run executor\n self.executor.run(self.startup_program)\n\n # step6: load model params: checkpoints or pre_train_model\n if self.params[\"load_checkpoint\"] or self.params[\"load_parameters\"]:\n self.load_static_model_params(\"net_model\")\n elif self.params[\"pre_train_model\"]:\n self.load_static_model_params(\"pre_train_model\")\n\n # step7: init train_executor\n if self.use_fleet:\n self.train_exe = self.executor\n else:\n if self.place_type == \"xpu\":\n self.train_exe = self.executor\n else:\n # 单机模式下可以使用ParallelExecutor来提速\n self.train_exe = static.ParallelExecutor(\n use_cuda=self.use_cuda,\n loss_name=self.forward_train_output[InstanceName.LOSS].name,\n exec_strategy=self.exe_strategy,\n build_strategy=self.build_strategy,\n main_program=self.train_program,\n num_trainers=self.num_trainers,\n trainer_id=self.trainer_id)\n\n def init_static_model_net(self):\n \"\"\"init static model net\n \"\"\"\n logging.info(\"init_model_net.....\")\n self.init_static_train_net()\n if self.params[\"is_eval_dev\"]:\n self.evaluate_program = self.init_static_evaluate_net(self.data_set_reader.dev_reader,\n self.evaluate_program)\n if self.params[\"is_eval_test\"]:\n self.test_program = self.init_static_evaluate_net(self.data_set_reader.test_reader, self.test_program)\n self.init_static_save_inference_net()\n\n def init_static_train_net(self):\n \"\"\"\n 训练网络初始化,前向+后向\n :return:\n \"\"\"\n with static.program_guard(self.train_program, self.startup_program):\n with paddle.fluid.unique_name.guard():\n self.data_set_reader.train_reader.dataset.create_reader()\n fields_dict = self.data_set_reader.train_reader.dataset.instance_fields_dict()\n self.model_class.structure()\n if getattr(self.model_class, 'param_attrs', None):\n self.model_class.set_param_attrs(self.train_program)\n self.forward_train_output = self.model_class.forward(fields_dict, phase=InstanceName.TRAINING)\n loss = self.forward_train_output[InstanceName.LOSS]\n self.model_class.set_optimizer()\n \n # 加入recompute功能\n if self.is_recompute:\n self.strategy.recompute = True\n self.strategy.recompute_configs = {\"checkpoints\": 
self.forward_train_output['checkpoints']}\n del self.forward_train_output[\"checkpoints\"]\n \n if self.use_fleet:\n self.optimizer = fleet.distributed_optimizer(self.model_class.optimizer, strategy=self.strategy)\n else:\n self.optimizer = self.model_class.optimizer\n\n self.optimizer.minimize(loss)\n\n if self.forward_train_output.__contains__(InstanceName.TARGET_FEED):\n self.forward_train_output.pop(InstanceName.TARGET_FEED)\n if self.forward_train_output.__contains__(InstanceName.TARGET_PREDICTS):\n self.forward_train_output.pop(InstanceName.TARGET_PREDICTS)\n # TODO:这里需要注意一下,或许有坑\n # self.forward_train_output.update(self.optimizer_output_dict)\n # 如果想获取学习率,加上下面这一行就能fetch出来\n self.forward_train_output.update({\"lr\": \"learning_rate_0\"})\n self.fetch_list_train = list(self.forward_train_output.values())\n self.fetch_list_train_key = list(self.forward_train_output.keys())\n\n def init_static_evaluate_net(self, reader, program):\n \"\"\"初始化评估过程的网络,网络只有前向\n :return:\n \"\"\"\n with static.program_guard(program, self.startup_program):\n with paddle.fluid.unique_name.guard():\n reader.dataset.create_reader()\n fields_dict = reader.dataset.instance_fields_dict()\n self.model_class.structure()\n self.forward_evaluate_output = self.model_class.forward(fields_dict, phase=InstanceName.EVALUATE)\n if \"mems\" in self.forward_evaluate_output.keys():\n self.mems_eval = self.forward_evaluate_output[\"mems\"]\n del self.forward_evaluate_output[\"mems\"]\n\n if self.forward_evaluate_output.__contains__(InstanceName.TARGET_FEED):\n self.forward_evaluate_output.pop(InstanceName.TARGET_FEED)\n\n if self.forward_evaluate_output.__contains__(InstanceName.TARGET_PREDICTS):\n self.forward_evaluate_output.pop(InstanceName.TARGET_PREDICTS)\n\n self.fetch_list_evaluate = list(self.forward_evaluate_output.values())\n self.fetch_list_evaluate_key = list(self.forward_evaluate_output.keys())\n\n program = program.clone(for_test=True)\n return program\n\n def init_static_save_inference_net(self):\n \"\"\"初始化用来保存inference model的网络,只有前向,且是裁切过后的网络。\n :return:\n \"\"\"\n with static.program_guard(self.save_inference_program, self.startup_program):\n with paddle.fluid.unique_name.guard():\n self.data_set_reader.predict_reader.dataset.create_reader()\n fields_dict = self.data_set_reader.predict_reader.dataset.instance_fields_dict()\n self.model_class.structure()\n forward_output_dict = self.model_class.forward(fields_dict, phase=InstanceName.SAVE_INFERENCE)\n feed_tensor = forward_output_dict[InstanceName.TARGET_FEED]\n target_feed_list = []\n for x in feed_tensor:\n target_feed_list.append(x.name)\n\n self.infer_dict = get_infer_data_meta(target_feed_list, fields_dict)\n self.feed_target_tensor = feed_tensor\n self.inference_output = forward_output_dict[InstanceName.TARGET_PREDICTS]\n\n self.save_inference_program = self.save_inference_program.clone(for_test=True)\n\n def load_static_model_params(self, params_type):\n \"\"\"\n \"\"\"\n logging.info(\"load_model_params on static mode....\")\n if params_type == \"net_model\":\n if self.params[\"load_checkpoint\"] and self.params[\"load_parameters\"]:\n raise ValueError(\n \"ERROR: config 'load_checkpoint' and 'load_parameters' \"\n \"both are set! Only one of them should be set. \"\n \"if you want warmstart checkpoint keep its learning_rate and moments, plese set 'load_checkpoint'. 
\"\n \"if you want warmstart checkpoint with only its parameters, and you want reset a new learning_rate \"\n \"by config, plese set 'load_parameters'\")\n if self.params[\"load_checkpoint\"]:\n original_path = self.params[\"load_checkpoint\"]\n init_checkpoint(exe=self.executor, init_checkpoint_path=original_path, main_program=self.train_program)\n elif self.params[\"load_parameters\"]:\n original_path = self.params[\"load_parameters\"]\n init_pretraining_params(exe=self.executor,\n pretraining_params_path=original_path, main_program=self.train_program)\n\n elif params_type == \"pre_train_model\":\n # pretrain_embedding_path = self.get_pretrain_embedding_path()\n for pre_train_model in self.params[\"pre_train_model\"]:\n logging.info(\"pre_train_model's name = %s\" % pre_train_model[\"name\"])\n params_path = pre_train_model[\"params_path\"]\n init_pretraining_params(exe=self.executor,\n pretraining_params_path=params_path,\n main_program=self.train_program)\n # self.save_model(0)\n # exit()\n\n def save_model(self, steps, save_checkpoint=True, save_inference=True):\n if self.enable_static:\n logging.info(\"save model on static....\")\n if save_checkpoint:\n self.save_checkpoint(self.executor, self.train_program, steps)\n if save_inference:\n self.save_inference(self.executor, self.feed_target_tensor, self.inference_output,\n self.save_inference_program, steps, self.infer_dict)\n else:\n logging.info(\"save model on dynamic....\")\n\n def save_checkpoint(self, exe, program, steps):\n \"\"\"\n :param exe:\n :param program:\n :param steps:\n :return:\n \"\"\"\n path_dict = get_model_paths(self.save_checkpoints_path, self.save_inference_model_path, steps)\n save_path = path_dict[\"checkpoints_model_path\"]\n # todo: 需要验证一下fleet的save和非fleet有没有区别\n paddle.fluid.io.save_persistables(exe, save_path, program)\n meta_path = path_dict[\"checkpoints_meta_path\"]\n save_meta_data(self.meta_dict, meta_path)\n if self.params.get(\"need_tar\", False):\n # 压缩为tar.gz\n errcode = make_targz(save_path + \".tar.gz\", save_path)\n if errcode == 0:\n shutil.rmtree(save_path)\n\n def save_inference(self, exe, feed_vars, target_vars, program, steps, data_dict):\n \"\"\"\n :param exe:\n :param feed_vars\n :param target_vars\n :param program:\n :param steps:\n :param data_dict:\n :return:\n \"\"\"\n path_dict = get_model_paths(self.save_checkpoints_path, self.save_inference_model_path, steps)\n save_path = os.path.join(path_dict[\"inference_model_path\"], \"wenxin\")\n # paddle.fluid.io.save_inference_model\n # paddle.static.save_inference_model\n paddle.static.save_inference_model(\n save_path,\n feed_vars,\n target_vars,\n exe,\n program=program,\n model_filename=\"model\",\n params_filename=\"params\")\n\n infer_meta_path = path_dict[\"inference_infer_meta_path\"]\n meta_path = path_dict[\"inference_meta_path\"]\n save_meta_data(data_dict, infer_meta_path)\n save_meta_data(self.meta_dict, meta_path)\n\n def load_model_meta_info(self, load_model):\n \"\"\"\n 获取模型的meta信息\n :param load_model:\n :return:\n \"\"\"\n meta_info = {}\n if load_model == \"net_model\":\n if self.params[\"load_checkpoint\"]:\n original_path = self.params[\"load_checkpoint\"]\n meta_info = parse_meta(original_path)\n elif self.params[\"load_parameters\"]:\n original_path = self.params[\"load_parameters\"]\n meta_info = parse_meta(original_path)\n elif load_model == \"pre_train_model\":\n for pre_train_model in self.params[\"pre_train_model\"]:\n logging.info(\"pre_train_model's name = %s\" % pre_train_model[\"name\"])\n params_path = 
\n params_path = os.path.dirname(pre_train_model[\"params_path\"])\n # original_path = params_path = os.path.dirname(pre_train_model[\"params_path\"])\n meta_info = parse_meta(params_path)\n return meta_info\n\n\ndef get_infer_data_meta(target_feed_list, fields_dict):\n \"\"\"\n :param target_feed_list:\n :param fields_dict:\n :return:\n \"\"\"\n infer_dict = {\"fields\": []}\n for name in target_feed_list:\n for k1, v1 in fields_dict.items(): # dict_keys(['text_a', 'label'])\n for k2, v2 in v1.items():\n if v2:\n for k3 in v2:\n # logging.info(k3)\n if v2[k3] and v2[k3].name == name:\n field_ele = \"%s#%s\" % (k1, k3)\n infer_dict[\"fields\"].append(field_ele)\n return infer_dict\n\n\ndef parse_meta(model_dir):\n \"\"\"\n :param model_dir:\n :return: meta_dict\n \"\"\"\n json_path = None\n meta_dict = {}\n for file in os.listdir(model_dir):\n if file.endswith(\".meta\"):\n json_path = file\n break\n try:\n if json_path:\n json_file = open(os.path.join(model_dir, json_path), 'r')\n model_info = json_file.read()\n meta_dict = json.loads(model_info)\n except Exception as e:\n logging.error(\"error while parsing model.meta.....\")\n return meta_dict\n\n\ndef init_checkpoint(exe, init_checkpoint_path, main_program):\n \"\"\"Load checkpoint files\n :param exe:\n :param init_checkpoint_path:\n :param main_program:\n :return:\n \"\"\"\n assert os.path.exists(init_checkpoint_path), \"[%s] can't be found.\" % init_checkpoint_path\n\n def existed_persitables(var):\n \"\"\"\n existed_persitables\n \"\"\"\n if not paddle.fluid.io.is_persistable(var):\n return False\n return os.path.exists(os.path.join(init_checkpoint_path, var.name))\n\n paddle.fluid.io.load_vars(exe, init_checkpoint_path, main_program=main_program, predicate=existed_persitables)\n logging.info(\"Load model from {}\".format(init_checkpoint_path))\n\n\ndef init_pretraining_params(exe, pretraining_params_path, main_program):\n \"\"\"\n :param exe:\n :param pretraining_params_path:\n :param main_program:\n :return:\n \"\"\"\n assert os.path.exists(pretraining_params_path), \"[%s] can't be found.\" % pretraining_params_path\n\n def existed_params(var):\n \"\"\"\n existed_params\n \"\"\"\n if not isinstance(var, paddle.fluid.framework.Parameter):\n return False\n return os.path.exists(os.path.join(pretraining_params_path, var.name))\n\n paddle.fluid.io.load_vars(exe, pretraining_params_path, main_program=main_program, predicate=existed_params)\n\n\n","sub_path":"erniekit/controller/static_trainer.py","file_name":"static_trainer.py","file_ext":"py","file_size_in_byte":26171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"513144393","text":"#!/usr/bin/env python\nimport os, sys, unittest\nfrom time import sleep\nimport PyCoralTest\n\n#============================================================================\n\ndef createTable():\n ###print MSGHDR, \"Recreate the table\"\n session = svc.connect( urlRW, coral.access_Update )\n session.transaction().start(False)\n session.nominalSchema().dropIfExistsTable( tableName )\n description = coral.TableDescription()\n description.setName( tableName)\n description.insertColumn( 'ID', 'int' )\n description.insertColumn( 'Data', 'float' ) # MySQL test fails with double?\n description.setPrimaryKey( 'ID' )\n tableHandle = session.nominalSchema().createTable( description )\n session.transaction().commit()\n sleep(1)\n\n#============================================================================\n\nclass PyCoralMiscellaneousBugsTest( PyCoralTest.TestCase ):\n\n 
#------------------------------------------------------------------------\n\n def setUp(self):\n # Call the base class method\n PyCoralTest.TestCase.setUp(self)\n print(\"\")\n # Use CORAL defaults for retrial parameters\n svc.configuration().setConnectionRetrialPeriod( retrialPeriod )\n svc.configuration().setConnectionRetrialTimeOut( retrialTimeOut )\n # Use CORAL defaults for connection sharing (enabled)\n svc.configuration().enableConnectionSharing()\n # Configure the connection service (see bug #71449)\n # - disable the CORAL connection pool cleanup\n # - connection timeout=0: \"idle\" connections are immediately \"expired\"\n svc.configuration().disablePoolAutomaticCleanUp()\n svc.configuration().setConnectionTimeOut(0)\n # Use CORAL defaults for RO transactions (serializable)\n if \"CORAL_ORA_SKIP_TRANS_READONLY\" in os.environ:\n del os.environ[\"CORAL_ORA_SKIP_TRANS_READONLY\"]\n\n #------------------------------------------------------------------------\n\n def tearDown(self):\n # Purge the connection pool after each test\n svc.purgeConnectionPool()\n # Call the base class method\n PyCoralTest.TestCase.tearDown(self)\n \n #------------------------------------------------------------------------\n\n # Test bug #61090 aka bug #76501\n def test010_bug61090(self):\n session = svc.connect( urlRW, coral.access_Update )\n # Fill table in R/W tx (do not delete bulkInserter yet)\n print(MSGHDR, \"Fill the table in R/W tx - do not delete bulkInserter\")\n session.transaction().start(False)\n editor=session.nominalSchema().tableHandle(tableName).dataEditor()\n editor.deleteRows(\"\",coral.AttributeList())\n bulkInserter = editor.bulkInsert( rowBuffer, 100 )\n for i in range(5):\n rowBuffer[\"ID\"].setData(i)\n rowBuffer[\"Data\"].setData(i+0.1*i)\n bulkInserter.processNextIteration()\n bulkInserter.flush()\n session.transaction().commit()\n # Read data in R/O tx (no need to reconnect R/O session to show bug)\n print(MSGHDR, \"Query the table in R/O tx\")\n session.transaction().start(True)\n query = session.nominalSchema().newQuery()\n query.addToTableList(tableName)\n query.setRowCacheSize(3)\n query.defineOutput(rowBuffer)\n cursor=query.execute()\n nrows = 0\n while cursor.next() :\n nrows = nrows + 1\n row = cursor.currentRow()\n print(MSGHDR, \"Current row:\", row)\n self.assertEqual( 5, nrows )\n cursor=None\n query=None\n session.transaction().commit()\n # Release the bulk operation outside a tx\n print(MSGHDR, \"Release bulkInserter outside tx\")\n crash=True\n ###crash=False # Flag to cause the crash or bypass it\n if not crash: session.transaction().start(True)\n bulkInserter = None # CRASH!\n if not crash: session.transaction().commit()\n\n#============================================================================\n\nif __name__ == '__main__':\n\n print(\"\")\n MSGHDR = \"+++ PYCORAL MIXBUGS TEST +++\"\n\n # Build the unique table name and the URLs\n tableName = \"PYCORALMIXBUGSTEST\"\n import PyCoralTest\n tableName = PyCoralTest.buildUniqueTableName( tableName )\n [urlRW,urlRO] = PyCoralTest.parseArguments()\n print(MSGHDR, \"Table name:\", tableName)\n print(MSGHDR, \"URL [RW,RO]:\", [urlRW,urlRO])\n\n # Bootstrap CORAL\n #os.environ['CORAL_MSGLEVEL']='Verbose'\n #os.environ['CORAL_MSGLEVEL']='Info'\n import coral\n print(MSGHDR, \"Instantiate the PyCoral connection service\")\n svc = coral.ConnectionService()\n\n # Save CORAL default retrial parameters as global variables\n retrialPeriod=svc.configuration().connectionRetrialPeriod()\n 
retrialTimeOut=svc.configuration().connectionRetrialTimeOut()\n\n # Prepare the row buffer for this test\n rowBuffer = coral.AttributeList()\n rowBuffer.extend(\"ID\",\"int\")\n rowBuffer.extend(\"Data\",\"float\")\n\n # Recreate the table\n print(MSGHDR, \"Create the test table\")\n createTable()\n\n # Start the unit test (can specify one specific test as cl argument)\n print(MSGHDR, \"Start the test suite\")\n unittest.main( testRunner =\n unittest.TextTestRunner(stream=sys.stdout,verbosity=2) )\n","sub_path":"PyCoral/tests/Python3/test_PyCoral_MiscellaneousBugs.py","file_name":"test_PyCoral_MiscellaneousBugs.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"156318861","text":"import os\nfrom gp import bayesian_optimisation\nimport numpy as np\nfrom sklearn.model_selection import cross_val_score, KFold\nfrom sklearn.gaussian_process.kernels import Matern\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nimport pandas as pd\nfrom sklearn.feature_selection import f_regression\nfrom sklearn.naive_bayes import GaussianNB\n\ntry: # if running in CLI\n cur_path = os.path.abspath(__file__)\nexcept NameError: # if running in IDE\n cur_path = os.getcwd()\n\nwhile cur_path.split('/')[-1] != 'bb_preds':\n cur_path = os.path.abspath(os.path.join(cur_path, os.pardir))\noutput_folder = os.path.join(cur_path, 'model_results')\n\ndef test_scaler(x, y):\n print('Searching for best scaler...')\n scores = []\n for scale in [StandardScaler(), MinMaxScaler(), RobustScaler()]:\n pipe = Pipeline([('scale',scale), ('clf',LogisticRegression(random_state = 1108))])\n score = cross_val_score(pipe, x, y, scoring = 'accuracy' ,cv = KFold(n_splits = 10, random_state = 46))\n scores.append(np.mean(score))\n if scores.index(max(scores)) == 0:\n print('Using Standard Scaler')\n return StandardScaler()\n elif scores.index(max(scores)) == 1:\n print('Using Min Max Scaler')\n return MinMaxScaler()\n elif scores.index(max(scores)) == 2:\n print('Using Robust Scaler')\n return RobustScaler()\n\ndef sample_loss_n_feats(parameters):\n feats = int(parameters[0])\n print('%s features' % (feats))\n model = Pipeline([('scale',scale), ('clf',LogisticRegression(random_state = 1108, solver = solver_, C = C_))])\n score = cross_val_score(model, x_data[feat_sigs[:feats]], y_data, scoring = 'accuracy' ,cv = KFold(n_splits = 10, random_state = 1108))\n print('----> score: %s' % np.mean(score))\n return np.mean(score)\n\ndef find_feats():\n print('Searching for best number of features...')\n bounds = np.array([[1, len(list(x_data))]])\n start = [[len(list(x_data))]]\n results = bayesian_optimisation(n_iters=5, \n sample_loss=sample_loss_n_feats, \n bounds=bounds,\n x0 = start,\n gp_params = {'kernel': Matern(), 'alpha': 1e-5, 'n_restarts_optimizer': 10, 'normalize_y': True})\n return int(results[0][list(results[1]).index(max(results[1]))])\n\ndef test_solver(x, y):\n print('Searching for best solver...')\n scores = []\n for slvr in ['liblinear', 'newton-cg', 'lbfgs', 'sag','saga']:\n pipe = Pipeline([('scale',scale), ('clf',LogisticRegression(random_state = 1108, solver = slvr, C = C_))])\n score = cross_val_score(pipe, x, y, scoring = 'accuracy' ,cv = KFold(n_splits = 10, random_state = 86))\n scores.append(np.mean(score))\n if scores.index(max(scores)) == 0:\n print('Using liblinear')\n return 'liblinear'\n elif 
scores.index(max(scores)) == 1:\n print('Using newton-cg')\n return 'newton-cg'\n elif scores.index(max(scores)) == 2:\n print('Using lbfgs')\n return 'lbfgs'\n elif scores.index(max(scores)) == 3:\n print('Using sag')\n return 'sag'\n elif scores.index(max(scores)) == 4:\n print('Using saga')\n return 'saga'\n \ndef sample_loss_c(parameters):\n c = 10**parameters[0]\n model = Pipeline([('scale',scale), ('clf',LogisticRegression(random_state = 1108, C = c))])\n score = cross_val_score(model, x_data[feat_sigs[:features]], y_data, scoring = 'accuracy' ,cv = KFold(n_splits = 10, random_state = 88))\n print('----> score: %s' % np.mean(score))\n return np.mean(score)\n \ndef c_tuning():\n print('-- Beginning C Search')\n bounds = np.array([[-3, 3]])\n results = bayesian_optimisation(n_iters=5, \n sample_loss=sample_loss_c, \n bounds=bounds,\n gp_params = {'kernel': Matern(), 'alpha': 1e-5, 'n_restarts_optimizer': 10, 'normalize_y': True})\n print('Best C: %s, Best score: %s' % (results[0][list(results[1]).index(max(results[1]))][0], max(results[1]))) \n return 10**results[0][list(results[1]).index(max(results[1]))][0]\n\ndef execute(sa, od, X_data = None, Y_data = None):\n # declare all module-level names up front: assigning to a name before its\n # 'global' statement is a syntax error in Python 3\n global x_data, y_data, scale, C_, solver_, feat_sigs, features\n x_data = X_data\n y_data = Y_data\n x_feats = list(x_data)\n \n scale = test_scaler(x_data, y_data) #minmax\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'w')\n f.write('scale: %s,'%(scale))\n f.close()\n \n C_ = 1\n solver_ = test_solver(x_data, y_data)\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('start solver: %s,'%(solver_))\n f.close()\n\n feat_sigs = list(x_data)\n features = len(feat_sigs)\n C_ = c_tuning()\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('start C: %s,'%(C_))\n f.close()\n\n print('Starting feature ranking')\n sigs = f_regression(x_data, y_data)[1]\n indices = np.argsort(sigs)\n feat_sigs = [x_feats[i] for i in indices]\n features = find_feats()\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('n feats: %s,'%(features))\n f.close()\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('significant features: ')\n for line in feat_sigs[:features]:\n f.write('%s, '%(line))\n f.close()\n \n solver_ = test_solver(x_data[feat_sigs[:features]], y_data)\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('final solver: %s,'%(solver_))\n f.close()\n\n C_ = c_tuning()\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('final C: %s,'%(C_))\n f.close()\n \n print('---Finalizing Log Model')\n model = Pipeline([('scale',scale), ('clf',LogisticRegression(random_state = 1108, solver = solver_, C = C_))]) \n tune_score = cross_val_score(model, x_data[feat_sigs[:features]], y_data, scoring = 'accuracy' ,cv = KFold(n_splits = 10, random_state = 88))\n print('...Log Model Finalized')\n tune_score = np.mean(tune_score)\n base_model = Pipeline([('scale',scale), ('clf',GaussianNB())])\n baseline_score = cross_val_score(base_model, x_data[feat_sigs], y_data, scoring = 'accuracy' ,cv = KFold(n_splits = 10, random_state = 86))\n baseline_score = np.mean(baseline_score)\n improvement = (tune_score - baseline_score)/baseline_score\n print('%s percent improvement from baseline' % (improvement * 100))
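\n # (added worked example) improvement is relative to the GaussianNB baseline:\n # tune_score = 0.66 and baseline_score = 0.60 give (0.66 - 0.60) / 0.60 = 0.10,\n # printed as roughly \"10.0 percent improvement from baseline\".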
\n if improvement < 0:\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('final score: XXX,')\n f.close()\n return 0\n else:\n f = open(os.path.join(output_folder, '%s-%s-log.txt'%(sa, od)), 'a')\n f.write('final score: %s,'%(tune_score))\n f.close()\n return tune_score\n ","sub_path":"model_tuning/log_tuning.py","file_name":"log_tuning.py","file_ext":"py","file_size_in_byte":7190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"113028108","text":"import os\nimport csv\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.apps import apps\n\n\nclass Command(BaseCommand):\n args = 'Model.csv'\n help = 'Import `Model`.csv into `Model` database.'\n\n def handle(self, *args, **options):\n if len(args) != 1:\n raise CommandError(\"Invalid Invocation. See help.\")\n\n csvPath = args[0]\n if not os.path.exists(csvPath):\n raise CommandError(\"%s doesn't exist.\" % csvPath)\n\n model, _ = os.path.splitext(os.path.basename(csvPath))\n Model = apps.get_model(\"message_sender\", model.title())\n if not Model:\n raise CommandError(\"%s Model doesn't exist\" % model.title())\n\n model_fields = [f.name for f in Model._meta.fields]\n fields_name = []\n with open(csvPath, 'rb') as csvFile:\n reader = csv.reader(csvFile, delimiter=',', quotechar=\"\\\"\")\n fields_name = reader.next()\n for i, _ in enumerate(fields_name):\n fields_name[i] = fields_name[i].lower()\n fields_name[i] = fields_name[i].replace(' ', '_')\n if not fields_name[i] in model_fields:\n raise CommandError(\"%s field doesn't exist in %s Model\" %\n (fields_name[i], Model))\n\n for row in reader:\n try:\n obj = Model()\n for i, field in enumerate(row):\n setattr(obj, fields_name[i], field)\n obj.save()\n except Exception as e:\n raise CommandError(e)\n","sub_path":"message_sender/management/commands/importcsv.py","file_name":"importcsv.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"162141995","text":"import urllib.parse \nfrom config import Config\nimport datetime,calendar\n\nconfig = Config()\nclass EventsModel:\n def __init__(self):\n self.url = self.setUrl(endpoint=config.getTestCasesEndPointURL('TestSuite_1'))\n self.ric = ''\n self.queryString = {\n \"eventTypes\":\"\",\n \"start\":\"\",\n \"end\":\"\",\n \"adjustments\":\"\",\n \"fields\":\"\",\n \"count\":\"\",\n \"sessions\": \"\"\n }\n\n def setUrl(self,protocol='http',endpoint=None,port=8080):\n return \"{protocol}://{endpoint}:{port}\".format(protocol=protocol,endpoint=endpoint,port=port)\n\n def getURL(self):\n keys = self.queryString.keys()\n for k in list(keys):\n if self.queryString[k]==\"\":\n self.queryString.pop(k)\n\n path = \"data/historical-pricing/v1/views/\"\n URL = \"{url}/{path}/{ric}?{query}\".format(url=self.url,path=path,ric=self.ric,query=urllib.parse.urlencode(self.queryString))\n return URL\n \n def setRic(self, ric):\n self.ric = ric\n \n def setEventTypes(self,eventTypes):\n self.queryString['eventTypes'] = eventTypes\n\n def setStartDate(self, startdate):\n self.queryString['start'] = startdate\n \n def setEndDate(self, enddate):\n self.queryString['end'] = enddate\n\n def setAdjustments(self, adjustment):\n # the key must match the 'adjustments' entry initialized above\n self.queryString['adjustments'] = adjustment\n\n def setFields(self, fields):\n self.queryString['fields'] = fields\n \n def setCount(self,count):\n self.queryString['count'] = count\n\n def setSessions(self,sessions):\n self.queryString['sessions'] = sessions\n
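 \n # (added note, inferred from the parsing below) start/end accept either the\n # literal \"now\" or a relative spec of the form \"<label>-<N><unit>\", e.g.\n # \"start-5D\" for five days ago; units: M(onths), W(eeks), D(ays), h, m, s.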
\n def setDateByStartEnd(self,start,end):\n startDate = start\n endDate = end\n if start != \"\":\n if start != \"now\":\n start = start.split('-')[1]\n if start[-1] == \"M\":\n # walk back one calendar month per iteration and add up its days\n monthToDayCount = 0\n now = datetime.datetime.now()\n year = now.year\n month = now.month\n for i in range(int(start[:-1]), 0, -1):\n month -= 1\n if month == 0:\n month = 12\n year -= 1\n monthToDayCount += calendar.monthrange(year, month)[1]\n startDate = datetime.datetime.now() - datetime.timedelta(days=monthToDayCount)\n elif start[-1] == \"W\":\n startDate = datetime.datetime.now() - datetime.timedelta(weeks=int(start[:-1]))\n elif start[-1] == \"D\":\n startDate = datetime.datetime.now() - datetime.timedelta(days=int(start[:-1]))\n elif start[-1] == \"h\":\n startDate = datetime.datetime.now() - datetime.timedelta(hours=int(start[:-1]))\n elif start[-1] == \"m\":\n startDate = datetime.datetime.now() - datetime.timedelta(minutes=int(start[:-1]))\n elif start[-1] == 's':\n startDate = datetime.datetime.now() - datetime.timedelta(seconds=int(start[:-1]))\n else:\n startDate = datetime.datetime.now()\n elif start == \"now\":\n startDate = datetime.datetime.now()\n\n try:\n startDate = startDate.strftime('%Y-%m-%dT%H:%M:%SZ')\n except Exception as e:\n print(e)\n\n if end != \"\":\n if end != \"now\":\n end = end.split('-')[1]\n if end[-1] == \"M\":\n # same month-walking logic as for the start date\n monthToDayCount = 0\n now = datetime.datetime.now()\n year = now.year\n month = now.month\n for i in range(int(end[:-1]), 0, -1):\n month -= 1\n if month == 0:\n month = 12\n year -= 1\n monthToDayCount += calendar.monthrange(year, month)[1]\n endDate = datetime.datetime.now() - datetime.timedelta(days=monthToDayCount)\n elif end[-1] == \"W\":\n endDate = datetime.datetime.now() - datetime.timedelta(weeks=int(end[:-1]))\n elif end[-1] == \"D\":\n endDate = datetime.datetime.now() - datetime.timedelta(days=int(end[:-1]))\n elif end[-1] == \"h\":\n endDate = datetime.datetime.now() - datetime.timedelta(hours=int(end[:-1]))\n elif end[-1] == \"m\":\n endDate = datetime.datetime.now() - datetime.timedelta(minutes=int(end[:-1]))\n elif end[-1] == \"s\":\n endDate = datetime.datetime.now() - datetime.timedelta(seconds=int(end[:-1]))\n else:\n endDate = datetime.datetime.now()\n elif end == \"now\":\n endDate = datetime.datetime.now()\n\n try:\n endDate = endDate.strftime('%Y-%m-%dT%H:%M:%SZ')\n except Exception as e:\n print(e)\n pass\n\n self.setStartDate(startDate)\n self.setEndDate(endDate)\n \n ","sub_path":"Events.py","file_name":"Events.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"465252177","text":"import FWCore.ParameterSet.Config as cms\n\npfsim = cms.EDProducer(\n 'PFSimParticleProducer',\n hepmcSrc = cms.InputTag('generator'),\n genSrc = cms.InputTag('genParticles'),\n verbose = cms.untracked.bool( False )\n )\n\njets = cms.EDProducer(\n 'PFSimFastJetProducer',\n particleSrc = cms.InputTag('pfsim'),\n jetPtThreshold = cms.double(5.),\n )\n\npfsimSequence = cms.Sequence(\n pfsim +\n jets\n ) \n","sub_path":"CMGTools/PFSim/python/pfsim_cff.py","file_name":"pfsim_cff.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"156675403","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom functools import reduce\n\nimport copy\n\nfrom mahjong.constants import EAST, SOUTH, WEST, NORTH\nfrom mahjong.tile import TilesConverter\nfrom utils.settings_handler import settings\nfrom mahjong.ai.shanten import Shanten\n\nlogger = logging.getLogger('tenhou')
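\n\n# (added note) tiles in this class use the 136-tile format: each of the 34\n# tile kinds appears four times, so kind_index = tile_136 // 4 (see the\n# meld_tiles property below).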
\n\nclass Player(object):\n # the seat where the player is sitting\n # always = 0 for our player\n seat = 0\n # the seat of the dealer; from this we can calculate the player's wind\n dealer_seat = 0\n # position based on scores\n position = 0\n scores = 0\n uma = 0\n\n name = ''\n rank = ''\n\n discards = []\n # tiles that were discarded after player's riichi\n safe_tiles = []\n tiles = []\n melds = []\n table = None\n last_draw = None\n in_tempai = False\n in_riichi = False\n in_defence_mode = False\n\n # system fields\n # for local games emulation\n _is_daburi = False\n _is_ippatsu = False\n\n def __init__(self, seat, dealer_seat, table, use_previous_ai_version=False):\n self.discards = []\n self.melds = []\n self.tiles = []\n self.safe_tiles = []\n self.seat = seat\n self.table = table\n self.dealer_seat = dealer_seat\n\n if use_previous_ai_version:\n try:\n from mahjong.ai.old_version import MainAI\n # project wasn't set up properly\n # we don't have the old version\n except ImportError:\n logger.error('Wasn\\'t able to load old api version')\n from mahjong.ai.main import MainAI\n else:\n if settings.ENABLE_AI:\n from mahjong.ai.main import MainAI\n else:\n from mahjong.ai.random import MainAI\n\n self.ai = MainAI(table, self)\n\n def __str__(self):\n result = u'{0}'.format(self.name)\n if self.scores:\n result += u' ({:,d})'.format(int(self.scores))\n if self.uma:\n result += u' {0}'.format(self.uma)\n else:\n result += u' ({0})'.format(self.rank)\n return result\n\n # for calls in array\n def __repr__(self):\n return self.__str__()\n\n def erase_state(self):\n self.discards = []\n self.melds = []\n self.tiles = []\n self.safe_tiles = []\n\n self.last_draw = None\n self.in_tempai = False\n self.in_riichi = False\n self.in_defence_mode = False\n\n self.dealer_seat = 0\n\n self.ai.erase_state()\n\n self._is_daburi = False\n self._is_ippatsu = False\n\n def add_called_meld(self, meld):\n self.melds.append(meld)\n\n def add_discarded_tile(self, tile):\n self.discards.append(tile)\n\n def init_hand(self, tiles):\n self.tiles = tiles\n\n self.ai.determine_strategy()\n\n def draw_tile(self, tile):\n self.last_draw = tile\n self.tiles.append(tile)\n # we need to sort it to get a better string representation\n self.tiles = sorted(self.tiles)\n\n self.ai.determine_strategy()\n\n def discard_tile(self, tile=None):\n \"\"\"\n Discard the given tile; with tile = None the tile to discard is chosen by the AI logic\n :param tile: 136 tiles format\n :return:\n \"\"\"\n # we can't use if tile, because of 0 tile\n if tile is not None:\n tile_to_discard = tile\n else:\n tile_to_discard = self.ai.discard_tile()\n\n if tile_to_discard != Shanten.AGARI_STATE:\n self.add_discarded_tile(tile_to_discard)\n self.tiles.remove(tile_to_discard)\n\n return tile_to_discard\n\n def can_call_riichi(self):\n return all([\n self.in_tempai,\n\n not self.in_riichi,\n not self.is_open_hand,\n\n self.scores >= 1000,\n self.table.count_of_remaining_tiles > 4\n ])\n\n def try_to_call_meld(self, tile, is_kamicha_discard):\n return self.ai.try_to_call_meld(tile, is_kamicha_discard)\n\n @property\n def player_wind(self):\n position = self.dealer_seat\n if position == 0:\n return EAST\n elif position == 1:\n return NORTH\n elif position == 2:\n return WEST\n else:\n return SOUTH\n\n @property\n def is_dealer(self):\n return self.seat == self.dealer_seat\n\n @property\n def is_open_hand(self):\n return len(self.melds) > 0\n\n @property\n def closed_hand(self):\n tiles = self.tiles[:]\n 
meld_tiles = [x.tiles for x in self.melds]\n if meld_tiles:\n meld_tiles = reduce(lambda z, y: z + y, [x.tiles for x in self.melds])\n return [item for item in tiles if item not in meld_tiles]\n\n @property\n def meld_tiles(self):\n \"\"\"\n Array of array with 34 tiles indices\n :return: array\n \"\"\"\n melds = [x.tiles for x in self.melds]\n melds = copy.deepcopy(melds)\n for meld in melds:\n meld[0] //= 4\n meld[1] //= 4\n meld[2] //= 4\n return melds\n\n def format_hand_for_print(self, tile):\n hand_string = '{} + {}'.format(\n TilesConverter.to_one_line_string(self.closed_hand),\n TilesConverter.to_one_line_string([tile])\n )\n if self.is_open_hand:\n melds = []\n for item in self.melds:\n melds.append('{}'.format(TilesConverter.to_one_line_string(item.tiles)))\n hand_string += ' [{}]'.format(', '.join(melds))\n return hand_string\n","sub_path":"project/mahjong/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":5558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"211494714","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Go through all Russian lemmas looking for headwords with secondary stress in them.\n\nimport pywikibot, re, sys, argparse\nimport unicodedata\n\nimport blib\nfrom blib import getparam, rmparam, tname, msg, site\n\nimport rulib\nimport runounlib\n\nGR = \"\\u0300\" # grave = ̀\n\nru_normal_head_templates = [\"ru-noun\", \"ru-proper noun\", \"ru-verb\", \"ru-adj\",\n \"ru-adv\", \"ru-phrase\", \"ru-noun form\", \"ru-diacritical mark\",\n \"ru-noun-alt-ё\", \"ru-adj-alt-ё\", \"ru-verb-alt-ё\"]\n\noverall_head_count = {}\ncat_head_count = {}\n\ndef has_secondary_stress(text):\n return GR in unicodedata.normalize(\"NFD\", str(text))\n\ndef output_heads_seen(overall=False):\n if overall:\n dic = overall_head_count\n msg(\"Overall templates seen:\")\n else:\n dic = cat_head_count\n msg(\"Templates seen per category:\")\n for head, count in sorted(dic.items(), key=lambda x:-x[1]):\n msg(\" %s = %s\" % (head, count))\n\ndef process_text_on_page(index, pagetitle, text):\n global args\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n def expand_text(tempcall):\n return blib.expand_text(tempcall, pagetitle, pagemsg, args.verbose)\n\n notes = []\n\n parsed = blib.parse_text(text)\n found_page_head = False\n for t in parsed.filter_templates():\n found_this_head = False\n tn = tname(t)\n if tn in ru_normal_head_templates:\n heads = blib.fetch_param_chain(t, \"1\", \"head\")\n for head in heads:\n if has_secondary_stress(head):\n pagemsg(\"Found secondarily stressed head %s in %s\" % (head,\n str(t)))\n elif tn == \"head\" and getparam(t, \"1\") == \"ru\":\n heads = blib.fetch_param_chain(t, \"head\", \"head\")\n for head in heads:\n if has_secondary_stress(head):\n pagemsg(\"Found secondarily stressed head %s in %s\" % (head,\n str(t)))\n elif tn in [\"ru-noun+\", \"ru-proper noun+\", \"ru-noun-table\", \"ru-noun-old\"]:\n per_word_objs = runounlib.split_noun_decl_arg_sets(t, pagemsg)\n for per_word in per_word_objs:\n for arg_set in per_word:\n if has_secondary_stress(arg_set[1]):\n pagemsg(\"Found secondarily stressed head %s in %s\" % (\n arg_set[1], str(t)))\n elif tn == \"ru-decl-adj\":\n head = getparam(t, \"1\")\n if has_secondary_stress(head):\n pagemsg(\"Found secondarily stressed head %s in %s\" % (head,\n str(t)))\n\nparser = blib.create_argparser(\"Find Russian terms with secondary stress in the headword\",\n include_pagefile=True, include_stdin=True)\nargs 
= parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nblib.do_pagefile_cats_refs(args, start, end, process_text_on_page, edit=True, stdin=True,\n default_cats=[\"Russian lemmas\", \"Russian non-lemma forms\"])\n","sub_path":"find_ru_headword_secondary_stress.py","file_name":"find_ru_headword_secondary_stress.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"253469087","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/6/19 17:05\n# @Author : Zcs\n# @File : send_log(logging).py\n\n# Send logs to the log server over the TCP protocol\nimport logging.handlers\nimport socket\n\nmsg = 'test'\nip = '192.168.205.141'\nport = 601\nlogger = logging.getLogger('SysLogger')\n# socktype -- tcp:socket.SOCK_STREAM -- udp:socket.SOCK_DGRAM\nfh = logging.handlers.SysLogHandler((ip, port), logging.handlers.SysLogHandler.LOG_AUTH, socktype=socket.SOCK_STREAM)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\nlogger.warning(msg)\nfh.close()\n","sub_path":"my_log/send_log(logging).py","file_name":"send_log(logging).py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"101821326","text":"from django.db.models.base import ModelBase as BaseModelBase\nfrom django.db.models import Field\nfrom api.serializers import ModelSerializer\nfrom collections import defaultdict\nimport importlib\nfrom django.utils.translation import activate, deactivate, get_language, ugettext\n\n\nclass Registry(dict):\n pass\n\n\nregistry = Registry()\n\n\nclass AlreadyRegisteredException(Exception):\n pass\n\n\nclass ApiMeta(type):\n def __new__(cls, cls_name, super_classes, attrs):\n if cls_name != \"BaseApi\":\n return super().__new__(cls, cls_name, super_classes, attrs)()\n else:\n return super().__new__(cls, cls_name, super_classes, attrs)\n\n\nclass BaseApi(metaclass=ApiMeta):\n def __init__(self, *args, **kwargs):\n self.serializer_store = dict()\n self.extra_kwargs = getattr(self, \"extra_kwargs\", {})\n\n def contribute_to_class(self, model, name):\n self.model = model\n # try:\n setattr(model, name, self)\n # self.resource_name = getattr(self, \"resource_name\", self._resource_name)\n\n registry[self.resource_name] = self\n\n @property\n def base_name(self):\n lang = get_language()\n deactivate()\n name = ugettext(self.model._meta.verbose_name_plural)\n activate(lang)\n return name\n\n @property\n def url_name(self):\n return \"_\".join(self.base_name.lower().split())\n\n @property\n def resource_name(self):\n return \"-\".join(self.base_name.lower().split())\n\n @property\n def url_path(self):\n return self.resource_name\n\n def fields(self, key=\"default\", remove=[]):\n for field in self.model._meta.get_fields(include_hidden=False):\n\n name = field.name\n if name not in self.extra_kwargs:\n self.extra_kwargs.update({name: {}})\n\n extra_kwargs = {}\n\n if (\n hasattr(field, \"many_to_many\")\n and hasattr(field, \"one_to_many\")\n and (field.many_to_many or field.one_to_many)\n ):\n extra_kwargs.update({\"many\": True})\n\n model = getattr(field, \"related_model\", self.model)\n\n if not isinstance(field, Field):\n field = field.remote_field\n serializer_conf = getattr(field, \"related_serializers\", {})\n extra_kwargs.update({\"read_only\": True})\n\n else:\n serializer_conf = getattr(field, \"serializers\", {})\n\n if key in serializer_conf:
name, field, serializer_conf.get(key), model, extra_kwargs\n\n def get_serializer(\n self, key,\n ):\n\n if key in self.serializer_store:\n return self.serializer_store.get(key)\n attrs = {}\n _fields = [\"pk\"]\n\n serializer_name = f\"{self.model.__name__}{key.title()}Serializer\"\n\n for name, field, conf, model, extra_kwargs in self.fields(key):\n self.extra_kwargs[name].update(extra_kwargs)\n _fields.append(name)\n if type(conf) == str:\n serializer = model.Api.get_serializer(conf)\n attrs.update({name: serializer(**extra_kwargs)})\n elif conf is not None:\n attrs.update({name: conf})\n _key = key\n\n class Meta:\n model = self.model\n fields = _fields\n key = _key\n\n serializer = type(serializer_name, (ModelSerializer,), {\"Meta\": Meta, **attrs})\n # self.serializer_store[key] = serializer\n return serializer\n\n @property\n def search_fields(self):\n return []\n\n def register_serializer(self, serializer):\n\n key = (\n getattr(\n serializer.Meta,\n \"key\",\n serializer.__name__.replace(self.model.__name__, \"\")\n .replace(\"Serializer\", \"\")\n .lower(),\n )\n or \"default\"\n )\n if key in self.serializer_store:\n raise AlreadyRegisteredException(\n f\"Serializer with key:{key} is already registered for {self.model.__name__} api\"\n )\n else:\n self.serializer_store[key] = serializer\n\n if not hasattr(serializer.Meta, \"key\"):\n setattr(serializer.Meta, \"key\", key)\n\n","sub_path":"{{cookiecutter.project_slug}}/api/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"575050907","text":"#!/usr/bin/env python\n\n'''\nA game to be played through a socket connection, arbitred by a controller\nusing GTP.\n\nAlternatively, the commands can be read and sent by the console.\n'''\nfrom GTPEngine import GTPEngine\nfrom optparse import OptionParser\nfrom socket import socket, AF_INET, SOCK_STREAM\nimport sys\nfrom pygo1963.model.PlayersFactory import ALPHA_BETA_KEY, HUMAN_KEY, RANDOM_KEY,\\\n create_player\nfrom pygo1963.view.View import View, GameView\nfrom pygo1963.view.main import view_loop\n\nclass NetworkGame():\n \"\"\" A Go game to be played through a remote connection. \"\"\"\n \n def __init__(self, engine, use_sockets=False, host=None, port=None):\n \n self.engine = engine\n \n self.next_move = None\n \n if options.use_sockets:\n logger.write('trying to make socket on %s, %d\\n' % (options.host,\n options.port)) \n self.sockobj = socket(AF_INET, SOCK_STREAM)\n self.sockobj.connect((options.host, options.port))\n \n logger.write('making socket file.\\n')\n self.in_file = self.out_file = self.sockobj.makefile() \n \n else:\n self.in_file = sys.stdin\n self.out_file = sys.stdout\n \n self.sockobj = None\n \n def __getattr__(self, name):\n \n if name == 'board':\n return self.engine.board\n else:\n raise AttributeError\n \n #TODO cerrar archivos cuando termina\n def play(self):\n \"\"\" Game loop. \"\"\"\n \n logger.write('starting play.\\n')\n while not self.engine.received_quit:\n \n cmd = ''\n while not cmd: \n cmd = self._preprocess_command(self.in_file.readline())\n logger.write('command read: ' + cmd)\n \n response = engine.process_command(cmd)\n logger.write('response sent: ' + response)\n \n self.out_file.write(response)\n self.out_file.flush()\n \n self.finish()\n\n def _preprocess_command(self, command):\n \"\"\" Removes control characters and comments from the command. 
\"\"\"\n \n #Remove control chars except HT and LF\n del_chars = ''.join(chr(n) for n in range(9) + range(11,32)) \n command = command.translate(None, del_chars)\n \n #Convert HT to spaces\n command.replace('\\t', ' ')\n \n #remove comments\n index = command.find('#')\n if index != -1:\n command = command[:index]\n \n #if its only whitespaces make it empty\n if command.isspace():\n return ''\n \n return command\n \n def finish(self):\n \"\"\" Makes the necessary cleanup after the game has finished. \"\"\"\n \n if self.sockobj:\n self.out_file.close()\n self.sockobj.close()\n \n\n\ndef parse_options():\n parser = OptionParser()\n parser.add_option('-c', '--color', action='store', dest='color',\n type='choice', choices=('b', 'w', 'black', 'white'), \n default='b')\n parser.add_option('-e', '--engine', action='store', dest='player',\n type='choice', choices=(ALPHA_BETA_KEY, HUMAN_KEY, RANDOM_KEY), \n default=ALPHA_BETA_KEY)\n parser.add_option('-s', '--use-sockets', action='store_true', \n dest='use_sockets', default=False)\n parser.add_option('-i', '--host', action='store', dest='host', \n default='localhost')\n parser.add_option('-p', '--port', action='store', dest='port', type=\"int\",\n default='50007')\n return parser.parse_args()[0]\n\n\n#MAIN PROGRAM\nif __name__ == \"__main__\":\n options = parse_options()\n \n view = View() \n \n logger = open('network.%s.log' % options.color, 'w')\n \n engine = GTPEngine(create_player(options.player, options.color, view.controller)) \n game = NetworkGame(engine, options.use_sockets, options.host, options.port)\n GameView(view, game)\n\n view_loop(view)\n \n logger.close()\n ","sub_path":"src/pygo1963/network_game/network_game.py","file_name":"network_game.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"249435306","text":"\n\"\"\" \nSet up the plot figures, axes, and items to be done for each frame.\n\nThis module is imported by the plotting routines and then the\nfunction setplot is called to set the plot parameters.\n \n\"\"\" \n\n#--------------------------\ndef setplot(plotdata):\n#--------------------------\n \n \"\"\" \n Specify what is to be plotted at each frame.\n Input: plotdata, an instance of visclaw.data.ClawPlotData.\n Output: a modified version of plotdata.\n \n \"\"\" \n\n\n plotdata.clearfigures() # clear any old figures,axes,items data\n\n # Figure for q[0]\n plotfigure = plotdata.new_plotfigure(name='Pressure', figno=1)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.axescmd = 'subplot(211)'\n \n #plotaxes.xlimits = [0.,150.]\n plotaxes.ylimits = [-1.,1.0]\n plotaxes.title = 'Pressure'\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.plot_var = 0\n plotitem.plotstyle = '-o'\n plotitem.color = 'b'\n plotitem.show = True # show on plot?\n plotitem.kwargs = {'linewidth':2,'markersize':5}\n \n\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.axescmd = 'subplot(212)'\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = [-1.,1.]\n plotaxes.title = 'Velocity'\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.plot_var = 1\n plotitem.plotstyle = '-'\n plotitem.color = 'b'\n plotitem.show = True # show on plot?\n plotitem.kwargs = {'linewidth':3,'markersize':5}\n \n\n # Parameters used only when creating html and/or latex hardcopy\n # e.g., via 
 # e.g., via visclaw.frametools.printframes:\n\n plotdata.printfigs = True # print figures\n plotdata.print_format = 'png' # file format\n plotdata.print_framenos = 'all' # list of frames to print\n plotdata.print_fignos = 'all' # list of figures to print\n plotdata.html = True # create html files of plots?\n plotdata.html_homelink = '../README.html'\n plotdata.latex = True # create latex file of plots?\n plotdata.latex_figsperline = 2 # layout of plots\n plotdata.latex_framesperline = 1 # layout of plots\n plotdata.latex_makepdf = False # also run pdflatex?\n\n return plotdata\n\n \n","sub_path":"fvmbook/chap7/standing/setplot.py","file_name":"setplot.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"380711015","text":"from RobotArm import RobotArm\r\n\r\nrobotArm = RobotArm('exercise 11')\r\nrobotArm.speed = 3\r\n\r\n[robotArm.moveRight() for movement in range (8)] # start all the way at the right-hand side!!!!!\r\nfor movement in range (9): # otherwise you might put a white block on top of another white block\r\n robotArm.grab() # and then the bottom white block can no longer be moved\r\n color = robotArm.scan()\r\n if color == 'white':\r\n robotArm.moveRight()\r\n robotArm.drop()\r\n [robotArm.moveLeft() for movement in range (2)]\r\n else:\r\n robotArm.drop()\r\n robotArm.moveLeft()\r\n\r\nrobotArm.wait()\r\n","sub_path":"ex11.py","file_name":"ex11.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"12752075","text":"#\n# @lc app=leetcode id=52 lang=python3\n#\n# [52] N-Queens II\n#\n# https://leetcode.com/problems/n-queens-ii/description/\n#\n# algorithms\n# Hard (52.66%)\n# Total Accepted: 103.8K\n# Total Submissions: 196.6K\n# Testcase Example: '4'
\n#\n# The n-queens puzzle is the problem of placing n queens on an n×n chessboard\n# such that no two queens attack each other.\n#\n#\n#\n# Given an integer n, return the number of distinct solutions to the n-queens\n# puzzle.\n#\n# Example:\n#\n#\n# Input: 4\n# Output: 2\n# Explanation: There are two distinct solutions to the 4-queens puzzle as shown\n# below.\n# [\n# [\".Q..\", // Solution 1\n# \"...Q\",\n# \"Q...\",\n# \"..Q.\"],\n#\n# [\"..Q.\", // Solution 2\n# \"Q...\",\n# \"...Q\",\n# \".Q..\"]\n# ]\n#\n#\n#\nclass Solution:\n def totalNQueens(self, n: int) -> int:\n total = [0]\n q_list = [0 for _ in range(n)]\n\n def _total(n, q_list, row, total):\n if row == n:\n total[0] += 1\n return\n for col in range(n):\n if isSafe(q_list, row, col):\n q_list[row] = col\n _total(n, q_list, row + 1, total)\n\n def isSafe(q_list, row, col):\n # a placement is safe when no earlier queen shares the column\n # or either diagonal\n for i in range(row):\n if q_list[i] == col - row + i:\n return False\n if q_list[i] == col + row - i:\n return False\n if q_list[i] == col:\n return False\n return True\n _total(n, q_list, 0, total)\n return total[0]\n\n# A classic DFS backtracking problem, noted down for reference\n","sub_path":"leetcode/52.n-queens-ii.py","file_name":"52.n-queens-ii.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"137191940","text":"from logging.config import dictConfig\r\nfrom os import path\r\n\r\n_bot_root = path.dirname(path.dirname(path.abspath(__file__)))\r\n\r\nBOT_TOKEN = None\r\nMOE_URL = \"http://localhost:3000/\"\r\nPATH_TO_BOT_DB: str = path.join(_bot_root, path.join(\"data\", \"bot_database.db\"))\r\nLOG_PATH = path.join(_bot_root, path.join(\"data\", \"bot.log\"))\r\nREQUEST_KWARGS = {\r\n \"proxy_url\": \"socks5://127.0.0.1:9050\"\r\n}\r\n_REQUEST_KWARGS_EXAMPLE = {\r\n \"proxy_url\": \"socks5 OR socks5h://URL_OF_THE_PROXY_SERVER:PROXY_PORT\",\r\n # Optional, if you need authentication:\r\n \"urllib3_proxy_kwargs\": {\r\n \"username\": \"PROXY_USER\",\r\n \"password\": \"PROXY_PASS\",\r\n }\r\n}\r\n\r\nLOGGING = {\r\n \"version\": 1,\r\n \"disable_existing_loggers\": True,\r\n \"formatters\": {\r\n \"base\": {\r\n \"format\": \"%(asctime)s %(levelname)s | %(pathname)s:%(funcName)s:%(lineno)d | %(message)s\",\r\n },\r\n },\r\n \"handlers\": {\r\n \"console\": {\r\n \"level\": \"INFO\",\r\n \"class\": \"logging.StreamHandler\",\r\n \"formatter\": \"base\"\r\n },\r\n \"file\": {\r\n \"level\": \"INFO\",\r\n \"class\": \"logging.handlers.RotatingFileHandler\",\r\n \"filename\": LOG_PATH,\r\n \"formatter\": \"base\",\r\n \"maxBytes\": 1024 * 1024 * 100,\r\n },\r\n },\r\n \"loggers\": {\r\n \"general\": {\r\n \"handlers\": [\"file\", \"console\"],\r\n \"level\": \"INFO\",\r\n },\r\n }\r\n}\r\n\r\ntry:\r\n from .local_settings import *\r\nexcept ImportError:\r\n pass\r\n\r\ndictConfig(LOGGING)\r\n","sub_path":"config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"220082717","text":"# ###### /=================================================\\\n# ####### | CLASS EXAMPLE FOR \"COMPUTER SCIENCES\" (07JCJ**) |\n# #### \\ | https://github.com/squillero/computer-sciences |\n# ###G c\\ | |\n# ## _\\ | © 2020 Giovanni Squillero |\n# | _/ | Free for personal or classroom use. 
|\n# | _/ \\=================================================/\n\n\ndef safe_int(whatever):\n try:\n value = int(whatever)\n except ValueError as exception:\n print(f\"Yeuch: {str(exception)}\")\n value = 0\n return value\n\n\ndef main():\n while True:\n user_input = input(\"> \")\n value = safe_int(user_input)\n print(f\"Value is {value}\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python/src/2020-21/20201207_try-execpt_1.py","file_name":"20201207_try-execpt_1.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"511233598","text":"from elements import *\n\nshape_dict = {\"process\" : \"oval\", \"file_store\" : \"rectangle\"}\n\n#\n# Object helper functions\n#\ndef get_elem_by_id(id):\n\tfor element in elements:\n\t\ttry:\n\t\t\tif element.id == id:\n\t\t\t\treturn element\n\t\texcept:\n\t\t\tcontinue\n\ndef get_connected_items(id):\n\tfor element in elements:\n\t\ttry:\n\t\t\tif element.id == id:\n\t\t\t\treturn element.connected_items\n\t\texcept:\n\t\t\tcontinue\n\ndef get_conn_elems(ein):\n\tconn_tag = ein.part_of\n\tconn_elems = []\n\tfor e in elements:\n\t\tif (e.part_of == conn_tag):\n\t\t\tconn_elems.append(e)\n\n\treturn conn_elems\n\n\t\n\n\ndef is_object(id):\n\tfor word in shape_dict.keys():\n\t\ttry:\n\t\t\tif get_elem_by_id(id).tag.find(word) != -1:\n\t\t\t\tif get_elem_by_id(id).tag.find(\"_text\") == -1:\n\t\t\t\t\treturn True\n\t\texcept:\n\t\t\tcontinue\n\treturn False\n\ndef is_text_object(id):\n\ttry:\n\t\tif get_elem_by_id(id).tag.find(\"_text\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\ndef is_node(id):\n\te = get_elem_by_id(id)\n\ttry:\n\t\tif e.tag.find(\"_node\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\ndef is_left_node(id):\n\te = get_elem_by_id(id)\n\ttry:\n\t\tif e.tag.find(\"left_node\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\ndef is_right_node(id):\n\te = get_elem_by_id(id)\n\ttry:\n\t\tif e.tag.find(\"right_node\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\ndef is_center_node(id):\n\te = get_elem_by_id(id)\n\ttry:\n\t\tif e.tag.find(\"center_node\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\ndef is_cline(id):\n\te = get_elem_by_id(id)\n\ttry:\n\t\tif e.tag.find(\"connecting_line\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\tFalse\n\ndef is_a_connector(id):\n\te = get_elem_by_id(id)\n\ttry:\n\t\tif e.part_of.find(\"connector\") != -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept:\n\t\treturn False\n\n","sub_path":"build/lib.linux-i686-2.7/tmtool/items/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"487789176","text":"import thread\nimport os\nimport time\n\ndef countchar(pos):\n# updates lenlist, do not count EOL as part of line length\n\tglobal lenlist, nthread, readlock, file, listlock, threadlock\n\n\tindex = 0\n\n\treadlock.acquire()\n\tfile.seek(0)\n\tline = file.read(pos)\n\treadlock.release()\n\n\t# gets the line number (index) to start counting bytes for\n\tfor i in line:\n\t\tif i == '\\n':\n\t\t\tindex += 1\n\n\tfor i in range(splitnum):\n\t\t# place locks here because we want to make sure that the threads are reading from where they 
left off when suspended\n\t\treadlock.acquire()\n\t\tfile.seek(pos)\n\t\tchar = file.read(1)\n\t\treadlock.release()\n\n\t\tif char != '\\n':\n\t\t\tlistlock.acquire()\n\t\t\tlenlist[index] += 1\n\t\t\tlistlock.release()\n\t\telse: # goes to the next line\n\t\t\tindex += 1\n\n\t\tpos += 1\n\n\tthreadlock.acquire()\n\tnthread -= 1\n\tthreadlock.release()\n\ndef linelengths(filenm, ntrh):\n# returns a Python list, the ith element of which is the number of characters in line i of the file.\n\tstartTime = time.time()\n\tglobal lenlist, nthread, splitnum, readlock, file, listlock, threadlock\n\n\treadlock = thread.allocate_lock()\n\tlistlock = thread.allocate_lock()\n\tthreadlock = thread.allocate_lock()\n\tnthread = ntrh\n\tfile = open(filenm, 'r')\n\tlenlist = sum(1 for i in file) * [0]\n\tnbytes = os.path.getsize(filenm)\n\tsplitnum = nbytes/ntrh\n\tstartpos = 0\n\n\tfor i in range(ntrh):\n\t\tif i != 0:\n\t\t\tstartpos += splitnum\n\t\tthread.start_new_thread(countchar, (startpos,))\n\n\twhile nthread > 0: # busy wait until all worker threads have finished\n\t\tpass\n\n\tprint(\"Threaded version took %s seconds.\" % (time.time() - startTime))\n\t#return lenlist\n","sub_path":"HW2/Threaded.py","file_name":"Threaded.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"637819599","text":"import os\nimport pickle\nfrom sklearn.datasets.base import Bunch\nimport codecs\n#The Bunch class provides a key/value object structure\n#target_name: list of the names of all category sets\n#label: list of the category label of each file\n#filenames: file paths\n#contents: the word-vector form of each file after word segmentation\n\n'''\nThis program turns the text files into word-vector Bunch objects\n'''\n\ndef _readfile(path):\n\tfp = codecs.open(path, \"r\", 'utf-8')\n\tcontent = fp.read()\n\t# print(content)\n\tfp.close()\n\treturn content\n\n\ndef segment2Bunch():\n\t\n\t# wordbag_path=\"train_word_bag/train_set.dat\"\n\t# seg_path=\"train_corpus_seg/\"\n\t# wordbag_path=\"test_word_bag/test_set.dat\"\n\t# seg_path=\"test_seg/\"\n\n\ttrain_test_dict = {'train':[\"train_word_bag/train_set.dat\", \"train_corpus_seg/\"], \\\n\t\t\t\t\t'test':[\"test_word_bag/test_set.dat\", \"test_corpus_seg/\"]}\n\n\tfor i in train_test_dict.keys():\n\n\t\tbunch=Bunch(target_name=[],label=[],filenames=[],contents=[])\n\n\t\twordbag_path = train_test_dict[i][0]\n\t\tseg_path = train_test_dict[i][1]\n\t\t# print(wordbag_path, seg_path)\n\t\tcatelist = os.listdir(seg_path)\n\t\tbunch.target_name.extend(catelist)#save the category info into the Bunch object\n\t\tfor mydir in catelist:\n\t\t\tclass_path = seg_path+mydir+\"/\"\n\t\t\tfile_list = os.listdir(class_path)\n\t\t\tfor file_path in file_list:\n\t\t\t\tfullname = class_path + file_path\n\t\t\t\tbunch.label.append(mydir)#save the current file's category label\n\t\t\t\tbunch.filenames.append(fullname)#save the current file's path\n\t\t\t\tbunch.contents.append(_readfile(fullname).strip())#save the file's word vector\n\n\t\t#persist the Bunch object\n\t\tfile_obj=open(wordbag_path,\"wb\")\n\t\tpickle.dump(bunch,file_obj)\n\t\tfile_obj.close()\n\n\tprint(\"Finished building the text Bunch objects\")\n\n\nif __name__ == '__main__':\n\tsegment2Bunch()\n","sub_path":"THU_naive_bayes/segment2Bunch.py","file_name":"segment2Bunch.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"322770955","text":"from pymongo import MongoClient\nfrom bson.objectid import ObjectId\nimport pickle\n\n\nNULL_ID = ObjectId(\"000000000000000000000000\")\n\n\nclass Database:\n def __init__(self, mongo_port = 27017):\n self.client = MongoClient('localhost', mongo_port)\n self.db = self.client.poker\n self.games = self.db.games\n self.meta = self.db.meta\n 
self.players = self.db.players\n self.features = self.db.features\n self.opponentmodels = self.db.opponentmodels\n\n self.meta.update({ '_id': 1 },\n { '$setOnInsert': {'lastProcessedGame': NULL_ID}, },\n upsert = True)\n\n def add_game(self, game):\n object_id = self.games.insert_one(game).inserted_id\n return object_id\n\n\n @property\n def last_processed_game(self):\n return self.meta.find_one({'_id': 1})['lastProcessedGame']\n\n\n @last_processed_game.setter\n def last_processed_game(self, game_id):\n self.meta.update({'_id':1}, {\"$set\": {'lastProcessedGame' : game_id}})\n\n\n def get_games(self):\n return self.games.find({'_id': {'$gt' : self.last_processed_game}})\n\n\n @property\n def unprocessed_game_count(self):\n return self.games.count({'_id': {'$gt' : self.last_processed_game}})\n\n\n def add_player_model(self, player_name, model):\n model_data = pickle.dumps(model)\n update = {\n '$set' : {'model' : pickle.dumps(model)},\n '$setOnInsert' : {'name' : player_name}\n }\n self.players.update({'name': player_name}, update, upsert = True)\n\n\n def get_player_model(self, player_name):\n found = self.players.find_one({'name' : player_name})\n if found is not None:\n return pickle.loads(found['model'])\n else:\n return None\n\n\n def add_player_stat(self, player_name, stat_name, stat_value):\n update = {\n '$set' : {stat_name : stat_value},\n '$setOnInsert' : {'name' : player_name}\n }\n self.players.update({'name': player_name}, update, upsert = True)\n\n\n def get_player_stat(self, player_name, stat_name):\n found = self.players.find_one({'name' : player_name}, {stat_name : 1})\n if found is not None:\n return found[stat_name]\n else:\n return None\n\n\n def add_player_features(self, player_name, inputs, responses):\n update = {\n '$set' : {'inputs' : inputs, 'responses':responses},\n '$setOnInsert' : {'name' : player_name}\n }\n self.features.update({'name': player_name}, update, upsert = True)\n\n\n def get_player_features(self, player_name):\n return self.features.find_one({'name': player_name})\n","sub_path":"pokerbot/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"258008884","text":"# Inspired from: https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist_softmax.py\n\n# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A very simple MNIST classifier.\n\nSee extensive documentation at\nhttps://www.tensorflow.org/get_started/mnist/beginners\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\nimport numpy as np\nimport tensorflow as tf\n\nif __name__ == '__main__':\n np.random.seed(12345678)\n tf.set_random_seed(87654321)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dtype\", type=str, default='float32', help='Input and output dtype')\n parser.add_argument(\"--nbatch\", type=int, default=64, help='Batch size of the layer')\n parser.add_argument(\"--nin\", type=int, default=100, help='Input size of the layer')\n parser.add_argument(\"--nout\", type=int, default=10, help='Output size of the layer')\n parser.add_argument(\"--nsteps\", type=int, default=1000, help='Number of training steps')\n args = parser.parse_args()\n\n # Create the model\n x = tf.placeholder(args.dtype, [None, args.nin])\n W = tf.Variable(tf.zeros([args.nin, args.nout], dtype=args.dtype))\n b = tf.Variable(tf.zeros([args.nout], dtype=args.dtype))\n y = tf.matmul(x, W) + b\n\n # Define loss and optimizer\n y_ = tf.placeholder(args.dtype, [None, args.nout])\n\n # The raw formulation of cross-entropy,\n #\n # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),\n # reduction_indices=[1]))\n #\n # can be numerically unstable.\n #\n # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw\n # outputs of 'y', and then average across the batch.\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n # tf.global_variables_initializer().run()\n # Train\n for i in range(args.nsteps):\n data = np.random.normal(size=(args.nbatch, args.nin)).astype(args.dtype)\n target = np.zeros((args.nbatch, args.nout), dtype=args.dtype)\n target[np.arange(args.nbatch), np.random.randint(0, args.nout, args.nbatch)] = 1\n\n sess.run(train_step, feed_dict={x: data, y_: target})\n if (i + 1) % 100 == 0:\n print(\"Step %d/%d\" % (i + 1, args.nsteps))\n print('End')\n","sub_path":"tutorials/tensorflow/old_benchmark/_backup_benchmark_softmax.py","file_name":"_backup_benchmark_softmax.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"227341089","text":"def load_stock(filename):\n file = open(filename, \"r\")\n stock_list = []\n for line in file:\n line = line.strip()\n item = line.split(\",\")\n item[1] = int(item[1])\n item[2] = int(item[2])\n stock_list.append(item)\n file.close()\n stock_list.sort()\n\n return stock_list\n\ndef store_stock(stock_list):\n file = open(\"stock.txt\", 
\"w\")\n    for line in stock_list:\n        line[1] = str(line[1])\n        line[2] = str(line[2])\n        item = \",\".join(line) + \"\\n\"\n        file.write(item)\n    file.close()\n\ndef take_name(stock_list):\n    while True:\n        res = \" \"\n        name = input(\"What would you like to buy? >>>\")\n        for item in stock_list:\n            if item[0] == name:\n                break\n        if item[0] == name:\n            break\n        else:\n            print (\"Sorry, we do not have stock of \" + name + \".\")\n            res = input(\"Do you want to buy another item? (y/n)>>>\")\n            if res == \"n\":\n                break \n    if res == \" \":\n        return item\n    else:\n        return []\n    \ndef take_quant(item):\n    while True:\n        res = \" \"\n        try:\n            qty = int(input(\"How many? >>>\"))\n            if qty > item[1]:\n                print (\"Sorry, we have only %5d items.\" % item[1])\n                res = input(\"Would you buy them? (y/n)>>>\")\n                if res == \"y\":\n                    qty = item[1]\n            break \n        except ValueError:\n            print (\"Type in a number. >>>\")\n    if res == \"y\" or res == \" \":\n        return qty\n    else:\n        return 0\n\ndef take_input(stock_list):\n    item = take_name(stock_list)\n    if item != []:\n        quant = take_quant(item)\n    else:\n        quant = 0\n    return item, quant\n    \ndef sell(stock_list, sales_hist):\n    item, quant = take_input(stock_list)\n    if item == []:\n        return\n    item[1] -= quant\n    amount = item[2] * quant\n    print (\"item = \", item[0], \"; price = \", item[2], \"; quantity = \", quant, \\\n           \"; amount = \", amount)\n    sales_hist.append((item[0], item[2], quant, amount))\n    \ndef print_stock(stock_list):\n    print (\"\\n\", \" \" * 20 + \"STOCK REPORT\")\n    print (\"Name       price quantity amount\")\n    for item in stock_list:\n        print (\"%-10s %5d %5d %6d\" % (item[0], item[2], item[1], \\\n               item[1] * item[2]))\n    \n    \ndef print_sales(sales_hist):\n    print (\" \" * 20 + \"SALES REPORT\")\n    print (\"Name       price quantity amount\")\n    for item in sales_hist:\n        print (\"%-10s %5d %5d %6d\" % (item[0], item[1], item[2], item[3]))\n\"\"\"\nWhat would you like to do?\n    S: Sell item \n    P: Print stock\n    R: Report sales\n    E: Exit\nEnter your choice (S, P, R, or E)>>\n\"\"\"\ndef show_menu():\n    print (\"\\n\", \"What would you like to do?\")\n    print (\"    S: Sell an item\")\n    print (\"    P: Print stock\")\n    print (\"    R: Report sales\")\n    print (\"    E: Exit\")\n    return input (\"Enter your choice (S, P, R, or E)>>>\")\n\ndef input_error(s):\n    print (s + \"? I beg your pardon.\")\n    \ndef main(): \n    stock_list = load_stock(\"stock.txt\")\n    sales_hist = []\n    while True:\n        s = show_menu()\n        if s == \"E\":\n            break\n        elif s == \"S\":\n            sell(stock_list, sales_hist)\n        elif s == \"P\":\n            print_stock(stock_list)\n        elif s == \"R\":\n            print_sales(sales_hist)\n        else:\n            input_error(s)\n    store_stock(stock_list)\n\nmain()\n","sub_path":"2020/20200410/homework05.py","file_name":"homework05.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"116130064","text":"#!/usr/bin/env python3\n\"\"\"\nGiven a sorted (in ascending order) integer array nums of n elements\nand a target value, write a function to search target in nums. 
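Judging from load_stock and print_stock in the homework05 record above, each line of stock.txt is a comma-separated name,quantity,price triple. A small sketch that writes a sample inventory in that shape (the item names and numbers here are made up):

# name,quantity,price -- the column order load_stock above expects
sample = "apple,20,3\nbanana,15,2\ncherry,8,10\n"

with open("stock.txt", "w") as f:
    f.write(sample)

# load_stock("stock.txt") would then return
# [['apple', 20, 3], ['banana', 15, 2], ['cherry', 8, 10]]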
If\ntarget exists, then return its index, otherwise return -1.\n\n\nExamples:\n\n Input: nums = [-1,0,3,5,9,12], target = 9\n Output: 4\n Explanation: 9 exists in nums and its index is 4\n\n\n Input: nums = [-1,0,3,5,9,12], target = 2\n Output: -1\n Explanation: 2 does not exist in nums so return -1\n \nNote:\n - You may assume that all elements in nums are unique.\n - n will be in the range [1, 10000].\n - The value of each element in nums will be in the range [-9999, 9999].\n\nReference:\n - https://leetcode.com/problems/binary-search/ (Easy)\n - https://www.geeksforgeeks.org/binary-search/\n\n\"\"\"\n\nfrom typing import List\n\nclass Solution:\n\n def search_v1(self, nums: List[int], target: int):\n \"\"\"Recursion.\"\"\"\n def helper(nums, target, L, R):\n if R < L :\n return -1\n else:\n mid = (L + R) // 2\n if target == nums[mid]:\n return mid\n elif target > nums[mid]:\n return helper(nums, target, mid+1, R)\n else:\n return helper(nums, target, L, mid-1)\n return helper(nums, target, 0, len(nums)-1)\n\n def search_v2(self, nums: List[int], target: int):\n \"\"\"Loop.\"\"\"\n L, R = 0, len(nums)-1\n while L <= R:\n mid = (L + R) // 2\n if target == nums[mid]:\n return mid\n elif target > nums[mid]:\n L = mid + 1\n else: \n R = mid - 1\n return -1\n\n\ndef main():\n a = [-1, 0, 3, 5, 9, 12]\n test_data = [\n [a, 9],\n [a, 2],\n [a, -1],\n ]\n\n sol = Solution()\n for arr, target in test_data:\n print(f\"# Input: {arr}, target = {target}\")\n print(\" - Output v1 = {}\".format(sol.search_v1(arr, target)))\n print(\" - Output v2 = {}\".format(sol.search_v2(arr, target)))\n\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"python3/sorting_and_search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"467156959","text":"import torch\n\nclass Encoder(torch.nn.Module):\n def __init__(self, hidden_size, vocab_size, embedding_dim, num_layers=1, bidirectional=False):\n super(Encoder, self).__init__()\n self.hidden_size = hidden_size\n # self.embedding = torch.nn.Embedding.from_pretrained(weights)\n self.embedding = torch.nn.Embedding(vocab_size, embedding_dim)\n self.num_layers = num_layers\n self.bidirectional = bidirectional\n self.lstm = torch.nn.LSTM(embedding_dim, hidden_size, dropout=0.2, num_layers=num_layers, bidirectional=bidirectional)\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n def forward(self, inputs, hidden):\n ips, lengths = inputs\n inputs = self.embedding(ips)\n packed_embedded = torch.nn.utils.rnn.pack_padded_sequence(inputs, lengths)\n outputs, hidden = self.lstm(packed_embedded, hidden)\n outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)\n return outputs, hidden\n \n def init_hidden(self, batch_size):\n s = self.num_layers * (2 if self.bidirectional else 1)\n return (torch.zeros(size=(s, batch_size, self.hidden_size), device=self.device), torch.zeros(size=(s, batch_size, self.hidden_size), device=self.device))\n","sub_path":"Encoder.py","file_name":"Encoder.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"590578585","text":"import matplotlib.pyplot as plt\r\nimport neural_network as nn\r\nimport graphics as g\r\nimport random\r\nimport time\r\nimport math\r\n\r\n'''\r\n File name: main.py\r\n Author: Michael Berge\r\n Date created: 7/19/2018\r\n Date modified: 5/2/2020\r\n 
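A quick, hypothetical smoke test for the Encoder record above, showing the shapes it expects: inputs is a (padded_tokens, lengths) pair with time on dimension 0, and the lengths must be in descending order because pack_padded_sequence is called with its default enforce_sorted behavior:

import torch
from Encoder import Encoder  # assumes the record above is saved as Encoder.py

enc = Encoder(hidden_size=16, vocab_size=100, embedding_dim=8)
enc.device = torch.device("cpu")  # keep the hidden state on CPU with the untrained weights

batch_size, max_len = 4, 7
tokens = torch.randint(0, 100, (max_len, batch_size))  # (seq_len, batch) padded token ids
lengths = torch.tensor([7, 5, 4, 2])                   # true lengths, descending

hidden = enc.init_hidden(batch_size)
outputs, hidden = enc((tokens, lengths), hidden)
print(outputs.shape)  # torch.Size([7, 4, 16]) -- (seq_len, batch, hidden_size)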
Python Version: 3.8.1\r\n'''\r\n\r\ndef main():\r\n num_i = 2\r\n num_h1 = 5\r\n num_h2 = 5\r\n num_o = 1\r\n args = [num_i, num_h1, num_o, num_h2]\r\n\r\n neural_network = nn.NeuralNetwork(args)\r\n start_time = time.time()\r\n plt.title(\"Training Data Improvement\")\r\n plt.xlabel(\"Iterations\")\r\n plt.ylabel(\"Guess\")\r\n\r\n l1 = []\r\n l2 = []\r\n l3 = []\r\n l4 = []\r\n\r\n # Number of training iterations\r\n epoch = 1000\r\n\r\n # Graphics object\r\n gr = g.Graphics(args)\r\n\r\n for i in range(epoch):\r\n file = open(\"training_data.txt\", \"r\")\r\n arr = []\r\n\r\n for j in range(4):\r\n str_ = file.readline()\r\n str_split = str_.split(\" \")\r\n arr.append(str_split)\r\n random.shuffle(arr)\r\n\r\n for j in range(4):\r\n input_ = [int(arr[j][0]), int(arr[j][1])]\r\n target = [int(arr[j][2].strip())]\r\n neural_network.train(input_, target, args, gr)\r\n\r\n if i % (epoch / 100) == 0:\r\n l1.append(neural_network.feed_forward([0, 0], args))\r\n l2.append(neural_network.feed_forward([1, 1], args))\r\n l3.append(neural_network.feed_forward([0, 1], args))\r\n l4.append(neural_network.feed_forward([1, 0], args))\r\n file.close()\r\n\r\n # Print progress bar\r\n print_progress_bar(i + 1, epoch, prefix='Progress:', suffix='Complete', length=50)\r\n\r\n # Calculate and display training time\r\n display_time(start_time)\r\n\r\n # testing data for the network\r\n print(\"[0, 0]: \" + str(round(neural_network.feed_forward([0, 0], args)[0])))\r\n print(\"[1, 1]: \" + str(round(neural_network.feed_forward([1, 1], args)[0])))\r\n print(\"[0, 1]: \" + str(round(neural_network.feed_forward([0, 1], args)[0])))\r\n print(\"[1, 0]: \" + str(round(neural_network.feed_forward([1, 0], args)[0])))\r\n\r\n # Plot points and display graph\r\n x = []\r\n for i in range(epoch):\r\n if i % (epoch / 100) == 0:\r\n x.append(i)\r\n plt.plot(x, l1, \"black\")\r\n plt.plot(x, l2, \"black\")\r\n plt.plot(x, l3, \"black\")\r\n plt.plot(x, l4, \"black\")\r\n plt.show()\r\n\r\n# Prints the progress bar for training data iterations\r\ndef print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='#'):\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filled_length = int(length * iteration // total)\r\n bar = fill * filled_length + '-' * (length - filled_length)\r\n print('%s [%s] %s%% %s' % (prefix, bar, percent, suffix), end='\\r')\r\n # Print New line on Complete\r\n if iteration == total:\r\n print()\r\n\r\n# Displays the time from start_time to the time the function was called\r\ndef display_time(start_time):\r\n end_time = time.time()\r\n time_elapsed = end_time - start_time\r\n print(\"\\nTime Elapsed: \", end=\"\")\r\n if time_elapsed > 60:\r\n print(\"{:02d}\".format(math.floor(time_elapsed / 60)), end=\"\")\r\n print(\":{:02}\".format(round(time_elapsed % 60)), end=\"\\n\\n\")\r\n else:\r\n print(\"00:{:02d}\".format(round(time_elapsed % 60)), end=\"\\n\\n\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"554016938","text":"import copy\nimport pytest\n\nfrom uuid import uuid4\n\n\nORDER = [\n 'user', 'award', 'lab', 'static_section', 'higlass_view_config', 'page',\n 'ontology', 'ontology_term', 'file_format', 'badge', 'organism', 'gene',\n 'genomic_region', 'bio_feature', 'target', 'imaging_path', 'publication',\n 'publication_tracking', 
'document', 'image', 'vendor', 'construct',\n 'modification', 'experiment_type', 'protocol', 'sop_map', 'biosample_cell_culture',\n 'individual_human', 'individual_mouse', 'individual_fly', 'individual_primate',\n 'individual_chicken', 'individual_zebrafish', 'biosource', 'antibody', 'enzyme',\n 'treatment_rnai', 'treatment_agent',\n 'biosample', 'quality_metric_fastqc', 'quality_metric_bamcheck', 'quality_metric_rnaseq',\n 'quality_metric_bamqc', 'quality_metric_pairsqc', 'quality_metric_margi',\n 'quality_metric_dedupqc_repliseq', 'quality_metric_chipseq', 'quality_metric_chipseq_v2', 'quality_metric_workflowrun',\n 'quality_metric_atacseq', 'quality_metric_rnaseq_madqc', 'quality_metric_qclist',\n 'microscope_setting_d1', 'microscope_setting_d2',\n 'microscope_setting_a1', 'microscope_setting_a2', 'file_fastq',\n 'file_processed', 'file_reference', 'file_calibration', 'file_microscopy',\n 'file_set', 'file_set_calibration', 'file_set_microscope_qc',\n 'file_vistrack', 'experiment_hi_c', 'experiment_capture_c',\n 'experiment_repliseq', 'experiment_atacseq', 'experiment_chiapet',\n 'experiment_damid', 'experiment_seq', 'experiment_tsaseq',\n 'experiment_mic', 'experiment_set', 'experiment_set_replicate',\n 'data_release_update', 'software', 'analysis_step', 'workflow',\n 'workflow_mapping', 'workflow_run_sbg', 'workflow_run_awsem',\n 'tracking_item', 'quality_metric_flag',\n 'summary_statistic', 'summary_statistic_hi_c', 'workflow_run',\n 'microscope_configuration', 'image_setting', 'quality_metric_mcool',\n 'ingestion_submission', 'file_other', 'filter_set'\n]\n\n\n@pytest.fixture\ndef wrangler_testapp(wrangler, app, external_tx, zsa_savepoints):\n return remote_user_testapp(app, wrangler['uuid'])\n\n\n@pytest.fixture\ndef submitter_testapp(submitter, app, external_tx, zsa_savepoints):\n return remote_user_testapp(app, submitter['uuid'])\n\n\n@pytest.fixture\ndef lab(testapp, award):\n item = {\n 'name': 'encode-lab',\n 'title': 'ENCODE lab',\n 'status': 'current',\n 'awards': [award['@id']]\n }\n return testapp.post_json('/lab', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef another_lab(testapp, award):\n item = {\n 'name': 'another-encode-lab',\n 'title': 'Another ENCODE lab',\n 'status': 'current',\n 'awards': [award['@id']]\n }\n return testapp.post_json('/lab', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef admin(testapp):\n item = {\n 'first_name': 'Test',\n 'last_name': 'Admin',\n 'email': 'admin@example.org',\n 'groups': ['admin'],\n 'status': 'current'\n }\n # User @@object view has keys omitted.\n res = testapp.post_json('/user', item)\n return testapp.get(res.location).json\n\n\n@pytest.fixture\ndef submitter(testapp, lab, award):\n item = {\n 'first_name': 'ENCODE',\n 'last_name': 'Submitter',\n 'email': 'encode_submitter@example.org',\n 'submits_for': [lab['@id']],\n 'viewing_groups': [award['viewing_group']],\n 'status': \"current\"\n }\n # User @@object view has keys omitted.\n res = testapp.post_json('/user', item)\n return testapp.get(res.location).json\n\n\n@pytest.fixture\ndef pi(testapp, lab, award):\n item = {\n 'first_name': 'ENCODE',\n 'last_name': 'PI',\n 'email': 'encode_pi@example.org',\n 'submits_for': [lab['@id']],\n 'viewing_groups': [award['viewing_group']],\n 'status': \"current\"\n }\n # User @@object view has keys omitted.\n res = testapp.post_json('/user', item)\n return testapp.get(res.location).json\n\n@pytest.fixture\ndef access_key(testapp, submitter):\n description = 'My programmatic key'\n item = {\n 'user': submitter['@id'],\n 'description': 
description,\n }\n res = testapp.post_json('/access_key', item)\n result = res.json['@graph'][0].copy()\n result['secret_access_key'] = res.json['secret_access_key']\n return result\n\n\n@pytest.fixture\ndef award(testapp):\n item = {\n 'name': 'encode3-award',\n 'description': 'ENCODE test award',\n 'viewing_group': '4DN',\n 'project': '4DN'\n }\n return testapp.post_json('/award', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef human_individual(testapp, award, lab, human):\n item = {\n \"accession\": \"4DNINOOOAAQ1\",\n \"age\": 53,\n \"age_units\": \"year\",\n 'award': award['@id'],\n 'lab': lab['@id'],\n 'organism': human['@id'],\n \"ethnicity\": \"Caucasian\",\n \"health_status\": \"unknown\",\n \"life_stage\": \"adult\",\n \"sex\": \"female\",\n \"status\": \"released\",\n \"url\": \"http://ccr.coriell.org/Sections/BrowseCatalog/FamilyTypeSubDetail.aspx?PgId=402&fam=1463&coll=GM\",\n # \"uuid\": \"44d24e3f-bc5b-469a-8500-7ebd728f8ed5\"\n }\n return testapp.post_json('/individual_human', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef worthington_biochemical(testapp, award, lab):\n item = {\n \"title\": \"Worthington Biochemical\",\n \"name\": \"worthington-biochemical\",\n \"description\": \"\",\n \"url\": \"http://www.worthington-biochem.com\",\n 'award': award['@id'],\n 'lab': lab['@id'],\n 'status': 'current'\n }\n return testapp.post_json('/vendor', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef mboI(testapp, worthington_biochemical, lab, award):\n item = {\n \"name\": \"MboI\",\n \"enzyme_source\": worthington_biochemical['@id'],\n 'status': 'current',\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/enzyme', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef lung_biosource(testapp, lab, award, lung_oterm):\n item = {\n \"biosource_type\": \"tissue\",\n 'tissue': lung_oterm['@id'],\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/biosource', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef de_term(testapp, uberon_ont, lab, award):\n item = {\n \"term_id\": \"UBERON:0005439\",\n \"term_name\": \"definitive endoderm\",\n \"term_url\": \"http://purl.obolibrary.org/obo/UBERON_0005439\",\n \"source_ontologies\": [uberon_ont['@id']]\n }\n return testapp.post_json('/ontology_term', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef biosample_cc_wo_diff(testapp, lab, award):\n item = {\n \"culture_start_date\": \"2018-01-01\",\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/biosample_cell_culture', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef tissue_biosample(testapp, lung_biosource, lab, award):\n item = {\n 'description': \"Tissue Biosample\",\n 'biosource': [lung_biosource['uuid']],\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/biosample', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef protocol_data(lab, award):\n return {'description': 'A Protocol',\n 'protocol_type': 'Experimental protocol',\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n\n\n@pytest.fixture\ndef protocol(testapp, protocol_data):\n return testapp.post_json('/protocol', protocol_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef so_ont(testapp):\n return testapp.post_json('/ontology', {'ontology_name': 'SO'}).json['@graph'][0]\n\n\n@pytest.fixture\ndef gene_term(testapp, so_ont):\n gterm = {\n 'uuid': '7bea5bde-d860-49f8-b178-35d0dadbd644',\n 'term_id': 'SO:0000704', 'term_name': 'gene',\n 'source_ontologies': [so_ont['@id']]}\n return testapp.post_json('/ontology_term', 
gterm).json['@graph'][0]\n\n\n@pytest.fixture\ndef region_term(testapp, so_ont):\n gterm = {\n 'uuid': '6bea5bde-d860-49f8-b178-35d0dadbd644',\n 'term_id': 'SO:0000001', 'term_name': 'region',\n 'source_ontologies': [so_ont['@id']]}\n return testapp.post_json('/ontology_term', gterm).json['@graph'][0]\n\n\n@pytest.fixture\ndef protein_term(testapp, so_ont):\n gterm = {\n 'uuid': '8bea5bde-d860-49f8-b178-35d0dadbd644',\n 'term_id': 'SO:0000104', 'term_name': 'polypeptide',\n 'preferred_name': 'protein',\n 'source_ontologies': [so_ont['@id']]}\n return testapp.post_json('/ontology_term', gterm).json['@graph'][0]\n\n\n@pytest.fixture\ndef transcript_term(testapp, so_ont):\n gterm = {\n 'uuid': '5bea5bde-d860-49f8-b178-35d0dadbd644',\n 'term_id': 'SO:0000673', 'term_name': 'transcript',\n 'source_ontologies': [so_ont['@id']]}\n return testapp.post_json('/ontology_term', gterm).json['@graph'][0]\n\n\n@pytest.fixture\ndef component_term(testapp, so_ont):\n gterm = {\n 'uuid': '4bea5bde-d860-49f8-b178-35d0dadbd644',\n 'term_id': 'GO:0005575', 'term_name': 'cellular_component',\n 'source_ontologies': [so_ont['@id']]}\n return testapp.post_json('/ontology_term', gterm).json['@graph'][0]\n\n\n@pytest.fixture\ndef cell_line_term(testapp, ontology):\n item = {\n \"is_slim_for\": \"cell\",\n \"namespace\": \"http://www.ebi.ac.uk/efo\",\n \"term_id\": \"EFO:0000322\",\n \"term_name\": \"cell line\",\n \"uuid\": \"111189bc-8535-4448-903e-854af460a233\",\n \"source_ontologies\": [ontology['@id']],\n \"term_url\": \"http://www.ebi.ac.uk/efo/EFO_0000322\"\n }\n return testapp.post_json('/ontology_term', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef f123_oterm(testapp, ontology, cell_line_term):\n item = {\n \"uuid\": \"530036bc-8535-4448-903e-854af460b254\",\n \"term_name\": \"F123-CASTx129\",\n \"term_id\": \"EFO:0009319\",\n \"source_ontologies\": [ontology['@id']],\n \"slim_terms\": [cell_line_term['@id']]\n }\n return testapp.post_json('/ontology_term', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef gm12878_oterm(testapp, ontology, cell_line_term):\n item = {\n \"uuid\": \"530056bc-8535-4448-903e-854af460b111\",\n \"term_name\": \"GM12878\",\n \"term_id\": \"EFO:0002784\",\n \"source_ontologies\": [ontology['@id']],\n \"slim_terms\": [cell_line_term['@id']]\n }\n return testapp.post_json('/ontology_term', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef thousandgen_oterm_data(ontology, cell_line_term):\n return {\"source_ontologies\": [ontology['@id']],\n \"slim_terms\": [cell_line_term['@id']]}\n\n\n@pytest.fixture\ndef thousandgen_oterms(testapp, thousandgen_oterm_data):\n oterms = []\n names = {'HG12345': 'EFO:999998', 'GM12345': 'EFO:999999'}\n for tn, tid in names.items():\n thousandgen_oterm_data['term_name'] = tn\n thousandgen_oterm_data['term_id'] = tid\n oterms.append(testapp.post_json('/ontology_term', thousandgen_oterm_data).json['@graph'][0])\n return oterms\n\n\n@pytest.fixture\ndef b_lymphocyte_oterm(testapp, uberon_ont):\n item = {\n \"term_name\": \"lymphocyte of B lineage\",\n \"term_id\": \"CL:0000945\",\n \"preferred_name\": \"B-lymphocyte\",\n \"source_ontologies\": [uberon_ont['@id']],\n }\n return testapp.post_json('/ontology_term', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef F123_biosource(testapp, lab, award, f123_oterm):\n item = {\n \"accession\": \"4DNSROOOAAQ2\",\n \"biosource_type\": \"stem cell\",\n \"cell_line\": f123_oterm['@id'],\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/biosource', 
item).json['@graph'][0]\n\n\n@pytest.fixture\ndef GM12878_biosource(testapp, lab, award, gm12878_oterm, b_lymphocyte_oterm):\n item = {\n \"accession\": \"4DNSROOOAAQ1\",\n \"biosource_type\": \"immortalized cell line\",\n \"cell_line\": gm12878_oterm['@id'],\n \"tissue\": b_lymphocyte_oterm['@id'],\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/biosource', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef tier1_biosource(testapp, protocol, lab, award, gm12878_oterm):\n item = {\n 'description': 'Tier 1 cell line Biosource',\n 'biosource_type': 'immortalized cell line',\n 'cell_line': gm12878_oterm['@id'],\n 'SOP_cell_line': protocol['@id'],\n 'cell_line_tier': 'Tier 1',\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/biosource', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef human_biosource(testapp, human_individual, worthington_biochemical, gm12878_oterm, lab, award):\n item = {\n \"description\": \"GM12878 cells\",\n \"biosource_type\": \"immortalized cell line\",\n \"individual\": human_individual['@id'],\n \"cell_line\": gm12878_oterm['@id'],\n \"biosource_vendor\": worthington_biochemical['@id'],\n \"status\": \"current\",\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/biosource', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef human_data():\n return {\n 'uuid': '7745b647-ff15-4ff3-9ced-b897d4e2983c',\n 'name': 'human',\n 'scientific_name': 'Homo sapiens',\n 'taxon_id': '9606',\n 'genome_assembly': 'GRCh38'\n }\n\n\n@pytest.fixture\ndef human(testapp, human_data):\n return testapp.post_json('/organism', human_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef mouse(testapp):\n item = {\n 'uuid': '3413218c-3d86-498b-a0a2-9a406638e786',\n 'name': 'mouse',\n 'scientific_name': 'Mus musculus',\n 'taxon_id': '10090',\n 'genome_assembly': 'GRCm38'\n }\n return testapp.post_json('/organism', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef mouse_individual(testapp, mouse, lab, award):\n item = {\n 'uuid': '4731442b-f283-4fdf-ad8a-a69cf5a7c68a',\n \"age\": 53,\n \"age_units\": \"day\",\n 'award': award['@id'],\n 'lab': lab['@id'],\n 'organism': mouse['@id'],\n \"mouse_strain\": \"Balb-c\",\n \"mouse_life_stage\": \"adult\",\n \"sex\": \"female\",\n }\n return testapp.post_json('/individual_mouse', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef organism(human):\n return human\n\n\n@pytest.fixture\ndef experiment_set(testapp, lab, award):\n item = {\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'experimentset_type': 'replicates',\n 'status': 'in review by lab'\n }\n return testapp.post_json('/experiment_set', item).json['@graph'][0]\n\n\n# fixtures for testing calculated experiment_sets property in experiment_set\n# and also for _update method of experiment_set_replicate (and experiment_set)\n@pytest.fixture\ndef experiment(testapp, experiment_data):\n return testapp.post_json('/experiment_hi_c', experiment_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef experiment_data(lab, award, human_biosample, mboI, exp_types):\n return {\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'biosample': human_biosample['@id'],\n 'experiment_type': exp_types['hic']['@id'],\n 'digestion_enzyme': mboI['@id'],\n 'status': 'in review by lab'\n }\n\n\n@pytest.fixture\ndef exp_types(testapp, lab, award):\n experiment_types = {}\n title_dict = {\n 'hic': ('in situ Hi-C', [\"ExperimentHiC\"]),\n 'microc': ('Micro-C', [\"ExperimentHiC\"]),\n 'capc': ('Capture Hi-C', [\"ExperimentCaptureC\"]),\n 'rnaseq': ('RNA-seq', 
[\"ExperimentSeq\"]),\n 'fish': ('DNA FISH', [\"ExperimentMic\"]),\n 'dnase': ('DNase Hi-C', [\"ExperimentHiC\"]),\n 'dam': ('DamID-seq', [\"ExperimentDamid\"]),\n 'chia': ('ChIA-PET', [\"ExperimentChiapet\"]),\n 'repliseq': ('2-stage Repli-seq', [\"ExperimentRepliseq\"]),\n 'multi': ('Multi-stage Repli-seq', [\"ExperimentRepliseq\"]),\n 'chipseq': ('ChIP-seq', [\"ExperimentSeq\"]),\n 'dilution': ('Dilution Hi-C', [\"ExperimentHiC\"]),\n 'atacseq': ('ATAC-seq', [\"ExperimentAtacseq\"]),\n 'tsaseq': ('TSA-seq', [\"ExperimentTsaseq\"])\n }\n for k, v in title_dict.items():\n data = {\n 'uuid': str(uuid4()),\n 'title': v[0],\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'status': 'released',\n 'valid_item_types': v[1]\n }\n experiment_types[k] = testapp.post_json('/experiment_type', data, status=201).json['@graph'][0]\n return experiment_types\n\n\n@pytest.fixture\ndef experiment_project_release(testapp, lab, award, human_biosample, exp_types):\n item = {\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'biosample': human_biosample['@id'],\n 'experiment_type': exp_types['microc']['@id'],\n 'status': 'released to project'\n }\n return testapp.post_json('/experiment_hi_c', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef base_experiment(testapp, experiment_data):\n return testapp.post_json('/experiment_hi_c', experiment_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef experiments(testapp, experiment_data):\n expts = []\n for i in range(4):\n experiment_data['description'] = 'Experiment ' + str(i)\n expts.append(testapp.post_json('/experiment_hi_c', experiment_data).json['@graph'][0])\n return expts\n\n\n@pytest.fixture\ndef rep_set_data(lab, award):\n return {\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'description': 'Test replicate set',\n }\n\n\n@pytest.fixture\ndef empty_replicate_set(testapp, rep_set_data):\n return testapp.post_json('/experiment_set_replicate', rep_set_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef two_experiment_replicate_set(testapp, rep_set_data, experiments):\n rep_set_data['description'] = 'Two one BioRep Experiment Replicate Set'\n rep_set_data['replicate_exps'] = [\n {'replicate_exp': experiments[0]['@id'],\n 'bio_rep_no': 1,\n 'tec_rep_no': 1},\n {'replicate_exp': experiments[1]['@id'],\n 'bio_rep_no': 1,\n 'tec_rep_no': 2}\n ]\n return testapp.post_json('/experiment_set_replicate', rep_set_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef file_formats(testapp, lab, award):\n formats = {}\n ef_format_info = {\n 'pairs_px2': {'standard_file_extension': 'pairs.gz.px2',\n \"valid_item_types\": [\"FileProcessed\"]},\n 'pairsam_px2': {'standard_file_extension': 'sam.pairs.gz.px2',\n \"valid_item_types\": [\"FileProcessed\"]},\n 'bai': {'standard_file_extension': 'bam.bai',\n \"valid_item_types\": [\"FileProcessed\"]},\n 'beddb': {\"standard_file_extension\": \"beddb\",\n \"valid_item_types\": [\"FileProcessed\", \"FileReference\"]},\n }\n format_info = {\n 'fastq': {'standard_file_extension': 'fastq.gz',\n 'other_allowed_extensions': ['fq.gz'],\n \"valid_item_types\": [\"FileFastq\"]},\n 'pairs': {'standard_file_extension': 'pairs.gz',\n \"extrafile_formats\": ['pairs_px2', 'pairsam_px2'],\n \"valid_item_types\": [\"FileProcessed\"]},\n 'bam': {'standard_file_extension': 'bam',\n 'extrafile_formats': ['bai'],\n \"valid_item_types\": [\"FileProcessed\"]},\n 'mcool': {'standard_file_extension': 'mcool',\n \"valid_item_types\": [\"FileProcessed\", \"FileVistrack\"]},\n 'tiff': {'standard_file_extension': 'tiff',\n 'other_allowed_extensions': ['tif'],\n 
\"valid_item_types\": [\"FileMicroscopy\", \"FileCalibration\"]},\n 'zip': {'standard_file_extension': 'zip',\n \"valid_item_types\": [\"FileProcessed\", \"FileMicroscopy\", \"FileCalibration\"]},\n 'chromsizes': {'standard_file_extension': 'chrom.sizes',\n \"valid_item_types\": [\"FileReference\"]},\n 'other': {'standard_file_extension': '',\n \"valid_item_types\": [\"FileProcessed\", \"FileMicroscopy\",\n \"FileReference\", \"FileCalibration\"]},\n 'bw': {'standard_file_extension': 'bw',\n \"valid_item_types\": [\"FileProcessed\", \"FileVistrack\"]},\n 'bg': {'standard_file_extension': 'bedGraph.gz',\n \"valid_item_types\": [\"FileProcessed\", \"FileVistrack\"]},\n 'bigbed': {'standard_file_extension': 'bb',\n \"valid_item_types\": [\"FileProcessed\", \"FileReference\"]},\n 'bed': {\"standard_file_extension\": \"bed.gz\",\n \"extrafile_formats\": ['beddb'],\n \"valid_item_types\": [\"FileProcessed\", \"FileReference\"]}\n }\n\n for eff, info in ef_format_info.items():\n info['file_format'] = eff\n info['uuid'] = str(uuid4())\n info['lab'] = lab['@id']\n info['award'] = award['@id']\n formats[eff] = testapp.post_json('/file_format', info, status=201).json['@graph'][0]\n for ff, info in format_info.items():\n info['file_format'] = ff\n info['uuid'] = str(uuid4())\n if info.get('extrafile_formats'):\n eff2add = []\n for eff in info.get('extrafile_formats'):\n eff2add.append(formats[eff].get('@id'))\n info['extrafile_formats'] = eff2add\n info['lab'] = lab['@id']\n info['award'] = award['@id']\n formats[ff] = testapp.post_json('/file_format', info, status=201).json['@graph'][0]\n return formats\n\n\n@pytest.fixture\ndef file(testapp, lab, award, file_formats):\n item = {\n 'file_format': file_formats.get('fastq').get('@id'),\n 'md5sum': 'd41d8cd98f00b204e9800998ecf8427e',\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'status': 'uploaded', # avoid s3 upload codepath\n }\n return testapp.post_json('/file_fastq', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef file_fastq(testapp, lab, award, file_formats):\n item = {\n 'file_format': file_formats.get('fastq').get('@id'),\n 'md5sum': 'd41d8cd9f00b204e9800998ecf8427e',\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'status': 'uploaded', # avoid s3 upload codepath\n }\n return testapp.post_json('/file_fastq', item).json['@graph'][0]\n\n\nRED_DOT = \"\"\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUA\nAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO\n9TXL0Y4OHwAAAABJRU5ErkJggg==\"\"\"\n\n\n@pytest.fixture\ndef attachment():\n return {'download': 'red-dot.png', 'href': RED_DOT}\n\n\n@pytest.fixture\ndef image_data(attachment, lab, award):\n return {\n 'attachment': attachment,\n 'caption': 'Test image',\n 'award': award['uuid'],\n 'lab': lab['uuid'],\n }\n\n\n@pytest.fixture\ndef image(testapp, image_data):\n return testapp.post_json('/image', image_data).json['@graph'][0]\n\n\n@pytest.fixture\ndef rnai(testapp, lab, award):\n item = {\n 'award': award['@id'],\n 'lab': lab['@id'],\n 'target_sequence': 'TATATGGGGAA',\n 'rnai_type': 'shRNA',\n }\n return testapp.post_json('/treatment_rnai', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef construct(testapp, lab, award):\n item = {\n 'name': 'Awesome_Construct',\n 'construct_type': 'tagging construct',\n 'protein_tags': ['eGFP, C-terminal'],\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/construct', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef publication(testapp, lab, award):\n item = {\n 'uuid': '8312fc0c-b241-4cb2-9b01-1438910550ad',\n 
'award': award['@id'],\n 'lab': lab['@id'],\n 'ID': \"PMID:22955616\",\n }\n return testapp.post_json('/publication', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef publication_tracking(testapp, lab, award):\n item = {\n 'uuid': '8312fc0c-b241-4cb2-9b01-1438910550ac',\n 'award': award['@id'],\n 'lab': lab['@id'],\n 'PMID': \"PMID:12345678\",\n }\n return testapp.post_json('/publication_tracking', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef software(testapp, lab, award):\n # TODO: ASK_ANDY do we want software_type to be an array?\n item = {\n \"name\": \"FastQC\",\n \"software_type\": [\"indexer\", ],\n \"version\": \"1\",\n 'lab': lab['@id'],\n 'award': award['@id']\n }\n return testapp.post_json('/software', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef analysis_step(testapp, software, lab, award):\n item = {\n 'name': 'fastqc',\n \"software_used\": software['@id'],\n \"version\": \"1\",\n 'lab': lab['@id'],\n 'award': award['@id']\n }\n return testapp.post_json('/analysis_step', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef document(testapp, lab, award):\n item = {\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/document', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef workflow_run_sbg(testapp, lab, award, workflow_bam):\n item = {'run_platform': 'SBG',\n 'parameters': [],\n 'workflow': workflow_bam['@id'],\n 'title': u'md5 run 2017-01-20 13:16:11.026176',\n 'sbg_import_ids': [u'TBCKPdzfUE9DpvtzO6yb9yoIvO81RaZd'],\n 'award': award['@id'],\n 'sbg_task_id': '1235',\n 'lab': lab['@id'],\n 'sbg_mounted_volume_ids': ['4dn_s32gkz1s7x', '4dn_s33xkquabu'],\n 'run_status': 'started',\n }\n return testapp.post_json('/workflow_run_sbg', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef workflow_run_awsem(testapp, lab, award, workflow_bam):\n item = {'run_platform': 'AWSEM',\n 'parameters': [],\n 'workflow': workflow_bam['@id'],\n 'title': u'md5 run 2017-01-20 13:16:11.026176',\n 'award': award['@id'],\n 'awsem_job_id': '1235',\n 'lab': lab['@id'],\n 'run_status': 'started',\n }\n return testapp.post_json('/workflow_run_awsem', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef workflow_run_json(testapp, lab, award, workflow_bam):\n return {'run_platform': 'SBG',\n 'parameters': [],\n 'workflow': workflow_bam['@id'],\n 'title': u'md5 run 2017-01-20 13:16:11.026176',\n 'sbg_import_ids': [u'TBCKPdzfUE9DpvtzO6yb9yoIvO81RaZd'],\n 'award': award['@id'],\n 'sbg_task_id': '1235',\n 'lab': lab['@id'],\n 'sbg_mounted_volume_ids': ['4dn_s32gkz1s7x', '4dn_s33xkquabu'],\n 'run_status': 'started',\n }\n\n\n@pytest.fixture\ndef workflow_run_awsem_json(testapp, lab, award, workflow_bam):\n return {'run_platform': 'AWSEM',\n 'parameters': [],\n 'workflow': workflow_bam['@id'],\n 'title': u'md5 run 2017-01-20 13:16:11.026176',\n 'award': award['@id'],\n 'awsem_job_id': '1235',\n 'lab': lab['@id'],\n 'run_status': 'started',\n }\n\n\n@pytest.fixture\ndef human_biosample(testapp, human_biosource, lab, award):\n item = {\n \"description\": \"GM12878 prepared for Hi-C\",\n \"biosource\": [human_biosource['@id'], ],\n \"status\": \"in review by lab\",\n 'award': award['@id'],\n 'lab': lab['@id']\n # \"biosample_protocols\": [\"131106bc-8535-4448-903e-854af460b212\"],\n # \"modifications\": [\"431106bc-8535-4448-903e-854af460b254\"],\n # \"treatments\": [\"686b362f-4eb6-4a9c-8173-3ab267307e3b\"]\n }\n return testapp.post_json('/biosample', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef software_bam(testapp, lab, award):\n # TODO: ASK_ANDY do we want software_type to be 
an array?\n    item = {\n        \"name\": \"Aligner\",\n        \"software_type\": [\"indexer\", ],\n        \"version\": \"1\",\n        'lab': lab['@id'],\n        'award': award['@id']\n    }\n    return testapp.post_json('/software', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef workflow_bam(testapp, lab, award):\n    item = {\n        'title': \"test workflow\",\n        'name': \"test_workflow\",\n        'award': award['@id'],\n        'lab': lab['@id']\n    }\n    return testapp.post_json('/workflow', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef workflow_mapping(testapp, workflow_bam, lab, award):\n    item = {\n        \"name\": \"test mapping\",\n        \"workflow_name\": \"test workflow name\",\n        \"workflow\": workflow_bam['@id'],\n        \"data_input_type\": \"experiment\",\n        'lab': lab['@id'],\n        'award': award['@id'],\n        \"workflow_parameters\": [\n            {\"parameter\": \"bowtie_index\", \"value\": \"some value\"},\n            {\"parameter\": \"genome_version\", \"value\": \"mm9\"}\n        ],\n        \"experiment_parameters\": [\n            {\"parameter\": \"biosample.biosource.individual.organism\", \"value\": \"mouse\"}\n        ]\n    }\n    return testapp.post_json('/workflow_mapping', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef basic_genomic_region(testapp, lab, award):\n    item = {\n        \"genome_assembly\": \"GRCh38\",\n        'award': award['@id'],\n        'lab': lab['@id'],\n    }\n    return testapp.post_json('/genomic_region', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef genome_info(lab, award):\n    return {\n        \"genome_assembly\": \"GRCh38\",\n        \"chromosome\": \"X\",\n        \"start_coordinate\": 1,\n        \"end_coordinate\": 3,\n        'award': award['@id'],\n        'lab': lab['@id']\n    }\n\n\n@pytest.fixture\ndef genomic_region_w_chrloc(testapp, genome_info):\n    return testapp.post_json('/genomic_region', genome_info).json['@graph'][0]\n\n\n@pytest.fixture\ndef genomic_region_2(testapp, genome_info):\n    genome_info['chromosome'] = '9'\n    genome_info['start_coordinate'] = 50\n    genome_info['end_coordinate'] = 300\n    return testapp.post_json('/genomic_region', genome_info).json['@graph'][0]\n\n\n@pytest.fixture\ndef target_w_genes(testapp, lab, award):\n    item = {\n        \"targeted_genes\": [\"eeny\", \"meeny\"],\n        'award': award['@id'],\n        'lab': lab['@id'],\n    }\n    return testapp.post_json('/target', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef targ_w_alias(testapp, target_w_genes):\n    return testapp.patch_json(target_w_genes['@id'], {'aliases': ['lab:test_targ']}, status=200).json['@graph'][0]\n\n\n@pytest.fixture\ndef targ_gr_w_alias(testapp, target_w_region):\n    return testapp.patch_json(target_w_region['@id'], {'aliases': ['lab:test_targ_gr']}, status=200).json['@graph'][0]\n\n\n@pytest.fixture\ndef targ_agr_w_alias(testapp, another_target_w_region):\n    return testapp.patch_json(another_target_w_region['@id'], {'aliases': ['lab:test_another_gr']}, status=200).json['@graph'][0]\n\n\n@pytest.fixture\ndef gene_item(testapp, lab, award):\n    return testapp.post_json('/gene', {'lab': lab['@id'], 'award': award['@id'], 'geneid': '5885'}).json['@graph'][0]\n\n\n@pytest.fixture\ndef gene_bio_feature(testapp, lab, award, gene_term, gene_item):\n    item = {'award': award['@id'],\n            'lab': lab['@id'],\n            'description': 'Test Gene BioFeature',\n            'feature_type': gene_term['@id'],\n            'relevant_genes': [gene_item['@id']]}\n    return testapp.post_json('/bio_feature', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef prot_bio_feature(testapp, lab, award, protein_term, gene_item):\n    item = {'award': award['@id'],\n            'lab': lab['@id'],\n            'description': 'Test Protein BioFeature',\n            'feature_type': protein_term['@id'],\n            'relevant_genes': 
[gene_item['@id']]}\n return testapp.post_json('/bio_feature', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef biofeat_w_alias(testapp, gene_bio_feature):\n return testapp.patch_json(gene_bio_feature['@id'], {'aliases': ['lab:test_targ_bf']}, status=200).json['@graph'][0]\n\n\n@pytest.fixture\ndef gr_biofeat_w_alias(testapp, genomic_region_bio_feature):\n return testapp.patch_json(\n genomic_region_bio_feature['@id'], {'aliases': ['lab:test_targ_gr_bf']}, status=200).json['@graph'][0]\n\n\n@pytest.fixture\ndef some_genomic_region(testapp, lab, award):\n item = {'award': award['@id'],\n 'lab': lab['@id'],\n 'genome_assembly': 'GRCh38',\n 'chromosome': '1',\n 'start_coordinate': 17,\n 'end_coordinate': 544}\n return testapp.post_json('/genomic_region', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef vague_genomic_region(testapp, lab, award):\n item = {'award': award['@id'],\n 'lab': lab['@id'],\n 'genome_assembly': 'GRCm38',\n 'chromosome': '5',\n 'start_location': 'beginning',\n 'end_location': 'centromere'}\n return testapp.post_json('/genomic_region', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef vague_genomic_region_w_desc(testapp, lab, award):\n item = {'award': award['@id'],\n 'lab': lab['@id'],\n 'genome_assembly': 'GRCm38',\n 'chromosome': '5',\n 'start_location': 'beginning',\n 'end_location': 'centromere',\n 'location_description': 'gene X enhancer'}\n return testapp.post_json('/genomic_region', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef basic_region_bio_feature(testapp, lab, award, region_term):\n item = {'award': award['@id'],\n 'lab': lab['@id'],\n 'description': 'Test Region BioFeature with minimal info',\n 'feature_type': region_term['@id']}\n return testapp.post_json('/bio_feature', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef genomic_region_bio_feature(testapp, lab, award, region_term, some_genomic_region):\n item = {'award': award['@id'],\n 'lab': lab['@id'],\n 'description': 'Test Region BioFeature',\n 'feature_type': region_term['@id'],\n 'genome_location': [some_genomic_region['@id']]}\n return testapp.post_json('/bio_feature', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef target_w_region(testapp, genomic_region_w_chrloc, lab, award):\n item = {\n \"targeted_genome_regions\": [genomic_region_w_chrloc['@id']],\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/target', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef another_target_w_region(testapp, genomic_region_2, lab, award):\n item = {\n \"targeted_genome_regions\": [genomic_region_2['@id']],\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/target', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef target_w_desc(testapp, lab, award):\n item = {\n \"description\": \"I'm a region\",\n 'award': award['@id'],\n 'lab': lab['@id'],\n }\n return testapp.post_json('/target', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef mod_basic_info(lab, award):\n return {\n 'lab': lab['@id'],\n 'award': award['@id'],\n 'description': 'minimal modification',\n 'modification_type': 'Crispr',\n }\n\n\n@pytest.fixture\ndef basic_modification(testapp, mod_basic_info):\n return testapp.post_json('/modification', mod_basic_info).json['@graph'][0]\n\n\n@pytest.fixture\ndef mod_w_genomic_change(testapp, mod_basic_info):\n mod = copy.deepcopy(mod_basic_info)\n mod['description'] = 'mod with genomic change'\n mod['genomic_change'] = \"deletion\"\n return testapp.post_json('/modification', mod).json['@graph'][0]\n\n\n@pytest.fixture\ndef mod_w_target(testapp, 
mod_basic_info, gene_bio_feature):\n mod = copy.deepcopy(mod_basic_info)\n mod['description'] = 'mod with target'\n mod['target_of_mod'] = [gene_bio_feature['@id']]\n return testapp.post_json('/modification', mod).json['@graph'][0]\n\n\n@pytest.fixture\ndef mod_w_change_and_target(testapp, mod_basic_info, gene_bio_feature):\n mod = copy.deepcopy(mod_basic_info)\n mod['description'] = 'mod with target and genomic change'\n mod['target_of_mod'] = [gene_bio_feature['@id']]\n mod['genomic_change'] = \"deletion\"\n return testapp.post_json('/modification', mod).json['@graph'][0]\n\n\n@pytest.fixture\ndef uberon_ont(testapp):\n return testapp.post_json('/ontology', {'ontology_name': 'Uberon'}).json['@graph'][0]\n\n\n@pytest.fixture\ndef ontology(testapp):\n data = {\n \"uuid\": \"530006bc-8535-4448-903e-854af460b254\",\n \"ontology_name\": \"Experimental Factor Ontology\",\n \"ontology_url\": \"http://www.ebi.ac.uk/efo/\",\n \"download_url\": \"http://sourceforge.net/p/efo/code/HEAD/tree/trunk/src/efoinowl/InferredEFOOWLview/EFO_inferred.owl?format=raw\",\n \"namespace_url\": \"http://www.ebi.ac.uk/efo/\",\n \"ontology_prefix\": \"EFO\",\n \"description\": \"The description\",\n \"notes\": \"The download\",\n }\n return testapp.post_json('/ontology', data).json['@graph'][0]\n\n\n@pytest.fixture\ndef oterm(uberon_ont):\n return {\n \"uuid\": \"530036bc-8535-4448-903e-854af460b222\",\n \"preferred_name\": \"preferred lung name\",\n \"term_name\": \"lung\",\n \"term_id\": \"UBERON:0002048\",\n \"term_url\": \"http://purl.obolibrary.org/obo/UBERON_0002048\",\n \"source_ontologies\": [uberon_ont['@id']]\n }\n\n\n@pytest.fixture\ndef lung_oterm(oterm, testapp):\n return testapp.post_json('/ontology_term', oterm).json['@graph'][0]\n\n\n@pytest.fixture\ndef quality_metric_fastqc(testapp, award, lab):\n item = {\n \"uuid\": \"ed80c2a5-ae55-459b-ba1d-7b0971ce2613\",\n \"award\": award['@id'],\n \"lab\": lab['@id']\n }\n return testapp.post_json('/quality_metric_fastqc', item).json['@graph'][0]\n\n\n@pytest.fixture\ndef quality_metric_pairsqc(testapp, lab, award):\n item = {\n 'uuid': 'fdc5ca7f-35bc-421e-ab1f-00f9e5146041',\n 'award': award['@id'],\n 'lab': lab['@id']\n }\n return testapp.post_json('/quality_metric_pairsqc', item).json['@graph'][0]\n","sub_path":"src/encoded/tests/datafixtures.py","file_name":"datafixtures.py","file_ext":"py","file_size_in_byte":37392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"539933907","text":"from PIL import Image\nimport time\nimport numpy as np\nimport binascii\nimport os\nimport traceback\nimport re\nimport statistics\nimport collections\nfrom collections import Counter\n\n#Excel\nimport openpyxl\n\n#Other directories\nimport sys \nsys.path.append(\"../../../Analysis/PSNR+SSIM/\")\nfrom PSNRSSIM import returnValues\n\n\n#Matrix for Ordered dither\nthe_2x2 = np.array([[0,2],\n\t\t\t\t[3,1]])\nthe_2x2 = np.divide(the_2x2,4)\ntiled = np.tile(the_2x2,(256,256))\n\nlength =8\n\ndef greyScale(image):\n\timageArray = np.array(image, 'float64')\t\t\t#Image to numpy array\n\timageArray = np.divide(imageArray, 256)\t\t\t#Divides image values by the range of pixel values. 256 for 8 bit images\n\n\ti,j = 0,0\n\t\n\t#Message to embed and conversion to binary\n\tmessage = \"Why do programmers always mix up Halloween and Christmas? 
Because 31 OCT = 25 DEC!\"\n\tmessage = bin(int.from_bytes(message.encode('utf-8', 'surrogatepass'), 'big'))[2:]\n\tmessage = message.zfill(8*((len(message) + 7)//8))\n\t\n\tvariableY=0\n\tcounter =0\n\tfor x in range(height):\n\t\tfor y in range(0,width,length):\t\t\t\n\t\t\ttheGroup = imageArray[x,y:y+length]\n\t\t\t\t\n\t\t\n\t\t\twcounter = len([i for i in halftoneValue(np.copy(theGroup),x,y) if i > 128]) \t\t\t\t#Count number of white pixels in the halftoned group\n\t\t\t\n\t\t\tc = length-2*wcounter \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Broadens the groups complexity value\n\t\t\tt = int((length*2)/5)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Defines the threshold to compare against the complexity\n\t\t\t\n\t\t\t\n\t\t\tif((0-t) <= float(c) <= 0+t):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#If the complexity is within the threshold\n\t\t\t\tif(j < len(message)):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#For iterating through the message\n\t\t\t\t\tei = minimum_error(theGroup, message[j], length, c,t,x,y)\t\t\t\t\t\t\t#Get the minimum error to change the greyscale values so that when halftoned, embeds the message bit\n\t\t\t\t\tembeddedGroup = [x+y for x,y in zip(theGroup,ei)]\t\t\t\t\t\t\t\t#Creates a group for the combination of the selected group added with the error added\n\n\t\t\t\t\twcounter1 = len([i for i in halftoneValue(np.copy(embeddedGroup),x,y) if i > 128])\t#Count number of white pixels in the halftoned embedded group\n\t\t\t\t\td = length-2*wcounter1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Again, broadens the groups complexity value\n\t\t\t\t\n\t\t\t\t\tif((0-t) <= float(d) <= 0+t):\t\t\t\t\t\t\t\t\t\t\t\t\t#If the new embedded group is within the complexity range...\n\t\t\t\t\t\tfor g in range(0,length,1):\n\t\t\t\t\t\t\timageArray[x,y+g] = embeddedGroup[g]\t\t\t\t\t\t\t\t\t#Update the imageArray to store the greyscale values\n\n\t\t\t\t\t\thalftoneGroup(x,y,imageArray,length)\t\t\t\t#Halftone the group\n\n\t\t\t\t\t\tj += 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Iterate through the message by 1\n\t\t\t\t\telse:\n\t\t\t\t\t\thalftoneGroup(x,y,imageArray,length)\t\t\t\t#Halftone the group\n\n\t\t\t\telse:\n\t\t\t\t\thalftoneGroup(x,y,imageArray,length)\t\t\t\t\t#Halftone the group\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\telse:\n\t\t\t\thalftoneGroup(x,y,imageArray,length)\t\t\t\t\t\t#Halftone the group\n\t\t\t\t\n\t\t\n\treturn Image.fromarray(np.array(imageArray,'uint8'))\n\n\n#Find the minimum change to embed the message\ndef minimum_error(theGroup, messageBit, length,c,t,x,y):\n\teu, ed = [0]*length, [0]*length\n\tn = 1\n\n\tk = 0\n\twhile((halftoneValue([x+y for x,y in zip(theGroup,eu)],x,y).count(255)) % (2**n) != int(messageBit)):\t\t#While the number of white pixels from halftoning mod 2 is not equal to the message bit (1 or 0)\n\t\teu[k] += 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Add 1 to eu position k\n\t\tk = (k+1)%length \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Add 1 to k\n\n\n\tk = 0\n\twhile(halftoneValue([x+y for x,y in zip(theGroup,ed)],x,y).count(255) % (2**n) != int(messageBit)):\t\t\t#While the number of white pixels from halftoning mod 2 is not equal to the message bit (1 or 0)\n\t\ted[k] -= 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Take away 1 from ed position k\n\t\tk = (k+1)%length \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Add 1 to k\n\n\n\tfor counter, value in enumerate(eu):\t\t#Go through eu's values and compare them against ed. 
Whichever is smallest gets assigned to be returned\n\t\tif(value < abs(ed[counter])):\n\t\t\tei = eu\n\t\t\t#return eu\n\t\telse:\n\t\t\tei = ed\n\t\t\t#return ed\n\n\tembeddedGroup = [x+y for x,y in zip(theGroup,ei)]\n\twcounter1 = len([i for i in halftoneValue(np.copy(embeddedGroup),x,y) if i > 128])\n\td = length-2*wcounter1\n\n\tif((0-t) <= float(d) <= 0+t):\t\t\t\t#If the complexity value is within the threshold...\n\t\treturn ei \t\t\t\t\t\t\t\t#Return the error variance\n\telse:\t\t\t\t\t\t\t\t\t\t#If not...\n\t\tfor counter, value in enumerate(eu):\t#Fall back to the other (larger) correction instead\n\t\t\tif(value < abs(ed[counter])):\n\t\t\t\tei = ed\n\t\t\telse:\n\t\t\t\tei = eu\n\n\treturn ei\n\n\n#Halftone value is what the group halftone values are\ndef halftoneValue(theGroup,x,y):\n\tfor i in range(0, len(theGroup)):\n\t\ttheGroup[i] = (255 if (theGroup[i]>tiled[x,y]) else 0)\n\t\ty+=1\n\n\treturn theGroup\n\n\n#Halftone group is halftoning the group and distributing to other pixels \ndef halftoneGroup(x,y,imageArray, length):\n\tfor i in range(0, length):\n\t\timageArray[x,y] = (255 if (imageArray[x,y]>tiled[x,y]) else 0)\n\t\ty+=1\n\n\treturn imageArray\n\n\n#Extract the message from the embedded halftoned image\ndef extraction(image):\n\timageArray = np.array(image, 'float64')\t\t\t\t\t\t\t\t#Image to array\t\t\t\t\t\t\t\n\tmessage = []\n\n\tfor x in range(height):\n\t\tfor y in range(0,width,length):\n\t\t\ttheGroup = imageArray[x,y:y+length]\t\t\t\t\t\t\t#Select the group\n\t\t\n\t\t\twcounter = len([i for i in theGroup if i > 128]) \t\t\t#Number of white pixels in the group\n\n\t\t\tc = length-2*wcounter \t\t\t\t\t\t\t\t\t\t#Broaden the complexity value\n\t\t\tt = int((length*2)/5)\t\t\t\t\t\t\t\t\t\t#Threshold\n\t\t\tn = 1\n\t\t\t\n\t\t\tif((0-t) <= float(c) <= 0+t):\t\t\t\t\t\t\t\t#If the complexity is within the threshold\n\t\t\t\t\n\t\t\t\tmessageBit = wcounter % (2**n)\t\t\t\t\t\t\t#Get a 1 or a 0 from the white number count\n\t\t\t\tmessage.append(str(messageBit))\t\t\t\t\t\t\t#Add the bit to the message list\n\t\n\n\textractedMessage = ''.join(message)\t\t\t\t\t\t\t\t\t#Join the extracted bits together\n\tfinalMessage =[]\n\tfor i in range(0, len(extractedMessage), 8):\t\t\t\t\t\t#For every 8 bits (makes a character)\n\t\tmessage = int(extractedMessage[i:i+8],2)\n\t\tcharacter = message.to_bytes((message.bit_length() + 7)//8, 'big').decode('utf-8', 'ignore')\t\t#Convert to ASCII\n\t\tif(re.match(r'[\\w ?=]+', character)):\t\t\t\t\t\t\t#If the character is part of the regex\n\t\t\tfinalMessage.append(character)\t\t\t\t\t\t\t\t#Add it to the final message list\n\tprint(''.join(finalMessage)[:84])\t\n\tprint()\n\tanalyse(finalMessage)\t\t\t\t\t\t\t\t\t\t\t\t#Analyse the final message\n\ndef analyse(decryptMessage):\n\tmessage = \"Why do programmers always mix up Halloween and Christmas? Because 31 OCT = 25 DEC!\"\t\t#Original message\n\tmessage = list(message)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Make the original message a list\n\n\tvalue = 0\n\tfor item in message:\n\t\tif item in decryptMessage:\t\t\t\t\t\t#Compare the two lists\n\t\t\tvalue +=1\n\n\tmessagePercents.append(((value/84)*100))\t\t\t#Get percentage of extracted message against the original message\n\n\n\nembedTimes = []\ndecryptTimes = []\npsnrValues = []\nssimValues = []\nmessagePercents = []\n\n\n#Processes every file in the original images folder\nfileList = []\nfor file in os.listdir(\"../../../Images/Original/\"):\n\tfileList.append(file[:-4])\t\t\t\t\t\t\t#Remove the file extension so\nfileList = sorted(fileList, key=int)\t\t\t\t\t#it can be sorted by int\n\nfor file in fileList:\t\t\t\t\t\t\t\t\t#For every file in the sorted file list\n\tfilename = os.fsdecode(file)\n\tfilename+=\".png\"\t\t\t\t\t\t\t\t\t#Add png file extension. Converts any file format to png\n\t\n\n\timage = Image.open(\"../../../Images/Original/\"+filename)\t\t\t\t\t\t\t\t\t#Open file to embed\t\t\t\t\t\t\t\t\t\t\t\n\toriginal = Image.open(\"../../../Images/Basic Halftone/Ordered/2x2/\"+filename)\t\t#For comparing against original\n\tprint(filename)\n\n\twidth, height = image.size\t\t\t\t\t\t\t#PIL's Image.size is (width, height)\n\tstart_time = time.time()\n\timageConverted = greyScale(image)\n\tembedTime = time.time() - start_time\n\t\t\n\timageConverted.save(\"../../../Images/Embedded/3. Greyscale Text/Ordered/2x2/\"+filename)\n\n\n\timageDecode = Image.open(\"../../../Images/Embedded/3. Greyscale Text/Ordered/2x2/\"+filename)\n\tstart_time = time.time()\n\textraction(imageDecode)\n\tdecryptTime = time.time() - start_time\n\n\tpsnr, ssim = returnValues(original,imageConverted)\t\t#Send original and processed image to get PSNR and SSIM values\n\tpsnrValues.append(psnr)\n\tssimValues.append(ssim)\n\tembedTimes.append(embedTime)\n\tdecryptTimes.append(decryptTime)\n\n\nexcel_document = openpyxl.load_workbook(\"../../../../Data/Data.xlsx\")\t#Open excel\nsheet = (excel_document['Greyscale Embed'])\t\t\t\t\t\t\t\t#Selects sheet\n\n#Input values to the sheet\nmultiple_cells = sheet['T4' : 'T51']\nfor value, row in enumerate(multiple_cells):\n    for cell in row:\n    \tcell.value = psnrValues[value]\n\nmultiple_cells = sheet['U4' : 'U51']\nfor value, row in enumerate(multiple_cells):\n    for cell in row:\n    \tcell.value = ssimValues[value]\n\nmultiple_cells = sheet['V4' : 'V51']\nfor value, row in enumerate(multiple_cells):\n    for cell in row:\n    \tcell.value = embedTimes[value]\n\nmultiple_cells = sheet['W4' : 'W51']\nfor value, row in enumerate(multiple_cells):\n    for cell in row:\n    \tcell.value = decryptTimes[value]\n\nmultiple_cells = sheet['X4' : 'X51']\nfor value, row in enumerate(multiple_cells):\n    for cell in row:\n    \tcell.value = messagePercents[value]\n#End of inputting values\n\n\nexcel_document.save(\"../../../../Data/Data.xlsx\")","sub_path":"Algorithms/Embedding/3. 
Greyscale Text/Ordered Greyscale/Greyscale 2x2.py","file_name":"Greyscale 2x2.py","file_ext":"py","file_size_in_byte":8935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"182355852","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFile name: quad_mdl.py\nAuthor: Daniel Hulse\nCreated: June 2019\nDescription: A fault model of a multi-rotor drone.\n\"\"\"\n\nimport networkx as nx\nimport numpy as np\n\nimport auxfunctions as aux\nimport faultprop as fp\n\n#Declare time range to run model over\ntimes=[0,3, 55]\n\n##Define flows for model\nclass EE:\n def __init__(self,name):\n self.rate=1.0\n self.effort=1.0\n def status(self):\n status={'rate':self.rate, 'effort':self.effort}\n return status.copy()\n \nclass Force:\n def __init__(self,name):\n self.flowtype='Force'\n self.name=name\n self.value=1.0\n def status(self):\n status={'value':self.value}\n return status.copy()\n\nclass ME:\n def __init__(self,name):\n self.flowtype='ME'\n self.name=name\n self.rate=1.0\n self.effort=1.0\n self.nominal={'rate':1.0, 'effort':1.0}\n def status(self):\n status={'rate':self.rate, 'effort':self.effort}\n return status.copy() \n\nclass Sig:\n def __init__(self,name):\n self.flowtype='Sig'\n self.name=name\n self.forward=0.0\n self.upward=0.0\n def status(self):\n status={'forward':self.forward, 'upward':self.upward}\n return status.copy() \n\nclass DOF:\n def __init__(self,name):\n self.flowtype='DOF'\n self.name=name\n self.stab=1.0\n self.vertvel=0.0\n self.planvel=0.0\n self.uppwr=0.0\n self.planpwr=0.0\n def status(self):\n status={'stab':self.stab, 'vertvel':self.vertvel, 'planvel':self.planvel, 'planpwr':self.planpwr, 'uppwr':self.uppwr}\n return status.copy() \nclass Land:\n def __init__(self,name):\n self.flowtype='Land'\n self.name=name\n self.stat='landed'\n self.area='start'\n self.nominal={'status':'landed', 'area':'start'}\n def status(self):\n status={'status':self.stat, 'area':self.area}\n return status.copy() \n\nclass Env:\n def __init__(self,name):\n self.flowtype='Env'\n self.name=name\n self.elev=0.0\n self.x=0.0\n self.y=0.0\n self.start=[0.0,0.0]\n self.start_xw=5\n self.start_yw=5\n self.start_area=aux.square(self.start,self.start_xw, self.start_yw)\n self.flyelev=30\n self.poi_center=[0,150]\n self.poi_xw=50\n self.poi_yw=50\n self.poi_area=aux.square(self.poi_center, self.poi_xw, self.poi_yw)\n self.dang_center=[0,150]\n self.dang_xw=150\n self.dang_yw=150\n self.dang_area=aux.square(self.dang_center, self.dang_xw, self.dang_yw)\n self.safe1_center=[-25,100]\n self.safe1_xw=10\n self.safe1_yw=10\n self.safe1_area=aux.square(self.safe1_center, self.safe1_xw, self.safe1_yw)\n self.safe2_center=[25,50]\n self.safe2_xw=10\n self.safe2_yw=10\n self.safe2_area=aux.square(self.safe2_center, self.safe2_xw, self.safe2_yw)\n self.nominal={'elev':1.0, 'x':1.0, 'y':1.0}\n def status(self):\n status={'elev':self.elev, 'x':self.x, 'y':self.y}\n return status.copy()\n\nclass Direc:\n def __init__(self,name):\n self.flowtype='Dir'\n self.name=name\n self.traj=[0,0,0]\n self.power=1\n self.nominal={'x': self.traj[0], 'y': self.traj[1], 'z': self.traj[2], 'power': 1}\n def status(self):\n status={'x': self.traj[0], 'y': self.traj[1], 'z': self.traj[2], 'power': self.power}\n return status.copy()\n\nclass storeEE:\n def __init__(self, name,EEout, FS):\n self.type='function'\n self.EEout=EEout\n self.FS=FS\n self.effstate=1.0\n self.ratestate=1.0\n self.soc=2000\n self.faultmodes={'short':{'rate':'moderate', 'rcost':'major'}, \\\n 
'degr':{'rate':'moderate', 'rcost':'minor'}, \\\n 'break':{'rate':'common', 'rcost':'moderate'}, \\\n 'nocharge':{'rate':'moderate','rcost':'minor'}, \\\n 'lowcharge':{'rate':'moderate','rcost':'minor'}}\n self.faults=set(['nom'])\n def condfaults(self):\n if self.FS.value<1.0:\n self.faults.update(['break'])\n if self.EEout.rate>2:\n self.faults.add('break')\n if self.soc<20:\n self.faults.add('lowcharge')\n if self.soc<1:\n self.faults.remove('lowcharge')\n self.faults.add('nocharge')\n return 0\n def behavior(self, time):\n if self.faults.intersection(set(['short'])):\n self.effstate=0.0\n elif self.faults.intersection(set(['break'])):\n self.effstate=0.0\n elif self.faults.intersection(set(['degr'])):\n self.effstate=0.5\n \n if self.faults.intersection(set(['nocharge'])):\n self.soc=0.0\n self.effstate=0.0\n \n self.EEout.effort=self.effstate\n self.soc=self.soc-self.EEout.rate*time\n def updatefxn(self,faults=['nom'],opermode=[], time=0):\n self.faults.update(faults)\n self.condfaults()\n self.behavior(time)\n return \n\nclass distEE:\n def __init__(self,EEin,EEmot,EEctl,FS):\n self.useprop=1.0\n self.type='function'\n self.EEin=EEin\n self.EEmot=EEmot\n self.EEctl=EEctl\n self.FS=FS\n self.effstate=1.0\n self.ratestate=1.0\n self.faultmodes={'short':{'rate':'moderate', 'rcost':'major'}, \\\n 'degr':{'rate':'moderate', 'rcost':'minor'}, \\\n 'break':{'rate':'common', 'rcost':'moderate'}}\n self.faults=set(['nom'])\n def condfaults(self):\n if self.FS.value<0.5:\n self.faults.update(['break'])\n if max(self.EEmot.rate,self.EEctl.rate)>2:\n self.faults.add('break') \n def behavior(self, time):\n if self.faults.intersection(set(['short'])):\n self.ratestate=np.inf\n self.effstate=0.0\n elif self.faults.intersection(set(['break'])):\n self.effstate=0.0\n elif self.faults.intersection(set(['degr'])):\n self.effstate=0.5\n self.EEin.rate=self.ratestate*self.EEin.effort\n self.EEmot.effort=self.effstate*self.EEin.effort\n self.EEctl.effort=self.effstate*self.EEin.effort\n \n self.EEin.rate=aux.m2to1([ self.EEin.effort, self.ratestate, max(self.EEmot.rate,self.EEctl.rate)])\n \n def updatefxn(self,faults=['nom'],opermode=[], time=0):\n self.faults.update(faults)\n self.condfaults()\n self.behavior(time)\n return \n\nclass engageLand:\n def __init__(self,name, Forcein, Forceout):\n self.useprop=1.0\n self.name=name\n self.type='function'\n self.forcein=Forcein\n self.forceout=Forceout\n self.fstate=1.0\n self.faultmodes={'break':{'rate':'moderate', 'rcost':'major'}, \\\n 'deform':{'rate':'moderate', 'rcost':'minor'}, }\n self.faults=set(['nom'])\n def condfaults(self):\n if self.forceout.value<-1.4:\n self.faults.update(['break'])\n elif self.forceout.value<-1.2:\n self.faults.update(['deform'])\n def behavior(self, time):\n if self.faults.intersection(set(['break'])):\n self.fstate=4.0\n elif self.faults.intersection(set(['deform'])):\n self.fstate=2.0\n else:\n self.fstate=1.0\n \n self.forceout.value=self.fstate*min([-2.0,self.forcein.value])*0.2\n \n def updatefxn(self,faults=['nom'],opermode=[], time=0):\n self.faults.update(faults)\n self.condfaults()\n self.behavior(time)\n return \n\nclass holdPayload:\n def __init__(self,name, Force_gr,Force_air, Force_struct):\n self.name=name\n self.useprop=1.0\n self.type='function'\n self.FG=Force_gr\n self.FA=Force_air\n self.FS=Force_struct\n self.fstate=1.0\n self.faultmodes={'break':{'rate':'moderate', 'rcost':'major'}, \\\n 'deform':{'rate':'moderate', 'rcost':'minor'}, }\n self.faults=set(['nom'])\n def condfaults(self):\n if 
abs(self.FG.value)>1.6:\n self.faults.update(['break'])\n elif abs(self.FG.value)>1.4:\n self.faults.update(['deform'])\n def behavior(self, time):\n if self.faults.intersection(set(['break'])):\n self.fstate=0.0\n elif self.faults.intersection(set(['deform'])):\n self.fstate=0.5\n else:\n self.fstate=1.0\n self.FA.value=self.fstate\n self.FS.value=self.fstate\n def updatefxn(self,faults=['nom'],opermode=[], time=0):\n self.faults.update(faults)\n self.condfaults()\n self.behavior(time)\n return \n \nclass affectDOF:\n def __init__(self, name, EEin, Ctlin, DOFout,Force, archtype):\n self.type='function'\n self.EEin=EEin\n self.Ctlin=Ctlin\n self.DOF=DOFout\n self.Force=Force\n self.archtype=archtype\n self.faultmodes={}\n if archtype=='quad':\n LineRF=line('RF')\n LineLF=line('LF')\n LineLR=line('LR')\n LineRR=line('RR')\n self.lines=[LineRF,LineLF,LineLR, LineRR]\n self.upward=[1,1,1,1]\n self.forward=[0.5,0.5,-0.5,-0.5]\n for lin in self.lines:\n self.faultmodes.update(lin.faultmodes) \n self.faults={'nom'}\n def behavior(self, time):\n Air={}\n EEin={}\n #injects faults into lines\n for lin in self.lines:\n for fault in self.faults:\n if fault in lin.faultmodes:\n lin.faults.update([fault])\n \n ind=self.lines.index(lin)\n cmds={'up':self.upward[ind], 'for':self.forward[ind]}\n lin.behavior(self.EEin.effort, self.Ctlin, cmds, self.Force.value)\n self.faults.update(lin.faults) \n Air[lin.name]=lin.Airout\n EEin[lin.name]=lin.EE_in\n \n if any(value==np.inf for value in EEin.values()):\n self.EEin.rate=np.inf\n elif any(value!=0.0 for value in EEin.values()):\n self.EEin.rate=np.max(list(EEin.values()))\n else:\n self.EEin.rate=0.0\n \n if all(value==1.0 for value in Air.values()):\n self.DOF.stab=1.0\n elif all(value==0.5 for value in Air.values()):\n self.DOF.stab=1.0\n elif all(value==2.0 for value in Air.values()):\n self.DOF.stab=1.0\n elif all(value==0.0 for value in Air.values()):\n self.DOF.stab=1.0\n elif any(value==0.0 for value in Air.values()):\n self.DOF.stab=0.0\n elif any(value>2.5 for value in Air.values()):\n self.DOF.stab=0.0\n Airs=list(Air.values())\n #if not(self.Force.value==1.0):\n # self.DOF.stab=self.Force.value\n \n self.DOF.uppwr=np.mean(Airs)\n \n list1=Airs[:len(Airs)//2]\n list2=Airs[len(Airs)//2:]\n vect=np.array([list1,list2])\n self.DOF.planpwr=np.sum(vect[0]-vect[1])/3\n \n #need to expand on this, add directional velocity, etc\n return\n def updatefxn(self,faults=['nom'],opermode=[], time=0):\n self.faults.update(faults)\n self.behavior(time)\n return \n\nclass line:\n def __init__(self, name):\n self.type='component'\n self.name=name \n self.elecstate=1.0\n self.elecstate_in=1.0\n self.ctlstate=1.0\n self.mechstate=1.0\n self.propstate=1.0\n self.Airout=1.0\n self.faultmodes={name+'short':{'rate':'moderate', 'rcost':'major'}, \\\n name+'openc':{'rate':'moderate', 'rcost':'major'}, \\\n name+'ctlup':{'rate':'moderate', 'rcost':'minor'}, \\\n name+'ctldn':{'rate':'moderate', 'rcost':'minor'}, \\\n name+'ctlbreak':{'rate':'common', 'rcost':'moderate'}, \\\n name+'mechbreak':{'rate':'common', 'rcost':'moderate'}, \\\n name+'mechfriction':{'rate':'common', 'rcost':'moderate'}, \\\n name+'propwarp':{'rate':'veryrare', 'rcost':'replacement'}, \\\n name+'propstuck':{'rate':'veryrare', 'rcost':'replacement'}, \\\n name+'propbreak':{'rate':'veryrare', 'rcost':'replacement'}\n }\n self.faults=set(['nom'])\n def behavior(self, EEin, Ctlin, cmds, Force):\n \n if Force<=0.0:\n self.faults.update([self.name+'mechbreak', self.name+'propbreak'])\n elif Force<=0.5:\n 
self.faults.update([self.name+'mechfriction'])\n        \n        if self.faults.intersection(set([self.name+'short'])):\n            self.elecstate=0.0\n            self.elecstate_in=np.inf\n        elif self.faults.intersection(set([self.name+'openc'])):\n            self.elecstate=0.0\n            self.elecstate_in=0.0\n        if self.faults.intersection(set([self.name+'ctlbreak'])):\n            self.ctlstate=0.0\n        elif self.faults.intersection(set([self.name+'ctldn'])):\n            self.ctlstate=0.5\n        elif self.faults.intersection(set([self.name+'ctlup'])):\n            self.ctlstate=2.0\n        if self.faults.intersection(set([self.name+'mechbreak'])):\n            self.mechstate=0.0\n        elif self.faults.intersection(set([self.name+'mechfriction'])):\n            self.mechstate=0.5\n            self.elecstate_in=2.0\n        if self.faults.intersection(set([self.name+'propstuck'])):\n            self.propstate=0.0\n            self.mechstate=0.0\n            self.elecstate_in=4.0\n        elif self.faults.intersection(set([self.name+'propbreak'])):\n            self.propstate=0.0\n        elif self.faults.intersection(set([self.name+'propwarp'])):\n            self.propstate=0.5\n        \n        self.Airout=aux.m2to1([EEin,self.elecstate,Ctlin.upward*cmds['up']+Ctlin.forward*cmds['for'],self.ctlstate,self.mechstate,self.propstate])\n        self.EE_in=aux.m2to1([EEin,self.elecstate_in]) \n    \nclass ctlDOF:\n    def __init__(self, name,EEin, Dir, Ctl, DOFs, FS):\n        self.type='function'\n        self.EEin=EEin\n        self.Ctl=Ctl\n        self.Dir=Dir\n        self.DOFs=DOFs\n        self.FS=FS\n        self.vel=0.0\n        self.t1=0\n        self.ctlstate=1.0\n        self.faultmodes={'noctl':{'rate':'rare', 'rcost':'high'}, \\\n                         'degctl':{'rate':'rare', 'rcost':'high'}}\n        self.faults=set(['nom'])\n    def condfaults(self):\n        if self.FS.value<0.5:\n            self.faults.update(['noctl'])\n    def behavior(self, time):\n        if self.faults.intersection(set(['noctl'])):\n            self.ctlstate=0.0\n        elif self.faults.intersection(set(['degctl'])):\n            self.ctlstate=0.5\n        \n        if time>self.t1:\n            self.vel=self.DOFs.vertvel\n            self.t1=time\n        \n        upthrottle=1.0\n        \n        if self.Dir.traj[2]>=1:\n            upthrottle=1.5\n        elif self.Dir.traj[2]>0 and self.Dir.traj[2]<1:\n            upthrottle= 0.5 * self.Dir.traj[2] + 1.0\n        elif self.Dir.traj[2]==0:\n            damp=np.sign(self.vel)\n            damp2=damp*min(1.0, np.power(self.vel, 2))\n            upthrottle=1.0-0.2*damp2\n        elif self.Dir.traj[2]<=0.0 and self.Dir.traj[2]>-1.0:\n            maxdesc=-0.5\n            damp=min(1.0, np.power(self.vel-maxdesc, 2))\n            upthrottle=0.75+0.4*damp\n        elif self.Dir.traj[2]<=-1.0:\n            maxdesc=-5.0\n            damp=min(0.75, np.power(self.vel-maxdesc, 2))\n            upthrottle=0.75+0.15*damp\n        \n        if self.Dir.traj[0]==0 and self.Dir.traj[1]==0:\n            forwardthrottle=0.0\n        else:\n            forwardthrottle=1.0\n        \n        pwr=self.Dir.power\n        self.Ctl.forward=self.EEin.effort*self.ctlstate*forwardthrottle*pwr\n        self.Ctl.upward=self.EEin.effort*self.ctlstate*upthrottle*pwr\n\n    def updatefxn(self,faults=['nom'],opermode=[], time=0):\n        self.condfaults()\n        self.faults.update(faults)\n        self.behavior(time)\n\nclass planpath:\n    def __init__(self, name,EEin, Env, Dir, FS):\n        self.type='function'\n        self.EEin=EEin\n        self.Env=Env\n        self.Dir=Dir\n        self.FS=FS\n        self.mode='taxi'\n        self.faultmodes={'noloc':{'rate':'rare', 'rcost':'high'}, \\\n                         'degloc':{'rate':'rare', 'rcost':'high'}}\n        self.faults=set(['nom'])\n    def condfaults(self):\n        if self.FS.value<0.5:\n            self.faults.update(['noloc'])\n    def behavior(self, t):\n        \n        if t<1:\n            self.mode='taxi'\n        elif self.mode=='taxi' and t<2:\n            self.mode='climb'\n        elif self.mode=='climb' and self.Env.elev>=50:\n            self.mode='hover'\n        elif self.mode=='hover' and self.Env.y==0 and t<20:\n            self.mode='forward'\n        elif self.mode=='forward' and self.Env.y>50:\n            self.mode='hover'\n        elif self.mode=='hover' and self.Env.y>50:\n            self.mode='backward'\n        elif self.mode=='backward' and self.Env.y<0:\n            self.mode='hover'\n        elif self.mode=='hover' and self.Env.y<0:\n            self.mode='descend'\n        elif self.mode=='descend' and self.Env.elev<10:\n            self.mode='land'\n        elif self.mode=='land' and self.Env.elev<1:\n            self.mode='taxi'\n        \n        if self.mode=='taxi':\n            self.Dir.power=0.0\n        elif self.mode=='takeoff':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,0,1]\n        elif self.mode=='climb':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,0,1]\n        elif self.mode=='hover':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,0,0]\n        elif self.mode=='forward':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,1,0]\n        elif self.mode=='backward':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,-1,0]\n        elif self.mode=='descend':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,0,-1]\n        elif self.mode=='land':\n            self.Dir.power=1.0\n            self.Dir.traj=[0,0,-0.1]\n        \n        if self.faults.intersection(set(['noloc'])):\n            self.Dir.traj=[0,0,0]\n        elif self.faults.intersection(set(['degloc'])):\n            self.Dir.traj=[0,0,-1]\n        if self.EEin.effort<0.5:\n            self.Dir.power=0.0\n            self.Dir.traj=[0,0,0]\n        \n    def updatefxn(self,faults=['nom'],opermode=[], time=0):\n        self.condfaults()\n        self.faults.update(faults)\n        self.behavior(time)\n\nclass trajectory:\n    def __init__(self, name, Env, DOF, Land, Dir, Force_LG):\n        self.type='environment'\n        self.Env=Env\n        self.DOF=DOF\n        self.Land=Land\n        self.Dir=Dir\n        self.Force_LG=Force_LG\n        self.lasttime=0\n        self.t1=0.0\n        self.faultmodes={'nom':{'rate':'common', 'rcost':'NA'}, }\n        self.faults=set(['nom'])\n    def condfaults(self):\n        return 0\n    def behavior(self, time):\n        maxvel=20.0\n        maxpvel=5.0\n        \n        if self.Env.elev<=0.0:\n            self.Force_LG.value=min(-2.0, (self.DOF.vertvel-self.DOF.planvel)/3)\n            flight=0.0\n        else:\n            self.Force_LG.value=0.0\n            flight=1.0\n        \n        if time>self.t1:\n            sign=np.sign(self.DOF.vertvel)\n            damp=-0.02*sign*np.power(self.DOF.vertvel, 2)-0.1*self.DOF.vertvel\n            acc=10*(self.DOF.uppwr-flight)\n            self.DOF.vertvel=self.DOF.vertvel+acc+damp\n            if self.Env.elev<=0.0:\n                self.DOF.vertvel=max(0,self.DOF.vertvel)\n            self.t1=time\n        \n        self.DOF.planvel=flight*maxpvel*self.DOF.planpwr\n        \n        self.Env.elev=max(0.0, self.Env.elev+self.DOF.vertvel)\n        self.Env.x=self.Env.x+self.DOF.planvel*self.Dir.traj[0]\n        self.Env.y=self.Env.y+self.DOF.planvel*self.Dir.traj[1]\n        \n    def updatefxn(self,faults=['nom'],opermode=[], time=0):\n        if time>self.lasttime:\n            self.behavior(time)\n            self.lasttime=time\n        self.condfaults()\n\n##future: try to automate this part so you don't have to do it in a weird order\ndef initialize():\n    \n    #initialize graph\n    g=nx.DiGraph()\n    \n    Force_ST=Force('Force_ST')\n    EE_1=EE('EE_1')\n    StoreEE=storeEE('StoreEE',EE_1, Force_ST)\n    g.add_node('StoreEE', obj=StoreEE)\n    \n    EEmot=EE('EEmot')\n    EEctl=EE('EEctl')\n    \n    DistEE=distEE(EE_1,EEmot,EEctl, Force_ST)\n    g.add_node('DistEE', obj=DistEE)\n    g.add_edge('StoreEE','DistEE', EE_1=EE_1)\n    \n    Ctl1=Sig('Ctl1')\n    DOFs=DOF('DOFs')\n    \n    Force_Air=Force('Force_Air')\n    AffectDOF=affectDOF('AffectDOF',EEmot,Ctl1,DOFs,Force_Air, 'quad')\n    g.add_node('AffectDOF', obj=AffectDOF)\n    Dir1=Direc('Dir1')\n    CtlDOF=ctlDOF('CtlDOF',EEctl, Dir1, Ctl1, DOFs, Force_ST)\n    g.add_node('CtlDOF', obj=CtlDOF)\n    g.add_edge('DistEE','AffectDOF', EEmot=EEmot)\n    g.add_edge('DistEE','CtlDOF', EEctl=EEctl)\n    g.add_edge('CtlDOF','AffectDOF', Ctl1=Ctl1,DOFs=DOFs)\n\n    Env1=Env('Env1')\n    Planpath=planpath('Planpath',EEctl, Env1,Dir1, Force_ST)\n    g.add_node('Planpath', obj=Planpath)\n    g.add_edge('DistEE','Planpath', EEctl=EEctl)\n    g.add_edge('Planpath','CtlDOF', Dir1=Dir1)\n    \n    Land1=Land('Land')\n    Force_GR=Force('Force_GR')\n    Force_LG=Force('Force_LG')\n    Trajectory=trajectory('Trajectory',Env1,DOFs,Land1,Dir1, Force_GR)\n    g.add_node('Trajectory', obj=Trajectory)\n    g.add_edge('Trajectory','AffectDOF',DOFs=DOFs)\n    g.add_edge('Planpath', 'Trajectory', Dir1=Dir1, Env1=Env1)\n    \n    \n    EngageLand=engageLand('EngageLand',Force_GR, Force_LG)\n    g.add_node('EngageLand', obj=EngageLand)\n    g.add_edge('Trajectory', 'EngageLand', Force_GR=Force_GR)\n    \n    \n    HoldPayload=holdPayload('HoldPayload',Force_LG, Force_Air, Force_ST)\n    g.add_node('HoldPayload', obj=HoldPayload)\n    g.add_edge('EngageLand','HoldPayload', Force_LG=Force_LG)\n    g.add_edge('HoldPayload', 'AffectDOF', Force_Air=Force_Air)\n    g.add_edge('HoldPayload', 'StoreEE', Force_ST=Force_ST)\n    g.add_edge('HoldPayload', 'DistEE', Force_ST=Force_ST)\n    g.add_edge('HoldPayload', 'Planpath', Force_ST=Force_ST)\n    g.add_edge('HoldPayload', 'CtlDOF', Force_ST=Force_ST)\n    \n    return g\n\n#def environment(DOF,t):\n#    if DOF.stab\n    \ndef findclassification(g, endfaults, endflows, scen):\n    \n    Env=fp.getflow('Env1', g)\n    \n    #may need to redo this\n    if aux.inrange(Env.start_area, Env.x, Env.y):\n        landloc='nominal'\n        area=1\n    elif aux.inrange(Env.safe1_area, Env.x, Env.y) or aux.inrange(Env.safe2_area, Env.x, Env.y):\n        landloc='emsafe'\n        area=1000\n    elif aux.inrange(Env.dang_area, Env.x, Env.y):\n        landloc='emdang'\n        area=100000\n    else:\n        landloc='emunsanc'\n        area=10000\n    \n    repaircosts=fp.listfaultsprops(endfaults, g, 'rcost')\n    maxcost=aux.textmax(repaircosts.values())\n    \n    if maxcost=='major':\n        repcost=10000\n    elif maxcost=='moderate':\n        repcost=3000\n    elif maxcost=='minor':\n        repcost=500\n    elif maxcost=='replacement':\n        repcost=250\n    else:\n        repcost=0\n\n    totcost=repcost+area\n    \n    rate=1e-6\n    \n    expcost=totcost*rate*1e5\n    \n    return {'rate':rate, 'cost': totcost, 'expected cost': expcost}","sub_path":"quad_mdl.py","file_name":"quad_mdl.py","file_ext":"py","file_size_in_byte":23412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"151754866","text":"#!/usr/bin/python3\n\n# student test file for HW4\n\nimport unittest\nfrom hw4 import *\n\n#====================================\n\nTIMEOUT_SHORT = 1\nTIMEOUT_LONG = 10\n\nclass tester_most_common_char(unittest.TestCase):\n\t# O(k) time and space\n\tdef test__given(self):\n\t\tself.assertEqual(most_common_char('AVX is a feature in modern CPUs that allows one instruction to affect multiple units. 
vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvectors'), 'v')\n\t\tself.assertIn(most_common_char('aabbaabb'), ['a','b'])\n\n\nclass tester_alphabet_finder(unittest.TestCase):\n\t# O(k) time and space\n\tdef test__given(self):\n\t\ttest = 'qwertyuiopASDFGHJKLzxcvbnm insensitive paella'\n\t\tresult = test[:26]\n\t\tself.assertEqual(alphabet_finder(test), result)\n\n\t\ttest = 'aardvarks are cool!'\n\t\tresult = None\n\t\tself.assertEqual(alphabet_finder(test), result)\n\n\nclass tester_longest_unique_subarray(unittest.TestCase):\n\t# O(k) time and space\n\tdef test__given(self):\n\t\tself.assertEqual(tuple(longest_unique_subarray([1, 2, 3, 1, 4, 5, 6])), (1, 6))\n\t\tself.assertEqual(tuple(longest_unique_subarray(list(range(10)))), (0, 10))\n\n\nclass tester_string_my_one_true_love(unittest.TestCase):\n\t# O(k) time and space\n\tdef test__given(self):\n\t\tself.assertTrue(string_my_one_true_love('abcbabcdcdda'))\n\t\tself.assertTrue(string_my_one_true_love('aaabbbcccddde'))\n\t\tself.assertFalse(string_my_one_true_love('aaabbbcccdddeeffgg'))\n\n\nclass tester_alive_people(unittest.TestCase):\n\t# O(k log k) time, O(k) space\n\t# under certain circumstances O(k) solution MIGHT exist?\n\t# @timeout_decorator.timeout(TIMEOUT_SHORT)\n\tdef test__given(self):\n\t\tself.assertEqual(alive_people([[1920, 80], [1940, 22], [1961, 10]]), 1961)\n\n\nclass tester_three_sum(unittest.TestCase):\n\t# O(k^2) time and space\n\tdef _transform(self, result):\n\t\tresult = list(result)\n\t\t\n\t\tfor i in range(len(result)):\n\t\t\tresult[i] = sorted(list(result[i]))\n\t\t\n\t\tresult.sort()\n\t\t\n\t\treturn result\n\t\n\tdef test__given(self):\n\t\tresult = three_sum([-1, 0, 1, 2, -1, -4], 0)\n\t\tex = [[-1, 0, 1], [-1, -1, 2]]\n\t\t\n\t\tresult = self._transform(result)\n\t\tex = self._transform(ex)\n\t\t\n\t\tself.assertEqual(result, ex)\n\n\nclass tester_happy_numbers(unittest.TestCase):\n\t# O(k log k) time, O(log k) space\n\tdef test__given(self):\n\t\tself.assertEqual(happy_numbers(8), 2468 // 1234)\n\t\tself.assertEqual(happy_numbers(15), 4)\n\n\nclass tester_zero_sum_subarray(unittest.TestCase):\n\t# O(k) time and space\n\tdef test__given(self):\n\t\tself.assertEqual(tuple(zero_sum_subarray([0, 1, 2, 3, 4, 5])), (0, 1))\n\t\tself.assertEqual(tuple(zero_sum_subarray([10, 20, -20, 3, 21, 2, -6])), (1, 2))\n\n\n#===================================\n# BOILERPLATE CODE\n\n# suppress stdout, but keep stderr since that's what unittest uses\n# https://stackoverflow.com/questions/30715337\n\nfrom io import StringIO\nimport sys\n\nclass ReplaceStd(object):\n\t\"\"\" Let's make it pythonic. 
\"\"\"\n\n\tdef __init__(self):\n\t\tself.stdout = None\n\n\tdef __enter__(self):\n\t\tself.stdout = sys.stdout\n\t\tsys.stdout = StringIO()\n\n\tdef __exit__(self, type, value, traceback):\n\t\tsys.stdout.close()\n\t\tsys.stdout = self.stdout\n\nif __name__ == \"__main__\":\n\twith ReplaceStd():\n\t\tunittest.main(module=__name__, buffer=True, exit=False)\n","sub_path":"hw4-test.py","file_name":"hw4-test.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"414558906","text":"bigram={}\nfile1 = open(\"G:/file1.txt\", \"r\", encoding='utf-8')\nfile2 = open(\"G:/file2.txt\", \"r\", encoding='utf-8')\nline1 = file1.readlines()\nline2 = file2.readlines()\n#print(line1)\ndef gramma(line,i):\n    for l in range(2,int(len(str(line)))-i-2):\n        t='' \n        for q in range(0,i):\n            t=t+str(str(line)[l+q])\n\n        if len(t)>1:\n\n            if t in bigram:\n                bigram[t]=bigram[t]+1;\n            else:\n                bigram[t]=1;\ngramma(line1,3)\n\nnew_d={}\nfor k in sorted(bigram, key=len, reverse=False):\n    new_d[k] = bigram[k]\nprint(new_d)\ngramma(line2,5)\n","sub_path":"cp_4/shyshkin_fb-73_vitrovich_fb-73_cp4/4_1.py","file_name":"4_1.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"307789996","text":"from tkinter import *\nimport math\n# ---------------------------- CONSTANTS ------------------------------- #\nPINK = \"#e2979c\"\nRED = \"#e7305b\"\nGREEN = \"#9bdeac\"\nYELLOW = \"#fff47d\"\nFONT_NAME = \"Courier\"\nWORK = 25\nSHORT_BREAK_MIN = 5\nLONG_BREAK_MIN = 20\nreps=0\ntimer= None\n\n# ---------------------------- TIMER RESET ------------------------------- # \ndef reset_timer():\n    window.after_cancel(timer)\n    canvas.itemconfig(timer_text, text=\"00:00\")\n    check_marks.config(text=\"\")\n    timer_label.config(text=\"Timer\",fg=PINK)\n    global reps\n    reps=0\n\n# ---------------------------- TIMER MECHANISM ------------------------------- # \ndef start_timer():\n    global reps\n    reps+=1\n    work_sec=WORK*60\n    short_break_sec=SHORT_BREAK_MIN * 60\n    long_break_sec=LONG_BREAK_MIN * 60\n\n    if reps % 8 == 0:\n        countdown(long_break_sec)\n        timer_label.config(text=\"Break\",fg=RED)\n    elif reps % 2 == 0:\n        countdown(short_break_sec)\n        timer_label.config(text=\"Break\", fg=PINK)\n    else:\n        countdown(work_sec)\n        timer_label.config(text=\"Work\", fg=GREEN)\n\n\n\n# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #\ndef countdown(count):\n    count_min=math.floor(count / 60)\n    count_sec=count % 60\n    if count_sec <10:\n        count_sec =f\"0{count_sec}\"\n    canvas.itemconfig(timer_text,text=f\"{count_min}:{count_sec}\")\n    if count > 0:\n        global timer\n        timer=window.after(1000, countdown, count-1)\n    else:\n        start_timer()\n        marks=\"\"\n        for i in range(math.floor(reps/2)):\n            marks+=\"✔\"\n        check_marks.config(text=marks)\n\n# ---------------------------- UI SETUP ------------------------------- 
#\nwindow=Tk()\nwindow.title(\"Pomodoro\")\nwindow.config(padx=100,pady=50,bg=YELLOW)\ncanvas=Canvas(width=200,height=224,bg=YELLOW,highlightthickness=0)\ntomato=PhotoImage(file=\"tomato.png\")\ncanvas.create_image(100,112,image=tomato)\ntimer_text=canvas.create_text(100,130,text=\"00:00\",fill=\"white\",font=(FONT_NAME,35,\"bold\"))\ncanvas.grid(column=1,row=1)\ntimer_label=Label(text=\"Timer\",bg=YELLOW,fg=GREEN,font=(FONT_NAME,50,\"normal\"))\ntimer_label.grid(column=1,row=0)\nstart_button=Button(text=\"Start\",highlightthickness=0,command=start_timer)\nstart_button.grid(column=0,row=2)\n\nreset_button=Button(text=\"Reset\",highlightthickness=0,command=reset_timer)\nreset_button.grid(column=2,row=2)\n\ncheck_marks=Label(text=\"\",font=(24),fg=GREEN,bg=YELLOW)\ncheck_marks.grid(column=1,row=3)\n\n\n\n\n\nwindow.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"547787583","text":"import numpy as np\n\ndef insertion_sort(a):\n if len(a) == 1:\n return a\n\n result = np.copy(a)\n for i in range(1,len(a)):\n x = result[i]\n j = i - 1\n while j >= 0 and result[j] > x:\n result[j+1] = result[j]\n j-=1\n\n result[j+1] = x\n\n return result\n\n\na = np.random.random(10000)\nassert np.all(insertion_sort(a) == np.sort(a))","sub_path":"insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"608419927","text":"import unittest\nfrom multipageform import MultipageFormFactory, FileField\nfrom django import forms\nfrom django.core.files.uploadedfile import SimpleUploadedFile\n\n\nclass SimpleForm1(forms.Form):\n req_1 = forms.CharField(required=True)\n req_2 = forms.CharField(required=True)\n unreq_file = FileField(required=False)\n\n\nclass SimpleForm2(forms.Form):\n req_3 = forms.CharField(required=True)\n\n\nclass TestFileField_SimpleWorkflow(unittest.TestCase):\n def setUp(self):\n self.formCls = MultipageFormFactory(SimpleForm1)\n self.testFile = SimpleUploadedFile(\"/1/2/3/filename.txt\", \"Preved\\nMedved\")\n\n def test_Simple(self):\n form = self.formCls({\"req_1\": \"1\", \"req_2\": \"2\"})\n self.assertEqual(form.is_valid(), True)\n self.assertEqual(form.cleaned_data['unreq_file'], None)\n\n form = self.formCls({\"req_1\": \"1\", \"req_2\": \"2\"}, {'unreq_file': self.testFile})\n self.assertEqual(form.is_valid(), True)\n self.assertEqual(SimpleUploadedFile, type(form.cleaned_data['unreq_file']))\n\nimport StringIO\n\n\nclass TestFileField_2PagesWorkflow(unittest.TestCase):\n def setUp(self):\n self.formCls = MultipageFormFactory(SimpleForm1, SimpleForm2)\n self.testFile = SimpleUploadedFile(\"/1/2/3/filename.txt\", \"Preved\\nMedved\", \"image/jpeg\")\n # self.testFile.file = StringIO.StringIO(self.testFile.file)\n # raise Exception(self.testFile.__dict__)\n\n def test_Simple(self):\n form = self.formCls({\"req_1\": \"1\", \"req_2\": \"2\"}, {'unreq_file': self.testFile})\n self.assertEqual(form.is_valid(), False)\n\n step, form = form.current_step()\n self.assertEqual(step, 1)\n\n new_post = form.cleaned_data\n new_post['unreq_file'] = form.forms[0].fields['unreq_file'].saved_filename\n\n form = self.formCls(new_post)\n self.assertEqual(form.is_valid(), False)\n step, form = form.current_step()\n self.assertEqual(step, 1)\n\n new_post = form.cleaned_data\n new_post['unreq_file'] = 
form.forms[0].fields['unreq_file'].saved_filename\n\n new_post['req_3'] = 'req 3'\n\n assert new_post['unreq_file']\n\n form = self.formCls(new_post)\n\n self.assertEqual(form.is_valid(), True)\n\n data = form.cleaned_data\n assert data\n self.assertEqual(data['unreq_file']._name, \"filename.txt\")\n self.assertEqual(data['unreq_file'].content_type, \"image/jpeg\")\n","sub_path":"multipageform/tests/filefield.py","file_name":"filefield.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"338224276","text":"#!/usr/bin/env python3\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='3'\n\nimport tensorflow as tf\n\nfrom config import *\n\ndef clear_dir(d):\n if os.path.isdir(d):\n os.system('rm -r \"%s\"' % d)\n os.mkdir(d)\n\nclear_dir('images')\nclear_dir('models')\nclear_dir('logs')\n\nwith tf.device('/gpu:1'):\n import model_config\n model = model_config.get_model()\n\n import weight_visualizer\n\n import inputs\n flow_train = inputs.get_data()\n\n def vis_weight(epoch, logs):\n epoch += 1\n if epoch % 20 == 0:\n weight_visualizer.visualize(model, 'conv0_%04d.png' % epoch)\n if epoch % 500 == 0:\n fn = 'models/weight_%04d.h5' % epoch\n print('\\nSaving weights to: ' + fn)\n model.save_weights(fn)\n\n try:\n from keras.optimizers import SGD, Adam\n from keras.callbacks import LambdaCallback, TensorBoard\n if BASE is not None:\n print('Loading weights.')\n model.load_weights(BASE, by_name=True)\n\n model.compile(\n optimizer=Adam(),\n #optimizer=SGD(1e-4, decay=1e-3, momentum=0.9),\n loss='mse'\n )\n\n model.fit_generator(flow_train,\n MINI_EPOCH, EPOCH, verbose=True,\n callbacks=[LambdaCallback(on_epoch_end=vis_weight),\n TensorBoard()],\n validation_data=inputs.get_data(), validation_steps=16)\n\n except KeyboardInterrupt:\n print('Halted.')\n\n model.save_weights(SAVETO)\n print('Saved to '+SAVETO)\n\n","sub_path":"autoencoder-wta/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"586562890","text":"class Person:\n def __init__(self, name, birth_year, gender, father=None, mother=None):\n self.name = name\n self.birth_year = birth_year\n self.gender = gender\n if father:\n self.add_parent(father)\n if mother:\n self.add_parent(mother)\n self.kids = []\n\n def add_parent(self, parent):\n if isinstance(parent, Person):\n if parent.gender == 'F':\n self.mother = parent\n else:\n self.father = parent\n parent.kids.append(self)\n\n def children(self, gender='both'):\n if not gender == 'both':\n return list(filter(lambda person: person.gender == gender,\n self.kids))\n else:\n return self.kids\n\n def get_siblings_by_gender(self, gender):\n if hasattr(self, \"mother\"):\n if hasattr(self, \"father\"):\n siblings = list(set(self.mother.children(gender) +\n self.father.children(gender)))\n else:\n siblings = self.mother.children(gender)\n elif hasattr(self, \"father\"):\n siblings = self.father.children(gender)\n else:\n siblings = []\n\n return list(set(siblings) - {self})\n\n def get_brothers(self):\n return self.get_siblings_by_gender('M')\n\n def get_sisters(self):\n return self.get_siblings_by_gender('F')\n\n def is_direct_successor(self, other_person):\n return other_person in 
self.children()\n","sub_path":"task3/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"161496198","text":"import cv2\r\n\r\nvideo = cv2.VideoCapture(0)\r\nfaceCascade = cv2.CascadeClassifier(\"dataset/haarcascade_frontalface_default.xml\")\r\nsmileCascade = cv2.CascadeClassifier(\"dataset/haarcascade_smile.xml\")\r\n\r\ncount = 500\r\nwhile True:\r\n    success, img = video.read()\r\n    grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n    faces = faceCascade.detectMultiScale(grayImg, 1.1, 4)\r\n    keyPressed = cv2.waitKey(1)\r\n    for x, y, w, h in faces:\r\n        smiles = smileCascade.detectMultiScale(grayImg, 1.8, 15)\r\n        for x, y, w, h in smiles:\r\n            print(\"Image \" + str(count) + \" Saved\")\r\n            path = 'SavedImages\\\\' + str(count) + '.jpg'\r\n            cv2.imwrite(path, img)\r\n            count += 1\r\n            if count >= 503:\r\n                break\r\n\r\n    cv2.imshow('live video', img)\r\n    if keyPressed & 0xFF == ord('q'):\r\n        break\r\n\r\nvideo.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"28128029","text":"n = int(input())\nA=[]*n\nA = list(map(int, input().split()))\n\ndef findmax(A):\n    s=[0]*len(A)\n    s[0]=A[0]\n    for i in range(1,len(A)):\n        s[i]=max(s[i-1]+A[i],A[i])\n    return max(s)\nprint(findmax(A))","sub_path":"자료구조_알고리즘설계해석/최대 구간 합 계산.py","file_name":"최대 구간 합 계산.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"19698531","text":"import numpy as np\nimport gym\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Activation, Flatten, Input, merge\nfrom keras.layers.advanced_activations import LeakyReLU, ELU\nfrom keras.layers.noise import GaussianNoise\nfrom keras.optimizers import RMSprop, Adam, Nadam\n\nfrom rl.agents import ContinuousDQNAgent\nfrom rl.memory import SequentialMemory\nfrom rl.random import OrnsteinUhlenbeckProcess\nfrom rl.core import Processor\n\nfrom rl.callbacks import FileLogger, ModelIntervalCheckpoint\n\nfrom osim.env import *\n\nimport argparse\nimport math\n\n\nclass PendulumProcessor(Processor):\n    def process_reward(self, reward):\n        # The magnitude of the reward can be important. 
Here it is passed\n        # through unchanged.\n        return reward\n\n# Command line parameters\nparser = argparse.ArgumentParser(description='Train or test deep neural net motor controller')\nparser.add_argument('--train', dest='train', action='store_true', default=True)\nparser.add_argument('--test', dest='train', action='store_false', default=True)\nparser.add_argument('--steps', dest='steps', action='store', default=500000)\nparser.add_argument('--visualize', dest='visualize', action='store_true', default=False)\nparser.add_argument('--start_weights', dest='start_weights', action='store', default=\"best/ddpg_elu_rew2_best_actor.h5f\")\nparser.add_argument('--model', dest='model', action='store', default=\"CDQN/cdqn_Gait_from_ddpg.h5f\")\nparser.add_argument('--sigma', dest='sigma', action='store', default=0.25)\nparser.add_argument('--theta', dest='theta', action='store', default=0.15)\nparser.add_argument('--gamma', dest='gamma', action='store', default=0.99)\nparser.add_argument('--rseed', dest='rseed', action='store', default=53, type=int)\nargs = parser.parse_args()\n\nENV_NAME = 'Pendulum-v0'\ngym.undo_logger_setup()\n\n\n# Get the environment and extract the number of actions.\nenv = GaitEnv(args.visualize)\n#env = gym.make(ENV_NAME)\nprint(\"Random seed: %i\\n\" % args.rseed)\nnp.random.seed(args.rseed)\nrandom.seed(args.rseed)\nenv.seed(args.rseed)\n\nassert len(env.action_space.shape) == 1\nnb_actions = env.action_space.shape[0]\n\n# Total number of steps in training\nnallsteps = args.steps\n\ninit = 'lecun_uniform'\n \n# Build all necessary models: V, mu, and L networks.\nV_model = Sequential()\nV_model.add(Flatten(input_shape=(1,) + env.observation_space.shape))\nV_model.add(GaussianNoise(0.01)) # add to the command line!\nV_model.add(Dense(32, init = init))\nV_model.add(ELU())\nV_model.add(Dense(32, init = init))\nV_model.add(ELU())\nV_model.add(Dense(32, init = init))\nV_model.add(ELU())\nV_model.add(Dense(1))\nV_model.add(Activation('linear'))\nprint(V_model.summary())\n\nmu_model = Sequential()\nmu_model.add(Flatten(input_shape=(1,) + env.observation_space.shape))\nmu_model.add(GaussianNoise(0.01)) # add to the command line!\nmu_model.add(Dense(32, init = init))\nmu_model.add(ELU())\nmu_model.add(Dense(32, init = init))\nmu_model.add(ELU())\nmu_model.add(Dense(32, init = init))\nmu_model.add(ELU())\nmu_model.add(Dense(nb_actions, init = init))\nmu_model.add(GaussianNoise(0.01))\nmu_model.add(Activation('sigmoid'))\nprint(mu_model.summary())\n\naction_input = Input(shape=(nb_actions,), name='action_input')\nobservation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')\nx = merge([action_input, Flatten()(observation_input)], mode='concat')\nx = GaussianNoise(0.01)(x)\nx = Dense(64, init = init)(x)\nx = ELU()(x)\nx = Dense(64, init = init)(x)\nx = ELU()(x)\nx = Dense(64, init = init)(x)\nx = ELU()(x)\nx = Dense(((nb_actions * nb_actions + nb_actions) / 2))(x)\nx = Activation('sigmoid')(x)\nL_model = Model(input=[action_input, observation_input], output=x)\nprint(L_model.summary())\n\n# Finally, we configure and compile our agent. 
You can use every built-in Keras optimizer and\n# even the metrics!\nprocessor = PendulumProcessor()\nmemory = SequentialMemory(limit=100000, window_length=1)\nrandom_process = OrnsteinUhlenbeckProcess(theta=float(args.theta), mu=0., sigma=float(args.sigma), size=nb_actions)\n\n#mu_model.load_weights('best/ddpg_elu_rew2_best_actor.h5f')\nagent = ContinuousDQNAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,\n memory=memory, nb_steps_warmup=100, random_process=random_process,\n gamma=float(args.gamma), target_model_update=1e-3, delta_clip=2., \n processor=processor)\nagent.compile(Nadam(lr=.001, clipnorm=2.), metrics=['mae'])\n\nif args.train:\n# agent.load_weights(args.start_weights)\n checkpoint_weights_filename = 'CDQN_train/cdqn_Gait_from_ddpg_{step}.h5f'\n# log_filename = 'CDQN/cdqn_from_ddpg_{}.json'.format('Gait')\n log_filename = 'CDQN/cdqn_{}_from_ddpg.json'.format('Gait')\n callbacks = [ModelIntervalCheckpoint(checkpoint_weights_filename, interval=10000)]\n callbacks += [FileLogger(log_filename, interval=10000)]\n agent.fit(env, callbacks=callbacks, nb_steps=nallsteps, visualize=False, verbose=1, nb_max_episode_steps=1000)\n agent.save_weights(args.model, overwrite=True)\n\nif not args.train:\n agent.load_weights(args.model)\n agent.test(env, nb_episodes=3, visualize=False, nb_max_episode_steps=500) \n","sub_path":"scripts/cdqn.py","file_name":"cdqn.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"397941145","text":"from django.http import HttpResponse\nfrom django.conf import settings\n\nclass Surf(object):\n def __init__(self, request):\n self.session= request.session\n cart= self.session.get(settings.SURF_SESSION_ID)\n if not cart:\n cart=self.session[settings.SURF_SESSION_ID]= {'page_counter':0}\n self.cart = cart\n\n def add(self):\n n=self.cart['page_counter']+1\n self.cart['page_counter']=n\n self.save()\n\n def save(self):\n self.session[settings.SURF_SESSION_ID] = self.cart\n self.session.modified = True\n\n def check(self):\n counter=self.cart['page_counter']\n print(counter)\n if int(counter) > 5:\n return False\n else:\n return True\n\n def clear(self):\n self.cart['page_counter']=0\n self.save()\n\n\n\n","sub_path":"apply/surfing.py","file_name":"surfing.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"638049672","text":"from django.urls import path\nfrom .views import *\n\napp_name=\"main\"\nurlpatterns = [\n path('', showmain, name=\"showmain\"), \n path('faq/',faq,name = 'faq'),\n path('contact/',contact,name='contact'),\n path('academic/', academic, name=\"academic\"),\n path('volunteer/', volunteer, name=\"volunteer\"),\n path('research/', research, name=\"research\"),\n path('art/', art, name=\"art\"),\n path('fresh/', fresh, name=\"fresh\"),\n path('performance/', performance, name=\"performance\"),\n path('atheletic/', atheletic, name=\"atheletic\"),\n path('social/',social,name='social'),\n\n # likelion\n path('likelion/',likelion,name='likelion'),\n path('likelion/',likelion_detail,name='likelion_detail'),\n path('likelion/new/',likelion_new,name='likelion_new'),\n path('likelion/create/',likelion_create,name='likelion_create'),\n path('likelion/edit/',likelion_edit,name='likelion_edit'),\n path('likelion/update/',likelion_update,name='likelion_update'),\n path('likelion/delete/',likelion_delete,name='likelion_delete'),\n \n # 
cafein\n path('cafein/',cafein,name='cafein'),\n path('cafein/',cafein_detail,name='cafein_detail'),\n path('cafein/new/',cafein_new,name='cafein_new'),\n path('cafein/create/',cafein_create,name='cafein_create'),\n path('cafein/edit/',cafein_edit,name='cafein_edit'),\n path('cafein/update/',cafein_update,name='cafein_update'),\n path('cafein/delete/',cafein_delete,name='cafein_delete'),\n\n # dna\n path('dna/',dna,name='dna'),\n path('dna/',dna_detail,name='dna_detail'),\n path('dna/new/',dna_new,name='dna_new'),\n path('dna/create/',dna_create,name='dna_create'),\n path('dna/edit/',dna_edit,name='dna_edit'),\n path('dna/update/',dna_update,name='dna_update'),\n path('dna/delete/',dna_delete,name='dna_delete'),\n\n # dussa\n path('dussa/',dussa,name='dussa'),\n path('dussa/',dussa_detail,name='dussa_detail'),\n path('dussa/new/',dussa_new,name='dussa_new'),\n path('dussa/create/',dussa_create,name='dussa_create'),\n path('dussa/edit/',dussa_edit,name='dussa_edit'),\n path('dussa/update/',dussa_update,name='dussa_update'),\n path('dussa/delete/',dussa_delete,name='dussa_delete'),\n\n # kcc\n path('kcc/',kcc,name='kcc'),\n path('kcc/',kcc_detail,name='kcc_detail'),\n path('kcc/new/',kcc_new,name='kcc_new'),\n path('kcc/create/',kcc_create,name='kcc_create'),\n path('kcc/edit/',kcc_edit,name='kcc_edit'),\n path('kcc/update/',kcc_update,name='kcc_update'),\n path('kcc/delete/',kcc_delete,name='kcc_delete'),\n\n # mecs\n path('mecs/',mecs,name='mecs'),\n path('mecs/',mecs_detail,name='mecs_detail'),\n path('mecs/new/',mecs_new,name='mecs_new'),\n path('mecs/create/',mecs_create,name='mecs_create'),\n path('mecs/edit/',mecs_edit,name='mecs_edit'),\n path('mecs/update/',mecs_update,name='mecs_update'),\n path('mecs/delete/',mecs_delete,name='mecs_delete'),\n\n # nsa\n path('nsa/',nsa,name='nsa'),\n path('nsa/',nsa_detail,name='nsa_detail'),\n path('nsa/new/',nsa_new,name='nsa_new'),\n path('nsa/create/',nsa_create,name='nsa_create'),\n path('nsa/edit/',nsa_edit,name='nsa_edit'),\n path('nsa/update/',nsa_update,name='nsa_update'),\n path('nsa/delete/',nsa_delete,name='nsa_delete'),\n\n # marx\n path('marx/',marx,name='marx'),\n path('marx/',marx_detail,name='marx_detail'),\n path('marx/new/',marx_new,name='marx_new'),\n path('marx/create/',marx_create,name='marx_create'),\n path('marx/edit/',marx_edit,name='marx_edit'),\n path('marx/update/',marx_update,name='marx_update'),\n path('marx/delete/',marx_delete,name='marx_delete'),\n\n # management\n path('management/',management,name='management'),\n path('management/',management_detail,name='management_detail'),\n path('management/new/',management_new,name='management_new'),\n path('management/create/',management_create,name='management_create'),\n path('management/edit/',management_edit,name='management_edit'),\n path('management/update/',management_update,name='management_update'),\n path('management/delete/',management_delete,name='management_delete'),\n\n # economy\n path('economy/',economy,name='economy'),\n path('economy/',economy_detail,name='economy_detail'),\n path('economy/new/',economy_new,name='economy_new'),\n path('economy/create/',economy_create,name='economy_create'),\n path('economy/edit/',economy_edit,name='economy_edit'),\n path('economy/update/',economy_update,name='economy_update'),\n path('economy/delete/',economy_delete,name='economy_delete'),\n\n # international\n path('international/',international,name='international'),\n path('international/',international_detail,name='international_detail'),\n 
path('international/new/',international_new,name='international_new'),\n    path('international/create/',international_create,name='international_create'),\n    path('international/edit/',international_edit,name='international_edit'),\n    path('international/update/',international_update,name='international_update'),\n    path('international/delete/',international_delete,name='international_delete'),\n\n    # politics\n    path('politics/',politics,name='politics'),\n    path('politics/',politics_detail,name='politics_detail'),\n    path('politics/new/',politics_new,name='politics_new'),\n    path('politics/create/',politics_create,name='politics_create'),\n    path('politics/edit/',politics_edit,name='politics_edit'),\n    path('politics/update/',politics_update,name='politics_update'),\n    path('politics/delete/',politics_delete,name='politics_delete'),\n\n    # kusa\n    path('kusa/',kusa,name='kusa'),\n    path('kusa/',kusa_detail,name='kusa_detail'),\n    path('kusa/new/',kusa_new,name='kusa_new'),\n    path('kusa/create/',kusa_create,name='kusa_create'),\n    path('kusa/edit/',kusa_edit,name='kusa_edit'),\n    path('kusa/update/',kusa_update,name='kusa_update'),\n    path('kusa/delete/',kusa_delete,name='kusa_delete'),\n\n    # rich\n    path('rich/',rich,name='rich'),\n    path('rich/',rich_detail,name='rich_detail'),\n    path('rich/new/',rich_new,name='rich_new'),\n    path('rich/create/',rich_create,name='rich_create'),\n    path('rich/edit/',rich_edit,name='rich_edit'),\n    path('rich/update/',rich_update,name='rich_update'),\n    path('rich/delete/',rich_delete,name='rich_delete'),\n\n    # unsa\n    path('unsa/',unsa,name='unsa'),\n    path('unsa/',unsa_detail,name='unsa_detail'),\n    path('unsa/new/',unsa_new,name='unsa_new'),\n    path('unsa/create/',unsa_create,name='unsa_create'),\n    path('unsa/edit/',unsa_edit,name='unsa_edit'),\n    path('unsa/update/',unsa_update,name='unsa_update'),\n    path('unsa/delete/',unsa_delete,name='unsa_delete'),\n\n    # frontier\n    path('frontier/',frontier,name='frontier'),\n    path('frontier/',frontier_detail,name='frontier_detail'),\n    path('frontier/new/',frontier_new,name='frontier_new'),\n    path('frontier/create/',frontier_create,name='frontier_create'),\n    path('frontier/edit/',frontier_edit,name='frontier_edit'),\n    path('frontier/update/',frontier_update,name='frontier_update'),\n    path('frontier/delete/',frontier_delete,name='frontier_delete'),\n\n    # buddha\n    path('buddha/',buddha,name='buddha'),\n    path('buddha/',buddha_detail,name='buddha_detail'),\n    path('buddha/new/',buddha_new,name='buddha_new'),\n    path('buddha/create/',buddha_create,name='buddha_create'),\n    path('buddha/edit/',buddha_edit,name='buddha_edit'),\n    path('buddha/update/',buddha_update,name='buddha_update'),\n    path('buddha/delete/',buddha_delete,name='buddha_delete'),\n\n    #ajax\n    path('ajax/',ajax,name='ajax'),\n    path('ajax/',ajax_detail,name='ajax_detail'),\n    path('ajax/new/',ajax_new,name='ajax_new'),\n    path('ajax/create/',ajax_create,name='ajax_create'),\n    path('ajax/edit/',ajax_edit,name='ajax_edit'),\n    path('ajax/update/',ajax_update,name='ajax_update'),\n    path('ajax/delete/',ajax_delete,name='ajax_delete'),\n\n\n    #hola\n    path('hola/',hola,name='hola'),\n    path('hola/',hola_detail,name='hola_detail'),\n    path('hola/new/',hola_new,name='hola_new'),\n    
path('hola/create/',hola_create,name='hola_create'),\n path('hola/edit/',hola_edit,name='hola_edit'),\n path('hola/update/',hola_update,name='hola_update'),\n path('hola/delete/',hola_delete,name='hola_delete'),\n\n #odc\n path('odc/',odc,name='odc'),\n path('odc/',odc_detail,name='odc_detail'),\n path('odc/new/',odc_new,name='odc_new'),\n path('odc/create/',odc_create,name='odc_create'),\n path('odc/edit/',odc_edit,name='odc_edit'),\n path('odc/update/',odc_update,name='odc_update'),\n path('odc/delete/',odc_delete,name='odc_delete'),\n\n #opus\n path('opus/',opus,name='opus'),\n path('opus/',opus_detail,name='opus_detail'),\n path('opus/new/',opus_new,name='opus_new'),\n path('opus/create/',opus_create,name='opus_create'),\n path('opus/edit/',opus_edit,name='opus_edit'),\n path('opus/update/',opus_update,name='opus_update'),\n path('opus/delete/',opus_delete,name='opus_delete'),\n\n #drama\n path('drama/',drama,name='drama'),\n path('drama/',drama_detail,name='drama_detail'),\n path('drama/new/',drama_new,name='drama_new'),\n path('drama/create/',drama_create,name='drama_create'),\n path('drama/edit/',drama_edit,name='drama_edit'),\n path('drama/update/',drama_update,name='drama_update'),\n path('drama/delete/',drama_delete,name='drama_delete'),\n\n #lotus\n path('lotus/',lotus,name='lotus'),\n path('lotus/',lotus_detail,name='lotus_detail'),\n path('lotus/new/',lotus_new,name='lotus_new'),\n path('lotus/create/',lotus_create,name='lotus_create'),\n path('lotus/edit/',lotus_edit,name='lotus_edit'),\n path('lotus/update/',lotus_update,name='lotus_update'),\n path('lotus/delete/',lotus_delete,name='lotus_delete'),\n\n #cloud\n path('cloud/',cloud,name='cloud'),\n path('cloud/',cloud_detail,name='cloud_detail'),\n path('cloud/new/',cloud_new,name='cloud_new'),\n path('cloud/create/',cloud_create,name='cloud_create'),\n path('cloud/edit/',cloud_edit,name='cloud_edit'),\n path('cloud/update/',cloud_update,name='cloud_update'),\n path('cloud/delete/',cloud_delete,name='cloud_delete'),\n\n #arirang\n path('arirang/',arirang,name='arirang'),\n path('arirang/',arirang_detail,name='arirang_detail'),\n path('arirang/new/',arirang_new,name='arirang_new'),\n path('arirang/create/',arirang_create,name='arirang_create'),\n path('arirang/edit/',arirang_edit,name='arirang_edit'),\n path('arirang/update/',arirang_update,name='arirang_update'),\n path('arirang/delete/',arirang_delete,name='arirang_delete'),\n\n #eumsem\n path('eumsem/',eumsem,name='eumsem'),\n path('eumsem/',eumsem_detail,name='eumsem_detail'),\n path('eumsem/new/',eumsem_new,name='eumsem_new'),\n path('eumsem/create/',eumsem_create,name='eumsem_create'),\n path('eumsem/edit/',eumsem_edit,name='eumsem_edit'),\n path('eumsem/update/',eumsem_update,name='eumsem_update'),\n path('eumsem/delete/',eumsem_delete,name='eumsem_delete'),\n\n #fearless\n path('fearless/',fearless,name='fearless'),\n path('fearless/',fearless_detail,name='fearless_detail'),\n path('fearless/new/',fearless_new,name='fearless_new'),\n path('fearless/create/',fearless_create,name='fearless_create'),\n path('fearless/edit/',fearless_edit,name='fearless_edit'),\n path('fearless/update/',fearless_update,name='fearless_update'),\n path('fearless/delete/',fearless_delete,name='fearless_delete'),\n\n #yeoul\n path('yeoul/',yeoul,name='yeoul'),\n path('yeoul/',yeoul_detail,name='yeoul_detail'),\n path('yeoul/new/',yeoul_new,name='yeoul_new'),\n path('yeoul/create/',yeoul_create,name='yeoul_create'),\n path('yeoul/edit/',yeoul_edit,name='yeoul_edit'),\n 
path('yeoul/update/',yeoul_update,name='yeoul_update'),\n path('yeoul/delete/',yeoul_delete,name='yeoul_delete'),\n\n # elephente\n path('elephente/',elephente,name='elephente'),\n path('elephente/',elephente_detail,name='elephente_detail'),\n path('elephente/new/',elephente_new,name='elephente_new'),\n path('elephente/create/',elephente_create,name='elephente_create'),\n path('elephente/edit/',elephente_edit,name='elephente_edit'),\n path('elephente/update/',elephente_update,name='elephente_update'),\n path('elephente/delete/',elephente_delete,name='elephente_delete'),\n\n # doomchit\n path('doomchit/',doomchit,name='doomchit'),\n path('doomchit/',doomchit_detail,name='doomchit_detail'),\n path('doomchit/new/',doomchit_new,name='doomchit_new'),\n path('doomchit/create/',doomchit_create,name='doomchit_create'),\n path('doomchit/edit/',doomchit_edit,name='doomchit_edit'),\n path('doomchit/update/',doomchit_update,name='doomchit_update'),\n path('doomchit/delete/',doomchit_delete,name='doomchit_delete'),\n\n # enactus\n path('enactus/',enactus,name='enactus'),\n path('enactus/',enactus_detail,name='enactus_detail'),\n path('enactus/new/',enactus_new,name='enactus_new'),\n path('enactus/create/',enactus_create,name='enactus_create'),\n path('enactus/edit/',enactus_edit,name='enactus_edit'),\n path('enactus/update/',enactus_update,name='enactus_update'),\n path('enactus/delete/',enactus_delete,name='enactus_delete'),\n\n # jam\n path('jam/',jam,name='jam'),\n path('jam/',jam_detail,name='jam_detail'),\n path('jam/new/',jam_new,name='jam_new'),\n path('jam/create/',jam_create,name='jam_create'),\n path('jam/edit/',jam_edit,name='jam_edit'),\n path('jam/update/',jam_update,name='jam_update'),\n path('jam/delete/',jam_delete,name='jam_delete'),\n \n # qud\n path('qud/',qud,name='qud'),\n path('qud/',qud_detail,name='qud_detail'),\n path('qud/new/',qud_new,name='qud_new'),\n path('qud/create/',qud_create,name='qud_create'),\n path('qud/edit/',qud_edit,name='qud_edit'),\n path('qud/update/',qud_update,name='qud_update'),\n path('qud/delete/',qud_delete,name='qud_delete'),\n\n # elf\n path('elf/',elf,name='elf'),\n path('elf/',elf_detail,name='elf_detail'),\n path('elf/new/',elf_new,name='elf_new'),\n path('elf/create/',elf_create,name='elf_create'),\n path('elf/edit/',elf_edit,name='elf_edit'),\n path('elf/update/',elf_update,name='elf_update'),\n path('elf/delete/',elf_delete,name='elf_delete'),\n \n # rcy\n path('rcy/',rcy,name='rcy'),\n path('rcy/',rcy_detail,name='rcy_detail'),\n path('rcy/new/',rcy_new,name='rcy_new'),\n path('rcy/create/',rcy_create,name='rcy_create'),\n path('rcy/edit/',rcy_edit,name='rcy_edit'),\n path('rcy/update/',rcy_update,name='rcy_update'),\n path('rcy/delete/',rcy_delete,name='rcy_delete'),\n\n # road\n path('road/',road,name='road'),\n path('road/',road_detail,name='road_detail'),\n path('road/new/',road_new,name='road_new'),\n path('road/create/',road_create,name='road_create'),\n path('road/edit/',road_edit,name='road_edit'),\n path('road/update/',road_update,name='road_update'),\n path('road/delete/',road_delete,name='road_delete'),\n\n # hand\n path('hand/',hand,name='hand'),\n path('hand/',hand_detail,name='hand_detail'),\n path('hand/new/',hand_new,name='hand_new'),\n path('hand/create/',hand_create,name='hand_create'),\n path('hand/edit/',hand_edit,name='hand_edit'),\n path('hand/update/',hand_update,name='hand_update'),\n path('hand/delete/',hand_delete,name='hand_delete'),\n\n # neighbor\n path('neighbor/',neighbor,name='neighbor'),\n 
path('neighbor/',neighbor_detail,name='neighbor_detail'),\n path('neighbor/new/',neighbor_new,name='neighbor_new'),\n path('neighbor/create/',neighbor_create,name='neighbor_create'),\n path('neighbor/edit/',neighbor_edit,name='neighbor_edit'),\n path('neighbor/update/',neighbor_update,name='neighbor_update'),\n path('neighbor/delete/',neighbor_delete,name='neighbor_delete'),\n\n # painters\n path('painters/',painters,name='painters'),\n path('painters/',painters_detail,name='painters_detail'),\n path('painters/new/',painters_new,name='painters_new'),\n path('painters/create/',painters_create,name='painters_create'),\n path('painters/edit/',painters_edit,name='painters_edit'),\n path('painters/update/',painters_update,name='painters_update'),\n path('painters/delete/',painters_delete,name='painters_delete'),\n\n # green\n path('green/',green,name='green'),\n path('green/',green_detail,name='green_detail'),\n path('green/new/',green_new,name='green_new'),\n path('green/create/',green_create,name='green_create'),\n path('green/edit/',green_edit,name='green_edit'),\n path('green/update/',green_update,name='green_update'),\n path('green/delete/',green_delete,name='green_delete'),\n\n # korean\n path('korean/',korean,name='korean'),\n path('korean/',korean_detail,name='korean_detail'),\n path('korean/new/',korean_new,name='korean_new'),\n path('korean/create/',korean_create,name='korean_create'),\n path('korean/edit/',korean_edit,name='korean_edit'),\n path('korean/update/',korean_update,name='korean_update'),\n path('korean/delete/',korean_delete,name='korean_delete'),\n\n # draw\n path('draw/',draw,name='draw'),\n path('draw/',draw_detail,name='draw_detail'),\n path('draw/new/',draw_new,name='draw_new'),\n path('draw/create/',draw_create,name='draw_create'),\n path('draw/edit/',draw_edit,name='draw_edit'),\n path('draw/update/',draw_update,name='draw_update'),\n path('draw/delete/',draw_delete,name='draw_delete'),\n\n # literal\n path('literal/',literal,name='literal'),\n path('literal/',literal_detail,name='literal_detail'),\n path('literal/new/',literal_new,name='literal_new'),\n path('literal/create/',literal_create,name='literal_create'),\n path('literal/edit/',literal_edit,name='literal_edit'),\n path('literal/update/',literal_update,name='literal_update'),\n path('literal/delete/',literal_delete,name='literal_delete'),\n\n # calligraphy\n path('calligraphy/',calligraphy,name='calligraphy'),\n path('calligraphy/',calligraphy_detail,name='calligraphy_detail'),\n path('calligraphy/new/',calligraphy_new,name='calligraphy_new'),\n path('calligraphy/create/',calligraphy_create,name='calligraphy_create'),\n path('calligraphy/edit/',calligraphy_edit,name='calligraphy_edit'),\n path('calligraphy/update/',calligraphy_update,name='calligraphy_update'),\n path('calligraphy/delete/',calligraphy_delete,name='calligraphy_delete'),\n\n # circle\n path('circle/',circle,name='circle'),\n path('circle/',circle_detail,name='circle_detail'),\n path('circle/new/',circle_new,name='circle_new'),\n path('circle/create/',circle_create,name='circle_create'),\n path('circle/edit/',circle_edit,name='circle_edit'),\n path('circle/update/',circle_update,name='circle_update'),\n path('circle/delete/',circle_delete,name='circle_delete'),\n\n # stone\n path('stone/',stone,name='stone'),\n path('stone/',stone_detail,name='stone_detail'),\n path('stone/new/',stone_new,name='stone_new'),\n path('stone/create/',stone_create,name='stone_create'),\n path('stone/edit/',stone_edit,name='stone_edit'),\n 
path('stone/<int:pk>/update/',stone_update,name='stone_update'),\n path('stone/<int:pk>/delete/',stone_delete,name='stone_delete'),\n\n # cartoon\n path('cartoon/',cartoon,name='cartoon'),\n path('cartoon/<int:pk>/',cartoon_detail,name='cartoon_detail'),\n path('cartoon/new/',cartoon_new,name='cartoon_new'),\n path('cartoon/create/',cartoon_create,name='cartoon_create'),\n path('cartoon/<int:pk>/edit/',cartoon_edit,name='cartoon_edit'),\n path('cartoon/<int:pk>/update/',cartoon_update,name='cartoon_update'),\n path('cartoon/<int:pk>/delete/',cartoon_delete,name='cartoon_delete'),\n\n # rush\n path('rush/',rush,name='rush'),\n path('rush/<int:pk>/',rush_detail,name='rush_detail'),\n path('rush/new/',rush_new,name='rush_new'),\n path('rush/create/',rush_create,name='rush_create'),\n path('rush/<int:pk>/edit/',rush_edit,name='rush_edit'),\n path('rush/<int:pk>/update/',rush_update,name='rush_update'),\n path('rush/<int:pk>/delete/',rush_delete,name='rush_delete'),\n\n # dust\n path('dust/',dust,name='dust'),\n path('dust/<int:pk>/',dust_detail,name='dust_detail'),\n path('dust/new/',dust_new,name='dust_new'),\n path('dust/create/',dust_create,name='dust_create'),\n path('dust/<int:pk>/edit/',dust_edit,name='dust_edit'),\n path('dust/<int:pk>/update/',dust_update,name='dust_update'),\n path('dust/<int:pk>/delete/',dust_delete,name='dust_delete'),\n\n # cave\n path('cave/',cave,name='cave'),\n path('cave/<int:pk>/',cave_detail,name='cave_detail'),\n path('cave/new/',cave_new,name='cave_new'),\n path('cave/create/',cave_create,name='cave_create'),\n path('cave/<int:pk>/edit/',cave_edit,name='cave_edit'),\n path('cave/<int:pk>/update/',cave_update,name='cave_update'),\n path('cave/<int:pk>/delete/',cave_delete,name='cave_delete'),\n\n # action\n path('action/',action,name='action'),\n path('action/<int:pk>/',action_detail,name='action_detail'),\n path('action/new/',action_new,name='action_new'),\n path('action/create/',action_create,name='action_create'),\n path('action/<int:pk>/edit/',action_edit,name='action_edit'),\n path('action/<int:pk>/update/',action_update,name='action_update'),\n path('action/<int:pk>/delete/',action_delete,name='action_delete'),\n\n # wind\n path('wind/',wind,name='wind'),\n path('wind/<int:pk>/',wind_detail,name='wind_detail'),\n path('wind/new/',wind_new,name='wind_new'),\n path('wind/create/',wind_create,name='wind_create'),\n path('wind/<int:pk>/edit/',wind_edit,name='wind_edit'),\n path('wind/<int:pk>/update/',wind_update,name='wind_update'),\n path('wind/<int:pk>/delete/',wind_delete,name='wind_delete'),\n\n # mountain\n path('mountain/',mountain,name='mountain'),\n path('mountain/<int:pk>/',mountain_detail,name='mountain_detail'),\n path('mountain/new/',mountain_new,name='mountain_new'),\n path('mountain/create/',mountain_create,name='mountain_create'),\n path('mountain/<int:pk>/edit/',mountain_edit,name='mountain_edit'),\n path('mountain/<int:pk>/update/',mountain_update,name='mountain_update'),\n path('mountain/<int:pk>/delete/',mountain_delete,name='mountain_delete'),\n\n # water\n path('water/',water,name='water'),\n path('water/<int:pk>/',water_detail,name='water_detail'),\n path('water/new/',water_new,name='water_new'),\n path('water/create/',water_create,name='water_create'),\n path('water/<int:pk>/edit/',water_edit,name='water_edit'),\n path('water/<int:pk>/update/',water_update,name='water_update'),\n path('water/<int:pk>/delete/',water_delete,name='water_delete'),\n\n # courtist\n path('courtist/',courtist,name='courtist'),\n path('courtist/<int:pk>/',courtist_detail,name='courtist_detail'),\n path('courtist/new/',courtist_new,name='courtist_new'),\n path('courtist/create/',courtist_create,name='courtist_create'),\n path('courtist/<int:pk>/edit/',courtist_edit,name='courtist_edit'),\n path('courtist/<int:pk>/update/',courtist_update,name='courtist_update'),\n 
path('courtist/<int:pk>/delete/',courtist_delete,name='courtist_delete'),\n\n # dutc\n path('dutc/',dutc,name='dutc'),\n path('dutc/<int:pk>/',dutc_detail,name='dutc_detail'),\n path('dutc/new/',dutc_new,name='dutc_new'),\n path('dutc/create/',dutc_create,name='dutc_create'),\n path('dutc/<int:pk>/edit/',dutc_edit,name='dutc_edit'),\n path('dutc/<int:pk>/update/',dutc_update,name='dutc_update'),\n path('dutc/<int:pk>/delete/',dutc_delete,name='dutc_delete'),\n\n\n # fctoto\n path('fctoto/',fctoto,name='fctoto'),\n path('fctoto/<int:pk>/',fctoto_detail,name='fctoto_detail'),\n path('fctoto/new/',fctoto_new,name='fctoto_new'),\n path('fctoto/create/',fctoto_create,name='fctoto_create'),\n path('fctoto/<int:pk>/edit/',fctoto_edit,name='fctoto_edit'),\n path('fctoto/<int:pk>/update/',fctoto_update,name='fctoto_update'),\n path('fctoto/<int:pk>/delete/',fctoto_delete,name='fctoto_delete'),\n\n # kendo\n path('kendo/',kendo,name='kendo'),\n path('kendo/<int:pk>/',kendo_detail,name='kendo_detail'),\n path('kendo/new/',kendo_new,name='kendo_new'),\n path('kendo/create/',kendo_create,name='kendo_create'),\n path('kendo/<int:pk>/edit/',kendo_edit,name='kendo_edit'),\n path('kendo/<int:pk>/update/',kendo_update,name='kendo_update'),\n path('kendo/<int:pk>/delete/',kendo_delete,name='kendo_delete'),\n\n # lae\n path('lae/',lae,name='lae'),\n path('lae/<int:pk>/',lae_detail,name='lae_detail'),\n path('lae/new/',lae_new,name='lae_new'),\n path('lae/create/',lae_create,name='lae_create'),\n path('lae/<int:pk>/edit/',lae_edit,name='lae_edit'),\n path('lae/<int:pk>/update/',lae_update,name='lae_update'),\n path('lae/<int:pk>/delete/',lae_delete,name='lae_delete'),\n\n # baduk\n path('baduk/',baduk,name='baduk'),\n path('baduk/<int:pk>/',baduk_detail,name='baduk_detail'),\n path('baduk/new/',baduk_new,name='baduk_new'),\n path('baduk/create/',baduk_create,name='baduk_create'),\n path('baduk/<int:pk>/edit/',baduk_edit,name='baduk_edit'),\n path('baduk/<int:pk>/update/',baduk_update,name='baduk_update'),\n path('baduk/<int:pk>/delete/',baduk_delete,name='baduk_delete'),\n\n # arrow\n path('arrow/',arrow,name='arrow'),\n path('arrow/<int:pk>/',arrow_detail,name='arrow_detail'),\n path('arrow/new/',arrow_new,name='arrow_new'),\n path('arrow/create/',arrow_create,name='arrow_create'),\n path('arrow/<int:pk>/edit/',arrow_edit,name='arrow_edit'),\n path('arrow/<int:pk>/update/',arrow_update,name='arrow_update'),\n path('arrow/<int:pk>/delete/',arrow_delete,name='arrow_delete'),\n\n # taekwondo\n path('taekwondo/',taekwondo,name='taekwondo'),\n path('taekwondo/<int:pk>/',taekwondo_detail,name='taekwondo_detail'),\n path('taekwondo/new/',taekwondo_new,name='taekwondo_new'),\n path('taekwondo/create/',taekwondo_create,name='taekwondo_create'),\n path('taekwondo/<int:pk>/edit/',taekwondo_edit,name='taekwondo_edit'),\n path('taekwondo/<int:pk>/update/',taekwondo_update,name='taekwondo_update'),\n path('taekwondo/<int:pk>/delete/',taekwondo_delete,name='taekwondo_delete'),\n\n # tuskers\n path('tuskers/',tuskers,name='tuskers'),\n path('tuskers/<int:pk>/',tuskers_detail,name='tuskers_detail'),\n path('tuskers/new/',tuskers_new,name='tuskers_new'),\n path('tuskers/create/',tuskers_create,name='tuskers_create'),\n path('tuskers/<int:pk>/edit/',tuskers_edit,name='tuskers_edit'),\n path('tuskers/<int:pk>/update/',tuskers_update,name='tuskers_update'),\n path('tuskers/<int:pk>/delete/',tuskers_delete,name='tuskers_delete'),\n\n]","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":28354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"559211081","text":"#ArelyLizbeth Guzmán Juárez\r\n#24 September 2019\r\n\r\n\r\n#Exercise 1\r\n#Read external files\r\n\r\nfile = open(\"devices.txt\", \"r\")\r\nfor item in file:\r\n print(item)\r\nfile.close()\r\nprint()\r\n
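\r\n# A tidier variant (editor's sketch, not part of the original exercise) lets a\r\n# context manager close the file automatically:\r\n#\r\n# with open(\"devices.txt\") as f:\r\n#     for line in f:\r\n#         print(line, end=\"\")  # each line keeps its trailing newline\r\n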
\r\n#Exercise 2\r\n#Strip the blank lines when printing\r\nfile = open(\"devices.txt\", \"r\")\r\nfor item in file:\r\n item=item.strip()\r\n print(item)\r\nfile.close()\r\nprint()\r\n\r\n#Exercise 3\r\n#Copy the file into a list\r\n\r\ndevices = []\r\nfile = open(\"devices.txt\",\"r\")\r\nfor item in file:\r\n item = item.strip()\r\n devices.append(item)\r\nfile.close()\r\nprint(devices)\r\nprint()\r\n\r\n#Exercise 4\r\n#Append new items to the file\r\n\r\nfile = open(\"devices.txt\",\"a\")\r\nwhile True:\r\n newItem = input(\"Enter the name of the new item: \")\r\n if newItem.lower() == \"exit\":\r\n print(\"Done!\")\r\n break\r\n file.write(newItem + \"\\n\")  # write inside the loop so every entry is saved, not only the final \"exit\"\r\nfile.close()\r\n","sub_path":"unidad_1/actividad_7/07_file-access_Arely.py","file_name":"07_file-access_Arely.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"292836560","text":"#!/usr/bin/env python3\n\nimport sys\nimport re\n\nl = sys.stdin.readline()\n\nm = re.compile(\"(.*?)((\\\\[[0-9]{4}-[0-9]{2}-[0-9]{2} [A-Z][a-z]{2} )[0-9]{2}:[0-9]{2})(]--)(\\\\[[0-9]{4}-[0-9]{2}-[0-9]{2} [A-Z][a-z]{2} )([0-9]{2}:[0-9]{2})(].*)\", re.DOTALL).match(l)\nif m:\n prefix, first, date1, sep, date2, time, suffix = m.groups()\n stime = sys.argv[1] if len(sys.argv) > 1 else time\n date = date1 if len(sys.argv) > 2 else date2\n sys.stdout.write(prefix + first + sep + date + stime + suffix + prefix + date + stime + sep + date2 + time + suffix)\nelse:\n sys.stdout.write(l)\nsys.stdout.write(sys.stdin.read())\n","sub_path":"src/split-clock.py","file_name":"split-clock.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"600282445","text":"from django.shortcuts import render, reverse, redirect\nimport markdown\n# Create your views here.\nfrom .models import RedditPost, Pending\nfrom .redditBot import RedditBot\nfrom .tasks import checkExistance\nfrom django.contrib import messages\nfrom django.views.decorators.http import require_http_methods\n\n\ndef getDistinctSubreddits():\n\treturn RedditPost.objects.order_by('subreddit').values_list('subreddit', flat = True).distinct()\n\n\ndef home(request, subreddit = None):\n\t#the default subreddit is learnpython\n\tif subreddit is None:\n\t\treturn redirect(reverse('reddit:home', kwargs={'subreddit':\"learnpython\"}))\n\tsubreddits = getDistinctSubreddits()\n\n\tqs = RedditPost.objects.filter(subreddit = subreddit)\n\treturn render(request, \"reddit/home.html\", {'redditPosts' : qs, 'subreddits': subreddits, 'subreddit' : subreddit, 'pending' : Pending.objects.all()})\n\n@require_http_methods(['POST'])\ndef new(request):\n\tsubreddit = request.POST.get('subreddit')\n\tcurr = request.POST.get('current').strip()\n\t#check for an empty submission\n\tif not subreddit:\n\t\tmessages.error(request,'Please enter something')\n\t\treturn redirect(reverse('reddit:home'))\n\t\n\t#check whether the subreddit already exists\n\tif RedditPost.objects.filter(subreddit = subreddit):\n\t\tmessages.error(request, r'The subreddit %s already exists'%subreddit)\n\t\treturn redirect(reverse('reddit:home', kwargs={'subreddit': curr}))\n\t\n\t#check whether the subreddit is already pending\n\tif not Pending.objects.filter(subreddit = subreddit):\n\t\tPending(subreddit = subreddit).save()\n\t\tcheckExistance.delay(subreddit)\n\t\t
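# checkExistance is queued out of process (note the .delay call), so the\n\t\t# success message below is shown before the subreddit has actually been verified\n\t\t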
messages.success(request, r'Your request is being processed... If the subreddit %s exists, it will be added'%subreddit)\n\telse:\n\t\tmessages.error(request,'Your request for the subreddit %s is already pending'%subreddit)\n\n\treturn redirect(reverse('reddit:home', kwargs={'subreddit': curr}))\n\n\ndef remove(request):\n\tif request.POST:\n\t\tsubreddits = request.POST.getlist('subreddit')\n\t\tif len(subreddits) == 0:\n\t\t\tmessages.error(request, 'Please select at least one subreddit')\n\t\telse:\n\t\t\tfor subreddit in subreddits:\n\t\t\t\tRedditPost.objects.filter(subreddit = subreddit).delete()\n\t\t\tmessages.success(request, 'Successfully removed the selected subreddits')\n\t\t\n\t\treturn redirect(reverse('reddit:home'))\n\t#the default subreddit is learnpython, so it cannot be removed\t\n\tsubreddits = list(getDistinctSubreddits())\n\tsubreddits.remove('learnpython')\n\n\tif not subreddits:\n\t\tmessages.error(request, \"No subreddit to be removed... Please add some first\")\n\t\treturn redirect(reverse('reddit:home'))\n\treturn render(request, \"reddit/removeSubreddit.html\", {'subreddits' : subreddits})\n","sub_path":"src/Alerts/reddit/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"227129030","text":"from flask import url_for\nfrom flask import current_app\n\nclass PaginationHelper():\n\tdef __init__(self, request, query, resource_for_url, key_name, schema):\n\t\tself.request = request\n\t\tself.query = query\n\t\tself.resource_for_url = resource_for_url\n\t\tself.key_name = key_name\n\t\tself.schema = schema\n\t\tself.results_per_page = current_app.config['PAGINATION_PAGE_SIZE']\n\t\tself.page_argument_name = current_app.config['PAGINATION_PAGE_ARGUMENT_NAME']\n\n\tdef paginate_query(self):\n\t\t# if no page number is specified, default to page 1\n\t\tpage_number = self.request.args.get(self.page_argument_name, 1, type=int)\n\t\tpaginated_objects = self.query.paginate(page_number, per_page=self.results_per_page, error_out=False)\n\t\tobjects = paginated_objects.items\n\n\t\tif paginated_objects.has_prev:\n\t\t\tprevious_page_url = url_for(self.resource_for_url, page=page_number-1, _external=True)\n\t\telse:\n\t\t\tprevious_page_url = None\n\n\t\tif paginated_objects.has_next:\n\t\t\tnext_page_url = url_for(self.resource_for_url, page=page_number+1,_external=True)\n\t\telse:\n\t\t\tnext_page_url = None\n\n\t\tdumped_objects = self.schema.dump(objects, many=True).data\n\n\t\treturn ({\n\t\t\tself.key_name: dumped_objects,\n\t\t\t'previous': previous_page_url,\n\t\t\t'next': next_page_url,\n\t\t\t'count': paginated_objects.total\n\t\t\t})\n","sub_path":"api/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"433728847","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport glob\nimport json\nimport tarfile\nimport zipfile\nimport nibabel as nib\nfrom nilearn import plotting\nfrom sklearn.preprocessing import normalize\nimport torch\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport teamc_pipline\nimport psutil\n\nprint(psutil.virtual_memory())\n\n# Hard-coded parameter dict\n#params = {'import_path':'','batch_size':25,'init_lr':0.01,'decay_freq':25,'data_length':3700,'record_name':'try','state_name':'MODEL_SAVE','epochs':5} # USE when you do the first training\nparams = 
{'import_path':'MODEL_SAVE0','batch_size':25,'init_lr':0.1,'decay_freq':20,'data_length':3000,'record_name':'try','state_name':'MODEL_SAVE','epochs':5}\n\ndata_length = params['data_length']\nbatch_size = params['batch_size']\ninit_lr = params['init_lr']\ndecay_freq = params['decay_freq']\nrecord_name = params['record_name']\nstate_name = params['state_name']\nepochs=params['epochs']\nimport_path = params['import_path']\n\n\n\nmapped_labels = pd.read_excel('NACC_LABELS_CLASSIFICATION_TASK_NEW_debug.xlsx')\nname_touse = mapped_labels.Address_Name[:data_length+1]\n\nX = teamc_pipline.data_mapper.data_mapping(['Sex_Bin','Age_Norm'],mapping_file_path='NACC_LABELS_CLASSIFICATION_TASK_NEW_debug.xlsx',data_path = '/work/03263/jcha9928/sharedirectory/nacc/',data_names = list(name_touse))\ndata_set, labels, features = X.execute(size=64)\nx = torch.from_numpy(data_set).float()\ny = torch.from_numpy(labels).long()\nw = torch.from_numpy(features['Sex_Bin']).float()\nz = torch.from_numpy(features['Age_Norm']).float()\nprint(labels)\nprint(y,w,z)\n\ntrain_data = torch.utils.data.TensorDataset(x, y, w, z)\n\nindices = list(range(len(train_data)))\n\n# Train test val split\ntrain_size = int(0.7 * len(train_data))\nval_size = int(0.1 * len(train_data))\ntest_size = len(train_data)-train_size-val_size\ntrain_indices, val_indices,test_indices = indices[:train_size], indices[train_size:train_size+val_size],indices[train_size+val_size:]\ntrain_sampler = SubsetRandomSampler(train_indices)\nval_sampler = SubsetRandomSampler(val_indices)\ntest_sampler =SubsetRandomSampler(test_indices)\n\n# Set up DL models\nfrom teamc_pipline import resnet152\nfrom teamc_pipline import recorder\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\nfrom functools import partial\nimport scipy\nimport tarfile\nimport nibabel as nib\nimport os\nimport zipfile\nfrom nilearn import plotting\nimport time\nfrom nilearn import image\nimport sys\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport torch.utils.model_zoo as model_zoo\n\nimport_path = import_path\ntorch.set_num_threads(48)\nprint(psutil.virtual_memory())\nif import_path == '':\n net = resnet152(pretrained=False, num_classes=2)\n model_num = 0\nelse:\n model_loader = torch.load(import_path)\n net = resnet152(pretrained=False, num_classes=5)\n net.load_state_dict(model_loader['state_dict'])\n model_num = model_loader['epoch']\n\ntrain_loader = torch.utils.data.DataLoader(train_data,sampler=train_sampler, batch_size=batch_size, num_workers=0)\nval_loader = torch.utils.data.DataLoader(train_data,sampler=val_sampler, batch_size=batch_size, num_workers=0)\ntest_loader = torch.utils.data.DataLoader(train_data,sampler=test_sampler ,batch_size=batch_size, num_workers=0)\nprint(psutil.virtual_memory())\n\n\n#init_lr = 0.01 #SELECT INITIAL LR\n\ncriterion = nn.CrossEntropyLoss()\n\ndef adjust_lr(optimizer, epoch,decay_f=decay_freq):\n lr = init_lr * (0.1 ** (epoch // decay_f)) #select decrease function\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef weight_reset(m):\n if isinstance(m, nn.Conv3d) or isinstance(m, nn.Linear):\n m.reset_parameters()\n\n\nif import_path == '':\n optimizer = optim.SGD(net.parameters(), lr=init_lr, momentum=0.9, weight_decay=5e-4)\n net.apply(weight_reset)\nelse:\n optimizer = optim.SGD(net.parameters(), lr=init_lr, momentum=0.9, weight_decay=5e-4)\n 
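# loading the saved optimizer state also restores SGD momentum buffers, so a\n # resumed run continues the trajectory of the interrupted one\n 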
optimizer.load_state_dict(model_loader['optimizer'])\n\n\n\n\n#Train\n\nnet.train(True)\nstart_num = model_num\n#epochs=5 #loop over the dataset multiple times\n\n# set up recorder\nrecording_log = recorder(epochs,5)\nfor epoch in range(epochs): \n\n train_class_total,train_class_correct = [0]*5,[0]*5\n val_class_total,val_class_correct = [0]*5,[0]*5\n\n adjust_lr(optimizer, epoch+model_num)\n for param_group in optimizer.param_groups:\n lr = param_group['lr']\n alpha_rate = lr\n running_loss = 0.0\n for i, data in enumerate(train_loader, 0):\n # get the inputs\n inputs, labels, sex, age = data\n optimizer.zero_grad()\n print(psutil.virtual_memory())\n outputs = net(inputs, sex, age)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n \n print_cycle = 1\n running_loss += loss.item()\n if i % print_cycle == print_cycle-1: # print every print_cycle mini-batches\n for param_group in optimizer.param_groups:\n print('[Epoch: %d, Mini Batch: %5d] loss: %.3f / Lr: %.5f' %\n (epoch + start_num + 1, i + 1, running_loss / print_cycle, param_group['lr']))\n running_loss = 0.0 \n del loss\n del outputs\n \n # Get the training labels\n with torch.no_grad():\n for data in train_loader: #train loader\n images, labels, sex, age = data\n outputs = net(images, sex, age)\n _, predicted = torch.max(outputs.data, 1)\n c = (predicted == labels).squeeze()\n for i in range(len(labels)): #batchsize of train loader\n label = labels[i]\n train_class_correct[label] += c[i].item()\n train_class_total[label] += 1\n\n del outputs\n del predicted\n del c\n\n print(np.sum(train_class_total))\n print(train_class_total)\n print(train_class_correct)\n\n net.train(False) \n with torch.no_grad():\n for data in val_loader: # validation loader\n images, labels, sex, age = data\n outputs = net(images, sex, age)\n _, predicted = torch.max(outputs.data, 1)\n c = (predicted == labels).squeeze()\n for i in range(len(labels)): #batchsize of validation loader\n label = labels[i]\n val_class_correct[label] += c[i].item()\n val_class_total[label] += 1\n del outputs\n del predicted\n del c\n\n net.train(True)\n recording_log.add_record(epoch,lr,train_class_total,train_class_correct,val_class_total,val_class_correct)\n out_file_name = record_name + str(epoch+start_num+1) + '.csv'\n recording_log.out_file(start_num,path=out_file_name)\n try:\n os.remove(record_name+str(epoch+start_num)+'.csv')\n except:\n print('No such file')\n file_name = state_name + str(epoch+start_num)\n stats = {'epoch':epoch+start_num+1,'state_dict': net.state_dict(),\n 'optimizer': optimizer.state_dict()}\n torch.save(stats, file_name)\n \n \n\nprint('Finished Training')\n\n\n\n\n","sub_path":"Pipline_templete_cleaned.py","file_name":"Pipline_templete_cleaned.py","file_ext":"py","file_size_in_byte":7205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"591805771","text":"from django.shortcuts import redirect, render, get_object_or_404\n\nfrom django.views.generic import TemplateView, DetailView, ListView\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.contrib.auth.models import Group\nfrom django.template.loader import render_to_string\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\n\nfrom .forms import UserRegistrationForm, AdherentEditForm, UserEditForm\nfrom .tokens import account_activation_token\nfrom .models import Adherent\n
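\n# Flow: register() stores the new user, adds it to the 'adherent' group and\n# e-mails a tokenized activation link; activate() verifies uid and token, marks\n# the e-mail confirmed and logs the user in.\n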
\n\n# Create your views here.\n# def user_login(request):\n# if request.method == 'POST':\n# form = LoginForm(request.POST)\n# if form.is_valid():\n# cd = form.cleaned_data\n# user = authenticate(request, username=cd['username'], password=cd['password'])\n# if user is not None:\n# if user.is_active:\n# login(request, user)\n# return HttpResponse('Authentifié avec succès')\n# else:\n# return HttpResponse('Compte désactivé')\n# else:\n# return HttpResponse('Identification invalide')\n# else:\n# form = LoginForm()\n# return render(request, 'account/templates/registration/login.html', {'form': form})\n\n# Create your views here.\n# class UserSignUpView(CreateView):\n# form_class = UserRegistrationForm\n# success_url = reverse_lazy('login')\n# template_name = 'account/signup.html'\n#\n# def form_valid(self, form):\n# user = form.save()\n# current_site = get_current_site(request=self.request)\n# subject = 'Activer mon inscription sur le site LeBlogBNC'\n# message = render_to_string('account/account_activation_email.html', {\n# 'user':user,\n# 'domain':current_site.domain,\n# 'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),\n# 'token': account_activation_token.make_token(user=user),\n# })\n# user.email_user(subject, message)\n# return redirect('accounts:account_activation_sent')\ndef register(request):\n if request.method == 'POST':\n user_form = UserRegistrationForm(data=request.POST)\n if user_form.is_valid():\n user = user_form.save(commit=False)\n user.set_password(user_form.cleaned_data['password'])\n user.save()\n g = Group.objects.get(name='adherent')\n g.user_set.add(user)\n Adherent.objects.create(user=user)\n current_site = get_current_site(request=request)\n subject = 'Activer mon inscription sur le site LeBlogBNC'\n message = render_to_string('account/account_activation_email.html', {\n 'user':user,\n 'domain':current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),\n 'token': account_activation_token.make_token(user=user),\n })\n user.email_user(subject, message, fail_silently=False)\n return redirect('account_activation_sent')\n else:\n user_form = UserRegistrationForm()\n return render(request, 'account/signup.html', {'user_form': user_form})\n\n\n@login_required\ndef edit(request):\n if request.method == 'POST':\n user_form = UserEditForm(instance=request.user, data=request.POST)\n adherent_form = AdherentEditForm(instance=request.user.adherent,\n data=request.POST,\n files=request.FILES)\n if user_form.is_valid() and adherent_form.is_valid():\n user_form.save()\n adherent_form.save()\n return redirect('blog:article_list')\n else:\n user_form = UserEditForm(instance=request.user)\n adherent_form = AdherentEditForm(instance=request.user.adherent)\n return render(request, 'account/edit.html', {\n 'user_form': user_form,\n 'adherent_form': adherent_form\n })\n\n\n### activate the account of the user who has just signed up ###\ndef activate(request, uidb64, token, backend='django.contrib.auth.backends.ModelBackend'):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n adherent = Adherent.objects.get(user=user)\n adherent.email_confirmed = True\n 
adherent.save()\n user.save()\n login(request, user, backend='django.contrib.auth.backends.ModelBackend')\n return redirect('index')\n else:\n return render(request, 'account/account_activation_invalid.html')\n\n\nclass AdherentActivationSent(TemplateView):\n template_name = 'account/account_activation_sent.html'\n\n\n# @login_required\n# def user_list(request):\n# users = User.objects.filter(is_active=True)\n# paginator = Paginator(users, 1)\n# page = request.GET.get('page')\n# page_obj = paginator.get_page(page)\n# return render(request, 'account/user/list.html', {'page_obj': page_obj})\n#\n# @login_required\n# def user_detail(request, username):\n# user = get_object_or_404(User, username=username, is_active=True)\n# return render(request, 'account/user/detail.html', {'user': user})\nclass UserDetail(DetailView):\n model = User\n template_name = 'account/user/detail.html'\n\n def get_object(self, queryset=None):\n username = self.kwargs.get('username')\n user = get_object_or_404(User, username=username, is_active=True)\n return user\n\n def get_context_data(self, **kwargs):\n context = super(UserDetail, self).get_context_data(**kwargs)\n user = self.get_object()\n context['adherent'] = Adherent.objects.get(user=user)\n return context\n\nclass UserList(ListView):\n model = User\n template_name = 'account/user/list.html'\n paginate_by = 3\n\n def get_queryset(self):\n users = super(UserList, self).get_queryset().filter(is_active=True)\n adherents = Adherent.objects.filter(user__in=users)\n return adherents\n\n\n\n\n\n\n\n","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"288806821","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 9 14:15:55 2020\n\n@author: b9903\n\"\"\"\n\nimport numpy as np\n\ndef getScaledImg(srcImg):\n imgH, imgW, imgZ = getImgSize(srcImg) \n if srcImg.dtype != np.uint8:\n return srcImg\n elif (imgZ != 0) and (imgZ != 3):\n return srcImg\n resultImg = np.copy(srcImg) \n resultImg = resultImg.astype(np.int16) # astype returns a copy, so the result must be reassigned\n if imgZ == 0:\n resultImg = getScaledImgByCh(resultImg) \n else:\n for ch in range(imgZ):\n resultImg[:,:,ch] = getScaledImgByCh(resultImg[:,:,ch])\n \n resultImg = resultImg.astype(np.uint8)\n return resultImg\n\ndef getScaledImgByCh(resultImg):\n mn = np.min(resultImg)\n mx = np.max(resultImg)\n if mn != mx:\n resultImg = (resultImg - mn)/(mx-mn)*255\n return resultImg\n\ndef getImgSize(img):\n imgZ = 0\n imgH = img.shape[0]\n imgW = img.shape[1]\n if img.ndim == 3:\n imgZ = img.shape[2]\n return imgH, imgW, imgZ","sub_path":"utility/getScaledImg.py","file_name":"getScaledImg.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"293413622","text":"import torch\nimport numpy as np\nimport torchvision\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nfrom torch.autograd import Variable\n\ntorch.backends.cudnn.deterministic = True\ntorch.manual_seed(9)\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n#device = torch.device(\"cuda:0\") # Uncomment this to run on GPU\n\n# Load data\ntransform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n# batch size\nbsize = 250\nlearning_rate = 1e-4\nT = 16\nR = 64\nM = 32\nnum_label = 10\nepoch_num = 300\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)\n
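# Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) shifts the [0, 1] tensors that\n# ToTensor() produces into roughly [-1, 1] per channel.\n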
trainloader = torch.utils.data.DataLoader(trainset, batch_size=bsize, shuffle=True, num_workers=2)\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=bsize, shuffle=False, num_workers=2)\n\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n\ndef onehott(label):\n batch_size = bsize\n nb_digits = 10\n # Dummy input that HAS to be 2D for the scatter (you can use view(-1,1) if needed)\n #y = torch.LongTensor([[label[0]], [label[1]], [label[2]], [label[3]], [label[4]], [label[5]], [label[6]], [label[7]], [label[8]], [label[9]]])\n y = label.view(-1, 1).type(torch.LongTensor)\n # One-hot encoding buffer created outside the loop and reused\n y_onehot = torch.FloatTensor(batch_size, nb_digits)\n\n # In your for loop\n y_onehot.zero_()\n y_onehot.scatter_(1, y, 1)\n return y_onehot\n\n\n# some preprocessing function\n# input shape [bsize,3,32,32], 3 represents the RGB channels; take R\ndef rgb(images):\n image_np = images.numpy()\n image_input = np.zeros(((bsize, 32, 32)))\n for i in range(0, 31):\n for j in range(0, 31):\n # image_input[:,i,j] =(image_np[:,0,i,j] + image_np[:,1,i,j] + image_np[:,2,i,j])/3\n image_input[:, i, j] = image_np[:, 0, i, j]\n return image_input\n\n\ndef feature_map(X):\n num_split = np.sqrt(T)\n piece_length = int(X.shape[1] ** 2 / T)\n temp = np.zeros(((bsize, piece_length, T)))\n upper1 = np.split(np.split(X, num_split, axis=1)[0], num_split, axis=2)\n upper2 = np.split(np.split(X, num_split, axis=1)[1], num_split, axis=2)\n upper3 = np.split(np.split(X, num_split, axis=1)[2], num_split, axis=2)\n upper4 = np.split(np.split(X, num_split, axis=1)[3], num_split, axis=2)\n\n for i in range(0, bsize):\n for j in range(0, 4):\n temp[i, :, j] = (upper1[j])[i, :, :].flatten()\n for j in range(4, 8):\n temp[i, :, j] = (upper2[j-4])[i, :, :].flatten()\n for j in range(8, 12):\n temp[i, :, j] = (upper3[j-8])[i, :, :].flatten()\n for j in range(12, 16):\n temp[i, :, j] = (upper4[j-12])[i, :, :].flatten()\n\n A = np.random.normal(0, 1, (M, piece_length))\n # np.random.rand returns a 1-D vector; reshape it into a row vector\n b = np.random.rand(M).reshape(1, M)\n\n #f = np.ones(((bsize, M, T)))\n #f = np.random.normal(0, 1, (bsize, M, T))\n f = np.ones(((bsize, M, T)))\n for k in range(0, T):\n for j in range(0, bsize):\n # careful: broadcasting applies here\n fm = np.matmul(A, temp[j, :, k]).reshape(1, M) #+ b\n #f[j, :, k] = torch.clamp(torch.from_numpy(fm), min=0)\n f[j,:,k] = torch.from_numpy(fm)\n #print(\"f shape:\", f.shape)\n #print(\"f:\", f)\n #print(f[bsize-1,:,T-1])\n return f\n\n\n# Output f is bsize * M * T\n\n# forward function\n# arguments: W: CP decomposition factors, f: feature map\ndef inner(weights_CP, images):\n f = torch.from_numpy(feature_map(rgb(images))).float()\n #print(\"fsize:\", f.size())\n y_pred = torch.zeros(bsize, R)\n\n # for batch_axis in range(0,bsize-1):\n # replaced by lifting a dimension and doing a matrix multiplication\n \"\"\"\n print('weights_cp 00', weights_CP[0,:,0])\n print(\"f 0\", f[:,:,0])\n a = torch.matmul(f[:, :, 1], weights_CP[1, :, 1])\n print('a', a.size())\n \"\"\"\n print(\"weight:\", weights_CP)\n for rank in range(0, R):\n temp = 1\n for t in range(0, T):\n temp = temp * torch.matmul(f[:, :, t], weights_CP[t, :, rank])\n #print(rank, t, \"temp\",temp)\n #print(\"temp:\", temp)\n y_pred[:, rank] = temp\n print('y_pred', y_pred.size(), y_pred)\n print(\"f:\", f)\n # torch.sum over dim=1 adds the R rank terms, leaving one score per sample\n 
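# In CP terms each rank contributes prod_t <f_t, w_{t,r}>, the product over the\n # T patches of per-patch inner products; summing over r completes the rank-R\n # tensor contraction.\n 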
y_predict = torch.sum(y_pred, 1)\n #print(\"predict000:\", y_predict[1])\n\n #y_predict = torch.matmul(y_pred, torch.ones(R))\n #print(\"y_predict size\", y_predict.size())\n return y_predict\n\n\ndef g_inner(weights_CP, images):\n f = torch.from_numpy(feature_map(rgb(images))).float()\n y_pred = torch.zeros(num_label, bsize, R)\n # for batch_axis in range(0,bsize-1):\n for l in range(0, num_label):\n for r in range(0, R):\n temp = torch.zeros(1)\n for t in range(1, T):\n temp = torch.max(temp, torch.matmul(f[:, :, t], weights_CP[l, t, :, r]))\n y_pred[l, :, r] = temp\n\n #y_predict = torch.matmul(y_pred, torch.ones(R, 1))\n y_predict = torch.sum(y_pred, 2)\n #y_predict is a vector\n return y_predict\n\n\n# randomly generate the components of the CP decomposition\nweights_CP = torch.randn(num_label, T, M, R, device=device, dtype=dtype, requires_grad=True)\nweights_CP = Variable(torch.randn(num_label, T, M, R, device=device, dtype=dtype), requires_grad=True)\n\n\"\"\"\nfor t in range(0, T - 1):\n #weights_CP[t, :, :] = Variable(torch.randn(M, R), requires_grad=True)\n weights_CP[t, :, :] = torch.rand(M, R, requires_grad=True)\n\"\"\"\nmode = input(\"which mode? mode1: sum-product NN, mode2: shallow CNN\")\n\nif mode == '1':\n sss = input(\"which training method?\")\n if sss == \"1\":\n for epoch in range(epoch_num+1):\n for i, data in enumerate(trainloader, 0):\n inputs, labels = data\n if inputs.size()[0] != bsize:\n continue\n #print(\"data shape\", inputs.size())\n #print(\"data:\", inputs.data[:, 0, :, :])\n y = labels.float()\n y_predict = inner(weights_CP, inputs)\n\n #print(\"y:\", y.size())\n #print(\"y_predict:\", y_predict.size())\n loss = (y_predict - y).pow(2).sum()\n print(epoch, i, \"loss:\", loss.item())\n\n loss.backward()\n\n print(\"grad\", torch.norm(weights_CP.grad).item())\n with torch.no_grad():\n #for i in range(0, T - 1):\n weights_CP -= learning_rate * weights_CP.grad\n #for i in range(0, T - 1):\n weights_CP.grad.zero_()\n\n print('Finished Training')\n\n else:\n param_list = []\n param_list.append(weights_CP)\n optimizer = torch.optim.Adam(param_list, lr=learning_rate)\n for epoch in range(epoch_num+1):\n\n for i, data in enumerate(trainloader, 0):\n inputs, labels = data\n\n if inputs.size()[0] != bsize:\n continue\n y = labels.float()\n y_predict = inner(weights_CP, inputs)\n loss = (y_predict - y).pow(2).sum()\n print(epoch, i, \"loss:\", loss.item())\n optimizer.zero_grad()\n\n loss.backward()\n\n optimizer.step()\n\n # Test part\n correct = 0\n total = 0\n with torch.no_grad():\n for data in testloader:\n images, labels = data\n outputs = inner(weights_CP, images)\n predicted = torch.round(outputs.data)\n total += labels.size(0)\n correct += (predicted == labels.float()).sum().item()\n\n print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))\n\nif mode == '2':\n sss = input(\"which training method?\")\n if sss == \"1\":\n for epoch in range(epoch_num+1):\n for i, data in enumerate(trainloader, 0):\n inputs, labels = data\n\n if inputs.size()[0] != bsize:\n continue\n y = labels.float()\n y = onehott(y)\n\n y_predict = g_inner(weights_CP, inputs)\n m = torch.nn.Softmax()\n y_predict = m(y_predict)\n loss = 1/2*(torch.t(y_predict) - y).pow(2).sum()\n print(epoch, i, \"loss:\", loss.item())\n\n loss.backward()\n\n with torch.no_grad():\n weights_CP -= learning_rate * weights_CP.grad\n weights_CP.grad.zero_()\n\n print('Finished Training')\n\n else:\n param_list = []\n param_list.append(weights_CP)\n optimizer = 
torch.optim.Adam(param_list, lr=learning_rate)\n for epoch in range(epoch_num+1):\n for i, data in enumerate(trainloader, 0):\n inputs, labels = data\n\n if inputs.size()[0] != bsize:\n continue\n\n y = labels.float()\n y = onehott(y)\n\n y_predict = g_inner(weights_CP, inputs)\n m = torch.nn.Softmax()\n y_predict = m(y_predict)\n loss = 1/2 * (torch.t(y_predict) - y).pow(2).sum()\n print(epoch, i, \"loss:\", loss.item())\n optimizer.zero_grad()\n\n loss.backward()\n\n optimizer.step()\n\n # Test part\n correct = 0\n total = 0\n with torch.no_grad():\n for data in testloader:\n images, labels = data\n outputs = g_inner(weights_CP, images)\n predicted = torch.round(outputs.data)\n total += labels.size(0)\n correct += (predicted == labels.float()).sum().item()\n\n print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))\n\n\n","sub_path":"CPkernel_ult_multi.py","file_name":"CPkernel_ult_multi.py","file_ext":"py","file_size_in_byte":9905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"38206652","text":"# import context_simulator.models as sim\n# from foundations import MemoryNotFound\n\n\nclass LanguageProcessor(object):\n\n def __init__(self, language, environment, memory):\n self.language = language\n self.memory = memory\n self.environment = environment\n self.message = \"\"\n\n ####################\n # Loading Messages #\n ####################\n # must do first!\n def load_message(self, message):\n \"\"\"\n :param message:\n :return:\n \"\"\"\n self.message = message[:-1]\n\n def return_message(self):\n \"\"\"\n :return:\n \"\"\"\n return self.message\n\n #################################\n # Punctuation Filtration System #\n #################################\n\n @staticmethod\n def is_punct(char):\n \"\"\"\n True for the ASCII punctuation ranges 33-47, 58-64, 91-96 and 123-126\n :param char:\n :return:\n \"\"\"\n value = ord(char)\n\n if 32 < value < 48:\n return True\n\n elif 57 < value < 65:\n return True\n\n elif 90 < value < 97:\n return True\n\n elif 122 < value < 127:\n return True\n\n else:\n return False\n\n def isolate_puctuation(self):\n \"\"\"\n In basic language there are the words, and then the punctuation.\n The sentence is tokenized so that punctuation and words aren't mixed together.\n\n NOTE:\n Do not get rid of punctuation; it provides the rests and tempo of the sentence.\n \"\"\"\n assert isinstance(self.message, str)\n new_msg = \"\"\n\n for char in self.message:\n if self.is_punct(char):\n new_msg += (\" \" + char + \" \")\n else:\n new_msg += char\n\n self.message = new_msg\n\n ################\n # Tokenization #\n ################\n\n def list_words(self):\n self.message = self.message.split(sep=' ')\n\n # add to models\n","sub_path":"persona/components/language_processor.py","file_name":"language_processor.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"300817739","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 9 16:50:09 2023\r\n\r\n@author: namam\r\n\"\"\"\r\nimport RPi.GPIO as GPIO # Import Raspberry Pi GPIO library\r\nimport time\r\n\r\nclass I2C1:\r\n def __init__(self,addr):\r\n global DEVICE_ADDR \r\n GPIO.setwarnings(False) # Ignore warning for now\r\n GPIO.setmode(GPIO.BOARD) # Use physical pin numbering\r\n GPIO.setup(29, GPIO.OUT, initial=GPIO.HIGH) # Set pin 29 to be an output pin, initially high\r\n GPIO.setup(31, GPIO.OUT, initial=GPIO.HIGH) # Set pin 31 to be an output pin, initially high\r\n 
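# Pin 29 drives SCL and pin 31 drives SDA (BOARD numbering); both start high\r\n # because an idle I2C bus rests with both lines high.\r\n 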
DEVICE_ADDR = addr\r\n\r\n def NOP10(self):\r\n j=10\r\n for i in range(0,10):\r\n j = j+1\r\n \r\n # delay 10 ms\r\n def dly10(self):\r\n start = time.time()\r\n while(1):\r\n stop = time.time()\r\n val = stop-start\r\n if val > 0.01:\r\n return\r\n\r\n\r\n def scl(self, val):\r\n if val !=0 :\r\n GPIO.output(29, GPIO.HIGH) # Turn on\r\n else:\r\n GPIO.output(29, GPIO.LOW) # Turn off\r\n self.NOP10()\r\n \r\n\r\n def sda(self, val):\r\n if val !=0 :\r\n GPIO.output(31, GPIO.HIGH) # Turn on\r\n else:\r\n GPIO.output(31, GPIO.LOW) # Turn off\r\n self.NOP10()\r\n \r\n\r\n def SendBit(self, val):\r\n self.sda(val)\r\n self.scl(1)\r\n self.scl(0)\r\n \r\n \r\n def starti2c(self):\r\n GPIO.setup(31, GPIO.OUT, initial=GPIO.HIGH) # Reconfigure SDA (pin 31) as an output, driven high\r\n # start condition: SDA falls while SCL is high\r\n self.sda(1)\r\n self.scl(1)\r\n self.sda(0)\r\n self.scl(0)\r\n \r\n \r\n def stopi2c(self):\r\n GPIO.setup(31, GPIO.OUT, initial=GPIO.HIGH) # Reconfigure SDA (pin 31) as an output, driven high\r\n # stop condition: SDA rises while SCL is high\r\n self.scl(0)\r\n self.sda(0)\r\n self.scl(1)\r\n self.sda(1)\r\n \r\n def txi2c(self, val):\r\n GPIO.setup(31, GPIO.OUT, initial=GPIO.HIGH) # Reconfigure SDA (pin 31) as an output, driven high\r\n a=0x80\r\n for i in range(8):\r\n if(a & val):\r\n self.SendBit(1)\r\n else:\r\n self.SendBit(0)\r\n a = a >> 1\r\n self.sda(0)\r\n GPIO.setup(31, GPIO.IN) \r\n #self.NOP10()\r\n self.scl(1)\r\n if GPIO.input(31)==0:\r\n bb0=1\r\n else:\r\n bb0=0\r\n self.scl(0)\r\n \r\n \r\n if bb0==1:\r\n return 1\r\n else:\r\n return 0\r\n \r\n \r\n def rxi2ct(self):\r\n GPIO.setup(31, GPIO.IN) \r\n self.NOP10()\r\n dpl=0\r\n for i in range(8):\r\n self.scl(1)\r\n dpl = dpl << 1\r\n if GPIO.input(31)==0:\r\n dpl = dpl | 0x01\r\n self.scl(0)\r\n return dpl\r\n\r\n\r\n def ACK(self):\r\n GPIO.setup(31, GPIO.OUT, initial=GPIO.HIGH) # Reconfigure SDA (pin 31) as an output, driven high\r\n self.sda(0)\r\n self.scl(1)\r\n self.scl(0)\r\n \r\n\r\n def NACK(self):\r\n GPIO.setup(31, GPIO.OUT, initial=GPIO.HIGH) # Reconfigure SDA (pin 31) as an output, driven high\r\n self.sda(1)\r\n self.scl(1)\r\n self.scl(0)\r\n \r\n\r\n\r\n def WRITE_BYTE(self, addr,data1):\r\n global DEVICE_ADDR \r\n GPIO.setup(31, GPIO.OUT, initial=GPIO.HIGH) # Reconfigure SDA (pin 31) as an output, driven high\r\n #self.stopi2c() \r\n count=10\r\n while(count != 0):\r\n self.starti2c()\r\n if(self.txi2c(DEVICE_ADDR)):\r\n break\r\n self.stopi2c()\r\n self.dly10() # 10 ms delay\r\n count = count-1\r\n if(count==0):\r\n return\r\n self.txi2c(addr)\r\n self.txi2c(data1)\r\n self.stopi2c()\r\n \r\n\r\n def READ_BYTE(self,addr):\r\n global DEVICE_ADDR \r\n GPIO.setup(31, GPIO.OUT, initial=GPIO.HIGH) # Reconfigure SDA (pin 31) as an output, driven high\r\n self.stopi2c() \r\n count=10\r\n while(count != 0):\r\n self.starti2c()\r\n if(self.txi2c(DEVICE_ADDR)):\r\n break\r\n self.dly10() # 10 ms delay\r\n self.stopi2c()\r\n count = count-1\r\n if(count==0):\r\n return\r\n self.txi2c(addr)\r\n \r\n self.starti2c()\r\n self.txi2c(DEVICE_ADDR+1)\r\n val = self.rxi2ct()\r\n self.stopi2c()\r\n return val\r\n ","sub_path":"i2c1.py","file_name":"i2c1.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"434917301","text":"\"\"\"\n1. Each consumer registers its own host\n2. Waits for tasks to arrive\n\"\"\"\n
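# A queued task (see worker() below) is assumed to be JSON of the shape\n# {\"name\": ..., \"url\": ..., \"method\": \"GET\"/\"POST\", \"headers\": {...},\n# \"data\": {...}, \"times\": remaining attempts} - inferred from the keys read\n# there, not documented upstream.\n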
import json\nimport logging\nimport redis\nimport socket\nimport asyncio\nimport aiohttp\nimport signal\nimport aredis\n\n\nclass Consumer:\n def __init__(self):\n self.CONFIG = self._get_config()\n self.cache = self._get_cache()\n self.HOSTNAME = self._get_hostname()\n self.init()\n self.RUNNING_SIG = True\n self.log_cache = ''\n\n def init(self):\n # _set_host is a coroutine; drive it to completion here, otherwise the\n # host is never actually registered\n asyncio.get_event_loop().run_until_complete(self._set_host())\n self._set_log()\n self._set_signal()\n\n def _set_log(self):\n logging.basicConfig(filename=self.CONFIG['log_path'], level=logging.INFO)\n\n def _get_config(self):\n with open('./config.json', 'r') as f:\n config = json.load(f)\n return config\n\n def _get_cache(self):\n pwd, host = self.CONFIG['broker_uri'].split('@')\n if pwd:\n cache_pool = aredis.ConnectionPool(host=host, password=pwd, decode_responses=True)\n else:\n cache_pool = aredis.ConnectionPool(host=host, decode_responses=True)\n cache = aredis.StrictRedis(connection_pool=cache_pool)\n # cache = redis.Redis(connection_pool=cache_pool)\n return cache\n\n def _get_hostname(self):\n hostname = socket.gethostname()\n return hostname\n\n async def _set_host(self):\n hosts = await self.cache.lrange('hosts', 0, 100)\n print(hosts)\n if self.HOSTNAME not in hosts:\n await self.cache.lpush('hosts', self.HOSTNAME)\n\n def _put_running_sig(self, sig, frame):\n self.RUNNING_SIG = False\n print('shutting down ...')\n\n def _set_signal(self):\n # Ctrl-C\n signal.signal(signal.SIGINT, self._put_running_sig)\n # nohup\n signal.signal(signal.SIGHUP, self._put_running_sig)\n # kill\n signal.signal(signal.SIGTERM, self._put_running_sig)\n\n async def worker(self):\n \"\"\"\n 1. Check RUNNING_SIG\n 1.1 Pop the top message, blocking (with a timeout) when none is available\n 1.1.1 Check the lock state and proceed only if the lock is free\n todo: add a retry counter\n :return:\n \"\"\"\n tasks_key = \"{}.task\".format(self.HOSTNAME)\n while self.RUNNING_SIG:\n cache_data = await self.cache.blpop(tasks_key, 5)\n if cache_data:\n cache_data = cache_data[1] # take the popped record (blpop returns a (key, value) pair)\n data = json.loads(cache_data)\n name = data.get('name', '')\n # times = int(data.get('times', 1)) - 1\n data['times'] = int(data.get('times', 1)) - 1\n if data['times'] < 0:\n continue\n lock_key = \"{}.{}.lock\".format(self.HOSTNAME, name)\n await self.cache.rpush(tasks_key, json.dumps(data)) # push the record back onto the bottom of the queue\n # check the state of the time lock\n if not await self.cache.get(lock_key):\n url = data['url']\n headers = data['headers']\n body_data = data['data']\n method = data['method'].upper()\n # re-arm the time lock\n await self.cache.set(lock_key, 1, int(await self.cache.get(name) or 1))\n logging.info(f\"{url} done\")\n await self.request_task(url, method, headers, body_data)\n\n async def request_task(self, url, method, headers, data):\n async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:\n if method == 'GET':\n async with session.get(url, headers=headers, timeout=5000) as resp:\n t = await resp.text()\n await Consumer.result(t)\n # print(t)\n elif method == 'POST':\n async with session.post(url, data=json.dumps(data), headers=headers, timeout=5000) as resp:\n t = await resp.text()\n await Consumer.result(t)\n # print(t)\n\n @staticmethod\n def handle(func):\n Consumer.result = func\n return\n\n @staticmethod\n async def result(resp):\n # await asyncio.sleep(0.1)\n print(resp)\n\n async def run(self):\n task = [asyncio.ensure_future(self.worker()) for i in range(10)]\n await asyncio.wait(task)\n\n def loop_task(self):\n while self.RUNNING_SIG:\n print(\"running ...\")\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(asyncio.ensure_future(self.run()))\n except Exception as e:\n if self.log_cache != 
e:\n logging.error(e)\n self.log_cache = e\n self.RUNNING_SIG = False\n\n def loop_stop(self):\n self.RUNNING_SIG = False\n","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"451407336","text":"from django.test import TestCase\nfrom tastypie.exceptions import NotFound\nfrom tastypie.contrib.contenttypes.resources import GenericResource\n\nfrom core.tests.mocks import MockRequest\nfrom content_gfk.api.resources import NoteResource, DefinitionResource\nfrom content_gfk.models import Note\n\n\nclass GenericResourceTestCase(TestCase):\n def setUp(self):\n self.resource = GenericResource([NoteResource, DefinitionResource])\n\n def test_bad_uri(self):\n bad_uri = '/bad_uri/'\n self.assertRaises(NotFound, self.resource.get_via_uri, bad_uri)\n\n def test_resource_not_registered(self):\n bad_uri = '/api/v1/quotes/1/'\n self.assertRaises(NotFound, self.resource.get_via_uri, bad_uri)\n\n def test_resource_passes_request(self):\n note = Note.objects.create(\n title='All aboard the rest train',\n content='Sometimes it is just better to lorem ipsum'\n )\n\n uri = '/api/v1/notes/1/'\n\n request = MockRequest()\n request.GET = {'format': 'json'}\n request.method = 'GET'\n\n result = self.resource.get_via_uri(uri, request=request)\n self.assertEqual(result, note)\n","sub_path":"tests/content_gfk/tests/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"156282594","text":"from fabric.decorators import task\nfrom fabric.operations import run, sudo\nfrom fabric.state import env\n\n\n@task\ndef apt_get_update():\n \"\"\"\n \"\"\"\n sudo('apt-get update')\n\n\n@task\ndef apt_get(*packages):\n \"\"\"\n Runs apt-get install command for all provided packages\n \"\"\"\n sudo('apt-get -y -f install %s' % ' '.join(packages), shell=False)\n\n\n@task\ndef add_apt_repository(repository_name):\n sudo('add-apt-repository -y %s' % repository_name)\n\n\n@task\ndef install():\n \"\"\"\n \"\"\"\n run(\"echo export LANGUAGE=en_US.UTF-8 >> ~/.bashrc\")\n run(\"echo export LC_ALL=en_US.UTF-8 >> ~/.bashrc\")\n add_apt_repository('ppa:certbot/certbot')\n add_apt_repository('ppa:jonathonf/python-3.6')\n apt_get_update()\n apt_get(\"certbot\", \"supervisor\", \"python-virtualenv\", \"build-essential\",\n \"libjpeg-dev\", \"libfreetype6\", \"libfreetype6-dev\", \"python3.6-dev\",\n \"zlib1g-dev\", \"wget\", \"libcurl4-openssl-dev\", \"libssl-dev\", \"git\",\n \"libffi-dev\", \"sqlite3\", \"libpq-dev\", \"xvfb\", \"xorg\", \"postgresql\",\n \"postgresql-contrib\", \"python-pip\", \"wget\", \"nginx\",\n \"rabbitmq-server\", \"npm\")\n git_clone()\n run(\"cd ~/; mkdir -p envs; cd envs; virtualenv {0} -p python3.6;\"\n .format(env.repo_name))\n sudo(\"mkdir -p /{0} /{0}/static /{0}/media /{0}/django_logs\".format(env.repo_name))\n sudo(\"chown -R {0} /{0} \".format(env.user, env.repo_name))\n sudo(\"mkdir -p /var/log/gunicorn /var/log/celery;\")\n sudo(\"touch /var/log/gunicorn/{0}.log\".format(env.repo_name))\n\n\n@task\ndef git_clone():\n \"\"\"\n \"\"\"\n run(\"cd ~/; git clone {}\".format(env.repository))\n","sub_path":"fabfile/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"237752610","text":"import re\nimport json\n\nFLAGS 
= re.VERBOSE | re.MULTILINE | re.DOTALL\nWHITESPACE = re.compile(r'[ \\t\\n\\r]*', FLAGS)\n\n# Decodes a stream of concatenated JSON documents into a list; can be used as\n# json.loads(text, cls=ConcatJSONDecoder)\nclass ConcatJSONDecoder(json.JSONDecoder):\n def decode(self, s, _w=WHITESPACE.match):\n s_len = len(s)\n\n objs = []\n end = 0\n while end != s_len:\n obj, end = self.raw_decode(s, idx=_w(s, end).end())\n end = _w(s, end).end()\n objs.append(obj)\n return objs","sub_path":"data_cleaning/loadJSON.py","file_name":"loadJSON.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"587316065","text":"import pandas as pd\nimport pytest\nfrom numpy.testing import assert_array_equal\nfrom pandas.testing import assert_frame_equal\n\nfrom powersimdata.data_access.data_access import SSHDataAccess\nfrom powersimdata.data_access.execute_list import ExecuteListManager\n\n\n@pytest.fixture\ndef data_access():\n data_access = SSHDataAccess()\n yield data_access\n data_access.close()\n\n\n@pytest.fixture\ndef execute_table(data_access):\n execute_list_manager = ExecuteListManager(data_access)\n return execute_list_manager.get_execute_table()\n\n\n@pytest.mark.integration\n@pytest.mark.ssh\ndef test_get_execute_file_local(execute_table):\n ecm = ExecuteListManager(None)\n from_local = ecm.get_execute_table()\n assert_frame_equal(from_local, execute_table)\n\n\n@pytest.mark.integration\n@pytest.mark.ssh\ndef test_get_execute_file_from_server_type(execute_table):\n assert isinstance(execute_table, pd.DataFrame)\n\n\n@pytest.mark.integration\n@pytest.mark.ssh\ndef test_get_execute_file_from_server_header(execute_table):\n header = [\"status\"]\n assert_array_equal(execute_table.columns, header)\n assert \"id\" == execute_table.index.name\n","sub_path":"powersimdata/data_access/tests/test_execute_csv.py","file_name":"test_execute_csv.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"441243161","text":"\"\"\"Problem Statement\r\n\r\nLargest Sum Contiguous Subarray\r\nWrite an efficient program to find the sum of the contiguous subarray, within a one-dimensional array of numbers, which has the largest sum.\r\nExpected Solution: O(n) Time, O(1) Space\r\nExplanation Link: https://medium.com/@rsinghal757/kadanes-algorithm-dynamic-programming-how-and-why-does-it-work-3fd8849ed73d\r\n\"\"\"\r\n\r\n\r\ndef kadanes_alg(arr):\r\n # best sum of a subarray that ends at the current index\r\n max_ending_here = arr[0]\r\n # best sum seen so far, e.g. kadanes_alg([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6\r\n answer = arr[0]\r\n for idx in range(1, len(arr)):\r\n max_ending_here = max(max_ending_here + arr[idx], arr[idx])\r\n if answer < max_ending_here:\r\n answer = max_ending_here\r\n return answer\r\n\r\n\r\n\r\n","sub_path":"Kadanes.py","file_name":"Kadanes.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"243867567","text":"'''\n Educational Codeforces Round 42\n Problem A\n \"Equator\"\n Solution: Jakub Ziółkowski (ziolekjj@gmail.com)\n'''\n\ndays = int(input())\nproblems = input().split(\" \")\ngeneral_sum = 0\nactual_sum = 0\nfor i in range(0, days):\n general_sum += int(problems[i])\n\nfor i in range(0, days):\n actual_sum += int(problems[i])\n if(actual_sum >= general_sum / 2):\n print(i+1)\n 
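# the answer is the first day whose running total reaches half of all problems\n 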
break","sub_path":"codeforces-solutions/educational-codeforces-round-42/problem-A/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"359293777","text":"from threading import Thread\nfrom time import sleep\nfrom subprocess import call\nfrom os import path, remove\nfrom collections import namedtuple, defaultdict\n\n#XXX workaround for shelf corruption under OSX - force shelf to use 'dumbdbm' module\nfrom platform import system as _os_descr\nif \"darwin\" in _os_descr().lower():\n import anydbm\n anydbm._defaultmod = __import__(\"dumbdbm\")\n#XXX end workaround\n\nimport shelve\nfrom sublime import status_message, set_timeout\nfrom prolog.analyze_parser import PrologAnalyzerOutputParser as PXmlParser\n\n_PredicateDef = namedtuple(\"PredicateDef\", \"name module arity file start end\")\n\nclass _Predicate(namedtuple(\"Predicate\", \"name module arity file startlines endlines\")):\n def __str__(self):\n return \"[%s] %s:%s/%s\" % (len(self.startlines), self.module, self.name, self.arity)\n\n def _lineno_in_predicate(self, lineno):\n return any(s <= lineno <= e for s, e in zip(self.startlines, self.endlines))\n\n_fpath = path.realpath(__file__)\n\ndef makeAnalyzer(sicstus_path, shelve_path):\n\n pl_path = _fpath.rsplit(\"/\", 1)[0] + \"/analyzer.pl\"\n\n class Analyzer(Thread):\n\n def run(self):\n self._initshelf()\n self._parser = PXmlParser()\n self._analyze_requests = []\n self._done = 0\n while not self._done:\n changed = 0\n while self._analyze_requests:\n self._analyze(self._analyze_requests.pop())\n changed = 1\n if changed:\n self._data.sync()\n sleep(0.05)\n\n def kill(self):\n self._done = 1\n\n def _initshelf(self):\n self._data = shelve.open(shelve_path, protocol=2, writeback=True)\n if not self._data.get(\"initialized\"):\n self._data[\"predicates\"] = []\n self._data[\"calls\"] = defaultdict(list)\n self._data[\"pred_by_file\"] = defaultdict(list)\n self._data[\"initialized\"] = True\n\n def start_analyze(self, fpath):\n self._analyze_requests.append(fpath)\n\n def find_predicates(self, name, module=\"\", arity=None):\n #XXX maybe slow for a huge amount of predicates\n n_len = len(name)\n m_len = len(module)\n #performance optimization to avoid filtering multiple times\n filters = {(0, 0): (p for p in self._data[\"predicates\"] if p.name[:n_len] == name),\n (1, 0): (p for p in self._data[\"predicates\"] if p.name[:n_len] == name and p.module[:m_len] == module),\n (0, 1): (p for p in self._data[\"predicates\"] if p.name[:n_len] == name and p.arity == arity),\n (1, 1): (p for p in self._data[\"predicates\"] if p.name[:n_len] == name and p.module[:m_len] == module and p.arity == arity)}\n #XXX turn generator into list here?\n return filters[(module != \"\", arity is not None)]\n\n def get_all_defs(self):\n return self._data[\"predicates\"]\n\n def get_calls(self, module, pred, arity):\n calls = self._data[\"calls\"][(module, pred, arity)]\n preds = []\n for file in set(c[0] for c in calls):\n fpreds = self._data[\"pred_by_file\"][file]\n preds.extend([p for p in fpreds if (file, p.module, p.name, p.arity) in calls])\n return preds\n\n def get_pred_by_pos(self, file, line):\n file_pred = self._data[\"pred_by_file\"][file]\n for p in file_pred:\n if p._lineno_in_predicate(line):\n return p\n\n def get_calls_by_pos(self, file, line):\n pred = self.get_pred_by_pos(file, line)\n return self.get_calls(pred.module, pred.name, pred.arity) if pred else []\n\n 
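# builds the SICStus invocation: load analyzer.pl, call analyze_file/2 on\n # fpath, write the XML result to resultpath, then halt\n 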
def _mk_analyze_cmd(self, fpath, resultpath):\n return [sicstus_path, \"-l\", pl_path, \"--goal\", \"analyze_file('%s','%s'),halt.\" % (fpath, resultpath)]\n\n def _analyze(self, fpath):\n resultpath = \"%s.xml\" % fpath\n with open(\"/dev/null\", \"wb\") as devnull:\n call(self._mk_analyze_cmd(fpath, resultpath), stdout=devnull, stderr=devnull)\n res = self._parser.parse(resultpath)\n self._update_files(res)\n remove(resultpath)\n set_timeout(lambda: status_message(\"Analyzed file %s\" % fpath), 1)\n\n def _update_files(self, res):\n self._retract_file_data(res[\"file\"])\n self._process_file(res)\n\n def _process_file(self, fdata):\n mod = fdata[\"module\"]\n file = fdata[\"file\"]\n for p in fdata[\"predicates\"]:\n pname = p[\"name\"]\n arity = p[\"arity\"]\n pred = _Predicate(pname, mod, arity, file,\n sorted([x-1 for x in p[\"startlines\"]]), sorted([x-1 for x in p[\"endlines\"]]))\n self._data[\"predicates\"].append(pred)\n self._data[\"pred_by_file\"][file].append(pred)\n for call in p[\"calls\"]:\n #TODO find actual predicate definition(s) that calls this\n cmod = call[\"module\"]\n if cmod == \"built_in\": continue #not interested in calls to builtins\n cname = call[\"name\"] if not call[\"name\"] == \"RECURSIVE_CALL\" else pname\n self._data[\"calls\"][(cmod, cname, int(call[\"arity\"]))].append((file, mod, pname, arity))\n\n def _retract_file_data(self, file):\n #remove all information extracted from 'file'\n self._data[\"predicates\"] = [p for p in self._data[\"predicates\"] if not p.file == file]\n if file in self._data[\"pred_by_file\"]:\n del self._data[\"pred_by_file\"][file]\n for k in self._data[\"calls\"].keys():\n self._data[\"calls\"][k] = [c for c in self._data[\"calls\"][k] if not c[0] == file]\n if not self._data[\"calls\"][k]:\n del self._data[\"calls\"][k]\n\n a = Analyzer()\n a.start()\n return a\n\n","sub_path":"plugin/prolog/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":6048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"39896395","text":"import numpy as np\n\n\ndef cov2corr(cov):\n \"\"\"\n Derive the correlation matrix from a covariance matrix\n :param cov: covariance matrix\n :type cov: ndarray\n :return: correlation matrix\n :rtype: ndarray\n \"\"\"\n std = np.sqrt(np.diag(cov))\n corr = cov / np.outer(std, std)\n corr[corr < -1], corr[corr > 1] = -1, 1 # numerical error\n return corr\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"80874961","text":"# The MIT License (MIT)\n\n# Copyright (c) 2021-2022 Krux contributors\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nfrom binascii import a2b_base64, b2a_base64\n\nB43CHARS = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:\"\nassert len(B43CHARS) == 43\n\nB58CHARS = \"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz\"\nassert len(B58CHARS) == 58\n\n\ndef base_decode(v, base):\n \"\"\"Decodes v from base encoding and returns the decoded bytes\"\"\"\n if base not in (43, 58, 64):\n raise ValueError(\"not supported base: {}\".format(base))\n\n if v == b\"\":\n return v\n\n # Base64 is a special case: We just use binascii's implementation without\n # performing bitcoin-specific padding logic\n if base == 64:\n return a2b_base64(v)\n\n chars = B58CHARS if base == 58 else B43CHARS\n long_value = 0\n power_of_base = 1\n for char in reversed(v):\n digit = chars.find(bytes([char]).decode())\n if digit == -1:\n raise ValueError(\"forbidden character {} for base {}\".format(char, base))\n long_value += digit * power_of_base\n power_of_base *= base\n result = bytearray()\n while long_value >= 256:\n div, mod = divmod(long_value, 256)\n result.append(mod)\n long_value = div\n if long_value > 0:\n result.append(long_value)\n n_pad = 0\n for char in v:\n if bytes([char]).decode() == chars[0]:\n n_pad += 1\n else:\n break\n if n_pad > 0:\n result.extend(b\"\\x00\" * n_pad)\n return bytes(reversed(result))\n\n\ndef base_encode(v, base):\n \"\"\"Encodes the data in v as base and returns as bytes\"\"\"\n if base not in (43, 58, 64):\n raise ValueError(\"not supported base: {}\".format(base))\n\n if v == b\"\":\n return v\n\n # Base64 is a special case: We just use binascii's implementation without\n # performing bitcoin-specific padding logic. 
b2a_base64 always adds a \\n\n # char at the end which we strip before returning\n if base == 64:\n return b2a_base64(v).rstrip()\n\n chars = B58CHARS if base == 58 else B43CHARS\n long_value = 0\n power_of_base = 1\n for char in reversed(v):\n long_value += power_of_base * char\n power_of_base <<= 8\n result = bytearray()\n while long_value >= base:\n div, mod = divmod(long_value, base)\n result.extend(chars[mod].encode())\n long_value = div\n if long_value > 0:\n result.extend(chars[long_value].encode())\n # Bitcoin does a little leading-zero-compression:\n # leading 0-bytes in the input become leading-1s\n n_pad = 0\n for char in v:\n if char == 0x00:\n n_pad += 1\n else:\n break\n if n_pad > 0:\n result.extend((chars[0] * n_pad).encode())\n return bytes(reversed(result))\n","sub_path":"src/krux/baseconv.py","file_name":"baseconv.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"551805187","text":"from django.contrib import admin\nfrom .models import Country, District, School\nfrom simple_salesforce import Salesforce\nfrom django.db import connection\n\nclass DistrictInline(admin.TabularInline):\n model = District\n\nclass SchoolInline(admin.TabularInline):\n model = School\n\nclass CountryAdmin(admin.ModelAdmin):\n view_on_site = True\n list_display = ['name','launch']\n list_filter = ['launch']\n search_fields = ['name']\n ordering = ['name']\n inlines = [\n DistrictInline,\n ]\n\n fieldsets = (\n (None, {\n \"fields\": (\"name\", \"launch\",\"sfid\", \"slug\"),\n }),\n )\n\nclass DistrictAdmin(admin.ModelAdmin):\n view_on_site = True\n list_display = ['name','work_began']\n list_filter = ['country','work_began']\n search_fields = ['name']\n ordering = ['name']\n inlines = [\n SchoolInline,\n ]\n\n fieldsets = (\n (None, {\n \"fields\": (\"name\",\"country\",\"sfid\",\"work_began\", \"slug\"),\n }),\n )\n\nclass SchoolAdmin(admin.ModelAdmin):\n view_on_site = True\n list_display = ['name','district']\n list_filter = ['country','district',\"active\"]\n search_fields = ['name']\n date_hierarchy = 'date_support_began'\n\n fieldsets = (\n (None, {\n \"fields\": (\"name\", \"district\",\"country\", \"slug\", \"schooltype\", \"date_support_began\", \"active\", \"geolatitude\", \"geolongitude\"),\n }),\n )\n\nadmin.site.register(School, SchoolAdmin)\nadmin.site.register(District, DistrictAdmin)\nadmin.site.register(Country, CountryAdmin)\n\n","sub_path":"data/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"529700218","text":"'''\nScript to test functionality from namelist creation to run and postprocessing.\n\nIt is recommended to run this in IPython.\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.ion()\nimport omfit_eqdsk, omfit_gapy\nimport sys, os\nfrom scipy.interpolate import interp1d\n\n# Make sure that package home is added to sys.path\nsys.path.append('../')\nimport aurora\n\ntry: # pass any argument via the command line to show plots\n plot = len(sys.argv)>1\nexcept:\n plot = False\n\n# read in default Aurora namelist\nnamelist = aurora.default_nml.load_default_namelist()\n\n# Use gfile and statefile in local directory:\nexamples_dir = os.path.dirname(os.path.abspath(__file__))\ngeqdsk = omfit_eqdsk.OMFITgeqdsk(examples_dir+'/example.gfile')\ninputgacode = omfit_gapy.OMFITgacode(examples_dir+'/example.input.gacode')\n\n# save kinetic 
profiles on a rhop (sqrt of norm. pol. flux) grid\nkp = namelist['kin_profs']\nkp['Te']['rhop'] = kp['ne']['rhop'] = np.sqrt(inputgacode['polflux']/inputgacode['polflux'][-1])\nkp['ne']['vals'] = inputgacode['ne']*1e13 # 1e19 m^-3 --> cm^-3\nkp['Te']['vals'] = inputgacode['Te']*1e3 # keV --> eV\n\n# set impurity species and sources rate\nimp = namelist['imp'] = 'Ar'\nnamelist['source_type'] = 'const'\nnamelist['Phi0'] = 2e20 # particles/s\n\n# Change radial resolution from default:\n#namelist['dr_0']=0.2\n#namelist['dr_1']=0.02\n\n# Change time resolution from default:\n#namelist['timing']['dt_increase'] = np.array([1.001, 1.0, 1.])\n#namelist['timing']['dt_start'] = np.array([1e-5, 5e-5, 0.001])\n#namelist['timing']['steps_per_cycle'] = np.array([1,1,1])\n#namelist['timing']['times'] = np.array([0.,0.05,0.2])\n\n# Now get aurora setup\nasim = aurora.core.aurora_sim(namelist, geqdsk=geqdsk)\n\n# check radial grid:\n_ = aurora.create_radial_grid(namelist,plot=plot)\n\n# check time grid:\n_ = aurora.create_time_grid(namelist['timing'], plot=plot)\n\n# set time-independent transport coefficients (flat D=1 m^2/s, V=-2 cm/s)\nD_z = 1e4 * np.ones(len(asim.rvol_grid)) # cm^2/s\nV_z = -2e2 * np.ones(len(asim.rvol_grid)) # cm/s\n\n# run Aurora forward model and plot results\nout = asim.run_aurora(D_z, V_z, plot=plot)\n\n# extract densities and particle numbers in each simulation reservoir\nnz, N_wall, N_div, N_pump, N_ret, N_tsu, N_dsu, N_dsul, rcld_rate, rclw_rate = out\n\n# add radiation\nasim.rad = aurora.compute_rad(imp, nz.transpose(2,1,0), asim.ne, asim.Te,\n                              prad_flag=True, thermal_cx_rad_flag=False, \n                              spectral_brem_flag=False, sxr_flag=False)\n\nif plot:\n    # plot radiation profiles over radius and time\n    aurora.slider_plot(asim.rvol_grid, asim.time_out, asim.rad['line_rad'].transpose(1,2,0),\n                       xlabel=r'$r_V$ [cm]', ylabel='time [s]', zlabel=r'Line radiation [$MW/m^3$]',\n                       labels=[str(i) for i in np.arange(0,nz.shape[1])],\n                       plot_sum=True, x_line=asim.rvol_lcfs)\n\n\n# plot Delta-Zeff profiles over radius and time\nasim.calc_Zeff()\n\nif plot:\n    # plot variation of Zeff due to simulated impurity:\n    aurora.slider_plot(asim.rvol_grid, asim.time_out, asim.delta_Zeff.transpose(1,0,2),\n                       xlabel=r'$r_V$ [cm]', ylabel='time [s]', zlabel=r'$\Delta$ $Z_{eff}$',\n                       labels=[str(i) for i in np.arange(0,nz.shape[1])],\n                       plot_sum=True,x_line=asim.rvol_lcfs)\n    \n\n# plot expected centrifugal asymmetry from finite rotation\nrhop_gacode = aurora.rad_coord_transform(inputgacode['rho'],'rhon','rhop', asim.geqdsk)\n\n# omega appears unreliable near axis in input.gacode\nomega = interp1d(rhop_gacode[3:], inputgacode['omega0'][3:],\n                 bounds_error=False,fill_value='extrapolate')(asim.rhop_grid)\n\n# obtain net Zeff in this discharge (exclude last point, unreliable)\nZeff = interp1d(rhop_gacode[:-1], inputgacode['z_eff'][:-1],\n                bounds_error=False,fill_value='extrapolate')(asim.rhop_grid)\n\n# Obtain estimates for centrifugal asymmetry and plot expected 2D distribution inside LCFS\nasim.centrifugal_asym(omega, Zeff, plot=plot)\n","sub_path":"examples/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"28724755","text":"\"\"\"\nConsider the set of integers in the interval [3712; 8432] that satisfy the following conditions:\n- their binary and quaternary (base-4) representations end with the same digit;\n- they are divisible by 13, 14 or 15.\nFind the count of such numbers and the minimum among them.\n\"\"\"\ncounter = 0\n\n
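# Note: the last binary digit of n is n % 2 and the last base-4 digit is n % 4,\n# so the digit comparison below is equivalent to n % 4 in (0, 1); the explicit\n# string construction from the original is kept for clarity.\n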
for i in range(8432, 3712 - 1, -1):\n    current_binary_number_i = i\n    current_quaternary_number_i = i\n    binary_number_i = ''\n    quaternary_number_i = ''\n    while current_binary_number_i > 0:\n        binary_number_i = str(current_binary_number_i % 2) + binary_number_i\n        current_binary_number_i //= 2\n    while current_quaternary_number_i > 0:\n        quaternary_number_i = str(current_quaternary_number_i % 4) + quaternary_number_i\n        current_quaternary_number_i //= 4\n    if binary_number_i[-1] == quaternary_number_i[-1]:\n        if i % 13 == 0 or i % 14 == 0 or i % 15 == 0:\n            counter += 1\n            min_i = i\nprint(counter, min_i)\n","sub_path":"tasks_17/homework/task_17.3.py","file_name":"task_17.3.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"633538869","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as DjangoUserAdmin\nfrom django.utils.translation import ugettext_lazy as _\nfrom .models import Profile, User, EthereumAccount, SophisticatedInvestor\nfrom .models import JWTAccount\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\nfrom django.conf import settings\nfrom django.contrib.admin.models import CHANGE, LogEntry\nfrom django.contrib.contenttypes.models import ContentType\nfrom smtplib import SMTPException\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserAdmin(DjangoUserAdmin):\n    \"\"\"Define admin model for custom User model with no username field.\"\"\"\n\n    fieldsets = (\n        (None, {'fields': ('email', 'password')}),\n        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',\n                                       'groups', 'user_permissions')}),\n        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),\n    )\n    add_fieldsets = (\n        (None, {\n            'classes': ('wide',),\n            'fields': ('email', 'password1', 'password2'),\n        }),\n    )\n    list_display = ('email', 'id', 'is_staff')\n    search_fields = ('email', 'id',)\n    ordering = ('email',)\n\n\nadmin.site.register(User, UserAdmin)\n\n\nclass ProfileAdmin(admin.ModelAdmin):\n    \"\"\"Define admin model for Profile model.\"\"\"\n    readonly_fields = ('user', 'first_name', 'last_name',\n                       'address_1', 'address_2',\n                       'postcode', 'suburb',\n                       'state', 'country',\n                       'date_of_birth',\n                       'email', 'eth_address',\n                       'country_from_id', 'verified',\n                       'created', 'modified'\n                       )\n    list_display = ('user', 'first_name', 'last_name', 'verified', 'created')\n    search_fields = ['first_name', 'last_name', 'email',\n                     'eth_address', 'user__email']\n\n\nadmin.site.register(Profile, ProfileAdmin)\n\n\nclass EthereumAccountAdmin(admin.ModelAdmin):\n    \"\"\"Define admin model for EthereumAccount model.\"\"\"\n    readonly_fields = ('eth_address', 'user')\n    list_display = ('eth_address', 'user')\n    search_fields = ['eth_address', 'user__email']\n\n\nadmin.site.register(EthereumAccount, EthereumAccountAdmin)\n\n\ndef si_approve(self, request, queryset):\n    affected_rows = queryset.update(approved=True)\n
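    # NOTE: queryset.update() issues a single SQL UPDATE and does not call each\n    # instance's save() or emit pre/post_save signals, so the LogEntry rows\n    # written below are the only audit trail for this change.\n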
    for object in queryset:\n        LogEntry.objects.log_action(\n            user_id=request.user.pk,\n            content_type_id=ContentType.objects.get_for_model(object).pk,\n            object_id=object.pk,\n            object_repr=str(object),\n            action_flag=CHANGE,\n            change_message=_('Approved')\n        )\n    self.message_user(\n        request,\n        \"%s sophisticated investor document(s) approved\" % affected_rows)\n    email_content = render_to_string(\n        'user_accounts/email/si_approved.txt'\n    )\n    try:\n        send_mail(\n            subject='Approved - Sophisticated Investor check',\n            message=email_content,\n            from_email=settings.DEFAULT_FROM_EMAIL,\n            recipient_list=[request.user.email],\n        )\n    except SMTPException as e:\n        logger.error(\n            f'Error sending email. {e}'\n        )\n\n\nsi_approve.short_description = \"Approve selected sophisticated investors\"\n\n\ndef si_decline(self, request, queryset):\n    affected_rows = queryset.update(approved=False)\n    for object in queryset:\n        LogEntry.objects.log_action(\n            user_id=request.user.pk,\n            content_type_id=ContentType.objects.get_for_model(object).pk,\n            object_id=object.pk,\n            object_repr=str(object),\n            action_flag=CHANGE,\n            change_message=_('Declined')\n        )\n    self.message_user(\n        request,\n        \"%s sophisticated investor document(s) declined\" % affected_rows)\n    email_content = render_to_string(\n        'user_accounts/email/si_declined.txt'\n    )\n    try:\n        send_mail(\n            subject='Declined - Sophisticated Investor check',\n            message=email_content,\n            from_email=settings.DEFAULT_FROM_EMAIL,\n            recipient_list=[request.user.email]\n        )\n    except SMTPException as e:\n        logger.error(\n            f'Error sending email. {e}'\n        )\n\n\nsi_decline.short_description = \"Decline selected sophisticated investors\"\n\n\nclass SophisticatedInvestorAdmin(admin.ModelAdmin):\n    readonly_fields = ('id', 'user', 'created', 'file', 'approved')\n    list_display = ('id', 'user', 'approved', 'created', 'modified')\n    search_fields = ['id', 'user', 'approved']\n    actions = [si_approve, si_decline]\n\n\nadmin.site.register(SophisticatedInvestor, SophisticatedInvestorAdmin)\n\n\nclass JWTAccountAdmin(admin.ModelAdmin):\n    \"\"\"Define admin model for JWTAccount model.\"\"\"\n    list_display = ['user', 'created', 'modified']\n    search_fields = ['user__email']\n\n\nadmin.site.register(JWTAccount, JWTAccountAdmin)\n","sub_path":"web/user_accounts/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"519702624","text":"import cv2\nimport time\nimport sys\nimport pandas as pd\nimport keras\nimport pickle as pkl\nimport os\nimport glob\nimport numpy as np\nfrom keras.layers import *\nfrom keras.models import *\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n\ndef gaussian_k(x0, y0, sigma, width, height):\n    \"\"\" Make a square gaussian kernel centered at (x0, y0) with sigma as SD.\n    \"\"\"\n    x = np.arange(0, width, 1, float) # (width,)\n    y = np.arange(0, height, 1, float)[:, np.newaxis] # (height,1)\n    return np.exp(-((x-x0)**2 + (y-y0)**2) / (2*sigma**2))\n\n\n
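# Quick sanity check (sketch): gaussian_k(2, 3, 1, 5, 5) gives a 5x5 array\n# whose maximum value 1.0 sits at row y0=3, column x0=2, decaying with\n# distance as exp(-d**2 / (2*sigma**2)).\n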
def generate_hm(height, width, landmarks, s=3):\n    \"\"\" Generate a full Heat Map for every landmark in an array\n    Args:\n        height    : The height of Heat Map (the height of target output)\n        width     : The width of Heat Map (the width of target output)\n        landmarks : [(x1,y1),(x2,y2),...] containing landmarks\n        s         : sigma (SD) of the Gaussian placed at each landmark\n    \"\"\"\n    Nlandmarks = len(landmarks)\n    hm = np.zeros((height, width, Nlandmarks), dtype=np.float32)\n    for i in range(Nlandmarks):\n        if not np.array_equal(landmarks[i], [-1, -1]):\n\n            hm[:, :, i] = gaussian_k(landmarks[i][0],\n                                     landmarks[i][1],\n                                     s, height, width)\n        else:\n            hm[:, :, i] = np.zeros((height, width))\n    return hm\n\n\ndef get_y_as_heatmap(df, height, width, sigma):\n\n    columns_lmxy = df.columns[:-1] # the last column contains Image\n    columns_lm = []\n    for c in columns_lmxy:\n        c = c[:-2]\n        if c not in columns_lm:\n            columns_lm.extend([c])\n\n    y_train = []\n    for i in range(df.shape[0]):\n        landmarks = []\n        for colnm in columns_lm:\n            x = df[colnm + \"_x\"].iloc[i]\n            y = df[colnm + \"_y\"].iloc[i]\n            if pd.isnull(x) or pd.isnull(y):\n                x, y = -1, -1\n            landmarks.append([x, y])\n\n        y_train.append(generate_hm(height, width, landmarks, sigma))\n    y_train = np.array(y_train)\n    return(y_train, df[columns_lmxy], columns_lmxy)\n\n\ndef load2d(test=False, width=96, height=96, sigma=5):\n    if test:\n        path = \"./test_data.pkl\"\n    else:\n        path = \"./train_data.pkl\"\n\n    df = pd.read_pickle(path)\n\n    # if test:\n    #     path = \"./test_data_aug.pkl\"\n    #     df.append(pd.read_pickle(path))\n    #     path = \"./train_data_aug.pkl\"\n    #     df.append(pd.read_pickle(path))\n\n    df = df.replace(to_replace='None', value=np.nan).dropna()\n\n    cols = df.columns[:-1]\n    for col in cols:\n        df[col] = df[col].astype(np.float32)\n\n    y, y0, nm_landmarks = get_y_as_heatmap(\n        df, height, width, sigma)\n\n    X = df['Image'].values.tolist()\n    X = np.array(X) # fix for weird shape (959, ) to (959, height, width)\n    X = np.expand_dims(X, axis=3)\n\n    return X, y, y0, nm_landmarks\n\n\ninput_height, input_width, sigma = 96, 96, 5\n\n\n# output shape is the same as input\noutput_height, output_width = input_height, input_width\n\nX_train, y_train, y_train0, nm_landmarks = load2d(test=False, sigma=sigma)\n# X_test, y_test, _, _ = load2d(test=True, sigma=sigma)\n\nprint(X_train.shape, y_train.shape, y_train0.shape)\n# print(X_test.shape, y_test.shape)\n\nNplot = y_train.shape[3]+1\n\nfor i in range(3):\n    fig = plt.figure(figsize=(20, 6))\n    ax = fig.add_subplot(2, Nplot//2, 1)\n    ax.imshow(X_train[i, :, :, 0], cmap=\"gray\")\n    ax.set_title(\"input\")\n    for j, lab in enumerate(nm_landmarks[::2]):\n        ax = fig.add_subplot(2, Nplot//2, j+2)\n        ax.imshow(y_train[i, :, :, j], cmap=\"gray\")\n        ax.set_title(str(j) + \"\\n\" + lab[:-2])\n    plt.show()\n\n# nClasses = 15\nnClasses = y_train.shape[3] # number of keypoint channels in the data\nn = 32*5\nnfmp_block1 = 64\nnfmp_block2 = 128\nIMAGE_ORDERING = \"channels_last\"\n\nimg_input = Input(shape=(input_height, input_width, 1))\n\n# Encoder Block 1\nx = Conv2D(nfmp_block1, (3, 3), activation='relu', padding='same',\n           name='block1_conv1', data_format=IMAGE_ORDERING)(img_input)\nx = Conv2D(nfmp_block1, (3, 3), activation='relu', padding='same',\n           name='block1_conv2', data_format=IMAGE_ORDERING)(x)\nblock1 = MaxPooling2D((2, 2), strides=(\n    2, 2), name='block1_pool', data_format=IMAGE_ORDERING)(x)\n\n# Encoder Block 2\nx = Conv2D(nfmp_block2, (3, 3), activation='relu', padding='same',\n           name='block2_conv1', data_format=IMAGE_ORDERING)(block1)\nx = Conv2D(nfmp_block2, (3, 3), activation='relu', padding='same',\n           name='block2_conv2', data_format=IMAGE_ORDERING)(x)\nx = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool',\n                 data_format=IMAGE_ORDERING)(x)\n\n# bottleneck\no = (Conv2D(n, (int(input_height/4), int(input_width/4)),\n            activation='relu', padding='same', name=\"bottleneck_1\", data_format=IMAGE_ORDERING))(x)\no = (Conv2D(n, (1, 1), activation='relu', padding='same',\n            name=\"bottleneck_2\", data_format=IMAGE_ORDERING))(o)\n\n\n
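# NOTE: with padding='same' and a kernel covering the entire 24x24 feature map,\n# each output position of bottleneck_1 above sees (nearly) the whole map, so it\n# roughly plays the role of a dense layer in this fully convolutional setup.\n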
# upsampling to bring the feature map size to be the same as the one from block1\no_block1 = Conv2DTranspose(nfmp_block1, kernel_size=(2, 2), strides=(\n    2, 2), use_bias=False, name='upsample_1', data_format=IMAGE_ORDERING)(o)\no = Add()([o_block1, block1])\noutput = Conv2DTranspose(nClasses, kernel_size=(2, 2), strides=(\n    2, 2), use_bias=False, name='upsample_2', data_format=IMAGE_ORDERING)(o)\n\n\n# Flatten the per-pixel outputs to match the (H*W*nClasses, 1) targets and compile\noutput = Reshape((output_width*input_height*nClasses, 1))(output)\nmodel = Model(img_input, output)\nmodel.summary()\nmodel.compile(loss='mse', optimizer=\"rmsprop\", sample_weight_mode=\"temporal\")\n\n\ndef find_weight(y_tra):\n    weight = np.zeros_like(y_tra)\n    count0, count1 = 0, 0\n    for irow in range(y_tra.shape[0]):\n        for ifeat in range(y_tra.shape[-1]):\n            if np.all(y_tra[irow,:,:,ifeat] == 0):\n                value = 0\n                count0 += 1\n            else:\n                value = 1\n                count1 += 1\n            weight[irow,:,:,ifeat] = value\n    print(\"N landmarks={:5.0f}, N missing landmarks={:5.0f}, weight.shape={}\".format(\n        count1,count0,weight.shape))\n    return(weight)\n\ndef flatten_except_1dim(weight, ndim=2):\n    '''\n    change the dimension from:\n    (a,b,c,d,..) to (a, b*c*d*..) if ndim = 2\n    (a,b,c,d,..) to (a, b*c*d*..,1) if ndim = 3\n    '''\n    n = weight.shape[0]\n    if ndim == 2:\n        shape = (n, -1)\n    elif ndim == 3:\n        shape = (n, -1, 1)\n    else:\n        raise ValueError(\"ndim must be 2 or 3\")\n    weight = weight.reshape(*shape)\n    return(weight)\n\n\nprop_train = 0.9\nNtrain = int(X_train.shape[0]*prop_train)\nX_tra, y_tra, X_val, y_val = X_train[:Ntrain], y_train[:Ntrain], X_train[Ntrain:], y_train[Ntrain:]\ndel X_train, y_train\n\nweight_val = find_weight(y_val)\nweight_val = flatten_except_1dim(weight_val)\ny_val_fla = flatten_except_1dim(y_val, ndim=3)\n\n# print(\"weight_tra.shape={}\".format(weight_tra.shape))\nprint(\"y_val_fla.shape={}\".format(y_val_fla.shape))\nprint(model.output.shape)\n\nnb_epochs = 300\nbatch_size = 8\nconst = 10\nhistory = {\"loss\": [], \"val_loss\": []}\nfor iepoch in range(nb_epochs):\n    start = time.time()\n\n    x_batch, y_batch = X_tra, y_tra\n    y_batch_fla = flatten_except_1dim(y_batch, ndim=3)\n\n    hist = model.fit(x_batch,\n                     y_batch_fla*const,\n                     validation_data=(X_val, y_val_fla*const,weight_val),\n                     batch_size=2,\n                     epochs=1,\n                     verbose=1)\n    history[\"loss\"].append(hist.history[\"loss\"][0])\n    history[\"val_loss\"].append(hist.history[\"val_loss\"][0])\n    end = time.time()\n    print(\"Epoch {:03}: loss {:6.4f} val_loss {:6.4f} {:4.1f}sec\".format(\n        iepoch+1, history[\"loss\"][-1], history[\"val_loss\"][-1], end-start))\n\nfor label in [\"val_loss\", \"loss\"]:\n    plt.plot(history[label], label=label)\nplt.legend()\nplt.show()\n\nmodel_json = model.to_json()\nwith open(\"model.json\", \"w\") as json_file:\n    json_file.write(model_json)\n# serialize weights to HDF5\nmodel.save_weights(\"model.h5\")\nprint(\"Saved model to disk\")\n\n
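# NOTE: to_json() + save_weights() stores architecture and weights separately;\n# model.save(\"model.h5\") would bundle both (plus the optimizer state) in a\n# single file if one artifact is preferred.\n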
# # load json and create model\n# json_file = open('model.json', 'r')\n# loaded_model_json = json_file.read()\n# json_file.close()\n# loaded_model = model_from_json(loaded_model_json)\n# # load weights into new model\n\n# loaded_model.load_weights(\"my_model_weights.hdf5\")\n# model = loaded_model\n# model.compile(loss='mse', optimizer=\"rmsprop\", sample_weight_mode=\"temporal\")\n\n# print(model.output.shape)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"349683127","text":"import ctypes\nimport numpy as np \nfrom numpy.ctypeslib import ndpointer\n\nar = np.array([1,2,3,4,5,6,7,8,9],dtype=np.int32).reshape([3,3])\n\n# ar = np.ones([4,5],dtype=\"int32\")\nso = ctypes.CDLL('./search.so')\nsearch = so.search\nsearch.argtypes = [ndpointer(ctypes.c_int), ctypes.c_int, ctypes.c_int]\nsearch.restype = None\nsearch(ar,*ar.shape)\nprint(ar)","sub_path":"testcpp.py","file_name":"testcpp.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"235969829","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 15 14:27:35 2020\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport math\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense,LSTM, Flatten,Conv2D,MaxPooling2D\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\nfrom sklearn.svm import SVR\r\nfrom sklearn.neural_network import MLPRegressor\r\nfrom sklearn.metrics import mean_absolute_error\r\nfrom sklearn.model_selection import KFold\r\nimport matplotlib.pyplot as plt\r\n\r\n\"\"\"\r\nData preprocessing\r\n\"\"\"\r\n# put all the data into one table\r\nfeatures=pd.read_csv(\"features.csv\").drop(columns=\"IsHoliday\")\r\nstores=pd.read_csv(\"stores.csv\")\r\ntrain=pd.read_csv(\"train.csv\")\r\ntest=pd.read_csv(\"test.csv\")\r\ndataset=train.merge(stores,how=\"left\").merge(features,how=\"left\")# merge joins several tables, keeping every row of train and the matching rows of the other files\r\n\r\n# keep only data from 2011-11-11 onward\r\ndataset=dataset[dataset.Date >= '2011-11-11']\r\ndataset=dataset.fillna(0)# fill NaN with 0\r\ndataset =pd.get_dummies(dataset, columns=[\"Type\",'IsHoliday']) # one-hot encoding\r\ndataset['Month']=pd.to_datetime(dataset['Date']).dt.month## pandas can also subtract datetimes\r\ndataset['Year']=pd.to_datetime(dataset['Date']).dt.year\r\ndataset['Day']=pd.to_datetime(dataset['Date']).dt.day\r\ndataset=dataset.drop(columns='Date')\r\n\r\n\r\n\r\n## thin wrappers around the sklearn regressors (sklearn also covers classification, clustering, dimensionality reduction, text mining, model tuning and preprocessing)\r\ndef knn(train_x,train_y,test_x,k):\r\n    knn = KNeighborsRegressor(n_neighbors=k)\r\n    knn.fit(train_x,train_y)\r\n    test_y=knn.predict(test_x)\r\n    return test_y\r\n\r\ndef extraTreesRegressor(train_x,train_y,test_x):\r\n    clf = ExtraTreesRegressor(n_estimators=100,max_features='auto', verbose=1,n_jobs=1)\r\n    clf.fit(train_x,train_y)\r\n    test_y=clf.predict(test_x)\r\n    return test_y\r\n\r\ndef randomForestRegressor(train_x,train_y,test_x):\r\n    clf = RandomForestRegressor(n_estimators=100,max_features='log2', verbose=1)\r\n    clf.fit(train_x,train_y)\r\n    test_y=clf.predict(test_x)\r\n    return test_y\r\n\r\ndef svm(train_x,train_y,test_x):\r\n    clf = SVR(kernel='rbf', gamma='auto')\r\n    clf.fit(train_x,train_y)\r\n    test_y=clf.predict(test_x)\r\n    return test_y\r\n\r\ndef nn(train_x,train_y,test_x):\r\n    clf = MLPRegressor(hidden_layer_sizes=(10,), activation='relu', verbose=1)\r\n    clf.fit(train_x,train_y)\r\n    test_y=clf.predict(test_x)\r\n    return test_y\r\n\r\n\r\ndef build_LSTM(train_x,train_y,test_x):\r\n    train_x=train_x.reshape(train_x.shape[0],1,train_x.shape[1])\r\n    test_x =test_x.reshape(test_x.shape[0],1,test_x.shape[1])\r\n    model=Sequential()\r\n
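    # NOTE: each sample was reshaped to (1, n_features) above, so the LSTMs see\r\n    # a sequence of length one; the stack below is effectively\r\n    #   (batch, 1, n_features) -> LSTM(64) -> LSTM(16) -> Dense(32) -> Dense(1)\r\n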
    model.add(LSTM(64, input_shape=(train_x.shape[1],train_x.shape[2]),return_sequences=True))\r\n    model.add(LSTM(16,return_sequences=False))\r\n    model.add(Dense(32,activation='relu'))\r\n    model.add(Dense(1))\r\n    model.compile(loss='MSE', optimizer='adam')\r\n    model.fit(train_x,train_y, epochs=50, batch_size=72,verbose=2, shuffle=False) # validation data could be added here\r\n    test_y=model.predict(test_x)\r\n    return test_y\r\n\r\ndef calculate_error(test_y, predicted, weights):\r\n    a=mean_absolute_error(test_y, predicted, sample_weight=weights)\r\n    return a\r\n\r\n\r\n# build the training set\r\ntrain_y=dataset.Weekly_Sales.values # dataset.Weekly_Sales returns a Series; take .values (the index is also available via .index)\r\ntrain_y=train_y.reshape(-1,1) # a 1-D array must be reshaped to (-1,1), otherwise it cannot be scaled\r\ntrain_x=dataset.drop(columns='Weekly_Sales')\r\n\r\n\r\n# scale everything to [0, 1]\r\nscaler1 = MinMaxScaler(feature_range=(0, 1))\r\ntrain_x = scaler1.fit_transform(train_x)\r\nscaler2 = MinMaxScaler(feature_range=(0, 1))\r\ntrain_y = scaler2.fit_transform(train_y)\r\n \r\n\r\n# 10-fold cross-validation: split the data and evaluate every model\r\nkf=KFold(n_splits=10)\r\nmin_error=np.float32('inf')\r\nfor train__x_index, validation_x_index in kf.split(train_x):\r\n    train_x_new=train_x[train__x_index]\r\n    train_y_new=train_y[train__x_index]\r\n    validation_x=train_x[validation_x_index]\r\n    validation_y=train_y[validation_x_index]\r\n    validation_y=scaler2.inverse_transform(validation_y)\r\n# build the sample weights\r\n    weights=np.zeros(shape=(len(validation_x),1))\r\n    for i in range (int(len(validation_x))):\r\n        if validation_x[i,15]==1:\r\n            weights[i]=1\r\n        else:\r\n            weights[i]=5\r\n# knn \r\n    validation_y_knn=knn(train_x_new,train_y_new,validation_x,5).reshape(-1,1)\r\n    validation_y_knn=scaler2.inverse_transform(validation_y_knn)\r\n    error_knn=calculate_error(validation_y,validation_y_knn,weights)\r\n# extraTreesRegressor \r\n    validation_y_extraTreesRegressor=extraTreesRegressor(train_x_new,train_y_new,validation_x).reshape(-1,1)\r\n    validation_y_extraTreesRegressor=scaler2.inverse_transform(validation_y_extraTreesRegressor)\r\n    error_extraTreesRegressor=calculate_error(validation_y,validation_y_extraTreesRegressor,weights)\r\n#randomForestRegressor\r\n    validation_y_randomForestRegressor=randomForestRegressor(train_x_new,train_y_new,validation_x).reshape(-1,1)\r\n    validation_y_randomForestRegressor=scaler2.inverse_transform(validation_y_randomForestRegressor)\r\n    error_randomForestRegressor=calculate_error(validation_y,validation_y_randomForestRegressor,weights)\r\n#svr \r\n    validation_y_svm=svm(train_x_new,train_y_new,validation_x).reshape(-1,1)\r\n    validation_y_svm=scaler2.inverse_transform(validation_y_svm)\r\n    error_svm=calculate_error(validation_y,validation_y_svm,weights)\r\n#nn \r\n    validation_y_nn=nn(train_x_new,train_y_new,validation_x).reshape(-1,1)\r\n    validation_y_nn=scaler2.inverse_transform(validation_y_nn)\r\n    error_nn=calculate_error(validation_y,validation_y_nn,weights)\r\n#LSTM \r\n    validation_y_build_LSTM=build_LSTM(train_x_new,train_y_new,validation_x).reshape(-1,1)\r\n    validation_y_build_LSTM=scaler2.inverse_transform(validation_y_build_LSTM)\r\n    error_build_LSTM=calculate_error(validation_y,validation_y_build_LSTM,weights)\r\n# record the best model\r\n    errors=[error_knn,error_extraTreesRegressor,error_randomForestRegressor,error_svm,error_nn,error_build_LSTM]\r\n    error_index=errors.index(min(errors)) # index of the smallest error\r\n    if errors[error_index] < min_error:\r\n        min_error=min(errors)\r\n        best_model_index=error_index\r\n    print('='*50)\r\n    \r\n    \r\n
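# Sketch: the if-chain below could collapse to a single lookup, e.g.\r\n#   model_names = ['knn', 'extraTreesRegressor', 'randomForestRegressor', 'svr', 'nn', 'lstm']\r\n#   print('The best model is ' + model_names[best_model_index])\r\n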
if best_model_index==0:\r\n    print('The best model is knn')\r\nif best_model_index==1:\r\n    print('The best model is extraTreesRegressor')\r\nif best_model_index==2:\r\n    print('The best model is randomForestRegressor')\r\nif best_model_index==3:\r\n    print('The best model is svr')\r\nif best_model_index==4:\r\n    print('The best model is nn')\r\nif best_model_index==5:\r\n    print('The best model is lstm')\r\n\r\n    \r\n    \r\n\r\n\r\n\r\ntest=test.merge(stores,how=\"left\").merge(features,how=\"left\")\r\ntest=test.fillna(0)# fill NaN with 0 \r\ntest =pd.get_dummies(test, columns=[\"Type\",'IsHoliday']) \r\ntest['Month']=pd.to_datetime(test['Date']).dt.month \r\ntest['Year']=pd.to_datetime(test['Date']).dt.year\r\ntest['Day']=pd.to_datetime(test['Date']).dt.day\r\n\r\n# weights for the error metric \r\nIsHoliday_False=test.IsHoliday_False.values.reshape(-1,1)\r\nweights_test=np.zeros(shape=(len(IsHoliday_False),1))\r\nfor i in range (int(len(IsHoliday_False))):\r\n    if IsHoliday_False[i]==1:\r\n        weights_test[i]=1\r\n    else:\r\n        weights_test[i]=5\r\n\r\n# normalization\r\ntest_x=test\r\ntest_x=test_x.drop(columns='Date')\r\nscaler3=MinMaxScaler(feature_range=(0,1))\r\ntest_x=scaler3.fit_transform(test_x)\r\n\r\n# run the chosen model (hard-coded to extraTreesRegressor here)\r\npredict_y=extraTreesRegressor(train_x,train_y,test_x).reshape(-1,1)\r\npredict_y=scaler2.inverse_transform(predict_y)\r\n\r\n# write the csv\r\nout_put_data=pd.DataFrame(np.zeros(shape=(len(predict_y),2)),columns=['id','Weekly_Sales'])\r\nout_put_data['Weekly_Sales']=predict_y\r\nout_put_data['id']=test['Store'].astype(str)+'_'+test['Dept'].astype(str)+'_'+test['Date'].astype(str)# astype converts to another dtype; dtype only reports the current one\r\nout_put_data.to_csv('data_predict.csv',index=False)\r\n\r\n","sub_path":"walmart_predicte.py","file_name":"walmart_predicte.py","file_ext":"py","file_size_in_byte":8142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"263021120","text":"day = 0\r\nwell_height = 125\r\ndaily_distance = 30\r\nnightly_distance = 20\r\nsnail_position = 0\r\n\r\nwhile snail_position < well_height:\r\n    snail_position += int(daily_distance) - int(nightly_distance)\r\n    day += 1\r\n    print(\"Day\", day, \"=\", snail_position, \"cm\")\r\n    if snail_position > well_height:\r\n        break\r\n\r\nprint(\"It will take\", day, \"days for the snail to come out of the well.\")\r\nprint(\"\\n\")\r\nday = 0\r\nwell_height = 125\r\nadvance_cm = [30, 21, 33, 77, 44, 45, 23, 45, 12, 34, 55]\r\nnightly_distance = 20\r\nsnail_position = 0\r\nsnail_rises = 0\r\n\r\nwhile advance_cm:\r\n    snail_rises = (advance_cm.pop(0))\r\n    snail_position += snail_rises - nightly_distance\r\n    day += 1\r\n    print(\"Day\", day, \"=\", snail_position, \"cm\")\r\n    if snail_position > well_height:\r\n        break\r\n\r\nprint(\"It will take\", day, \"days for the snail to come out of the well.\")\r\nprint(\"\\n\")\r\nday = 0\r\nwell_height = 125\r\nadvance_cm = [30, 21, 33, 77, 44, 45, 23, 45, 12, 34, 55]\r\nnightly_distance = 20\r\nsnail_position = 0\r\nsnail_rises = 0\r\naverage_progress = 0\r\nmax_displacement = 0\r\nmin_displacement = 0\r\nlist_advanced_cm = []\r\ndisplacement = [10, 21, 45, 126, 231, 361]\r\n\r\nwhile advance_cm:\r\n    snail_rises = advance_cm[day]\r\n    list_advanced_cm.append(snail_rises)\r\n    snail_position += snail_rises - nightly_distance\r\n    average_progress += snail_position\r\n    day += 1\r\n    print(\"Day\", day, \"=\", snail_position, \" - average:\", average_progress)\r\n    if snail_position > well_height:\r\n        break\r\nprint(\"\\n\")\r\nmax_displacement = max(list_advanced_cm)\r\nmin_displacement = min(list_advanced_cm)\r\nprint(\"\\n\")\r\nprint(\"It will take\", day, \"days for the snail to come out of the well. 
And the average progress is:\", average_progress/day)\r\nprint(max_displacement, min_displacement)\r\nprint(\"\\n\")\r\nfrom statistics import stdev\r\nprint(\"The Standard Deviation of advance_cm is % s\" %(stdev(displacement)))\r\nprint(\"\\n\")\r\nimport statistics\r\nprint(\"The Variance of the advance_cm is % s\" %(statistics.variance(displacement)))","sub_path":"the_snail_and_the_well.py","file_name":"the_snail_and_the_well.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"381832096","text":"first_List=['Wekjnkwwoe Aoij owieg oiwe fowe owigj oiwgw\\n', 'gwgowi gj anl\\n', 'an,dynvv wgowiew oiwjf oiajnk\\n', 'anv,xcvmwwi hfwioh oeoew\\n']\n#second_List_1= [['wekjnkwwoe', 'woij', 'owieg', 'oiwe', 'fowe', 'owigj', 'oiwgw'], ['gwgowi', 'gj', 'anl'], ['an,dynvv', 'wgowiew', 'oiwjf', 'oiajnk'], ['anv,xcvmwwi', 'hfwioh', 'oeoew']]\n\n#count_Strings= len(lis)\n#print(count_Strings)\n#print(lis[1])\n\nsecond_List=[]\nfor eachLine in first_List:\n second_List.append(eachLine.split())\n\n#print(second_List)\n\n#print(second_List_1[0])\n#print(second_List_1[1])\nfinal_List=[]\nfor each_String in second_List:\n for each_String_2 in each_String:\n final_List.append(each_String_2)\nprint(final_List)\n\nfor ea in final_List:\n for k in ea:\n print(k)\n\nlist_vokabel = ['a', 'e', 'i', 'o', 'u']\nif ('asdvjksn' in list_vokabel):\n print(True)\nelse:\n print( False )\ng='A'\nprint( ord(str(g)))\n\ndef anfangsbuchstabe(string_word):\n if( ord(string_word) > 64 and ord( string_word )< 91 ):\n return True\n else:\n return False\n\ncount=0\nfor start_string in final_List:\n if( anfangsbuchstabe(start_string[0]) ==True ):\n count +=1\nprint(count)\n","sub_path":"Uebung7/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"513552279","text":"import argparse\nimport glob\nimport os\nimport numpy as np\nimport SSM as sm\nimport logging\nfrom utils import ReadPLY, ConvertVTKPolyDataToNumpy\nfrom utils import WritePLY, ConvertNumpyToVTKPolyData\nimport tqdm\n\ndef check_input(x):\n n = len(x[0])\n for i in range(1, len(x)):\n if n != len(x[i]): return False\n return True\n \nif __name__ == '__main__':\n\n logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)\n\n parser = argparse.ArgumentParser(description='SSM Builder')\n parser.add_argument('-i', '--inputs', default=r'*.ply', help='Filename wildcard to ply files (.ply)', required=True)\n parser.add_argument('-o', '--output', default=r'ssm.npy', help='Output filename (.npy)', required=True)\n parser.add_argument('-r', '--refined_dir', default=r'', help='Output directory for refined inputs')\n args = parser.parse_args()\n\n input_files = glob.glob(os.path.join(args.inputs))\n\n X = []\n for filename in input_files:\n logging.info(filename)\n vertices, faces = ConvertVTKPolyDataToNumpy( ReadPLY(filename) )\n X.append(vertices)\n\n if not check_input(X):\n logging.error('Number of vertex must be same. 
Please perform the groupwise registation before build SSM.')\n\n X = np.array(X)\n\n sm = sm.SSM()\n logging.info('Start build SSM')\n model = sm.build(X)\n logging.info('End build SSM')\n options={'faces': faces}\n sm.save(args.output, options)\n\n logging.info(model)\n\n if args.refined_dir != '':\n refined_X = sm.refined_correspoinding_points\n faces = model['faces']\n common_path = os.path.commonpath(input_files)\n\n for filename, x in zip(input_files, refined_X):\n outfile = os.path.relpath(filename, common_path)\n outfile = os.path.join(args.refined_dir, outfile)\n os.makedirs(os.path.dirname(outfile), exist_ok=True)\n out_poly = ConvertNumpyToVTKPolyData(x, faces)\n WritePLY(out_poly, outfile)","sub_path":"SSMBuilder.py","file_name":"SSMBuilder.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"464981798","text":"# Калина Людмила, ИУ7-12\n#\n\ndef f(x):\n y = x ** 2\n return (y)\n\n# Вычисление интеграла\ndef parabolas(a, b, n):\n h = (b-a)/n\n I = f(a)+f(b)\n step = a\n for i in range(1, n):\n step += h\n if i % 2 == 1:\n I += 4* f(step)\n else:\n I += 2* f(step)\n I *= h/3\n return (I)\n\n# Интегрирование с заданной точностью\ndef getClose(eps):\n step = 1; I = 0\n while abs(parabolas(a, b, 2*step)-parabolas(a, b, step)) >= eps:\n step *= 2\n I = parabolas(a, b, step) \n return(step, I)\n\n\ntry:\n print('Интегрируемая функция - y = x**2')\n print('Интегрирование методом парабол.')\n print('Меню:')\n print('0-Выход')\n print('1-Bычислить интеграл с заданным количеством разбиений')\n print('2-Вычислить интеграл с заданной точностью')\n print('')\n s = int(input())\n \n while s != 0: \n if s == 1:\n a, b = map(float, input(\n'Введите пределы интегрирования: ').split())\n step = int(input('Введите количество шагов интегрирования: '))\n while step % 2 != 0:\n print('У данного метода количество шагов должно быть четным.')\n step = int(input('Введите другое количество шагов: '))\n\n I = parabolas (a, b, step)\n print('Получено приближенное значение ', '{:2.6f}'.format(I))\n \n elif s == 2:\n a, b = map(float, input(\n'Введите пределы интегрирования: ').split())\n eps = float(input('Введите необходимую точность вычислений: '))\n step = 0; I = 0\n step, I = getClose(eps)\n print('За ', step,'шагов получено приближенное значение ',\n'{:2.6f}'.format(I))\n\n else:\n print('Неверный ввод, возвращаем в меню.')\n \n print()\n print('Интегрируемая функция - y = x**2')\n print('Интегрирование методом парабол.')\n print('Меню:')\n print('0-Выход')\n print('1-Bычислить интеграл с заданным количеством разбиений')\n print('2-Вычислить интеграл с заданной точностью')\n print('')\n s = int(input()) \n \n print('Программа завершена.') \n \nexcept ValueError:\n print('Пожалуйста, вводите числа как полагается.')\n","sub_path":"меню.py","file_name":"меню.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"357774999","text":"\n\nfrom models.event import Event\nfrom models.host import Host\nfrom pymongo import MongoClient\n#from models.user import user\nimport requests\nimport time\nimport datetime\nimport json\nfrom utils.dateconverter import dateConverter\nfrom utils.emails import send_email\nnotified_people= ['julien.lebourg@ctg.lu']\nimport logging\n#TODO add environment variables to configure this container\nclient = MongoClient('tina_dev_tina-mongo', 27017,username='mongo', password='B186o73l7')\ndb = 
client['zabbix']\n\n\n\nclass ZabbixApi(object):\n    unresolvedProblems=[]\n    url=\"\"\n    token=\"\"\n    historicalEvents= []\n    problems=[]\n    \n    def __init__(self, url, user, password):\n        self.request = {\n            'jsonrpc': '2.0',\n            'method': '',\n            'params': { },\n            'id': 2\n        }\n        self.user = user\n        self.password=password\n        self.url = url\n\n    def __enter__(self):\n        self.login(self.user,self.password)\n        return self\n\n    def __exit__(self,*args):\n        self.logout()\n\n    def logout(self):\n        self.execute(method='user.logout', params={})\n        logging.warning(\"LOGGED OUT !\")\n\n    \n\n\n    def execute(self, method, params):\n        \n        self.request.update({ 'params' : { } })\n        self.request.update({ 'method': method })\n        self.request.update({ 'params': params })\n        #self.request.update({'auth': self.token})\n        #print (self.request)\n        res = requests.post(url=self.url,json=self.request)\n        #print (res.json())\n        return res.json()\n\n    def login(self,name, password):\n        params = {\n            'user': name,\n            'password': password\n        }\n        res= self.execute('user.login', params)\n        self.token = res[\"result\"]\n        logging.warning(\"LOGGED IN !\")\n        self.request.update({'auth': self.token})\n        \n    def getHostItems(self, id):\n        params = { \n            'output' : 'extend',\n            'hostids' : f'{id}',\n            \"search\": {\n                \"key_\": \"system\"\n            },\n            \"sortfield\": \"name\"\n        }\n        res = self.execute(\"item.get\", params)\n        for r in res['result']:\n            print (r['description'])\n        return res['result']\n    \n    def getHost(self, id):\n        \n        params = { 'output' : 'extend',\n                   \"hostids\" : id }\n        res = self.execute(\"host.get\", params)\n        #print (res['result'])\n        return Host(res['result'][0])\n    \n\n    def getMissingEvents(self, start = 1538352000): # default start: 2018-10-01 00:00 UTC\n        self.events=[]\n        day = 86400\n        end = start+day\n        today = int(time.time())\n        while start < today:\n            for event in self.getEventsInPeriod(start, end):\n                if len(event.description) > 2 and len(event.host.name) > 2: #sometimes we get erased events, with blank description and host. filtering like this seems to work\n                    logging.warning(\"found unresolved problems\")\n                    self.unresolvedProblems.append(event)\n                    print (len(self.unresolvedProblems))\n                    #print (event)\n            \n            start = end\n            end = start+day\n        \n    \n    \n    \"\"\" def gethistoricalMetaData(self):\n        evs= []\n        \n        for event in self.historicalEvents:\n            evs.append(event.to_dict())\n        \n        r = pd.DataFrame.from_records(evs)\n        self.descriptions = r['description'].unique()\n        self.hosts = r['host_short_name'].unique()\n        \n        \n        self.services = r['host_role'].unique()\n        self.problems = []\n    \"\"\"\n    \n\n    def updateUnresolvedProblems(self): # check whether old problems have been solved\n        #logging.warning(len(self.unresolvedProblems))\n        # iterate over a copy: removing from the list while iterating it directly\n        # would skip the element that follows each removal\n        for problem in list(self.unresolvedProblems):\n            \n            event = self.getEvent(problem.id)\n            if event.resolution_event !='0':\n                \n                self.problemSolved(event)\n                self.unresolvedProblems.remove(problem)\n                db.zabbix.find_one_and_update( { 'eventid' : problem.id }, { '$set' : { 'timeToResolution': event.timeToResolution, }}, upsert=True )\n            #else:\n                #logging.warning(f'problem is still open ! {problem}')\n        \n\n    def problemSolved(self,event):\n        logging.warning('prob solved')\n        #print (f'Problem has been solved ! 
{event.to_dict()}')\n\n\n\n \n def getAllHosts(self):\n params = {\n 'with_monitored_triggers' : True,\n 'output': \"extend\" \n }\n hosts=[]\n res = self.execute(\"host.get\", params)\n for h in res['result']:\n\n hosts.append(Host(h))\n return hosts\n\n def activeTriggers(self):\n \n params = {\n \"only_true\":\"1\",\n \"skipDependent\":\"1\",\n \"monitored\":\"1\",\n \"active\":\"1\",\n \"output\":\"extend\",\n \"expandDescription\":\"1\",\n \"selectHosts\":['host'],\n \"selectLastEvent\" :\"1\" }\n res = self.execute(\"trigger.get\", params)\n #for p in res['result']:\n # print(p)\n return res['result']\n\n \n def getAllProblems(self):\n params = {\n \"output\": \"extend\",\n \"selectAcknowledges\": \"extend\",\n \"selectTags\": \"extend\", \n \"sortfield\": [\"eventid\"],\n \"sortorder\": \"DESC\"\n }\n probs=[]\n res = self.execute(\"problem.get\", params)\n for p in res['result']:\n\n probs.append(Event(p))\n\n return probs\n def startMonitoringTriggers(self, ai, fn_open=None, fn_close=None, notif=None):\n currUnresolvedProblems={}\n while True:\n problems = self.activeTriggers()\n \n for problem in problems:\n #print (problem)\n if problem['value']==\"1\":\n if problem['triggerid'] not in currUnresolvedProblems.keys(): #NEW PROBLEM FOUND ! Lets use AI to determine if its important\n currUnresolvedProblems.update({problem['triggerid'] : problem } )\n event = self.getEvent(problem['lastEvent']['eventid'])\n prediction = ai.predict([event.to_dict()])[0]\n db.events.find_one_and_update( { 'eventid' : prediction['eventid'] }, { '$set' : prediction }, upsert=True )\n if fn_open != None:\n if prediction['proba'] > 0.50:\n fn_open(prediction)\n elif prediction['proba'] > 0.02:\n if notif !=None:\n notif(prediction)\n\n \n \n #TODO new problem\n #TODO problem resolved\n else:\n if problem['triggerid'] in currUnresolvedProblems.keys(): #we found a problem that was active, we need to update the original event\n print('problem resolved')\n \n #lets retrieve the old event\n oldeventid= currUnresolvedProblems[problem['triggerid']]['lastEvent']['eventid']\n oldevent = self.getEvent(oldeventid)\n newevent = self.getEvent(problem['lastEvent']['eventid'])\n oldevent.updateFromResolutionEvent(newevent)\n currUnresolvedProblems.pop(problem['triggerid'])\n db.events.find_one_and_update( { 'eventid' : oldevent.id }, { '$set' : oldevent.to_dict() }, upsert=True )\n if fn_close != None:\n fn_close(oldevent.to_dict()) \n \n time.sleep(30)\n \n def startMonitoring(self,ai, delay=90,added_delay=90):\n print (\"Monitoring started !\")\n delay=90\n lastCheckTime = int(time.time())-delay #init at current time -90 secs to simulate previous call and start immediately\n #every 90 seconds, we check for events of the past 90 + 90 seconds to - timenow -90seconds\n while True:\n timenow= int(time.time())\n if timenow > lastCheckTime+delay:\n from_ts = lastCheckTime-added_delay\n to_ts=timenow-added_delay\n logging.warning(f\"NEW REQUEST ON ZABBIX SERVER at {from_ts} : {to_ts}\")\n #self.updateUnresolvedProblems()\n problems = self.getProblems(from_ts, to_ts)\n p =[]\n for problem in problems:\n p.append(problem.to_dict())\n if len(p)>0:\n \n #here we could check if there is a value ==0 event\n predictions = ai.predict(p)\n for prediction in predictions:\n db['events'].find_one_and_update( { 'eventid' : prediction['eventid'] }, { '$set' : prediction }, upsert=True )\n with open('./data/results.txt', 'a') as the_file:\n the_file.write(json.dumps(prediction, default=dateConverter) + ',\\n')\n if prediction['proba'] > 
0.50:\n subject = f\"ALFRED Monitoring Event : high priority, we should create a ticket for this host {prediction['host_short_name']} in {prediction['host_environment']}\"\n body = \"This event has been found to have a high importance based on past data, here is a dump of the event data : \\n\"\n body = f\"{body} {json.dumps(prediction,default=dateConverter)}\"\n send_email(subject,body, notified_people)\n logging.warning(f\"WARNING !!!!!!! We should look at this event ! {prediction['description']}, proba : {prediction['proba']}\")\n logging.warning(prediction)\n elif prediction['proba'] > 0.02:\n subject = f\"ALFRED Monitoring Event : medium priority, we might want to look at this host : {prediction['host_short_name']} in {prediction['host_environment']}\"\n body = \"Probability of something going wrong is higher than usual. Here is a dump of the event data: \\n\"\n body = f\"{body} {json.dumps(prediction,default=dateConverter)}\"\n send_email(subject,body, notified_people)\n logging.warning(f\"HEY ! you might want to look at this event, probability is relelatively high on this one :\")\n logging.warning(prediction)\n else:\n logging.warning(f\"{prediction['time']} : Low importance event : {prediction['description']}, proba : {prediction['proba']} on host : {prediction['host_short_name']}\")\n #logging.warning(prediction)\n\n lastCheckTime = timenow\n time.sleep(delay)\n \n\n \n \n def getHttpTests(self):\n params = {\n 'output': \"extend\"\n }\n\n res = self.execute(\"httptest.get\", params)\n return res['result']\n \n def getEvents(self,id): \n params = {\n 'output': \"extend\",\n 'select_acknowledges': \"extend\",\n 'selectRelatedObject': \"extend\",\n 'selectTags':'extend',\n 'selectHosts' : 'extend',\n 'eventids' : id\n }\n res = self.execute(\"event.get\", params)\n #logging.warning(res['result'][0])\n events= []\n for r in res['result']:\n event=Event(r)\n events.append(event)\n return events\n\n def getEvent(self,id):\n \n params = {\n 'output': \"extend\",\n 'select_acknowledges': \"extend\",\n 'selectRelatedObject': \"extend\",\n 'selectTags':'extend',\n 'selectHosts' : 'extend',\n 'eventids' : id\n }\n res = self.execute(\"event.get\", params)\n #logging.warning(res['result'][0])\n event=Event(res['result'][0])\n return event\n\n def getTemplate(self, id):\n params = {\n 'templateids': id,\n 'output': \"extend\" \n }\n \n res = self.execute(\"template.get\", params)\n #print (res['result'][0])\n return res['result'][0]\n\n def getWebHosts(self):\n params = {\n 'with_httptests' : True,\n 'output': \"extend\" \n }\n hosts=[]\n res = self.execute(\"host.get\", params)\n #print (res['result'])\n for h in res['result']:\n\n hosts.append(Host(h))\n\n return hosts\n\n def getEventsInPeriod(self, from_ts, till_ts):\n params = {\n 'output': \"extend\",\n 'select_acknowledges': \"extend\",\n 'selectRelatedObject': \"extend\",\n 'selectTags':'extend',\n 'selectHosts' : 'extend',\n #'time_from': \"1546304400\",\n #'time_till': \"1551096473\",\n \n 'time_from': from_ts,\n 'time_till': till_ts,\n 'sortfield': [\"clock\", \"eventid\"],\n 'sortorder': \"desc\"\n }\n res = self.execute(\"event.get\", params)\n events = []\n \n #print (json.dumps(res['result']))\n for result in res['result']:\n #print (result)\n event = Event(result)\n events.append(event) \n #except:\n #logging.error(result)\n #print (f\"collected {len(events)} events\")\n\n return events\n def getProblems(self, from_ts, till_ts):\n params = {\n 'output': \"extend\",\n 'select_acknowledges': \"extend\",\n 'selectRelatedObject': 
\"extend\",\n 'selectTags':'extend',\n 'selectHosts' : 'extend',\n #'time_from': '1552003200',\n #'time_till': '1551222000',\n #1551052800\n #'time_from': '1552089600',\n 'time_from': from_ts,\n 'time_till': till_ts,\n 'sortfield': [\"clock\", \"eventid\"],\n 'sortorder': \"desc\"\n #'value': '1'\n }\n res = self.execute(\"event.get\", params)\n problems = []\n #print (len(res['result']))\n for result in res['result']:\n #problem = Event(result)\n \n if result['value']=='1':\n problem = Event(result)\n #logging.warning(problem.to_dict())\n if problem.description != \"\" and problem.host.name != \"\":\n problems.append(problem)\n return problems\n \n def getHostData(self, host):#THIS WILL BE USED TO GET METRICS DATA\n self.request = {\n \"jsonrpc\": \"2.0\",\n \"id\": \"1400175934496\",\n \"auth\": \"MyAuthToken\",\n \"method\": \"host.get\",\n \"params\": {\n \"selectInventory\": True,\n \"selectItems\": [\n \"name\",\n \"lastvalue\",\n \"units\",\n \"itemid\",\n \"lastclock\",\n \"value_type\",\n \"itemid\"\n ],\n \"output\": \"extend\",\n \"hostids\": \"TheHostID\",\n \"expandDescription\": 1,\n \"expandData\": 1\n }\n }\n","sub_path":"src/models/ZabbixApi.py","file_name":"ZabbixApi.py","file_ext":"py","file_size_in_byte":16538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"651135436","text":"from __future__ import unicode_literals\n\nimport os\nimport dvc.prompt as prompt\nimport dvc.logger as logger\n\nfrom dvc.command.base import CmdBase\n\n\nclass CmdGC(CmdBase):\n def run(self):\n msg = \"this will remove all cache except the cache that is used in \"\n if not self.args.all_branches and not self.args.all_tags:\n msg += \"the current git branch\"\n elif self.args.all_branches and not self.args.all_tags:\n msg += \"all git branches\"\n elif not self.args.all_branches and self.args.all_tags:\n msg += \"all git tags\"\n else:\n msg += \"all git branches and all git tags\"\n\n if self.args.projects is not None and len(self.args.projects) > 0:\n msg += \" of the current and the following projects:\"\n\n for project_path in self.args.projects:\n msg += \"\\n - %s\" % os.path.abspath(project_path)\n else:\n msg += \" of the current project.\"\n\n logger.warning(msg)\n\n msg = \"Are you sure you want to proceed?\"\n if not self.args.force and not prompt.confirm(msg):\n return 1\n\n self.project.gc(\n all_branches=self.args.all_branches,\n all_tags=self.args.all_tags,\n cloud=self.args.cloud,\n remote=self.args.remote,\n force=self.args.force,\n jobs=self.args.jobs,\n projects=self.args.projects,\n )\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n GC_HELP = \"Collect garbage.\"\n gc_parser = subparsers.add_parser(\n \"gc\", parents=[parent_parser], description=GC_HELP, help=GC_HELP\n )\n gc_parser.add_argument(\n \"-a\",\n \"--all-branches\",\n action=\"store_true\",\n default=False,\n help=\"Collect garbage for all branches.\",\n )\n gc_parser.add_argument(\n \"-T\",\n \"--all-tags\",\n action=\"store_true\",\n default=False,\n help=\"Collect garbage for all tags.\",\n )\n gc_parser.add_argument(\n \"-c\",\n \"--cloud\",\n action=\"store_true\",\n default=False,\n help=\"Collect garbage in remote repository.\",\n )\n gc_parser.add_argument(\n \"-r\", \"--remote\", help=\"Remote repository to collect garbage in.\"\n )\n gc_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Force garbage collection.\",\n )\n gc_parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n 
default=None,\n help=\"Number of jobs to run simultaneously.\",\n )\n gc_parser.add_argument(\n \"-p\",\n \"--projects\",\n type=str,\n nargs=\"*\",\n default=None,\n help=\"Collect garbage for all given projects.\",\n )\n gc_parser.set_defaults(func=CmdGC)\n","sub_path":"dvc/command/gc.py","file_name":"gc.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"399605162","text":"'''\nis there any run or triplet?\n'''\ndef isany(c):\n for i in range(0, 10):\n if c[i] == 3:\n return True\n if i <= 7:\n if c[i] >= 1 and c[i+1] >= 1 and c[i+2] >= 1:\n return True\n else:\n return False\n\n\n'''\nmain function\n'''\nT = int(input())\nfor test_case in range(1, T + 1):\n #일단 한줄 입력받기\n l = list(input().split())\n l = list(map(int, l))\n\n #player1과 player2의 카드 따로 저장\n p1 = [] #player1's card picking list\n p2 = [] #player2's card picking list\n for i in range(0, 6):\n p1.append(l[2*i])\n p2.append(l[2*i+1])\n \n #누가 이길 것인가?!\n c1 = [0]*10 #count list for player 1's cards\n c2 = [0]*10 #count list for player 2's cards\n for i in range(0, 6):\n c1[p1[i]] += 1\n if isany(c1): \n print('#%d %d' %(test_case, 1))\n break\n c2[p2[i]] += 1\n if isany(c2):\n print('#%d %d' %(test_case, 2))\n break\n else:\n print('#%d %d' %(test_case, 0))","sub_path":"swexpert_PS/3 greedy alg/3-3.py","file_name":"3-3.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"354756341","text":"import onfido\n\napi = onfido.Api(\"\")\n\nfake_document_uuid = \"58a9c6d2-8661-4dbd-96dc-b9b9d344a7ce\"\n\n\ndef test_perform_extraction(requests_mock):\n mock_upload = requests_mock.post(\"https://api.onfido.com/v3/extractions/\", json=[])\n\n api.extraction.perform(fake_document_uuid)\n\n assert mock_upload.called is True\n","sub_path":"tests/test_extraction.py","file_name":"test_extraction.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"244179481","text":" \nimport telebot\nfrom telebot import types\nfrom telebot.types import InlineKeyboardMarkup, InlineKeyboardButton\n\nbot = telebot.TeleBot(\"\", skip_pending=True)\n\n@bot.message_handler(commands=[\"start\"])\ndef menu(message):\n markup = InlineKeyboardMarkup(row_width=2)\n kanal = InlineKeyboardButton(\"📣 Kanalımız\", url=\"https://t.me/WixströfkdmraGod\")\n koruma = InlineKeyboardButton(\"Korumaya Al 🛡️\", url=\"https://t.me/kdhsnfkdbot?startgroup=a\")\n markup.add(kanal,koruma)\n bot.send_message(message.chat.id, \"\"\"\n*Spam Protector*\n_Gruplarınızı Korumak Amacı İle Yapılan Bir Bottur Botun Mantığı Spam Barındıran İçerikleri Gizlediği İçin Grubunuz Spama Düşmez Bölelikle Spam Tehlikesi Ortadan Kalkar_\n*Yapmanız Gerekenler*\n\n_•Grubunuza Eklemek. (Korumaya Al Butonuna Tıklayarak) 1.Adım_\n_•Full Yetkiye Sahip Olması İle Grubunuz Güvende Olacaktır. (Yetki Veriniz) 2.Adım_\n_•Grup İçerisinde /spam komutunu yazmak (Komutu Yazmak) 3. 
Adım_\n\n_LÜTFEN GRUBUNUZA EKLEDİKTEN VE YETKİ VERDİKTEN SONRA GRUP\nİÇERİSİNDE /spam YAZINIZ YOKSA KORUMA İŞLEMİ KAYIT ALITINA ALINMAZ_\n\"\"\", reply_markup=markup, parse_mode=\"Markdown\")\n \n@bot.message_handler(commands=[\"spam\"])\ndef menu(message):\n markup = InlineKeyboardMarkup(row_width=2)\n bilgi = InlineKeyboardButton(\"️Bilgilendirme 💬\", url=\"https://t.me/Saygisizlar\")\n markup.add(bilgi)\n \n bot.send_message(message.chat.id, \"\"\"\n*Yetki Kontrolü* ✅\n\n_Bot Görevindedir Yetki Alınması Durumunda Etkisiz Kalır_\n\n\n\n\n\"\"\", reply_markup=markup, parse_mode=\"Markdown\")\n\n\n\n \nprint(\"Bot Aktif\")\n\nif __name__ ==\"__main__\":\n bot.polling(none_stop=True)\n","sub_path":"spam.py","file_name":"spam.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"435765891","text":"import pygame \n\nclass removableSprite(pygame.sprite.Sprite):\n\tdef __init__(self, location, cell, *groups):\n\t\tsuper(removableSprite, self).__init__(*groups)\n\t\tself.image = pygame.image.load(cell['src'])\n\t\tself.defaultImage = self.image.copy()\n\t\tself.width = int(cell['width'])\n\t\tself.height = int(cell['height'])\n\t\tself.rect = pygame.Rect(location, (self.width,self.height))\n\t\tself.currLocation = location\n\t\tself.hasInteraction = False\n\t\tself.beenMoved = False\t\n\t\tself.location = location\n\n\n\tdef update(self, dt, game):\n\t\tif self.beenMoved == True:\n\t\t\t\n\t\t\tself.currLocation = (-100, -100)\n\t\t\tself.rect = pygame.Rect(self.currLocation, (self.width,self.height))\n\t\t\tself.beenMoved = False\n\t\t\tself.remove(self.groups())\n\t\t\tself.kill()\n\t\t\t","sub_path":"pylletTown-master/classes/removableSprite.py","file_name":"removableSprite.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"220566445","text":"#!/bin/python3\nfrom test_runner import run_tests, Check_Types\n\ntest_cases = [\n # Tuples folow this format:\n # (input_command, output_text, return_code)\n (\n 'echo -e \"\\e[31mCool |\\e[0m\"\\n',\n 'Cool |\\n',\n 0\n ),\n (\n 'ls main.h\\n',\n 'main.h',\n 0\n )\n]\n\nrun_tests(test_cases)\n","sub_path":"task-16.py","file_name":"task-16.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"137042854","text":"import random\r\nimport tkinter as tk\r\n\r\n### CANVAS ###\r\n\r\n# creating the window\r\nroot = tk.Tk()\r\nroot.title(\"Density Simulation\")\r\nroot.geometry(\"%dx%d+%d+%d\" % (1200, 800, 50, 100))\r\ncanvas = tk.Canvas(root, width=1200, height=800)\r\ncanvas.pack()\r\n\r\n# creating the layout\r\nWaterY = 500\r\nWaterX = 290\r\nWater = canvas.create_rectangle(1200, 800, WaterX, WaterY, fill=\"blue\")\r\nSide = canvas.create_rectangle(0, 0, 300, 800, fill=\"#c4c4c4\")\r\n\r\n### CANVAS ###\r\n\r\n### ENTRIES && LABELS ###\r\n\r\n# creating the input-boxes and labels lists\r\nEntrys = []\r\nLables = []\r\n\r\n''' What every num defines\r\n 0. Liquid Density\r\n 1. Liquid Height\r\n 2. Object Mass\r\n 3. Object Volume\r\n 4. Object Density\r\n 5. 
Object Color\r\n'''\r\n\r\n# setting the input-boxes and labels Tests\r\nTitles = [\"Liquid Density\", \"Liquid Height\", \"Object Mass\", \"Object Volume\", \"Object Density\", \"Object Color\"]\r\nArguments = [\"1.0\", str(WaterY), \"None\", \"None\", \"None\", \"#None\"]\r\n\r\n# creating the input-boxes and labels\r\nfor i in range(6):\r\n if i in [4, 5]:\r\n Entrys.append(tk.Label(root, text=\"0\"))\r\n Entrys[i].config(font=(\"Courier\", 15))\r\n Entrys[i].place(x=40, y=70 + 90 * i)\r\n else:\r\n Entrys.append(tk.Entry(root))\r\n Entrys[i].place(x=40, y=70 + 90 * i, width=220, height=30)\r\n Entrys[i].config(font=(\"Courier\", 20))\r\n Entrys[i].insert(0, Arguments[i])\r\n\r\n Lables.append(tk.Label(root, text=Titles[i] + \" : \"))\r\n Lables[i].config(font=(\"Courier\", 15))\r\n Lables[i].place(x=40, y=30 + i * 90)\r\n\r\n\r\n### ENTRIES && LABELS ###\r\n\r\n### Objects && Functions ###\r\n\r\n# an Object class defines every rectangle on the screen has 12 parameters that's used to draw the rectangle and implements physics\r\nclass Object:\r\n\r\n # default values for the variables\r\n def __init__(self, Mass=2.0, Volume=2.0, Density=1.0, Color=\"#fff\", X=500, Y=200, Flying=False):\r\n\r\n self.Mass = Mass\r\n self.Volume = Volume\r\n self.Density = Density\r\n self.DensityIn = Density\r\n self.Color = Color\r\n self.X = X\r\n self.Y = Y\r\n self.Width = 100\r\n self.Height = 100\r\n self.Speed = 1\r\n self.Flying = Flying\r\n self.HightLight = False\r\n\r\n self.Rectangle = canvas.create_rectangle(self.X, self.Y, self.X + self.Width, self.Y + self.Height,\r\n fill=self.Color, outline=self.Color, width=7)\r\n\r\n # a change function for every variable, if a protocol is needed\r\n def Change(self, NewAmount, Parmeter=\"Mass\"):\r\n if Parmeter == \"Mass\":\r\n self.Mass = NewAmount\r\n self.Density = self.Mass / self.Volume\r\n self.ChangeSpeed()\r\n elif Parmeter == \"Volume\":\r\n if NewAmount not in [0, 0.0]:\r\n self.Volume = NewAmount\r\n self.Density = self.Mass / self.Volume\r\n self.ChangeSpeed()\r\n elif Parmeter == \"Density\":\r\n self.Density = NewAmount\r\n self.Mass = self.Density * self.Volume\r\n self.ChangeSpeed()\r\n elif Parmeter == \"DensityIn\":\r\n self.DensityIn = NewAmount\r\n self.ChangeSpeed()\r\n elif Parmeter == \"Color\":\r\n self.Color = NewAmount\r\n elif Parmeter == \"X\":\r\n self.X = NewAmount\r\n elif Parmeter == \"Y\":\r\n self.Y = NewAmount\r\n elif Parmeter == \"Height\":\r\n self.Height = NewAmount\r\n elif Parmeter == \"Width\":\r\n self.Width = NewAmount\r\n\r\n # moving the object by the SpeedD, Density and LqDensity\r\n def Physics(self):\r\n global WaterY\r\n if not self.Flying:\r\n if self.Y < WaterY - self.Height / 2:\r\n self.Y += 15\r\n else:\r\n if self.Y + self.Speed + self.Height > root.winfo_height():\r\n self.Y = root.winfo_height() - self.Height\r\n elif self.Y + self.Speed < WaterY - self.Height / 2:\r\n self.Y = WaterY - self.Height / 2\r\n else:\r\n self.Y += self.Speed\r\n\r\n # redraw the rectangle\r\n def Draw(self):\r\n canvas.coords(self.Rectangle, self.X, self.Y, self.X + self.Width, self.Y + self.Height)\r\n if self.HightLight:\r\n canvas.itemconfig(self.Rectangle, outline=\"#33FF00\")\r\n else:\r\n canvas.itemconfig(self.Rectangle, outline=self.Color)\r\n\r\n # if the object intersects with a x,y or an object - the object part isn't used un this version\r\n def Intersects(self, X=50, Y=50, AObject=None):\r\n if AObject is None:\r\n if self.X < X < self.X + self.Width and self.Y < Y < self.Y + self.Height:\r\n return 
True\r\n else:\r\n if ((self.X > AObject.X + AObject.Width) or (self.X + self.Width < AObject.X) or\r\n (self.Y > AObject.Y + AObject.Height) or (self.Y + self.Height < AObject.Y)):\r\n return False\r\n return True\r\n return False\r\n\r\n # changing the speed relative form the Density and LqDensity\r\n def ChangeSpeed(self):\r\n if self.DensityIn in [self.Density, 0, 0.0] or self.Density in [0, 0.0]:\r\n self.Speed = 0\r\n elif self.Density < self.DensityIn:\r\n self.Speed = -(self.DensityIn / self.Density)\r\n elif self.Density > self.DensityIn:\r\n self.Speed = self.Density / self.DensityIn\r\n\r\n\r\n# a cool function that's removes the letters and symbols from a string and give back an float and bool to detect\r\ndef IsFloat(Float):\r\n FList = [Dig for Dig in str(Float) if Dig in [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \".\"]]\r\n if not FList == []:\r\n if not (FList.count(\".\") > 1 or \".\" in [FList[0], FList[-1]]):\r\n NFloat = \"\"\r\n for Dig in FList:\r\n NFloat += Dig\r\n return [True, float(NFloat)]\r\n return [False, \"\"]\r\n\r\n\r\n# create a random object with random parameters\r\ndef RandObject():\r\n RColor = \"%06x\" % random.randint(0, 0xFFFFFF)\r\n RM, RV = float(random.randint(1, 10)), float(random.randint(1, 10))\r\n Objects.append(Object(X=100, Y=600, Color=f\"#{RColor}\", Flying=True, Mass=RM, Volume=RV))\r\n\r\n\r\n### Objects && Functions ###\r\n\r\n### Arguments ###\r\n\r\nOM, OV, LD, OD = 0, 0, 0, 0\r\n\r\nClicked = False\r\nFirstStick = False\r\nMoving = False\r\nObjects = []\r\nObjectChose = -1\r\n\r\nRandObject()\r\n\r\n\r\n### Arguments ###\r\n\r\n### Mouse ###\r\n\r\n\r\n# if the mouse is pressed an Clicked bool is True\r\ndef Mouse(event, Click):\r\n global Clicked\r\n Clicked = Click\r\n\r\n\r\nroot.bind('', lambda event: Mouse(event, True))\r\nroot.bind('', lambda event: Mouse(event, False))\r\n\r\n\r\n### Mouse ###\r\n\r\n### Main While Loop ###\r\n\r\n\r\ndef Loop():\r\n # input and refreshing variables\r\n global OM, OV, OD, LD, ObjectChose, Clicked, FirstStick, Moving, WaterY\r\n OM, OV, LD = Entrys[2].get(), Entrys[3].get(), Entrys[0].get()\r\n MouseX = root.winfo_pointerx() - root.winfo_rootx()\r\n MouseY = root.winfo_pointery() - root.winfo_rooty()\r\n\r\n # calculating the density\r\n if IsFloat(OV)[0] and IsFloat(OM)[0] and not (0.0 in [IsFloat(OM)[1], IsFloat(OV)[1]]):\r\n OD = str(IsFloat(OM)[1] / IsFloat(OV)[1])\r\n else:\r\n OD = \"Null\"\r\n\r\n # setting the chosen Object with the new parameters\r\n if IsFloat(OV)[0]:\r\n Objects[ObjectChose].Change(IsFloat(OV)[1], \"Volume\")\r\n if IsFloat(OM)[0]:\r\n Objects[ObjectChose].Change(IsFloat(OM)[1], \"Mass\")\r\n\r\n # refreshing the labels and water\r\n Entrys[4].config(text=str(OD))\r\n if IsFloat(Entrys[1].get())[0]:\r\n WaterY = root.winfo_height() - int(IsFloat(Entrys[1].get())[1])\r\n if WaterY < 70:\r\n WaterY = 70\r\n canvas.coords(Water, 1200, 800, WaterX, WaterY)\r\n\r\n # refreshing the objects\r\n for ObjP in range(len(Objects)):\r\n Objects[ObjP].Physics()\r\n Objects[ObjP].Draw()\r\n if IsFloat(LD)[0] and IsFloat(LD)[1] is not None:\r\n Objects[ObjP].Change(IsFloat(LD)[1], \"DensityIn\")\r\n\r\n # checking for a new height-lighted object and replacing it\r\n if Objects[ObjP].Intersects(MouseX, MouseY) and not Clicked and not FirstStick:\r\n if ObjectChose != ObjP:\r\n Objects[ObjectChose].HightLight = False\r\n Objects[ObjP].HightLight = True\r\n Entrys[2].delete(0, 'end')\r\n Entrys[2].insert(0, str(Objects[ObjP].Mass))\r\n 
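# sync the Volume entry and the colour label with the newly\r\n                # highlighted object so the side panel matches the selection\r\n                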
Entrys[3].delete(0, 'end')\r\n                Entrys[3].insert(0, str(Objects[ObjP].Volume))\r\n                Entrys[5].config(text=Objects[ObjP].Color)\r\n                ObjectChose = ObjP\r\n\r\n    # if the mouse is intersecting and clicked, start moving the object\r\n    if Clicked and Objects[ObjectChose].Intersects(MouseX, MouseY):\r\n        Moving = True\r\n    if not Clicked:\r\n        Moving = False\r\n\r\n    # change the object's x,y relative to the mouse position, keeping it inside the bounds\r\n    if Moving and not FirstStick:\r\n        Objects[ObjectChose].Flying = True\r\n        if ObjectChose == len(Objects) - 1:\r\n            FirstStick = True\r\n        else:\r\n            if MouseX <= WaterX + 10:\r\n                Objects[ObjectChose].Change(WaterX + 10, \"X\")\r\n            elif MouseX >= root.winfo_width() - Objects[ObjectChose].Width:\r\n                Objects[ObjectChose].Change(root.winfo_width() - Objects[ObjectChose].Width, \"X\")\r\n            else:\r\n                Objects[ObjectChose].Change(MouseX, \"X\")\r\n            if MouseY <= 0:\r\n                Objects[ObjectChose].Change(0, \"Y\")\r\n            elif MouseY >= root.winfo_height() - Objects[ObjectChose].Height:\r\n                Objects[ObjectChose].Change(root.winfo_height() - Objects[ObjectChose].Height, \"Y\")\r\n            else:\r\n                Objects[ObjectChose].Change(MouseY, \"Y\")\r\n\r\n    # if the object being dragged is the last in the list (the new one), only let the user drop it inside the bounds\r\n    if FirstStick:\r\n        Objects[ObjectChose].Change(MouseX, \"X\")\r\n        Objects[ObjectChose].Change(MouseY, \"Y\")\r\n        if WaterX < MouseX < root.winfo_width() and 0 < MouseY < root.winfo_height():\r\n            FirstStick = False\r\n            RandObject()\r\n\r\n    if not ObjectChose == len(Objects) - 1 and not Clicked and ObjectChose >= 0:\r\n        Objects[ObjectChose].Flying = False\r\n\r\n    # loop the loop :D\r\n    root.after(50, Loop)\r\n\r\n\r\n# start the loop\r\nroot.after(50, Loop)\r\nroot.resizable(False, False)\r\nroot.mainloop()\r\n\r\n### Main While Loop ###\r\n","sub_path":"Simulation.py","file_name":"Simulation.py","file_ext":"py","file_size_in_byte":10512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"402224303","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\n# hyperparameters\r\nEPOCH = 100\r\nFILENAME = 'num.csv'\r\nBATCHSIZE = 10\r\nLEARN_RATE = 0.01\r\n\r\n# yields the data in minibatches\r\nclass minibatch(object):\r\n    def __init__(self, filename, batch_size=10):\r\n        self.f = open(filename, 'r')\r\n        self.batch_size = batch_size\r\n    def __iter__(self):\r\n        data_batch = []\r\n        label_batch = []\r\n        for line in self.f:\r\n            ls = line.strip().split(',')\r\n            ls = list(map(float, ls))\r\n            data_batch.append(ls[:-1])\r\n            label_batch.append(int(ls[-1]))\r\n            if len(data_batch)==self.batch_size:\r\n                yield data_batch, label_batch\r\n                data_batch = []\r\n                label_batch = []\r\n    def close(self):\r\n        self.f.close()\r\n\r\n\r\n# network architecture: a single 1-D conv layer followed directly by a softmax classifier\r\ninput_data = tf.placeholder(name='input_data', shape=[None, 10], dtype=tf.float32)\r\nlabel = tf.placeholder(name='label', shape=[None], dtype=tf.int32)\r\nlearn_rate = tf.placeholder(name='learn_rate', shape=[], dtype=tf.float32)\r\n\r\ncnn_input = tf.reshape(input_data, [-1, 10, 1])\r\n#filter_mat = tf.get_variable(name='filters', dtype=tf.float32, initializer=[[[1.]],[[1.]],[[1.]]])\r\nfilter_mat = tf.get_variable(name='filters', shape=[3,1,1], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())\r\nconv = tf.nn.conv1d(cnn_input, filters=filter_mat, stride=1, padding='VALID', name='conv1d')\r\nlogits = tf.reshape(conv, [-1, 8])\r\npred = tf.argmax(logits, -1)\r\nloss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logits)\r\n
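# train_op wires the Adam optimizer to the learn_rate placeholder defined above;\r\n# minimize() both computes the gradients and applies the parameter update\r\n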
train_op = tf.train.AdamOptimizer(learn_rate).minimize(loss)\r\n\r\n\r\n# sess\r\nwith tf.Session() as sess:\r\n    data_batch = []\r\n    label_batch = []\r\n    sess.run(tf.global_variables_initializer()) \r\n    for i in range(EPOCH):\r\n#        print('--------------------------------------')\r\n#        print('for epoch {}:'.format(i))\r\n        train = minibatch(FILENAME, batch_size=BATCHSIZE)\r\n        acc = []\r\n        for data_batch, label_batch in train:\r\n            \r\n            feed_dict = {input_data: data_batch, \r\n                         label:label_batch,\r\n                         learn_rate: LEARN_RATE}\r\n            _, pred_batch, logits_batch = sess.run([train_op, pred, logits], feed_dict)\r\n#            print('label_batch:', label_batch)\r\n#            print('pred_batch:', pred_batch)\r\n#            print(' ')\r\n            acc+=[1 if l==p else 0 for l, p in zip(label_batch, pred_batch)]\r\n        print('for {} epoch(s), acc: {}%'.format(i, np.mean(acc)*100))\r\n        #print('--------------------------------------')\r\n        #print(' ')\r\n        train.close()\r\n    ","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"126563010","text":"\ndef fun_food_lock(p_name,food, DateToday):\n    food_file_pointer = open(\"f_\"+p_name.lower()+\".txt\", \"a\")\n    food_file_pointer.write(str(DateToday)+\"\\n\"+food+\" \\n\")\n    food_file_pointer.close()\n\n\ndef fun_exercise_lock(e_name, ex_done, DateToday):\n    exercise_file_pointer = open(\"e_\"+e_name.lower()+\".txt\", \"a\")\n    exercise_file_pointer.write(str(DateToday) + \"\\n\" + ex_done + \" \\n\")\n    exercise_file_pointer.close()\n\ndef fun_food_read(user_name):\n    food_read_pointer = open(\"f_\"+user_name.lower()+\".txt\", \"r\")\n    content_read = food_read_pointer.read()\n    food_read_pointer.close()\n    return content_read\n\ndef fun_exercise_read(user_name):\n    exercise_read_pointer = open(\"e_\"+user_name.lower()+\".txt\", \"r\")\n    content_exe = exercise_read_pointer.read()\n    exercise_read_pointer.close()\n    return content_exe\n\ndef DateTme_function():\n    import datetime\n    temp = datetime.datetime.today()\n    todayDate = temp.strftime(\"%x\")\n    return todayDate\n\nget_user_name = input(\"Welcome\\nMay I know who is there? | Hammad OR Sajid \")\nprint(\"What do you want to do? | Lock or Read?\")\nlock_read_choice = input()\nif lock_read_choice == \"lock\" or lock_read_choice==\"Lock\" or lock_read_choice==\"LOCK\":\n    lock_choice = input(\"What do you want to lock? 
| Food or Exercise?\")\n\n if lock_choice == \"food\" or lock_choice == \"FOOD\" or lock_choice == \"Food\":\n food_choice = input(\"What you eat today?\")\n DateToday = DateTme_function()\n fun_food_lock(get_user_name,food_choice,DateToday)\n elif lock_choice == \"exercise\" or lock_choice==\"Exercise\" or lock_choice==\"EXERCISE\":\n exercice_choice = input(\"What Exercise you did today \"+get_user_name )\n DateToday = DateTme_function()\n fun_exercise_lock(get_user_name,exercice_choice,DateToday)\n\nelif lock_read_choice==\"Read\" or lock_read_choice==\"read\" or lock_read_choice == \"READ\":\n read_choice = input(\"Which log do you want to read Mr.\"+get_user_name+\" Your food or Exercise?\")\n\n if read_choice == \"food\" or read_choice==\"Food\" or read_choice==\"Food\":\n food_log_read = fun_food_read(get_user_name)\n print(\"Mr.\"+get_user_name+\"Here is Your food log\\n\")\n print(food_log_read)\n elif read_choice == \"exercise\" or read_choice == \"Exercise\" or read_choice==\"EXERCISE\":\n exercise_log =fun_exercise_read(get_user_name)\n print(\"Mr.\" + get_user_name + \"Here is Your exercise log\\n\")\n print(exercise_log)\nelse:\n print(\"Wrong Choice\")\n\n","sub_path":"Health_Management_System/HealthManagement.py","file_name":"HealthManagement.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"3705535","text":"# -*- coding:utf-8 -*-\n#filename:manage\n#16/2/8 下午1:20\nimport logging\n\n__author__ = 'bingone'\nfrom flask.ext.script import Manager, Server\nfrom app import app\nmanager = Manager(app)\nmanager.add_command(\"runserver\",\n Server(host=\"0.0.0.0\", port=5000, use_debugger=True))\nlogger = logging.getLogger('views')\nlogger.warn(\"bingoneWeb redeply\")\nif __name__ == '__main__':\n manager.run()\n","sub_path":"bingone_web/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"220869093","text":"import requests\nfrom threadManager import ThreadManager\nfrom datetime import datetime\nfrom datetime import timedelta\nimport time\nimport dateutil.parser\nfrom newsfetch.news import newspaper\nimport json\n\napiNewsUri = \"http://127.0.0.1:5000/api/news\"\napiKey = \"f314f8aa6b874a54b58bfa89907c1fd2\"\nnewsApiUri = 'https://newsapi.org/v2/everything'\n\narticleSources = []\n\nwith open(\"../consumers/articleSources.json\", \"r\") as sourcesFile:\n articleSources = json.load(sourcesFile)\n\ndef sanitize(string):\n return string.replace(\"\\\"\", \"'\").replace(\"\\n\",\"\")\n\nclass NewsConsumer:\n def __init__(self):\n self.threadManager = ThreadManager(3, lambda resource: self.processData(resource))\n self.sleepTime = 960 # seconds\n self.params = {\n 'q': 'covid',\n 'pageSize': 100,\n 'apiKey': apiKey,\n 'sortBy': 'publishedAt',\n 'language': 'en',\n 'page': 1\n }\n\n def start(self):\n while True:\n try:\n self.params[\"from\"] = (datetime.now() + timedelta(minutes = -300)).strftime('%Y-%m-%dT%H:%M:%SZ')\n self.params[\"to\"] = (datetime.now() + timedelta(minutes = -285)).strftime('%Y-%m-%dT%H:%M:%SZ')\n news = requests.get(newsApiUri, params=self.params).json()[\"articles\"]\n for n in news:\n self.threadManager.addResource((n['url'], n[\"publishedAt\"], n[\"title\"], n['urlToImage']))\n except:\n pass\n \n time.sleep(self.sleepTime)\n\n def processData(self, resource):\n print(resource)\n if any((resource[0] in source) for source in 
articleSources):\n return\n \n news = newspaper(resource[0])\n\n if not any(item in news.keywords for item in ['covid', 'coronavirus', 'covid19', 'lockdown', 'virus', 'vaccine', 'illness', 'symptom', 'pandemic']):\n return\n\n requests.post(apiNewsUri, params={\"apiKey\": \"newsConsumerApiKey\"}, json={\"url\": resource[0], \"date\": resource[1], \"title\": sanitize(resource[2]), \"keywords\": news.keywords, \"publication\": sanitize(news.publication), 'imgUrl': resource[3] })\n\n\nconsumer = NewsConsumer()\nconsumer.start()","sub_path":"consumers/newsConsumer.py","file_name":"newsConsumer.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"378084975","text":"import json\nimport responses\nimport unittest\n\nfrom badgecheck.actions.graph import add_node\nfrom badgecheck.actions.tasks import add_task\nfrom badgecheck.extensions import GeoLocation\nfrom badgecheck.openbadges_context import OPENBADGES_CONTEXT_V2_URI\nfrom badgecheck.reducers.graph import graph_reducer\nfrom badgecheck.tasks.extensions import validate_extension_node\nfrom badgecheck.tasks.graph import _get_extension_actions\nfrom badgecheck.tasks import task_named\nfrom badgecheck.tasks.task_types import JSONLD_COMPACT_DATA, VALIDATE_EXTENSION_NODE\n\nfrom tests.utils import setUpContextMock\n\n\nclass CompactJsonExtensionDiscoveryTests(unittest.TestCase):\n def test_can_discover_actions_right(self):\n node = {\n 'string_prop': 'string_val'\n }\n self.assertEqual(_get_extension_actions(node, ['_:b0']), [])\n\n node['dict_prop_1'] = {'type': 'Extension'}\n actions = _get_extension_actions(node, ['_:b0'])\n self.assertEqual(len(actions), 1,\n \"When one Extension-type node is present, file one action\")\n self.assertEqual(actions[0]['node_path'], ['_:b0', 'dict_prop_1'])\n\n node['dict_prop_1'] = {'type': ['Extension', 'extensions:ApplyLink']}\n actions = _get_extension_actions(node, ['_:b0'])\n self.assertEqual(len(actions), 1,\n \"It can handle an Extension-type node declared in list\")\n self.assertEqual(actions[0]['node_path'], ['_:b0', 'dict_prop_1'])\n\n node['dict_prop_2'] = {'type': 'NotAnExtension'}\n self.assertEqual(len(_get_extension_actions(node, ['_:b0'])), 1,\n \"Another non-Extension node doesn't add another action.\")\n\n node['dict_prop_2'] = {'type': 'Extension'}\n self.assertEqual(len(_get_extension_actions(node, ['_:b0'])), 2,\n \"A second Extension node yields another action.\")\n\n node = {\n 'dict_prop_3': {\n 'string_prop_2': 'string_val',\n 'dict_prop_4': {'type': 'Extension'}\n }\n }\n actions = _get_extension_actions(node, ['_:b0'])\n self.assertEqual(len(actions), 1, \"One Extension is found.\")\n self.assertEqual(actions[0]['node_path'], ['_:b0', 'dict_prop_3', 'dict_prop_4'],\n \"A deeply nested extension is properly identified.\")\n\n node = {\n 'list_prop_1': [\n {\n 'prop': 'not an extension'\n },\n {\n 'id': '_:b0',\n 'string_prop_1': 'string_val',\n 'dict_prop_1': {'id': '_:b1'}\n },\n 'string_val'\n ]\n }\n\n actions = _get_extension_actions(node, ['_:b0'])\n self.assertEqual(len(actions), 0, \"No extensions exist in node yet\")\n node['list_prop_1'][1]['type'] = 'Extension'\n actions = _get_extension_actions(node, ['_:b0'])\n self.assertEqual(len(actions), 1, \"An Extension is found inside a many=True value.\")\n self.assertEqual(\n actions[0]['node_path'], ['_:b0', 'list_prop_1', 1],\n \"The action's node_path correctly identifies the list index of the Extension\")\n\n\nclass 
ExtensionNodeValidationTests(unittest.TestCase):\n def setUp(self):\n self.first_node = {\n 'id': 'http://example.org/assertion',\n 'extensions:exampleExtension': '_:b0',\n 'evidence': '_:b1'\n }\n self.extension = {\n 'id': '_:b0',\n 'type': ['Extension', 'extensions:ExampleExtension'],\n 'http://schema.org/text': 'I\\'m a property, short and sweet'\n }\n self.evidence = {\n 'id': '_:b1',\n 'narrative': 'Rocked the free world'\n }\n self.state = {'graph': [self.first_node, self.extension, self.evidence]}\n\n def test_validate_extension_node_basic(self):\n task_meta = add_task(\n VALIDATE_EXTENSION_NODE, node_id=self.extension['id'])\n\n result, message, actions = validate_extension_node(self.state, task_meta)\n self.assertTrue(result, \"A valid expression of the extension should pass\")\n self.assertIn('validated on node', message)\n self.assertEqual(len(actions), 0)\n\n def test_validate_extension_node_path_based(self):\n task_meta = add_task(\n VALIDATE_EXTENSION_NODE, node_path=[self.extension['id']])\n\n result, message, actions = validate_extension_node(self.state, task_meta)\n self.assertTrue(result, \"A valid expression of the extension should pass\")\n self.assertIn('validated on node', message)\n self.assertEqual(len(actions), 0)\n\n def test_validate_extension_node_declared_type(self):\n task_meta = add_task(\n VALIDATE_EXTENSION_NODE, node_id=self.extension['id'],\n type_to_test='extensions:ExampleExtension')\n\n result, message, actions = validate_extension_node(self.state, task_meta)\n self.assertTrue(result, \"A valid expression of the extension should pass\")\n self.assertIn('validated on node', message)\n self.assertEqual(len(actions), 0)\n\n def test_validate_extension_node_invalid(self):\n task_meta = add_task(\n VALIDATE_EXTENSION_NODE, node_id=self.extension['id'])\n self.extension['http://schema.org/text'] = 1337 # String value required\n\n result, message, actions = validate_extension_node(self.state, task_meta)\n self.assertFalse(result, \"An invalid expression of a rule in schema should fail\")\n self.assertIn('did not validate', message)\n self.assertEqual(len(actions), 0)\n\n def test_validation_breaks_down_multiple_extensions(self):\n self.extension['type'].append('extensions:ApplyLink')\n task_meta = add_task(\n VALIDATE_EXTENSION_NODE, node_id=self.extension['id'])\n\n result, message, actions = validate_extension_node(self.state, task_meta)\n self.assertTrue(result, \"Task breakdown should succeed.\")\n self.assertIn('Multiple extension types', message)\n self.assertEqual(len(actions), 2)\n self.assertTrue(all(a['name'] == VALIDATE_EXTENSION_NODE for a in actions),\n 'All tasks created should be of correct type')\n\n\nclass ComplexExtensionNodeValdiationTests(unittest.TestCase):\n \"\"\"\n Tests for extensions that use nested properties.\n \"\"\"\n def test_node_json_validation(self):\n node = {\n '@context': OPENBADGES_CONTEXT_V2_URI,\n 'id': 'http://example.com/1',\n 'type': 'Assertion',\n 'schema:location': {\n '@context': 'https://w3id.org/openbadges/extensions/geoCoordinatesExtension/context.json',\n 'type': ['Extension', 'extensions:GeoCoordinates'],\n 'description': 'That place in the woods where we built the fort',\n 'schema:geo': {\n 'schema:latitude': 44.580900,\n 'schema:longitude': -123.301815\n }\n }\n }\n state = {'graph': graph_reducer([], add_node(node['id'], node))}\n\n task_meta = add_task(\n VALIDATE_EXTENSION_NODE,\n node_path=['http://example.com/1', 'schema:location'],\n node_json=json.dumps(node['schema:location']))\n\n result, message, 
actions = validate_extension_node(state, task_meta)\n self.assertTrue(result, \"A valid expression of the extension should pass\")\n self.assertIn('validated on node', message)\n self.assertEqual(len(actions), 0)\n\n del node['schema:location']['schema:geo']['schema:latitude']\n task_meta['node_json'] = json.dumps(node['schema:location'])\n result, message, actions = validate_extension_node(state, task_meta)\n self.assertFalse(result, \"A required property not present should be detected by JSON-schema.\")\n\n @responses.activate\n def test_extension_discovered_jsonld_compact(self):\n \"\"\"\n Ensure an extension node is properly discovered and that the task runs without error.\n \"\"\"\n node = {\n '@context': OPENBADGES_CONTEXT_V2_URI,\n 'id': 'http://example.com/1',\n 'type': 'Assertion',\n 'schema:location': {\n '@context': 'https://w3id.org/openbadges/extensions/geoCoordinatesExtension/context.json',\n 'type': ['Extension', 'extensions:GeoCoordinates'],\n 'description': 'That place in the woods where we built the fort',\n 'geo': {\n 'latitude': 44.580900,\n 'longitude': -123.301815\n }\n }\n }\n state = {'graph': graph_reducer([], add_node(node['id'], node))}\n\n setUpContextMock()\n\n responses.add(\n responses.GET,\n \"https://w3id.org/openbadges/extensions/geoCoordinatesExtension/context.json\",\n body=json.dumps(GeoLocation.context_json),\n status=200,\n content_type='application/ld+json')\n\n compact_task = add_task(JSONLD_COMPACT_DATA, data=json.dumps(node))\n\n result, message, actions = task_named(JSONLD_COMPACT_DATA)(state, compact_task)\n self.assertTrue(result, \"JSON-LD Compact is successful.\")\n self.assertIn(VALIDATE_EXTENSION_NODE, [i.get('name') for i in actions], \"Validation task queued.\")\n\n validate_task = [i for i in actions if i.get('name') == VALIDATE_EXTENSION_NODE][0]\n self.assertIsNotNone(validate_task['node_json'])\n\n result, message, actions = task_named(VALIDATE_EXTENSION_NODE)(state, validate_task)\n self.assertTrue(result, \"Validation task is successful.\")\n\n\nclass UnknownExtensionsTests(unittest.TestCase):\n \"\"\"\n TODO: In the future, dynamic discovery of extensions will be possible.\n Until then, make sure we are reporting on unverified extensions.\n \"\"\"\n def test_report_message_on_unknown_extension(self):\n first_node = {\n 'id': 'http://example.org/assertion',\n 'extensions:exampleExtension': '_:b0',\n 'evidence': '_:b1'\n }\n extension = {\n 'id': '_:b0',\n 'type': ['Extension', 'extensions:UnknownExtension'],\n 'schema:unknownProperty': 'I\\'m a property, short and sweet'\n }\n state = {'graph': [first_node, extension]}\n task_meta = add_task(\n VALIDATE_EXTENSION_NODE, node_id=extension['id'])\n\n result, message, actions = validate_extension_node(state, task_meta)\n self.assertFalse(result, \"An unknown extension will fail for now.\")\n","sub_path":"tests/test_extensions.py","file_name":"test_extensions.py","file_ext":"py","file_size_in_byte":10663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"375075591","text":"import torch.nn as nn\n\nclass Bottleneck(nn.Module):\n\n def __init__(self, in_d, out_d, stride=1):\n\n super(Bottleneck, self).__init__()\n\n self.net = nn.Sequential(\n nn.Conv2d(in_d, in_d, kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(in_d),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_d, in_d, kernel_size=3, stride=stride, padding=1, bias=False),\n nn. 
BatchNorm2d(in_d),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_d, out_d, kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(out_d)\n )\n\n self.downsample = nn.Sequential(nn.Conv2d(in_d, out_d, 1, 1),\n nn.BatchNorm2d(out_d)\n )\n\n self.relu=nn.ReLU(inplace=True)\n\n\n def forward(self, x):\n\n temp = x\n x = self.downsample(x)\n output = self.relu(self.net(temp) + x)\n\n return output\n\n","sub_path":"Bottleneck.py","file_name":"Bottleneck.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"456579468","text":"from webtest import TestApp as WebTestApp\nfrom pyramid.paster import get_appsettings\n\nimport pytest\n\nimport fin_web\n\n\n@pytest.fixture(scope='module')\ndef app():\n settings = get_appsettings('app/fin-web/development.ini', name='main')\n fin_app = fin_web.main(None, **settings)\n app = WebTestApp(fin_app)\n return app\n\n\ndef test_index(app):\n res = app.get('/', status=200)\n assert b'Hello, Michael J!' in res.body\n\n\ndef test_css(app):\n res = app.get('/static/css/base.css', status=200)\n assert b'.column' in res.body\n","sub_path":"app/fin-web/test/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"546403454","text":"import pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport matplotlib\nimport matplotlib as mpl\n\n\n#matplotlib.use('Agg')\nmax_val = 4\ntransparency_ticks = 5\ncolors = [mpl.colors.hsv_to_rgb((0.13, a/transparency_ticks, 1)) \n for a in range(transparency_ticks)]\ncmap = mpl.colors.ListedColormap(colors)\n#norm = mpl.colors.Normalize(vmin=0, vmax=1)\n\n\nfilename = sys.argv[-1]\n \nf = np.load(filename, allow_pickle=True)\nfilters = np.concatenate(f['filter'][-1][3])\nfilters = np.concatenate([p/p.max() for p in filters[::4]], 1)\nprint(np.shape(filters))\nplt.figure(figsize=(15,10))\nplt.imshow(filters, aspect='auto', cmap='jet')\nplt.xticks()\nplt.yticks()\nplt.savefig(filename.split('/')[-1][:-4].replace('.', '-')+'.png')\n","sub_path":"special_reader.py","file_name":"special_reader.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"217700989","text":"# sema_signal.py\n#\n# An example of using a semaphore to signal\n\nimport threading\nimport time\n\ndone = threading.Semaphore(0) # Max: unlimit threads\nitem = None\n\ndef producer():\n global item\n print(\"I'm the producer and I produce data.\")\n print(\"Producer is going to sleep.\")\n time.sleep(2)\n item = \"Hello\"\n print(\"Producer is alive. 
Signaling the consumer.\")\n done.release() # Increments the count and signals waiting threads\n\ndef consumer():\n print(\"I'm a consumer and I wait for data.\")\n print(\"Consumer is waiting.\")\n done.acquire() # Waits if the count is 0, otherwise decrements the count and continues\n print(\"Consumer got\", item)\n\nt1 = threading.Thread(target=producer)\nt2 = threading.Thread(target=consumer)\nt1.start()\nt2.start()\n","sub_path":"Concurrency/sema_signal.py","file_name":"sema_signal.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"224386574","text":"# Dinh Tran\r\n# Madison Fletcher\r\n# CIS 3368\r\n# Professor Otto Dobretsberger\r\n# Final Project Sprint 1\r\n\r\n\r\nimport mysql.connector\r\nimport flask\r\nfrom flask import jsonify\r\nfrom flask import request, make_response\r\nfrom mysql.connector import Error\r\nfrom sql import create_connection\r\nfrom sql import execute_query\r\nfrom sql import execute_read_query\r\n\r\n#setting up an application name\r\napp = flask.Flask(__name__) #sets up the application\r\napp.config[\"DEBUG\"] = True #allow to show errors in browser\r\n\r\n#Prints the entire guest table\r\n@app.route('/guests', methods=['GET'])\r\ndef people():\r\n conn = create_connection(\"cis3368fall2021.c2ksqbnomh6f.us-east-2.rds.amazonaws.com\", \"admin\", \"MadisonFall2021\", \"cis3368fall2021\")\r\n #Selects all data from the table\r\n sql = \"SELECT * FROM guest\"\r\n guest = execute_read_query(conn,sql)\r\n #Results all data to return\r\n results = []\r\n for people in guest:\r\n results.append(people)\r\n return jsonify(results)\r\n\r\n#Add a new guest to the table\r\n@app.route('/api/addguest', methods=['POST'])\r\n#Request data to be added\r\ndef addguest():\r\n request_data = request.get_json()\r\n # guestid = request_data['id']\r\n newfirstname = request_data['firstname']\r\n newlastname = request_data['lastname']\r\n newrestaurant = request_data['restaurantname']\r\n \r\n #connects to mySQL database and allows to add data from Postman to mySQL\r\n conn = create_connection(\"cis3368fall2021.c2ksqbnomh6f.us-east-2.rds.amazonaws.com\", \"admin\", \"MadisonFall2021\", \"cis3368fall2021\")\r\n query = \"INSERT INTO guest (firstname, lastname) VALUES ('%s', '%s')\" % (newfirstname, newlastname)\r\n execute_query(conn,query)\r\n \r\n count = 0\r\n # count how many restaurants the user adds\r\n while (count <= 9):\r\n #make a while loop to ask user to input from 5 to 10 restaurants\r\n print(\"Type a list of restaurants with 5 being minimum and 10 beng maximum (Type 'Q' to quit): \")\r\n newrestaurant = input(\" \")\r\n if newrestaurant == 'Q' and count < 4:\r\n #keep asking for input if the numbers of restaurant is not enough\r\n #stop when enough restaurants or user has entered \"Q\"\r\n print(\"You have to type at least 5 restaurants: \")\r\n elif newrestaurant == 'Q' and count > 4:\r\n break\r\n else:\r\n # after for asking for 5 - 10 restaurants, the query will insert the restaurant name and guest id into the restaurant table\r\n # the code adds the largest id from the guest table, and since the query before this adds a guest to the guest table the guest \r\n # with the largest id will be the last guest added.\r\n query2 = f\"\"\"INSERT INTO restaurant (restaurantname, guestid) VALUES (\"{newrestaurant}\", (SELECT max(id) from guest))\"\"\"\"\"\r\n execute_query(conn, query2)\r\n count = count + 1\r\n return 'POST REQUEST WORKED'\r\n\r\n\r\n#Delete a guest in the 
table\r\n@app.route('/api/deleteguest', methods=['GET'])\r\ndef deleteguest():\r\n request_data = request.get_json()\r\n #connects to mySQL database and allows to modify data from Postman to mySQL\r\n conn = create_connection(\"cis3368fall2021.c2ksqbnomh6f.us-east-2.rds.amazonaws.com\", \"admin\", \"MadisonFall2021\", \"cis3368fall2021\")\r\n if 'firstname' in request.args: #only if a firstname is provided as an argument, proceed\r\n firstname = str(request.args['firstname'])\r\n else:\r\n return 'ERROR: No ID provided!'\r\n sql = \"SELECT * FROM guest\"\r\n guest = execute_read_query(conn, sql)\r\n results = []\r\n\r\n for user in guest:\r\n if user['firstname'] == firstname:\r\n results.append(user['id'])\r\n # if the name the user entered is in the guest table it will add the ID to the results list\r\n query = \"DELETE from guest WHERE firstname = '%s' \" % (firstname) # Deletes the guest and their information from guest table\r\n execute_query(conn,query)\r\n sql2 = \"SELECT * FROM restaurant\"\r\n rest = execute_read_query(conn, sql2)\r\n for new in rest: # goes through the restaurant table \r\n for x in results: # goes through the results list (which should only have the ID of the guest that was just deleted)\r\n if new['guestid'] == x: \r\n # if the guest id from the restaurant table and the ID of the guest that was just deleted match\r\n # It deletes all the restaurants that that guest had enetered\r\n query2 = f\"\"\"DELETE from restaurant WHERE guestid = {x}\"\"\"\r\n execute_query(conn,query2)\r\n return 'Guest was successfully deleted.' #Returns a statement if the code works successfully\r\n\r\n\r\n#Update a guest in the table\r\n@app.route('/api/updateguest', methods=['PUT'])\r\ndef updateguest():\r\n request_data = request.get_json()\r\n guestid = request_data['id']\r\n updatefirstname = request_data['firstname']\r\n updatelastname = request_data['lastname']\r\n #connects to mySQL database and allows to modify data from Postman to mySQL\r\n conn = create_connection(\"cis3368fall2021.c2ksqbnomh6f.us-east-2.rds.amazonaws.com\", \"admin\", \"MadisonFall2021\", \"cis3368fall2021\")\r\n query = \"UPDATE guest SET firstname = '%s', lastname = '%s' WHERE id = %s\" %(updatefirstname, updatelastname, guestid)\r\n execute_query(conn,query)\r\n return 'Guest was successfully updated'\r\n\r\n\r\n#Print the entire restaurant table:\r\n@app.route('/restaurants', methods=['GET'])\r\ndef place():\r\n conn = create_connection(\"cis3368fall2021.c2ksqbnomh6f.us-east-2.rds.amazonaws.com\", \"admin\", \"MadisonFall2021\", \"cis3368fall2021\")\r\n #Selects all data from the table\r\n sql = \"SELECT * FROM restaurant\"\r\n restaurant = execute_read_query(conn,sql)\r\n #Results all data to return\r\n results = []\r\n for place in restaurant:\r\n results.append(place)\r\n return jsonify(results)\r\n\r\n#Add a new restaurant to the table\r\n@app.route('/api/addrestaurant', methods=['POST'])\r\n#Request data to be added\r\ndef addrestaurant():\r\n request_data = request.get_json()\r\n newname = request_data['restaurantname']\r\n \r\n #connects to mySQL database and allows to add data from Postman to mySQL\r\n conn = create_connection(\"cis3368fall2021.c2ksqbnomh6f.us-east-2.rds.amazonaws.com\", \"admin\", \"MadisonFall2021\", \"cis3368fall2021\")\r\n query = \"INSERT INTO restaurant (restaurantname) VALUES ('%s')\" % (newname)\r\n execute_query(conn,query)\r\n return 'POST REQUEST WORKED'\r\n\r\n#Delete a restaurant in the table\r\n@app.route('/api/deleterestaurant', methods=['DELETE'])\r\ndef 
deleterestaurant():\r\n request_data = request.get_json()\r\n deletename = request_data['restaurantname']\r\n #connects to mySQL database and allows to modify data from Postman to mySQL\r\n conn = create_connection(\"cis3368fall2021.c2ksqbnomh6f.us-east-2.rds.amazonaws.com\", \"admin\", \"MadisonFall2021\", \"cis3368fall2021\")\r\n query = \"DELETE from restaurant WHERE restaurantname = '%s' \" % (deletename)\r\n execute_query(conn,query)\r\n return 'Restaurant was successfully deleted.' #Returns a statement if the code works successfully\r\n\r\n#Update a restaurant in the table\r\n@app.route('/api/updaterestaurant', methods=['PUT'])\r\ndef updaterestaurant():\r\n request_data = request.get_json()\r\n restaurantid = request_data['id']\r\n updatename = request_data['restaurantname']\r\n \r\n #connects to mySQL database and allows to modify data from Postman to mySQL\r\n conn = create_connection(\"cis3368fall2021.c2ksqbnomh6f.us-east-2.rds.amazonaws.com\", \"admin\", \"MadisonFall2021\", \"cis3368fall2021\")\r\n query = \"UPDATE restaurant SET restaurantname= '%s' WHERE id = %s\" %(updatename, restaurantid)\r\n execute_query(conn,query)\r\n return 'Restaurant was successfully updated'\r\n\r\n@app.route('/api/randomrestaurant', methods=[\"GET\"])\r\ndef randomrestaurant():\r\n conn = create_connection(\"cis3368fall2021.c2ksqbnomh6f.us-east-2.rds.amazonaws.com\", \"admin\", \"MadisonFall2021\", \"cis3368fall2021\")\r\n print(\"How many people are going to dinner?\")\r\n dinner = int(input(\"\"))\r\n list = []\r\n for x in range(dinner): # loops through asking for user first and last name for each person attending dinner\r\n print(\"Please enter your first name:\")\r\n selectf = input(\" \")\r\n list.append(selectf)\r\n print(\"Please enter your lastname:\")\r\n selectl = input(\" \") \r\n list.append(selectl) \r\n # the name of the people attending dinner are all added into a list\r\n sql = \"SELECT * FROM guest\"\r\n guest = execute_read_query(conn, sql)\r\n results = []\r\n for user in guest:\r\n for x in list: # the for loop goes through the list of people attending dinner\r\n # if those names are in the guest table it adds their ID number to a new list\r\n if x in user['firstname'] or x in user['lastname']:\r\n results.append(user['id'])\r\n sql2 = \"SELECT * FROM restaurant\"\r\n rest = execute_read_query(conn, sql2)\r\n for new in rest: # goes through the restaurant table \r\n for x in results: # goes through the results list (which should only have the ID of the guests that were entered)\r\n if new['guestid'] == x: \r\n # if the ID's from the results list match the guest ID's from the restaurant list\r\n # it takes the restaurants of all the guests that were entered and put it into another empty list\r\n sql3 = f\"\"\"SELECT restaurantname FROM restaurant WHERE guestid = {x} ORDER BY RAND () LIMIT 1\"\"\" \r\n # ^ selects a random restauramt from the restaurant list of the people going to dinner\r\n global random_rest\r\n random_rest = execute_read_query(conn, sql3)\r\n results2 = []\r\n for object in random_rest:\r\n results2.append(object)\r\n return jsonify(results2)\r\n\r\napp.run()\r\n","sub_path":"Madison_FinalProjectSprint1_1.py","file_name":"Madison_FinalProjectSprint1_1.py","file_ext":"py","file_size_in_byte":10122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"218554462","text":"import retrive_twitter_info\n\nfrom peewee import *\nfrom create_recruited_database import Recruited, Tweets, Hashtag, SentDate\nimport 
datetime, time\n\n\nclass target:\n def __init__(self, twitter, db, user):\n self.twitter = twitter\n self.db = db\n self.user = user\n\n def get_hashtags(self):\n # Option 1: Retrieve the tweet to send from a previously filled table in the database\n # Option 2: Create the tweet to send on the fly (easier to do)\n\n \"\"\"\n #retrieve all the hashtags from database\n :return: the lis of the hashtags\n \"\"\"\n hashtag = Hashtag.select(Hashtag.hashtag_text).distinct()\n hashtag_list = []\n for item in hashtag:\n hashtag_list.append(item.hashtag_text)\n\n return hashtag_list\n\n def show_menu(self, hashtag_list):\n \"\"\"\n #shows hashtagas as menu items\n :return: nothing\n \"\"\"\n count = 1;\n for item in hashtag_list:\n print(\"{}:{}\".format(count, item))\n count += 1\n\n def retrieve_users(self, target_hashtag_idx, the_hashtags):\n \"\"\"\n #retrieve all users that have used the selected hashtags and that have not received tweets recently (last 24 hours)\n :return: the list of selected users\n \"\"\"\n now = datetime.datetime.now()\n\n now = datetime.datetime.now()\n pop_user = []\n final_list = []\n print(target_hashtag_idx)\n the_target_hashtag = the_hashtags[target_hashtag_idx - 1]\n # this query gets the user_ids of those users that tweeted the selected hashtag\n users = Hashtag.select(Hashtag.user_of_hashtag).where(Hashtag.hashtag_text == the_target_hashtag)\n users_list = []\n for item in users:\n users_list.append(item.user_of_hashtag_id)\n print(users_list)\n selected_user = []\n # this query selects the datetime of the users that where retrieved with the last query (users who tweeted certain hashtag)\n for item in users_list:\n selected_user = SentDate.select(SentDate.user_sent, SentDate.date_tweet_sent, SentDate.tweet_sent_message).where(SentDate.user_sent == item)\n # if the user has been sent a tweet in the last 24 hours skip it\n print(\"selected users\")\n for item in selected_user:\n print(item.user_sent_id)\n for item in selected_user:\n # print(\"printing selected user\")\n # print (item.date_tweet_sent)\n # print(item.tweet_sent_message)\n # print(item.user_sent_id)\n\n date_retrieved = datetime.datetime.strptime(item.date_tweet_sent, '%Y-%m-%d %H:%M:%S.%f')\n print('sent', date_retrieved)\n print('48 hours', now - datetime.timedelta(hours=48))\n if (now - datetime.timedelta(hours=48)) < date_retrieved:\n print('Not have passed 48 hours, cannot send tweet')\n\n if item.user_sent_id not in pop_user:\n pop_user.append(item.user_sent_id)\n\n print(\"user to pop: \", pop_user)\n # only keeps the users that who didn't receive a tweet in the last 48 hours\n print('user_list', users_list)\n for elem in pop_user:\n users_list = [value for value in users_list if value != elem]\n\n print(\"final list\", users_list)\n return users_list\n\n def construct_tweet(self, list_of_users, message):\n \"\"\"\n #append message to username\n :return: the constructed tweet\n\n \"\"\"\n\n tweets = {}\n print(list_of_users)\n counter = 1\n print(\"Constructing tweets\")\n for item in list_of_users:\n if counter < 25:\n string_name =twitter.get_screen_name(item)\n print(string_name)\n tweets[item]='@' + string_name + ' ' + message\n print(type(tweets))\n print(tweets[item])\n counter +=1\n else:\n break\n return tweets\n\n def send_tweet(self, tweets_to_send):\n \"\"\"\n #just send the tweet to the list, sleep.time(15)\n #save in the data_tweets_sent:\n user_id to whom it was sent\n the tweet_id fo the tweet\n the text of the text\n the date sent (possible hour?)\n\n\n :return: nothing\n\n 
\"\"\"\n for k,v in tweets_to_send.items():\n print('Mensaje a enviar: ', v)\n # tweet_only= item.split(' ')[:0]\n print(\"The following tweets will be sent\")\n\n print (k , 'corresponds to', v)\n # print(tweet_only)\n\n\n twitter.api.update_status(status=v)\n time.sleep(600)\n self.save_tweet_data(k,v)\n # print \"Tweeting!\"\n\n def save_tweet_data(self, k,v):\n # Get id of the user\n list_tweets = []\n\n id = int(k)\n tweet_sent = v\n\n\n print('id vale', id)\n\n user_sent = id\n # tweet=last_tweet.split(',')[1:2]\n print(\"save tweet \", tweet_sent)\n tweet = self.twitter.get_user_timeline(self.user, 1)\n # tweet = twitter.api.get_user(int_id).id_str\n\n print(tweet_sent)\n\n print(user_sent)\n print(datetime.datetime.now())\n\n list_tweets.append({'user_sent': user_sent, 'tweet_sent_message': tweet_sent,\n 'date_tweet_sent': datetime.datetime.now()})\n for item in list_tweets:\n a = SentDate(**item)\n a.save()\n\n def send_tweet_to_recruited(self):\n # retrieve all the hashtags from database\n the_hashtags = self.get_hashtags()\n print(the_hashtags)\n # show menu and\n self.show_menu(the_hashtags)\n # Ask for the hashtag to target\n target = input(\"Which hashtag do you want to target: \")\n # get targeted hashtag\n target_hashtag_idx = int(target)\n # get list of users according to hashtag\n recruited = self.retrieve_users(target_hashtag_idx, the_hashtags)\n print('recruited', recruited)\n if recruited:\n message = input(\"Write the tweet you want do send: \")\n contructed_tweet = self.construct_tweet(recruited, message)\n print(contructed_tweet)\n send = input('Are you sure you want to send them? Y=yes, N=no: ')\n if send.lower() == 'y':\n self.send_tweet(contructed_tweet)\n else:\n exit()\n\n # confirm send this tweet?\n # if yes: send_tweet(constructed_message returned from def constructed_message)\n # if not: cancel\n else:\n print(\"There are no recruiters\")\n exit()\n\ndef insert_in_database(user, id_tweet, message, date):\n query_user = SentDate.create(user_sent=user, tweet_sent_message=message,\n date_tweet_sent=date)\n query_user.save()\n\n\nuser, ck, cs, at, atc = [line.rstrip('\\n') for line in open('my_twitter_info.txt', 'r')]\nprint(\"the user is\", user)\ntwitter = retrive_twitter_info.GetTwitterInfo(ck, cs, at, atc, user)\nprint(\"antes de procesar archivo\")\ndb = SqliteDatabase('recruited.db')\ndb.connect()\n# insert_in_database(44973121,995,\"just python5\",datetime.datetime.now())\nnew_target = target(twitter, db, user)\nnew_target.send_tweet_to_recruited()\n","sub_path":"target_volunteers.py","file_name":"target_volunteers.py","file_ext":"py","file_size_in_byte":7417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"52669169","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 4 18:23:32 2016\n\n@author: clloyd\n\"\"\"\n\nfrom theano.tensor.shared_randomstreams import RandomStreams\nfrom theano import function\nimport numpy as np\nimport theano as th\n\ndata = np.random.rand(10,3)\n\n\n\n\nit = th.shared(0)\ny = th.shared(data)\n\n\n\n\nsrng = RandomStreams(seed=234)\n\nexpectRvs = srng.normal(size=(3,1))\nexpectRvs.name='expectRvs'\nepochStream = srng.permutation(n=10)\ncurrentBatch = epochStream.reshape((5,2))[:,it]\ny_mini = y[ currentBatch, :]\nL = th.tensor.sum(th.tensor.dot( y_mini, expectRvs ))\nL_func = function([], L, no_default_updates=True)\n\npadding = srng.choice(size=(3,), a=10, replace=False, p=None, ndim=None, dtype='int64')\n\n\n\nf1 = function([], expectRvs, 
no_default_updates=True)\nf2 = function([], expectRvs)\n\n","sub_path":"sandbox/randomstreamtest.py","file_name":"randomstreamtest.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"545895042","text":"'''TwitterDeepo Class'''\n\n'''Attribut : '''\n\n''' methode : read_timeline(all_stocks_dict as a dict of stock, escaped_stock as a dict of stock)'''\n''' search_other_tweet(stock as String) '''\n''' add_usefull_friends()'''\n''' delete_useless_friends()'''\n''' create_author_nb_tweet_lis()return : list a dict of stock -> nb tweet'''\n''' update_friends()'''\n\n\nfrom Twitter import *\n\nclass TwitterDeepo(Twitter):\n\n\tdef read_timeline(self,DataStocks,DataTwitter):\n\t\t'''put in a binary file SimpleTweet object of tweet about stocks'''\n\n\t\ttweets = self.getDataTimeLine() \n\t\tself.writeLog('Read Timeline ',log_timeline)\n\n\t\tfor tweet in tweets:\n\t\t\tstocks_finded = self.validateTweetForStocks(tweet, DataStocks)\n\t\t\tif(len(stocks_finded) > 0):\n\t\t\t\t'''write tweet'''\n\t\t\t\tnew_tweet = SimpleTweet(str(tweet.id), stocks_finded, str(tweet.author.id) ,str(tweet.created_at), str(tweet.retweeted), tweet.text)\n\t\t\t\tDataTwitter.write_tweet(new_tweet)\n\t\t\t\tself.writeLog('tweet : '+str(tweet.id)+' added'+ ' (' + stocks_finded[0]+')',log_timeline)\n\t\t\t\t\n\t\t\t\t'''search for other tweets about stock mentionned '''\n\t\t\t\tfor stock in stocks_finded:\n\t\t\t\t\tself.search_other_tweet(stock, DataTwitter, DataStocks)\n\t\t\t\t\t\t\t\n\t\tself.read_timeline(DataStocks, DataTwitter)\n\n\n\n\tdef search_other_tweet(self,stock, DataTwitter, DataStocks):\n\t\t'''Twitter search about a stock'''\n\n\t\ttweets_search = self.search(stock)\n\t\tif(len(tweets_search) > 0 ):\n\n\t\t\tfor tweet in tweets_search:\n\n\t\t\t\tstocks_finded = self.validateTweetForStocks(tweet, DataStocks)\n\t\t\t\tif(len(stocks_finded) > 0):\n\n\t\t\t\t\tif(self.validateUser(tweet.author.id)):\n\n\t\t\t\t\t\tself.addUser(tweet.author.id)\n\t\t\t\t\t\tself.writeLog('User : '+str(tweet.author.id) + ' added',log_friends)\n\t\t\t\t\t\tnew_tweet = SimpleTweet(str(tweet.id), stocks_finded, str(tweet.author.id) ,str(tweet.created_at), str(tweet.retweeted), tweet.text)\n\t\t\t\t\t\tDataTwitter.write_tweet(new_tweet)\n\t\t\t\t\t\tself.writeLog('tweet : '+str(tweet.id)+' added'+ ' (' + stocks_finded[0]+')',log_timeline)\n\t\t\n\n\n\tdef validateTweetForStocks(self,tweet, DataStocks):\n\t\t'''validate either or not a tweet contains stocks information'''\t\t\n\t\t'''if the tweet is valide (contains stocks) return the stock otherwise return and empty list'''\n\t\t\t\n\t\tnb_stock_in_tweet = 0\n\t\tstocks_finded = []\n\t\tif(tweet.id not in self.tweet_added):\n\t\t\tfor stock in DataStocks.get_stocks_list():\n\t\t\t\tif((tweet.text.find(' '+stock+' ') != -1 and stock not in DataStocks.get_esc_stocks_list() ) or tweet.text.find('$'+stock+' ') != -1 or tweet.text.find(''+DataStocks.get_stocks_list()[stock][0]+' ') != -1 ):\n\t\t\t\t\tif(nb_stock_in_tweet <3):\n\t\t\t\t\t\tnb_stock_in_tweet =+ 1\n\t\t\t\t\t\tstocks_finded.append('$'+stock)\n\t\t\t\t\t\tself.tweet_added.append(tweet.id)\n\t\treturn stocks_finded\n\n\n\n\tdef validateUser(self,userId):\n\t\t'''Validate either or not a user is a good friend'''\n\t\tUser = self.getUser(userId)\n\t\tif (User.id not in self.MyFriends):\n\t\t\tif(User.followers_count > follower_required):\n\t\t\t\treturn True\n\n\t\treturn False\n\n\n\n\t\t'''A revoir en dessous'''\n\n\tdef 
delete_useless_friends(self, DataTwitter):\n\t\t'''Look inside my friends and delete the useless ones (too few followers, no stock mention during the last week)'''\n\n\t\tdict_author_nb_tweet = create_author_nb_tweet_list()\n\t\titem_author_nb_tweet = dict_author_nb_tweet.keys()\n\t\tfriends_list_id = self.getMyFriends()\n\n\t\tfor friend_id in friends_list_id:\n\t\t\ttime.sleep(10)\n\t\t\tfriend_object = self.getUser(friend_id)\n\t\t\tif (friend_object.id in self.getMyFriends) :\n\t\t\t\tself.writeLog('id : '+ str(friend_object.id) + ' followers : ' + str(friend_object.followers_count) +' already catch one of his tweet',log_friends)\n\t\t\telse:\n\t\t\t\tself.writeLog('id : '+ str(friend_object.id) + ' followers : ' + str(friend_object.followers_count),log_friends)\n\t\t\tif(friend_object.followers_count < follower_required and friend_object.id not in item_author_nb_tweet):\n\t\t\t\tself.writeLog('User : ' + str(friend_object.id) + ' deleted',log_friends)\n\n\n\n\n\n","sub_path":"TwitterDeepo/TwitterDeepo.py","file_name":"TwitterDeepo.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"169970388","text":"import pymysql\r\nfrom sqlalchemy import create_engine, Table, Column, String, Integer, DateTime,ForeignKey\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom datetime import datetime\r\n\r\n\r\n\"\"\"\nZhang San transfers 100 geek coins to Li Si through online banking. The database\nholds three tables:\na user table, containing the user ID and the user's name;\na user-assets table, containing the user ID and the user's total assets;\nand an audit table, recording the transfer time, the sender ID, the receiver ID\nand the transfer amount.\nDesign reasonable field types and structures for the three tables;\nimplement the 100-geek-coin transfer in SQL (pymysql or sqlalchemy-orm may be used);\ndata consistency must be guaranteed when Zhang San's balance is insufficient or\nthe database crashes mid-transfer.\nUser table\n\"\"\"\r\n\r\n\r\nBase = declarative_base()\r\n\r\n# user table\r\nclass Users_table(Base):\r\n    __tablename__ = 'users'\r\n    uid = Column(Integer(), primary_key=True)\r\n    username = Column(String(20), nullable=False, unique=True)\r\n\r\n# user assets table\r\nclass Assets_table(Base):\r\n    __tablename__ = 'userassets'\r\n    id = Column(Integer(), primary_key=True)\r\n    zid = Column(Integer(), ForeignKey(Users_table.uid))\r\n    username = Column(String(20), nullable=False, unique=True)\r\n    assetsnum = Column(Integer(), default=0)\r\n\r\n# audit table\r\nclass Audit_table(Base):\r\n    __tablename__ = 'audit'\r\n    id = Column(Integer(), primary_key=True)\r\n    transfer_time = Column(DateTime(), default=datetime.now)\r\n    # receiving user\r\n    username_in = Column(String(20), nullable=False, unique=False)\r\n    # receiving user's id\r\n    inid = Column(Integer(), ForeignKey(Users_table.uid))\r\n    # sending user\r\n    username_out = Column(String(20), nullable=False, unique=False)\r\n    # sending user's id\r\n    outid = Column(Integer(), ForeignKey(Users_table.uid))\r\n    transfer_amount = Column(Integer())\r\n\r\n# user-operations class\r\nclass User_actions():\r\n    def __init__(self):\r\n        self.dburl = \"mysql+pymysql://testuser:testpass#AB1234@192.168.3.38:3306/testdb?charset=utf8mb4\"\r\n        self.engine = create_engine(self.dburl, echo=True, encoding=\"utf-8\")\r\n\r\n    # transfer operation\r\n    def Transfer_deposite(self,oname, iname, nums):\r\n        SessionClass = sessionmaker(bind=self.engine)\r\n        session = SessionClass()\r\n        user_out = oname\r\n        user_in = iname\r\n        # query the sender's user id\r\n        zz_user = session.query(Users_table.username, Users_table.uid).filter(Users_table.username == user_out).first()\r\n        # extract the sender's id\r\n        zz_num = int(zz_user[1])\r\n\r\n        # query the receiver's user id\r\n        bz_user = session.query(Users_table.username, Users_table.uid).filter(Users_table.username == user_in).first()\r\n        # extract the receiver's id\r\n        bz_num = int(bz_user[1])\r\n\r\n        # debit the sending user\r\n        zz_user_ssets = session.query(Assets_table.username, Assets_table.zid, \\\r\n                                      Assets_table.assetsnum).filter(Assets_table.zid == zz_num)\r\n        
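# NOTE: a hedged sketch, not part of the original sample -- the assignment\r\n        # asks that an insufficient balance or a mid-transfer crash leave the data\r\n        # consistent; a guard run inside the same transaction, e.g.\r\n        #     if int(zz_user_ssets.first()[2]) < nums:\r\n        #         session.rollback()\r\n        #         raise ValueError('insufficient balance')\r\n        # placed before the update below would cover the insufficient-balance case\r\n        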
zz_user_ssets.update({Assets_table.assetsnum: int(zz_user_ssets.first()[2])-nums})\r\n        print(zz_user_ssets.first())\r\n\r\n        # credit the receiving user\r\n        bz_user_ssets = session.query(Assets_table.username, Assets_table.zid, \\\r\n                                      Assets_table.assetsnum).filter(Assets_table.zid == bz_num)\r\n        bz_user_ssets.update({Assets_table.assetsnum: int(bz_user_ssets.first()[2]) + nums})\r\n        print(bz_user_ssets.first())\r\n\r\n        # record the transfer in the audit table\r\n        print(oname,iname,zz_num,bz_num,nums,\"---------------------------------------------------\")\r\n        auditadd=Audit_table(username_out=oname, username_in=iname, outid=zz_num, inid=bz_num, transfer_amount=nums)\r\n        session.add(auditadd)\r\n        res = session.query(Audit_table.outid, Audit_table.username_out, Audit_table.inid,\\\r\n                            Audit_table.username_in, Audit_table.transfer_amount, Audit_table.transfer_amount).all()\r\n        print(\" -----------转账审计审计表 数据表(audit)\")\r\n        #for i in res:\r\n        #    print(i)\r\n        print(\" -----------转转用户资产表 数据表(userassets):\", zz_user_ssets.first())\r\n        print(\" -----------被转用户资产表 数据表(userassets): \", bz_user_ssets.first())\r\n        #session.flush()\r\n        session.commit()\r\n    # add a user\r\n    def user_add(self, usadd):\r\n        SessionClass = sessionmaker(bind=self.engine)\r\n        session = SessionClass()\r\n        session.add(usadd)\r\n        session.commit()\r\n\r\n    # add user assets\r\n    def assets_add(self, asadd):\r\n        SessionClass = sessionmaker(bind=self.engine)\r\n        session = SessionClass()\r\n        session.add(asadd)\r\n        session.commit()\r\n\r\ndburl=\"mysql+pymysql://testuser:testpass#AB1234@192.168.3.38:3306/testdb?charset=utf8mb4\"\r\nengine = create_engine(dburl, echo=True, encoding=\"utf-8\")\r\n#Base.metadata.create_all(engine)\r\n\r\n# seed users\r\n\"\"\"\nadd_h1 = Users_table(uid=100, username='tom1')\nadd_h2 = Users_table(uid=101, username='tom2')\nadd_h3 = Users_table(uid=102, username='jack1')\nadd_h4 = Users_table(uid=103, username='jack2')\nuadd = User_actions()\nuadd.user_add(add_h1)\nuadd.user_add(add_h2)\nuadd.user_add(add_h3)\nuadd.user_add(add_h4)\n\"\"\"\r\n# seed user assets\r\n\"\"\"\nadd_z1 = Assets_table(zid=100, username='tom1', assetsnum=2000 )\nadd_z2 = Assets_table(zid=101, username='tom2', assetsnum=2000)\nadd_z3 = Assets_table(zid=102, username='jack1', assetsnum=2000)\nadd_z4 = Assets_table(zid=103, username='jack2', assetsnum=2000)\nzadd= User_actions()\nzadd.assets_add(add_z1)\nzadd.assets_add(add_z2)\nzadd.assets_add(add_z3)\nzadd.assets_add(add_z4)\n\"\"\"\r\n# run a transfer\r\nzzuser = User_actions()\r\nzzuser.Transfer_deposite('tom1', 'jack2', 123)\r\n\r\n\r\n'''\n2020-12-15 17:55:45,478 INFO sqlalchemy.engine.base.Engine INSERT INTO audit (transfer_time, username_in, inid, username_out, outid, transfer_amount) VALUES (%(transfer_time)s, %(username_in)s, %(inid)s, %(username_out)s, %(outid)s, %(transfer_amount)s)\n2020-12-15 17:55:45,478 INFO sqlalchemy.engine.base.Engine {'transfer_time': datetime.datetime(2020, 12, 15, 17, 55, 45, 478566), 'username_in': 'jack2', 'inid': 103, 'username_out': 'tom2', 'outid': 101, 'transfer_amount': 123}\n2020-12-15 17:55:45,491 INFO sqlalchemy.engine.base.Engine SELECT audit.outid AS audit_outid, audit.username_out AS audit_username_out, audit.inid AS audit_inid, audit.username_in AS audit_username_in, audit.transfer_amount AS audit_transfer_amount \nFROM audit\n2020-12-15 17:55:45,492 INFO 
sqlalchemy.engine.base.Engine {'zid_1': 101, 'param_1': 1}\n               -----------转转用户资产表 数据表(userassets): ('tom2', 101, 1811)\n2020-12-15 17:55:45,495 INFO sqlalchemy.engine.base.Engine SELECT userassets.username AS userassets_username, userassets.zid AS userassets_zid, userassets.assetsnum AS userassets_assetsnum\nFROM userassets\nWHERE userassets.zid = %(zid_1)s\n LIMIT %(param_1)s\n2020-12-15 17:55:45,496 INFO sqlalchemy.engine.base.Engine {'zid_1': 103, 'param_1': 1}\n               -----------被转用户资产表 数据表(userassets):  ('jack2', 103, 2189)\n2020-12-15 17:55:45,496 INFO sqlalchemy.engine.base.Engine COMMIT\n'''","sub_path":"week03/User_transfer_orm.py","file_name":"User_transfer_orm.py","file_ext":"py","file_size_in_byte":7399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"267639861","text":"import pytest\n\n\ndef test_post_comment(commentable, real_login):\n    commentable.post_comment('some_text')\n\ndef test_delete_comment(commentable, real_login, client):\n    c = commentable.post_comment('some_text')\n    client.delete_comment(comment_id=c.id)\n    comments = list(commentable.get_comments())\n    assert len(comments) == 1\n    assert comments[0].deleted\n    assert not comments[0].comment\n\n@pytest.fixture(params=['session', 'test'])\ndef commentable(request, ended_session, ended_test):\n    if request.param == 'session':\n        return ended_session\n    if request.param == 'test':\n        return ended_test\n\n    raise NotImplementedError()  # pragma: no cover\n","sub_path":"tests/test_comments.py","file_name":"test_comments.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"170653309","text":"from flask import Blueprint, jsonify, make_response, request\nimport app\nimport csv\nfrom codecs import iterdecode\n\narticulos = Blueprint(\"articulos\", __name__)\n\n\n@articulos.route('/all', methods=['GET'])\ndef getArticulosAll():\n    \"\"\"\n    returns all articulos\n    \"\"\"\n    articles, ok = app.db().getArticles()\n    if not ok:\n        return make_response(\"error\", 500)\n\n    if not articles:\n        return make_response(jsonify({}), 200)\n\n    return make_response(jsonify(articles), 200)\n\n\n@articulos.route('', methods=['GET'])\ndef getArticulo():\n    \"\"\"\n    returns articles according to the parameters sent\n    \"\"\"\n    query_parameters = request.args\n    id = query_parameters.get('id')\n    id_torre = query_parameters.get('id_torre')\n    id_producto = query_parameters.get('id_producto')\n    estado = query_parameters.get('estado')\n    robot, ok = app.db().getArticles(id, id_torre, id_producto, estado)\n    if not ok:\n        return make_response(\"Error al buscar en la base de datos\", 500)\n\n    if not robot:\n        return make_response(jsonify({}), 200)\n\n    return make_response(jsonify(robot), 200)\n\n\n@articulos.route('/csv', methods=['POST'])\ndef insertArticulos():\n    \"\"\"\n    Adds new articulos to the list\n    \"\"\"\n    f = request.files['file']\n    row, ok = app.db().insertArticlesCsv(csv.reader(iterdecode(f, 'utf-8')))\n    if not ok:\n        return make_response(\"La fila {} con los datos {} dio error de integridad, se cancelo la insercion de productos. Favor revise que datos ingresados coincidan con los Id de Torres y Articulos existentes\".format(row[1], row[0]), 500)\n\n    return make_response(\"Se insertaron {} articulos con exito\".format(row[1]), 200)\n\n\n@articulos.route('/', methods=['POST'])\ndef insertArticulo():\n    \"\"\"\n    Adds new articulo to the list\n    \"\"\"\n    json_data = request.get_json(force=True)\n    row, ok = app.db().insertArticles(json_data)\n    if not ok:\n        return make_response(\"Error al insertar el articulo\".format(row), 500)\n\n    return make_response(\"Se inserto el articulos con exito en la row {}\".format(row), 200)\n\n\n# restored the route parameter: the view takes 'id', so the rule needs '/<id>'\n# (the angle-bracket placeholder appears to have been stripped during extraction)\n@articulos.route('/<id>', methods=['DELETE'])\ndef deleteArticulo(id):\n    \"\"\"\n    Deletes article according to the id sent\n    \"\"\"\n    ok, pedidos = app.db().deleteArticle(id)\n    if ok == 'pending':\n        return make_response(\"El/Los pedidos {} estan pendientes o finalizados con el codigo de articulo {}. Elimine primero los pedidos\".format(' '.join(['%s,' % (i,) for i in pedidos]), id), 200)\n\n    if ok is None:\n        return make_response(\"Articulo no encontrado\", 404)\n\n    if not ok:\n        return make_response(\"Error al eliminar articulo\", 500)\n\n    return make_response(\"Articulo eliminado correctamente\", 200)\n","sub_path":"app/blueprints/articulos/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"214571622","text":"from dataclasses import dataclass\nimport pickle\nfrom typing import Dict, List\nfrom .CoinMarketCap import CachedGet, CoinMarketCapApi, Ticker\nfrom collections import defaultdict\nfrom redis import StrictRedis\nfrom prettytable import PrettyTable\nfrom imagemaker.makePng import getCryptoLeaderboardPng, getCryptoTopPng\n\n\n@dataclass\nclass User:\n    user_name: str\n    balance: float\n    portfolio: Dict[str, float]\n\n    def display_portfolio(self) -> Dict[str, float]:\n        # don't include entries with 0 value\n        return {\n            k: v\n            for k, v in self.portfolio.items()\n            if v != 0.0\n        }\n\n    def value(self, prices: Dict[str, float]) -> float:\n        sum = 0.0\n        for ticker, quantity in self.portfolio.items():\n            sum = sum + prices.get(ticker, 0) * quantity\n        return sum\n\n\nclass CryptoTrader:\n\n    INITIAL_POT_SIZE = 100000\n\n    def __init__(self, db: StrictRedis, group: str) -> None:\n        self.db = db\n        self.group = group\n        self.api = CoinMarketCapApi()\n        pass\n\n    def buy(self, user_name: str, ticker: str, quantity: float) -> None:\n        user = self._getUser(user_name)\n        prices = self.api.getPrices()\n        ticker = ticker.lower()\n\n        if (ticker not in prices):\n            raise InvalidCoinError(\n                \"Price missing for {ticker}. 
Try a different coin.\"\n .format(ticker=ticker)\n )\n\n purchasePrice = prices[ticker] * quantity\n if (user.balance > purchasePrice):\n user.portfolio[ticker] = user.portfolio.get(\n ticker\n ) or 0 # initialize if needed\n user.portfolio[ticker] += quantity\n user.balance = user.balance - purchasePrice\n self._setUser(user)\n else:\n raise InsufficientFundsError(\n \"{user_name} is out of dough!\"\n .format(user_name=user_name)\n )\n\n def sell(self, user_name: str, ticker: str, quantity: float) -> None:\n user = self._getUser(user_name)\n prices = self.api.getPrices()\n ticker = ticker.lower()\n\n if (\n user.portfolio.get(ticker) and\n user.portfolio[ticker] >= quantity\n ):\n sellPrice = prices[ticker] * quantity\n user.portfolio[ticker] -= quantity\n user.balance += sellPrice\n self._setUser(user)\n else:\n raise InsufficientCoinsError(\n \"{user_name} don't have {coin} coins to sell!\"\n .format(user_name=user_name, coin=ticker)\n )\n\n def _key(self, user_name: str) -> str:\n return \"cryptoTrader.{group}.{user_name}\".format(\n group=self.group,\n user_name=user_name\n )\n\n def _getUser(self, user_name: str) -> User:\n if not self.db.get(self._key(user_name)):\n self._setUser(\n User(\n user_name,\n CryptoTrader.INITIAL_POT_SIZE,\n {}\n )\n )\n return pickle.loads(\n self.db.get(\n self._key(user_name)\n )\n )\n\n def _getAllUsers(self) -> List[User]:\n userKeys = self.db.keys(\"cryptoTrader.{g}.*\".format(g=self.group))\n if not userKeys:\n return []\n return [\n pickle.loads(u)\n for u in self.db.mget(userKeys)\n ]\n\n def _setUser(self, user: User) -> None:\n self.db.set(self._key(user.user_name), pickle.dumps(user))\n\n def status(self, user_name: str) -> str:\n user = self._getUser(user_name)\n return (\n \"```User {user_name} has ${balance} to spend.\\n\" +\n \"Coins owned: {portfolio}\\n\" +\n \"Portfolio value is ${value}```\"\n ).format(\n user_name=user.user_name,\n balance=user.balance,\n portfolio=user.display_portfolio(),\n value=user.value(self.api.getPrices())\n )\n\n def topCoins(self, n: int) -> str:\n topTickers = self.api.getTopNTickersAndPrices(n)\n\n rows = []\n for ticker in topTickers:\n rows.append(\n (\n ticker.symbol,\n _format_money(ticker.quotes['USD'].price),\n _format_suffix(ticker.quotes['USD'].volume_24h),\n _format_suffix(ticker.quotes['USD'].market_cap),\n ticker.quotes['USD'].percent_change_1h,\n ticker.quotes['USD'].percent_change_24h,\n ticker.quotes['USD'].percent_change_7d\n )\n )\n\n return getCryptoTopPng(rows)\n\n def leaderboard(self):\n users = self._getAllUsers()\n\n if not users:\n return 'No leaderboard created yet. 
`crypto help` to start.'\n\n prices = self.api.getPrices()\n\n # sort users by total $, descending\n sortedUsers = sorted(\n users,\n key=lambda u: u.balance + u.value(prices),\n reverse=True\n )\n\n rows = []\n for user in sortedUsers:\n rows.append(\n (\n user.user_name,\n user.display_portfolio(),\n _format_money(user.value(prices)),\n _format_money(user.balance),\n _format_money(user.balance + user.value(prices)),\n (\n (user.balance + user.value(prices)) -\n CryptoTrader.INITIAL_POT_SIZE\n\n )\n )\n )\n\n return getCryptoLeaderboardPng(rows)\n\n\nclass Error(Exception):\n pass\n\n\nclass InsufficientFundsError(Error):\n pass\n\n\nclass InsufficientCoinsError(Error):\n pass\n\n\nclass InvalidCoinError(Error):\n pass\n\n\ndef _format_money(n: float) -> str:\n return \"{0:.1f}\".format(n)\n\n\ndef _format_pct(n: float) -> str:\n return \"{0:.1f}\".format(n)\n\n\ndef _format_suffix(num):\n magnitude = 0\n while abs(num) >= 1000:\n magnitude += 1\n num /= 1000.0\n # add more suffixes if you need them\n return '%.2f%s' % (num, ['', 'K', 'M', 'B', 'T', 'P'][magnitude])\n","sub_path":"crypto/CryptoTrader.py","file_name":"CryptoTrader.py","file_ext":"py","file_size_in_byte":6187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"99186123","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/_nmf/widgets/OWPivotData.py\n# Compiled at: 2013-02-14 09:13:46\n\"\"\"\nPivot Data\nPivot data\nicons/PivotIcon.png\n30\n\"\"\"\nimport numpy as np\nfrom OWWidget import *\nimport OWGUI\n\nclass OWPivotData(OWWidget):\n settingsList = []\n\n def __init__(self, parent=None, signalManager=None):\n OWWidget.__init__(self, parent, signalManager, 'Pivot Data')\n self.inputs = [\n (\n 'Data', ExampleTable, self.data)]\n self.outputs = [('Pivoted Data', ExampleTable)]\n self.loadSettings()\n box = OWGUI.widgetBox(self.controlArea, 'Info')\n self.infoa = OWGUI.widgetLabel(box, 'No data on input yet, waiting to get something.')\n self.infob = OWGUI.widgetLabel(box, '')\n OWGUI.separator(self.controlArea)\n OWGUI.button(self.controlArea, self, 'Pivot long', callback=self.pivotLong)\n OWGUI.button(self.controlArea, self, 'Pivot short', callback=self.pivotShort)\n self.resize(100, 50)\n\n def data(self, dataset):\n if dataset:\n self.dataset = dataset\n self.infoa.setText('%d variables in input data set' % len(dataset[0]))\n self.infob.setText('%d observations in input data set' % len(dataset))\n else:\n self.send('Pivoted Data', None)\n self.infoa.setText('No data on input yet, waiting to get something.')\n self.infob.setText('')\n return\n\n def pivotLong(self):\n self.outputData = []\n colVal = []\n colNames = []\n colID = []\n for j in range(0, len(self.dataset.domain)):\n for i in range(0, len(self.dataset)):\n colVal.append(self.dataset[i][j].value)\n colNames.append(self.dataset.domain[j].name)\n colID.append(self.dataset[i]['ID'].value)\n\n self.outputData = map(list, zip(*[colVal]))\n dom = [Orange.feature.Continuous('Value')]\n dom = Orange.data.Domain(dom, 0)\n dom.addmetas(self.dataset.domain.getmetas())\n newid = Orange.feature.Descriptor.new_meta_id()\n dom.add_meta(newid, Orange.feature.String('Names'))\n self.outputData = Orange.data.Table(dom, self.outputData)\n for i in range(0, len(self.outputData)):\n self.outputData[i]['ID'] = colID[i]\n self.outputData[i]['Names'] = colNames[i]\n\n 
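# Editor's note (added): outputData is now the melted/long form -- one row per\n        # (ID, variable) pair; e.g. a 2x2 table {'a': [1, 2], 'b': [3, 4]} becomes four\n        # rows with Value 1, 2, 3, 4 and Names 'a', 'a', 'b', 'b'.\n        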
self.send('Pivoted Data', self.outputData)\n\n def pivotShort(self):\n self.outputData = []\n domList = []\n nameMeta = self.dataset.domain.getmetas().values()\n if nameMeta[0].name == 'ID':\n nameMeta = nameMeta[1].name\n else:\n nameMeta = nameMeta[0].name\n i = 0\n while i < len(self.dataset):\n col = []\n nameCol = self.dataset[i][nameMeta].value\n domList.append(Orange.feature.Continuous(nameCol))\n while i < len(self.dataset) and self.dataset[i][nameMeta].value == nameCol:\n col.append(self.dataset[i][0].value)\n i += 1\n\n self.outputData.append(col)\n\n self.outputData = map(list, zip(*self.outputData))\n dom = Orange.data.Domain(domList, 0)\n dom.addmetas(self.dataset.domain.getmetas())\n dom.remove_meta(nameMeta)\n self.outputData = Orange.data.Table(dom, self.outputData)\n for i in range(0, len(self.outputData)):\n self.outputData[i]['ID'] = self.dataset[i]['ID']\n\n self.send('Pivoted Data', self.outputData)\n\n\nif __name__ == '__main__':\n appl = QApplication(sys.argv)\n ow = OWPivotData()\n ow.show()\n dataset = orange.ExampleTable('C:/Users/Paul Fogel/Desktop/testPivot.tab')\n ow.data(dataset)\n appl.exec_()","sub_path":"pycfiles/Orange_NMF-0.1.2-py2.6/OWPivotData.py","file_name":"OWPivotData.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"583194965","text":"import datetime as dt\nfrom fdfgen import forge_fdf\nimport subprocess\nfrom PyQt4 import QtCore, QtGui\nimport sys\nimport os\nfrom PyQt4 import Qt\nfrom PyQt4.uic import loadUi\nfrom gui4 import Ui_MainWindow\nimport sqlite3\nfrom collections import OrderedDict\n\n# Constants\nNAME = \"form1[0].#subform[0].Table1[0].Row2[0].TextField[0]\"\nSSN = \"form1[0].#subform[0].Table1[0].Row2[0].TextField[1]\"\nORGANIZATION = \"form1[0].#subform[0].Table1[0].Row4[0].TextField[0]\"\n\nANNUAL_FROM_DATE = \"form1[0].#subform[0].Table3[0].Row3[0].DateTimeField1[0]\"\nANNUAL_TO_DATE = \"form1[0].#subform[0].Table3[0].Row3[0].DateTimeField2[0]\"\nANNUAL_FROM_TIME = \"form1[0].#subform[0].Table3[1].Row3[0].DateTimeField1[0]\"\nANNUAL_TO_TIME = \"form1[0].#subform[0].Table3[1].Row3[0].DateTimeField2[0]\"\nANNUAL_TOTAL = \"form1[0].#subform[0].Table4[0].Row2[0].TextField[0]\"\nANNUAL_BOX = \"form1[0].#subform[0].CheckBox1[0]\"\n\nSICK_FROM_DATE = \"form1[0].#subform[0].Table3[0].Row6[0].DateTimeField7[0]\"\nSICK_TO_DATE = \"form1[0].#subform[0].Table3[0].Row6[0].DateTimeField8[0]\"\nSICK_FROM_TIME = \"form1[0].#subform[0].Table3[1].Row6[0].DateTimeField15[0]\"\nSICK_TO_TIME = \"form1[0].#subform[0].Table3[1].Row6[0].DateTimeField16[0]\"\nSICK_TOTAL = \"form1[0].#subform[0].Table4[0].Row5[0].TextField[0]\"\nSICK_BOX = \"form1[0].#subform[0].CheckBox1[3]\"\n\nCOMP_FROM_DATE = \"form1[0].#subform[0].Table7[0].Row1[0].DateTimeField19[0]\"\nCOMP_TO_DATE = \"form1[0].#subform[0].Table7[0].Row1[0].DateTimeField20[0]\"\nCOMP_FROM_TIME = \"form1[0].#subform[0].Table7[0].Row1[0].DateTimeField27[0]\"\nCOMP_TO_TIME = \"form1[0].#subform[0].Table7[0].Row1[0].DateTimeField30[0]\"\nCOMP_TOTAL = \"form1[0].#subform[0].Table7[0].Row1[0].TextField[0]\"\nCOMP_BOX = \"form1[0].#subform[0].CheckBox4[0]\"\n\nOTHER_FROM_DATE = \"form1[0].#subform[0].Table7[0].Row2[0].DateTimeField21[0]\"\nOTHER_TO_DATE = \"form1[0].#subform[0].Table7[0].Row2[0].DateTimeField22[0]\"\nOTHER_FROM_TIME = \"form1[0].#subform[0].Table7[0].Row2[0].DateTimeField28[0]\"\nOTHER_TO_TIME = \"form1[0].#subform[0].Table7[0].Row2[0].DateTimeField31[0]\"\nOTHER_TOTAL = 
\"form1[0].#subform[0].Table7[0].Row2[0].TextField[0]\"\nOTHER_BOX = \"form1[0].#subform[0].CheckBox4[1]\"\n\nLWOP_FROM_DATE = \"form1[0].#subform[0].Table7[0].Row3[0].DateTimeField23[0]\"\nLWOP_TO_DATE = \"form1[0].#subform[0].Table7[0].Row3[0].DateTimeField24[0]\"\nLWOP_FROM_TIME = \"form1[0].#subform[0].Table7[0].Row3[0].DateTimeField29[0]\"\nLWOP_TO_TIME = \"form1[0].#subform[0].Table7[0].Row3[0].DateTimeField32[0]\"\nLWOP_TOTAL = \"form1[0].#subform[0].Table7[0].Row3[0].TextField[0]\"\nLWOP_BOX = \"form1[0].#subform[0].CheckBox4[2]\"\n\nREMARKS = \"form1[0].#subform[0].Table8[0].Row2[0].TextField[0]\"\n\nSIGN_DATE = \"form1[0].#subform[0].Table8[0].Row5[0].DateTimeField25[0]\"\n\nPAYDAY = \"10-Nov-2016\"\nMISS_MONDAY = \"07-Nov-2016\"\n\nconn = sqlite3.connect('users.db')\ncur = conn.cursor()\ncur.execute('''CREATE TABLE IF NOT EXISTS users(\nssn INT PRIMARY KEY NOT NULL,\nlast TEXT NOT NULL,\nfirst TEXT NOT NULL,\nmiddle TEXT NOT NULL,\ngrade TEXT NOT NULL,\nunit TEXT NOT NULL\n);''')\n\ncur.execute('''CREATE TABLE IF NOT EXISTS leave_forms(\nid INT NOT NULL,\nfrom_date TEXT NOT NULL,\nto_date TEXT NOT NULL,\nfrom_time TEXT NOT NULL,\nto_time TEXT NOT NULL,\nleave_type TEXT NOT NULL,\nremarks TEXT NOT NULL,\nsigned TEXT NOT NULL,\nhours INT NOT NULL,\nFOREIGN KEY(id) REFERENCES users(ssn)\n);''')\n\n\n\n\n\n\nclass AftpSlip:\n def create_form(self, datas):\n fdf = forge_fdf(\"\", datas, [], [], [])\n fdf_file = open(\"data1.fdf\", \"wb\")\n fdf_file.write(fdf)\n fdf_file.close()\n subprocess.call([\"pdftk\", \"AFTP.pdf\", \"fill_form\", \"data1.fdf\", \"output\", \"outputAFTP.pdf\", \"flatten\"])\n\n\nclass LeaveForm:\n def __init__(self, gui):\n self.name = \"\"\n self.ssn = \"\"\n self.gui = gui\n self.datas = []\n self.from_date = \"\"\n self.to_date = \"\"\n self.from_time = \"\"\n self.to_time = \"\"\n self.leave_type = \"\"\n self.remarks = \"\"\n self.signed = \"\"\n self.hours = \"\"\n\n def fill_fields(self):\n\n self.datas.append((REMARKS, self.remarks))\n self.datas.append((NAME, self.name))\n self.datas.append((SSN, self.ssn))\n\n if self.gui.ui.radio_annual.isChecked():\n self.datas.append((ANNUAL_FROM_DATE, self.from_date))\n self.datas.append((ANNUAL_TO_DATE, self.to_date))\n self.datas.append((ANNUAL_BOX, 1))\n self.datas.append((ANNUAL_FROM_TIME, self.from_time))\n self.datas.append((ANNUAL_TO_TIME, self.to_time))\n self.datas.append((ANNUAL_TOTAL, self.hours))\n\n elif self.gui.ui.radio_sick.isChecked():\n self.datas.append((SICK_FROM_DATE, self.from_date))\n self.datas.append((SICK_TO_DATE, self.to_date))\n self.datas.append((SICK_BOX, 1))\n self.datas.append((SICK_FROM_TIME, self.from_time))\n self.datas.append((SICK_TO_TIME, self.to_time))\n self.datas.append((SICK_TOTAL, self.hours))\n\n\n elif self.gui.ui.radio_lwop.isChecked():\n self.datas.append((LWOP_FROM_DATE, self.from_date))\n self.datas.append((LWOP_TO_DATE, self.to_date))\n self.datas.append((LWOP_BOX, 1))\n self.datas.append((LWOP_FROM_TIME, self.from_time))\n self.datas.append((LWOP_TO_TIME, self.to_time))\n self.datas.append((LWOP_TOTAL, self.hours))\n\n elif self.gui.ui.radio_comp.isChecked():\n self.datas.append((COMP_FROM_DATE, self.from_date))\n self.datas.append((COMP_TO_DATE, self.to_date))\n self.datas.append((COMP_BOX, 1))\n self.datas.append((COMP_FROM_TIME, self.from_time))\n self.datas.append((COMP_TO_TIME, self.to_time))\n self.datas.append((COMP_TOTAL, self.hours))\n\n elif self.gui.ui.radio_mil.isChecked():\n self.datas.append((OTHER_FROM_DATE, self.from_date))\n 
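# Editor's note (assumption): military leave is written into the form's OTHER\n            # row because the leave PDF exposes no dedicated military-leave fields.\n            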
self.datas.append((OTHER_TO_DATE, self.to_date))\n self.datas.append((OTHER_BOX, 1))\n self.datas.append((OTHER_FROM_TIME, self.from_time))\n self.datas.append((OTHER_TO_TIME, self.to_time))\n self.datas.append((OTHER_TOTAL, self.hours))\n\n def create_form(self):\n self.fill_fields()\n fdf = forge_fdf(\"\", self.datas, [], [], [])\n fdf_file = open(\"data.fdf\", \"wb\")\n fdf_file.write(fdf)\n fdf_file.close()\n subprocess.call([\"pdftk\", \"leaveForm.pdf\", \"fill_form\", \"data.fdf\", \"output\", \"output.pdf\", \"flatten\"])\n\nclass Main(Qt.QMainWindow, Ui_MainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\n self.connections()\n self.defaults()\n self.logic()\n self.fill_drop_down()\n self.leave_form = LeaveForm(self)\n self.aftp_slip = AftpSlip()\n # self.user_table()\n\n # noinspection PyUnresolvedReferences\n def connections(self):\n\n self.ui.submit_btn.clicked.connect(self.submit_leave)\n self.ui.check_aftp.clicked.connect(self.logic)\n self.ui.radio_mil.clicked.connect(self.logic)\n self.ui.radio_sick.clicked.connect(self.logic)\n self.ui.radio_lwop.clicked.connect(self.logic)\n self.ui.radio_annual.clicked.connect(self.logic)\n self.ui.from_time.timeChanged.connect(self.update_hours)\n self.ui.to_time.timeChanged.connect(self.update_hours)\n self.ui.add_submit_btn.clicked.connect(self.add_user)\n self.ui.user_list.activated[str].connect(self.user_select)\n\n def defaults(self):\n self.ui.from_date.setDate(QtCore.QDate.currentDate())\n self.ui.to_date.setDate(QtCore.QDate.currentDate())\n self.ui.from_time.setTime(QtCore.QTime(7, 0, 0))\n self.ui.to_time.setTime(QtCore.QTime(16, 30, 0))\n if day_of_week(self.ui.from_date.date().toPyDate().strftime(\"%d-%b-%Y\")) == \"Sun\":\n self.ui.aftp_from_time.setTime(QtCore.QTime(7, 30))\n self.ui.aftp_to_time.setTime(QtCore.QTime(15,30))\n else:\n self.ui.aftp_from_time.setTime(QtCore.QTime(15, 30))\n self.ui.aftp_to_time.setTime(QtCore.QTime(23, 30))\n\n self.user_dict = {}\n self.user_dict = OrderedDict(sorted(self.user_dict.items(), key=lambda t: t[0]))\n self.ui.single_dual.addItem(\"Periods...\")\n self.ui.single_dual.addItems([\"Single\", \"Dual\"])\n self.ui.aftp_code.addItem(\"AFTP Code...\")\n self.ui.aftp_code.addItems([\"A\",\"B\",\"G\",\"I\",\"J\",\"L\",\"M\",\"Q\",\"R\",\"S\",\"T\",\"V\"])\n self.ui.tng_code.addItem(\"TNG Code...\")\n self.ui.tng_code.addItems([\"AST\",\"FDM\",\"GSC\",\"INF\",\"MNT\",\"MT1\",\"SNF\",\"OLT\",\"SPT\",\"WX\",\"TD1\",\"TD2\",\"TD3\",\"TD4\",\n \"TD5\",\"TD6\",\"TD7\",\"TD8\",\"TD9\",\"TD10\",\"CK1\",\"CK2\",\"CK3\",\"CK4\",\"CK5\",\"CK6\"])\n self.ui.grade_drop.addItems([\"E-1\",\"E-2\",\"E-3\",\"E-4\",\"E-5\",\"E-6\",\"E-7\",\"E-8\",\"E-9\",\n \"WO1\",\"CW2\",\"CW3\",\"CW4\",\"CW5\",\n \"O-1\",\"O-2\",\"O-3\",\"O-4\",\"O-5\",\"O-6\",\"O-7\",\"O-8\",\"O-9\"])\n\n\n def submit_leave(self):\n\n self.leave_form.from_date = self.ui.from_date.date().toPyDate().strftime(\"%d-%b-%Y\")\n self.leave_form.to_date = self.ui.to_date.date().toPyDate().strftime(\"%d-%b-%Y\")\n self.leave_form.from_time = self.ui.from_time.time().toPyTime().strftime(\"%H%M\")\n self.leave_form.to_time = self.ui.to_time.time().toPyTime().strftime(\"%H%M\")\n self.leave_form.remarks = self.ui.remarksText.toPlainText()\n self.leave_form.hours = self.ui.total_hours.time().toPyTime().strftime(\"%H\").lstrip('0')\n self.leave_form.leave_type = \"\"\n self.leave_form.signed = \"\"\n self.leave_form.create_form()\n cur.execute('''INSERT INTO leave_forms 
VALUES(\n {0},\n '{1}',\n '{2}',\n '{3}',\n '{4}',\n '{5}',\n '{6}',\n '{7}',\n '{8}')'''.format(self.leave_form.ssn,\n self.leave_form.from_date,\n self.leave_form.to_date,\n self.leave_form.from_time,\n self.leave_form.to_time,\n self.leave_form.leave_type,\n self.leave_form.remarks,\n self.leave_form.signed,\n self.leave_form.hours))\n conn.commit()\n\n if self.ui.check_aftp.isChecked():\n a1 = self.ui.aftp_code.currentText()\n t1 = self.ui.tng_code.currentText()\n p1 = \"X\"\n a2 = self.ui.aftp_code.currentText()\n t2 = self.ui.tng_code.currentText()\n p2 = \"X\"\n p3 = \"\"\n periods = self.ui.single_dual.currentText()\n\n if self.ui.aftp_code.currentText() == \"AFTP Code...\":\n a1, a2 = (\"L\", \"L\")\n if self.ui.tng_code.currentText() == \"TNG Code...\":\n t1, t2 = (\"SPT\", \"SPT\")\n if self.ui.single_dual.currentText() == \"Periods...\":\n periods = \"Dual\"\n if periods == \"Single\":\n a2, t2, p2, p3 = (\"\", \"\", \"\", \"X\")\n\n cur.execute(\"SELECT grade, unit FROM users WHERE ssn=\" + self.leave_form.ssn)\n grade, unit = cur.fetchall()[0]\n\n # if day_of_week(self.leave_form.from_date) == \"Thur\":\n # if periods == \"Single\":\n # from_time = \"1630\"\n # else:\n # from_time = \"1230\"\n # to_time = \"2100\"\n\n # if day_of_week(self.leave_form.from_date) == \"Sun\":\n # from_time = \"0730\"\n # if periods == \"Single\":\n # to_time = \"1130\"\n # else:\n # to_time = \"1600\"\n # else:\n # from_time = \"1530\"\n # to_time = \"2330\"\n\n\n\n self.aftp_data = [\n (\"DATE\", self.ui.from_date.date().toPyDate().strftime(\"%d-%b-%Y\")),\n (\"SINGLE_DUAL\", periods),\n (\"FROM\", self.ui.aftp_from_time.time().toPyTime().strftime(\"%H%M\")),\n (\"TO\", self.ui.aftp_to_time.time().toPyTime().strftime(\"%H%M\")),\n (\"ORGANIZATION\", unit),\n (\"SSN\", self.leave_form.ssn),\n (\"GRADE\", grade),\n (\"NAME\", self.leave_form.name),\n (\"AFTP_CODE_1\", a1),\n (\"AFTP_CODE_2\", a2),\n (\"TNG_CODE_1\", t1),\n (\"TNG_CODE_2\", t2),\n (\"FLYING_TIME\", \"\"),\n (\"FLYING_TIME_2\", \"\"),\n (\"TAIL_1\", \"\"),\n (\"TAIL_2\", \"\"),\n (\"PAY\", p1),\n (\"PAY_2\", p2),\n (\"NON_PAY_1\", \"\"),\n (\"NON_PAY_2]\", \"\")]\n\n self.aftp_slip.create_form(self.aftp_data)\n os.system(\"pdftk output.pdf outputAFTP.pdf cat output outputboth.pdf\")\n os.system(\"start outputboth.pdf\")\n else:\n os.system(\"start output.pdf\")\n def logic(self):\n if self.ui.check_aftp.isChecked():\n self.ui.remarksText.setPlainText(\"Military Leave for AFTP support\")\n self.ui.from_time.setTime(QtCore.QTime(15, 30, 0))\n self.ui.to_time.setTime(QtCore.QTime(16, 30, 0))\n # self.radio_mil.setChecked(True)\n self.ui.remarksText.update()\n\n\n def update_hours(self):\n from_time = self.ui.from_time.time().toPyTime().strftime(\"%H%M\")\n to_time = self.ui.to_time.time().toPyTime().strftime(\"%H%M\")\n self.ui.total_hours.setTime(QtCore.QTime(hours_of_leave(from_time, to_time), 0, 0))\n\n def add_user(self):\n\n first = self.ui.add_first_line.text()\n last = self.ui.add_last_line.text()\n middle = self.ui.add_middle_line.text()\n ssn = self.ui.add_last4_line.text()\n grade = self.ui.grade_drop.currentText()\n unit = self.ui.unit_edit.text()\n\n try:\n cur.execute(\"INSERT INTO users VALUES(\" + ssn + \", '\" + last + \"', '\" + first + \"', '\" + middle + \"', '\"\n + grade + \"', '\" + unit + \"')\")\n conn.commit()\n self.ui.user_list.clear()\n self.fill_drop_down()\n except sqlite3.IntegrityError:\n self.update_user(ssn, last, first, middle, grade, unit)\n\n def update_user(self, ssn, last, first, middle, grade, 
unit):\n msg = QtGui.QMessageBox()\n msg = QtGui.QMessageBox.question(msg, \"User already enrolled!\",\n \"Do you want to update the user?\",\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)\n if msg == QtGui.QMessageBox.Yes:\n print(\"yes\")\n cur.execute(\"UPDATE users SET last=?, first=?, middle=?, grade=?, unit=? WHERE ssn=?\", (last, first, middle, grade, unit, ssn))\n conn.commit()\n self.ui.user_list.clear()\n self.fill_drop_down()\n\n else:\n pass\n\n def fill_drop_down(self):\n cur.execute('''SELECT * FROM users''')\n rows = cur.fetchall()\n for row in rows:\n row = list(row)\n if len(str(row[0])) == 3:\n row[0] = '0' + str(row[0])\n # self.user_list.addItem('{0} - {1}'.format(row[1], str(row[0])))\n self.user_dict[row[0]] = '{0}, {1} {2} - {3}'.format(row[1], row[2], row[3], str(row[0]))\n self.user_dict = OrderedDict(sorted(self.user_dict.items(), key=lambda t: t[1]))\n self.ui.user_list.addItem(\"Select user...\")\n for key in self.user_dict:\n self.ui.user_list.addItem(self.user_dict[key])\n\n def user_select(self, text):\n if text == \"Select user...\":\n return\n self.leave_form.name, self.leave_form.ssn = text.split(' - ')\n cur.execute(\n \"SELECT from_date, to_date,from_time,to_time,leave_type,remarks,signed,hours FROM leave_forms WHERE ID=\" + self.leave_form.ssn)\n rows = cur.fetchall()\n self.ui.form_table.setRowCount(len(rows))\n try:\n self.ui.form_table.setColumnCount(len(rows[0]))\n except IndexError:\n self.ui.form_table.setColumnCount(11)\n row_num = 0\n for row in rows:\n column = 0\n for cell in row:\n self.ui.form_table.setItem(row_num, column, QtGui.QTableWidgetItem(str(cell)))\n column += 1\n row_num += 1\n self.ui.form_table.setHorizontalHeaderLabels(\n ['from date', 'to date', 'from time', 'to time', 'leave type', 'remarks', 'signed', 'hours'])\n\n # for row in rows:\n\n # print(row)\n\ndef day_of_week(date):\n date = dt.datetime.strptime(date, \"%d-%b-%Y\")\n miss_monday_start = dt.datetime.strptime(\"13-Nov-2016\", \"%d-%b-%Y\")\n delta = abs(date - miss_monday_start).days\n days = [(0, \"Sun\"), (1, \"Mon\"), (2, \"Tues\"), (3, \"Wed\"), (4, \"Thur\"), (5, \"Fri\"), (6, \"Sat\"),\n (7, \"Sun\"), (8, \"Mon\"), (9, \"Tues\"), (10, \"Wed\"), (11, \"Thur\"), (12, \"Fri\"), (13, \"Sat\")]\n for day in days:\n if delta % 14 == day[0]:\n return str(day[1])\n\n# Determines hours_in_day of a day assuming normal work schedule\n# 13-Nov-2016 is start of pay period\ndef hours_in_day(date):\n date = dt.datetime.strptime(date, \"%d-%b-%Y\")\n miss_monday_start = dt.datetime.strptime(\"13-Nov-2016\", \"%d-%b-%Y\")\n delta = abs(date - miss_monday_start).days\n days = [(0, 0), (1, 8), (2, 9), (3, 9), (4, 9), (5, 9), (6, 0),\n (7, 0), (8, 0), (9, 9), (10, 9), (11, 9), (12, 9), (13, 0)]\n for day in days:\n if delta % 14 == day[0]:\n return str(day[1])\n\n\ndef hours_of_leave(from_time, to_time):\n minutes = 60\n hours = 60\n from_time = dt.datetime.strptime(from_time, \"%H%M\")\n to_time = dt.datetime.strptime(to_time, \"%H%M\")\n delta = to_time - from_time\n return int((delta.total_seconds() / minutes) / hours)\n\n\ndef main():\n app = Qt.QApplication(sys.argv)\n main_view = Main()\n main_view.show()\n\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"leaveForm.py","file_name":"leaveForm.py","file_ext":"py","file_size_in_byte":18098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"159680035","text":"import csv\nimport os.path\nimport collections\nimport tldextract\nimport 
itertools\nfrom urllib.parse import urlsplit\nfrom urllib.parse import urlparse\nfrom datetime import datetime\nimport json\nimport timeit\n\nPATH_SAVE_LOG = '/home/moojokeubuntu/KU/4_2/RealProject/webpattern/Input_new/%s'\nPATH_SAVE_LOG2 = '/home/moojokeubuntu/KU/4_2/RealProject/webpattern/Input_new'\n\ndef readfile():\n    count = {}\n    max_length = 0\n    temp = \"\"\n    temp_length = 0\n    temp_rea = []\n    \n    with open(PATH_SAVE_LOG%('input_user_cut_20to23.txt'),'r') as file:\n        for i in file:\n            line_strip = i.split(' ')\n            IP = line_strip[0]\n            sequence = line_strip[1]\n            sequence = sequence.replace('\\n','')\n            web_access = sequence.split(',')\n            if len(web_access) not in count:\n                count.update({len(web_access):1})\n            else:\n                count[len(web_access)] += 1\n            if len(web_access) > max_length:\n                max_length = len(web_access)\n                temp = IP\n            if len(web_access) > 159:\n                print(len(web_access))\n                print(IP)\n                row_rea = {}\n                row_rea[\"IP\"] = IP\n                row_rea[\"C\"] = len(web_access)\n                temp_rea.append(row_rea)\n    od = collections.OrderedDict(sorted(count.items()))\n    # print(od)\n    # print(max_length)\n    # print(temp)\n    # print(temp_rea)\n\n    compath = os.path.join(PATH_SAVE_LOG2, 'count_sequence_20to23.csv')\n    \n    with open(compath,'w',newline = '') as csvfile:\n        writer=csv.writer(csvfile)\n        for key in od:\n            writer.writerow([key,od[key]])\n    sort = sorted(temp_rea,key=lambda x:(x['C'],x['IP']))\n    compath = os.path.join(PATH_SAVE_LOG2, 'count_sequence_more_20to23.txt')\n    with open(compath,'a') as f:\n        for i in sort:\n            f.writelines(str(i)+'\\n')\n\nif __name__ == '__main__':\n    readfile()","sub_path":"URL/WebAcessSequence/count_sequence.py","file_name":"count_sequence.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"60919100","text":"import numpy as np\nimport pandas as pd\nfrom scipy import ndimage\nimport json\nimport h5py\nimport keras\n\ndef preprocess_input(x):\n    x /= 255.\n    x -= 0.5\n    x *= 2.\n    return x # normalize the input\n    \ndef extract_lable(path):\n    with open(path,'rb') as f:\n        data=json.load(f)\n    data=pd.DataFrame.from_dict(data)\n    del data['image_url']\n    data.sort_values(by='image_id', inplace=True)\n    data = data.reset_index(drop=True)\n    image_file=data['image_id']\n    label= np.array(list(data['label_id'])).astype(np.int32)\n    label= keras.utils.to_categorical(label, 80) # one-hot encode the 80 class labels\n    return image_file,label # return the image ids and labels\n\ndef main():\n    image_file, label = extract_lable('image/ai_challenger_scene_validation_20170904/scene_validation_annotations_20170904.json')\n    image_path = 'image/resize_image_validation/' + image_file\n    for times in range(72):\n\n        if times == 0:\n            h5f = h5py.File('data/val_data.h5', 'w')\n            x = h5f.create_dataset(\"x_val\", (100, 299, 299,3),maxshape=(None, 299, 299,3),\n                                   # chunks=(1, 1000, 1000),\n                                   dtype=np.float32)\n            y = h5f.create_dataset('y_val',(100,80),maxshape=(None,80),dtype=np.int32) # use h5py to read/write datasets larger than memory\n\n        else:\n            h5f = h5py.File('data/val_data.h5', 'a')\n            x = h5f['x_val']\n            y = h5f['y_val']\n        # Key point: h5f and the datasets above do not hold the actual data here;\n        # they only carry metadata, so they take up no real memory.\n        # Data is read into memory only on array indexing (e.g. dataset[0:10])\n        # or via .value (e.g. dataset.value() or dataset[()]).\n\n        image = np.array(list(map(lambda x: ndimage.imread(x, mode='RGB'), image_path[times*100:(times+1)*100]))).astype(np.float32)\n        # resize the datasets to reserve storage space (could be enlarged in one go)\n\n        image = preprocess_input(image)\n        ytem = label[times*100:(times+1)*100]\n        if times != 71:\n            x.resize([times * 100 + 100, 299, 299,3])\n            y.resize([times * 100 + 100,80])\n            # data is read into memory here\n\n            x[times * 100:times * 100 + 100] = image\n            y[times * 100:times * 100 + 100] = ytem\n            # print(sys.getsizeof(h5f))\n            print('%d images are dealt with' %(times))\n        else:\n            x.resize([times * 100 + 20, 299, 299, 3])\n            y.resize([times * 100 + 20, 80])\n            # data is read into memory here\n\n            x[times * 100:times * 100 + 20] = image\n            y[times * 100:times * 100 + 20] = ytem\n            # print(sys.getsizeof(h5f))\n            print('%d images are dealt with' % (times))\n\n    h5f.close()\nif __name__ == '__main__':\n    main()","sub_path":"val_data.py","file_name":"val_data.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"288012078","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_django-plans-payments\n------------\n\nTests for `django-plans-payments` models module.\n\"\"\"\nimport json\nfrom decimal import Decimal\n\nfrom django.test import TestCase\n\nfrom plans_payments import models\n\n\nclass TestPlans_payments(TestCase):\n\n    def setUp(self):\n        pass\n\n    def test_save(self):\n        p = models.Payment(transaction_fee=1)\n        p.save()\n        rp = models.Payment.objects.get()\n        self.assertEquals(rp.transaction_fee, 1)\n\n    def test_save_extra_data(self):\n        p = models.Payment()\n        extra_data = {\n            \"response\": {\n                \"transactions\": (\n                    {\n                        \"related_resources\": (\n                            {\n                                \"sale\": {\n                                    \"transaction_fee\": {\n                                        \"value\": \"5.2\",\n                                    },\n                                },\n                            },\n                        ),\n                    },\n                ),\n            },\n        }\n        p.extra_data = json.dumps(extra_data)\n        p.save()\n        rp = models.Payment.objects.get()\n        self.assertEquals(rp.transaction_fee, Decimal(\"5.20\"))\n\n    def test_save_extra_data_without_fee(self):\n        p = models.Payment()\n        extra_data = {\n            \"response\": {\n                \"transactions\": (\n                    {\n                        \"related_resources\": (\n                            {\n                                \"sale\": {\n                                },\n                            },\n                        ),\n                    },\n                ),\n            },\n        }\n        p.extra_data = json.dumps(extra_data)\n        p.save()\n        rp = models.Payment.objects.get()\n        self.assertEquals(rp.transaction_fee, Decimal(\"0.0\"))\n\n    def tearDown(self):\n        pass\n","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"8631267","text":"import sys\nimport Adafruit_DHT\nimport time\nfrom PyQt5.QtWidgets import QWidget,QDialog,QApplication,QMessageBox,QInputDialog,QLineEdit\nfrom PyQt5.QtCore import QTimer,QTime,QThread,QEventLoop\nfrom Project1 import Ui_Form\nimport matplotlib.pyplot as plt\n\nclass AppWindow(QDialog):\n    def __init__(self):\n        super().__init__()\n        self.ui = Ui_Form()\n        self.ui.setupUi(self)\n        \n        global threshold,deg,fah,temp_count,hum_count,temp_avg,hum_avg\n        global temp_samples,hum_samples\n        temp_samples=[]\n        hum_samples=[]\n        deg = 1 # default unit\n        fah = 0\n        threshold = 35 # default threshold\n        temp_count=0\n        hum_count=0\n        temp_avg=0\n        hum_avg=0\n        self.show()\n        self.check()\n\n## Functionality for Widgets\n        self.threshold = QLineEdit()\n        self.ui.get_hum.clicked.connect(self.getHum)\n        self.ui.get_temp.clicked.connect(self.getTemp)\n        self.ui.TempAndHumidity.clicked.connect(self.getTempHum)\n        self.ui.threshold_button.clicked.connect(self.setThreshold)\n        
self.ui.degree_button.clicked.connect(self.degTemp)\n self.ui.fahrenheit_button.clicked.connect(self.fahTemp)\n self.ui.refresh.clicked.connect(self.startTimer)\n self.ui.stop_refresh.clicked.connect(self.stopTimer)\n self.ui.plotGraph.clicked.connect(self.plotGraphs)\n self.ui.exit.clicked.connect(self.close)\n\n# Timer function to start the timer\n def startTimer(self):\n self.timer=QTimer()\n self.timer.timeout.connect(self.getTempHum)\n self.timer.start(1000)\n self.ui.refresh_checkBox.setChecked(True)\n \n# Timer function to stop the timer\n def stopTimer(self):\n self.timer.stop()\n self.ui.refresh_checkBox.setChecked(False)\n \n# Function to check if sensor is connected or not\n def check(self):\n humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, 4)\n\n if humidity is None or temperature is None:\n alert=QMessageBox()\n alert.setIcon(QMessageBox.Critical)\n alert.setText(\"Sensor not detected!!!!!\")\n alert.exec_()\n self.ui.temp.display(\"\")\n self.ui.hum.display(\"\")\n else:\n pass\n \n# Function to get temperature values in degree celsius and degree fahrenheit\n# Also populates the temperature samples list and calculates incremental average\n def getTemp(self):\n global threshold,deg,fah,temp_count,temp_avg\n global temp_samples,temp_count\n self.check()\n humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, 4)\n time=QTime.currentTime().toString()\n self.ui.temp_TOR.display(time)\n \n if humidity is None or temperature is None:\n alert=QtGui.QMessageBox()\n alert.setText(\"Sensor not detected!!!!!\")\n alert.exec_()\n self.ui.temp.display(\"\")\n else:\n \n if deg == 1:\n temp = '{0:.2f}'.format(temperature)\n self.ui.temp.display(temp)\n temp_samples.append(temperature)\n deg = 0\n temp_count += 1\n temp_avg=temp_avg+(temperature-temp_avg)/temp_count\n temp_avg_display = '{0:.2f}'.format(temp_avg)\n self.ui.tempAvg.display(temp_avg_display)\n elif fah == 1:\n temperature = temperature*1.8 + 32\n temp = '{0:.2f}'.format(temperature)\n self.ui.temp.display(temp)\n fah = 0 \n else:\n temp = '{0:.2f}'.format(temperature)\n self.ui.temp.display(temp)\n \n #Check for high temperautre\n if threshold is None:\n pass\n elif temperature > threshold:\n alert=QMessageBox()\n alert.setIcon(QMessageBox.Warning)\n alert.setText(\"High Temperature!!!!!\")\n alert.exec_()\n\n# Function to get %Humidity values\n# Also populates the Humidity samples list and calculates incremental average\n def getHum(self):\n global hum_samples,hum_count,hum_avg\n self.check()\n humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, 4)\n time=QTime.currentTime().toString()\n self.ui.hum_TOR.display(time)\n \n if humidity is None or temperature is None:\n alert=QtGui.QMessageBox()\n alert.setText(\"Sensor not detected!!!!!\")\n alert.exec_()\n self.ui.hum.display(\"\")\n else:\n hum = '{0:.2f}'.format(humidity)\n self.ui.hum.display(hum)\n hum_samples.append(humidity)\n hum_count +=1\n hum_avg=hum_avg+(humidity-hum_avg)/hum_count\n hum_avg_display = '{0:.2f}'.format(hum_avg)\n self.ui.humAvg.display(hum_avg_display)\n\n# Function to get temperature values in degree celsius and degree fahrenheit, and %Humidity values \n# Also populates the temperature,humidity samples list and calculates incremental average\n def getTempHum(self):\n global threshold,temp_count,temp_avg,hum_count,hum_avg\n global hum_samples, temp_samples\n self.check()\n humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, 4)\n time=QTime.currentTime().toString()\n 
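# Editor's note (added): this local 'time' is just a display string for the LCDs;\n        # it shadows the imported time module inside this method, which is harmless\n        # here because the module itself is not used in this scope.\n        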
self.ui.temp_TOR.display(time)\n self.ui.hum_TOR.display(time)\n \n if humidity is None or temperature is None:\n alert=QtGui.QMessageBox()\n alert.setText(\"Sensor not detected!!!!!\")\n alert.exec_()\n self.ui.temp.display(\"\")\n self.ui.hum.display(\"\")\n else:\n hum = '{0:.2f}'.format(humidity)\n self.ui.hum.display(hum)\n hum_samples.append(humidity)\n temp = '{0:.2f}'.format(temperature)\n self.ui.temp.display(temp)\n temp_samples.append(temperature)\n temp_count += 1\n hum_count +=1\n temp_avg=temp_avg+(temperature-temp_avg)/temp_count\n hum_avg=hum_avg+(humidity-hum_avg)/hum_count\n hum_avg_display = '{0:.2f}'.format(hum_avg)\n self.ui.humAvg.display(hum_avg_display)\n temp_avg_display = '{0:.2f}'.format(temp_avg)\n self.ui.tempAvg.display(temp_avg_display)\n \n #Check for high temperautre\n if threshold is None:\n pass\n elif temperature > threshold:\n alert=QMessageBox()\n alert.setIcon(QMessageBox.Warning)\n alert.setText(\"High Temperature!!!!!\")\n alert.exec_()\n\n# Function to set threshold based on user input\n def setThreshold(self):\n global threshold\n input, ok = QInputDialog.getInt(self, 'integer Input Dialog', 'Enter Threshold:')\n if ok:\n self.threshold.setText(str(input))\n threshold = input\n\n# Function to enable degree celsius mode\n def degTemp(self):\n global deg\n deg = 1\n self.getTemp()\n\n# Function to enable degree Fahrenheit mode\n def fahTemp(self):\n global fah\n fah = 1\n self.getTemp()\n\n# Function to plot graphs\n def plotGraphs(self):\n global temp_count,hum_count,temp_samples,hum_samples\n x_temp = [(i+1) for i in range(temp_count)]\n x_hum = [(i+1) for i in range(hum_count)]\n plt.subplot(211)\n plt.plot(x_temp, temp_samples)\n plt.xlabel('Sample Number')\n plt.ylabel('Degree Celcius')\n plt.title('Temperature Graph')\n plt.subplot(212)\n plt.plot(x_hum, hum_samples)\n plt.xlabel('Sample Number')\n plt.ylabel('% Humidity')\n plt.title('Humidity Graph')\n plt.subplots_adjust(left=0.2, bottom=None, right=None, top=None, wspace=None, hspace=1.0)\n plt.legend()\n plt.show()\n\n#Function for exit\n def close(self):\n sys.exit(app.exec_())\n\napp = QApplication(sys.argv)\nw = AppWindow()\nw.show()\nsys.exit(app.exec_())\n","sub_path":"Project 1/My_Application/my_application.py","file_name":"my_application.py","file_ext":"py","file_size_in_byte":8127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"198955200","text":"from django.test import TestCase\n\nfrom core.fields import FeaturesField\nfrom core.models import Feature\n\n\nclass FeaturesFieldTest(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n cls.bw_feature = Feature.objects.create(code='SAT-BW', expression=\"^\\d+(\\.\\d+)?[ ]?[KM]bps$\")\n cls.ant_feature = Feature.objects.create(code='ANT-DIA', expression=\"^\\d+(\\.\\d{1,2})?[ ]?M$\")\n\n def setUp(self):\n self.bw_feature_field = FeaturesField()\n super().setUp()\n\n def test_bw_feature_field_matches_expression(self):\n self.assertEqual(\n self.bw_feature_field.to_python('SAT-BW: 35.67 Kbps; ANT-DIA: 1.2M'),\n {'SAT-BW': '35.67 Kbps', 'ANT-DIA': '1.2M'}\n )\n","sub_path":"core/tests/test_fields.py","file_name":"test_fields.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"357439682","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nimport unittest\nfrom marisa_trie import Trie\n\nfrom wikipedia2vec.phrase import PhraseDictionary\nfrom 
wikipedia2vec.utils.tokenizer.token import Token\nfrom wikipedia2vec.utils.tokenizer.mecab_tokenizer import MeCabTokenizer\n\nfrom nose.tools import *\n\n\nclass TestMeCabTokenizer(unittest.TestCase):\n    def setUp(self):\n        self._tokenizer = MeCabTokenizer()\n        phrase_dict = PhraseDictionary(Trie(['充実野菜']), False, {})\n        self._phrase_tokenizer = MeCabTokenizer(phrase_dict)\n\n    def test_tokenize(self):\n        text = '東京は日本の首都です'\n        tokens = self._tokenizer.tokenize(text)\n\n        ok_(all([isinstance(t, Token) for t in tokens]))\n        eq_(['東京', 'は', '日本', 'の', '首都', 'です'], [t.text for t in tokens])\n        eq_([(0, 2), (2, 3), (3, 5), (5, 6), (6, 8), (8, 10)], [t.span for t in tokens])\n\n    def test_tokenize_with_phrases(self):\n        text = '充実野菜は野菜ジュースです'\n        tokens = self._phrase_tokenizer.tokenize(text)\n\n        ok_(all([isinstance(t, Token) for t in tokens]))\n        eq_(['充実野菜', 'は', '野菜', 'ジュース', 'です'], [t.text for t in tokens])\n        eq_([(0, 4), (4, 5), (5, 7), (7, 11), (11, 13)], [t.span for t in tokens])\n","sub_path":"tests/utils/tokenizer/test_mecab_tokenizer.py","file_name":"test_mecab_tokenizer.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"119430712","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework import routers\nfrom schools.apps.general.geolocation.views import *\nfrom schools.apps.general.entities.views import *\n\ngeneral_router = routers.DefaultRouter()\ngeneral_router.register(r'countries', CountryViewSet)\ngeneral_router.register(r'cities', CityViewSet)\ngeneral_router.register(r'district', DistrictViewSet)\n\nentities_router = routers.DefaultRouter()\nentities_router.register(r'employees', EmployeeViewSet)\nentities_router.register(r'employeestatuses', EmployeeStatusViewSet)\nentities_router.register(r'iddocumenttypes', IdDocumentTypeViewSet)\nentities_router.register(r'jobtitles', JobTitleViewSet)\n\nurlpatterns = [\n    path('api/general/', include(general_router.urls)),\n    path('api/entities/', include(entities_router.urls)),\n    path('admin/', admin.site.urls),\n]\n","sub_path":"schools/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"183581921","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Ted\nDate:\nNotes:\n\nHandles the interaction with the database.\n\nData will be stored into dictionaries.\n\n\"\"\"\n\nimport pandas as pd\nimport logging\nfrom src.config import *\n\n\ndef write_mysql(table_name: str, data: pd.DataFrame, ):\n    \"\"\"Write to the MySQL database; if the table already exists, append the rows\"\"\"\n    try:\n        data.to_sql(name=f'o_{table_name}', con=RemoteMySQLConfig.engine, if_exists='append', index=0)\n        logging.info(f\"mysql write table {table_name} succeeded!\")\n    except Exception as exc:\n        logging.error(f\"mysql write table {table_name} failed, error: {exc}.\")\n        raise\n\n\ndef write_local(table_name: str, data: pd.DataFrame):\n    \"\"\"Write to a local CSV file\"\"\"\n    try:\n        data.to_csv(join(SaveConfig.OUT_DIR, f\"{table_name}.csv\"), index=0)\n        logging.info(f\"csv write table {table_name} succeeded!\")\n    except Exception as exc:\n        logging.error(f\"csv write table {table_name} failed, error: {exc}.\")\n        raise\n\n\ndef load_from_local(table_name: str):\n    \"\"\"Read data from a local file in csv format\"\"\"\n    logging.info(msg=f\"Reading local table {table_name}\")\n    table = pd.read_csv(join(SaveConfig.DATA_DIR, f\"{table_name}.csv\"))\n    return table\n\n\ndef load_from_mysql(table_name: str):\n    \"\"\"Read a table from the remote MySQL database\"\"\"\n    
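# Editor's note (added): read_sql_table pulls the whole table into a DataFrame,\n    # which is acceptable here since these configuration tables are small.\n    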
logging.info(msg=f\"Reading mysql table {table_name}\")\n    table = pd.read_sql_table(con=RemoteMySQLConfig.engine, table_name=f\"{table_name}\")\n    return table\n\n\ndef get_trucks(is_test: bool=False, is_local: bool=False):\n    \"\"\"\n    Return the truck data as a dict:\n    key   = (truck number, arrival time, cargo path type (L/A..))\n    value = the packages table of one truck\n    \"\"\"\n    table_name = \"i_od_parcel_landside\"\n    if is_local:\n        table = load_from_local(table_name)\n    else:\n        table = load_from_mysql(table_name)\n    if is_test:\n        table = table.head(100)\n\n    # convert datetime to seconds\n\n    table[\"arrive_time\"] = (table[\"arrive_time\"] - TimeConfig.ZERO_TIMESTAMP)\\\n        .apply(lambda x: x.total_seconds() if x.total_seconds() > 0 else 0)\n    # 'plate_num' identifies the truck/plane\n    return dict(list(table.groupby(['plate_num', 'arrive_time', 'src_type'])))\n\n\ndef get_ulds(is_test: bool=False, is_local: bool=False):\n    \"\"\"\n    Return the ULD data as a dict:\n    key   = (uld number, arrival time, cargo path type (LL/LA..))\n    value = the packages table of one ULD\n    \"\"\"\n    table_name = \"i_od_parcel_airside\"\n    if is_local:\n        table = load_from_local(table_name)\n    else:\n        table = load_from_mysql(table_name)\n    if is_test:\n        table = table.head(1000)\n    # add path_type: LL/LA/AL/AA\n    table['path_type'] = table['origin_type'] + table['dest_type']\n    # 'plate_num' identifies the truck/plane\n    return dict(list(table.groupby(['uld_num', 'arrive_time', 'path_type'])))\n\n\ndef get_unload_setting(is_local: bool=False):\n    \"\"\"\n    Return a dict mapping each unload port to its truck types (L, A)\n    examples:\n        {'r1_1': ['L'], 'r3_1': ['L', 'A']}\n    \"\"\"\n\n    table_name = \"i_unload_setting\"\n\n    if is_local:\n        table = load_from_local(table_name)\n    else:\n        table = load_from_mysql(table_name)\n\n    table_dict= \\\n        table.groupby('equipment_port')['origin_type'].apply(set).apply(list).to_dict()\n    return table_dict\n\n\ndef get_reload_setting(is_local: bool=False):\n    \"\"\"\n    Return a dict mapping (dest_code, sorter type, dest type) to reload ports\n    examples:\n        { (\"571J\", \"reload\", \"L\"): [\"c1_1\", ], (\"571K\", \"small_sort\", \"L\"): [\"c2_3\", \"c2_5\"] }\n    \"\"\"\n\n    table_name = \"i_reload_setting\"\n\n    if is_local:\n        table = load_from_local(table_name)\n    else:\n        table = load_from_mysql(table_name)\n    table_dict= \\\n        table.groupby(['dest_zone_code', 'sorter_type', 'dest_type'])['equipment_port'].apply(set).apply(list).to_dict()\n    return table_dict\n\n\ndef get_resource_limit(is_local: bool=False):\n    \"\"\"Return the resource table, including each resource's processing time\"\"\"\n    table_name1 = \"i_resource_limit\"\n    table_name2 = \"i_equipment_resource\"\n    table_name3 = \"i_equipment_io\"\n\n    if is_local:\n        table1 = load_from_local(table_name1)\n        table2 = load_from_local(table_name2)\n        table3 = load_from_local(table_name3)\n    else:\n        table1 = load_from_mysql(table_name1)\n        table2 = load_from_mysql(table_name2)\n        table3 = load_from_mysql(table_name3)\n\n    table2 = table2[[\"resource_id\", \"equipment_id\"]].drop_duplicates()\n    table3 = table3[[\"equipment_id\", \"process_time\"]].drop_duplicates()\n\n    table_temp = table2.merge(table3, how=\"left\", on=\"equipment_id\")\n\n    table_temp2 = table_temp.groupby([\"resource_id\"])[\"process_time\"].unique().apply(\n        lambda x: x[0] if len(x) == 1 else None)\n    table_temp2 = table_temp2.to_frame(\"process_time\").reset_index()\n\n    table = table1.merge(table_temp2, how=\"left\", on=\"resource_id\")\n\n    # checking merge correct\n    assert table1.shape[0] == table.shape[0]\n    return table\n\n\ndef get_resource_equipment_dict(is_local: bool=False):\n    \"\"\"Return the mapping between equipment ports and resource ids\"\"\"\n    table_name = \"i_equipment_resource\"\n    table = load_from_local(table_name) if is_local else 
load_from_mysql(table_name)\n\n    table_dict = dict()\n\n    for _, row in table.iterrows():\n        table_dict[row[\"equipment_port\"]] = row[\"resource_id\"]\n\n    return table_dict\n\ndef get_pipelines(is_local: bool=False, ):\n\n    \"\"\"Return the queue table, including each queue's functional area and transfer time\"\"\"\n\n    tab_n_queue_io = \"i_queue_io\"\n    tab_queue_io = load_from_local(tab_n_queue_io) if is_local else load_from_mysql(tab_n_queue_io)\n    line_count_ori = tab_queue_io.shape[0]\n\n    # fixme: need to add in database\n    # add machine_type\n    # m: presort\n    # i1 - i8: secondary_sort\n    # i17 - i24: secondary_sort\n    # i9 - i16: small_sort\n    # j: security\n    # h: hospital\n    # e, x: cross\n\n    secon_sort_mark1 = [f'i{n}' for n in range(1, 9)]\n    secon_sort_mark2 = [f'i{n}' for n in range(17, 25)]\n    secon_sort_mark = secon_sort_mark1 + secon_sort_mark2\n\n    ind_presort = tab_queue_io.equipment_port_next.str.startswith('m')\n    ind_secondary_sort = tab_queue_io.equipment_port_next.apply(lambda x: x.split('_')[0]).isin(secon_sort_mark)\n    ind_small_sort = tab_queue_io.equipment_port_next.str.startswith('u')\n    ind_security = tab_queue_io.equipment_port_next.str.startswith('j')\n    ind_hospital = tab_queue_io.equipment_port_next.str.startswith('h')\n    ind_cross = \\\n        tab_queue_io.equipment_port_next.str.startswith('e') | tab_queue_io.equipment_port_next.str.startswith('x')\n\n    # treat i-i, i-c and i-e links as conveyors that must request a resource\n    ind_pipeline_res = \\\n        tab_queue_io.equipment_port_last.str.startswith('i') & \\\n        (tab_queue_io.equipment_port_next.str.startswith('c') | tab_queue_io.equipment_port_next.str.startswith('i')\\\n        | tab_queue_io.equipment_port_next.str.startswith('e'))\n\n    tab_queue_io.loc[ind_presort, \"machine_type\"] = \"presort\"\n    tab_queue_io.loc[ind_secondary_sort, \"machine_type\"] = \"secondary_sort\"\n    tab_queue_io.loc[ind_small_sort, \"machine_type\"] = \"small_sort\"\n    tab_queue_io.loc[ind_security, \"machine_type\"] = \"security\"\n    tab_queue_io.loc[ind_cross, \"machine_type\"] = \"cross\"\n    tab_queue_io.loc[ind_hospital, \"machine_type\"] = \"hospital\"\n\n    tab_queue_io.loc[ind_pipeline_res, \"pipeline_type\"] = \"pipeline_res\"\n    tab_queue_io.loc[~ind_pipeline_res, \"pipeline_type\"] = \"pipeline\"\n\n    line_count_last = tab_queue_io.shape[0]\n    assert line_count_ori == line_count_last\n    return tab_queue_io\n\n\ndef get_queue_io(is_local: bool=False):\n    \"\"\"Return the io pairs: [(r1_1, m1_1), (r1_3, m2_3), ]\"\"\"\n    table = get_pipelines(is_local)\n    io_list = []\n    for _, row in table.iterrows():\n        io_list.append((row['equipment_port_last'], row['equipment_port_next']))\n    return io_list\n\n\ndef get_equipment_process_time(is_local: bool=False):\n    \"\"\"\n    Return each equipment port's processing time (not necessarily tied to a resource)\n    samples:\n    {'a1_1': 0.0,\n     'a1_10': 0.0,\n     'a1_11': 0.0,\n     'a1_12': 0.0,\n     'a1_2': 0.0,\n     'a1_3': 0.0,}\n    \"\"\"\n    table_n = \"i_equipment_io\"\n    table = load_from_local(table_n) if is_local else load_from_mysql(table_n)\n    table_dict = table.groupby([\"equipment_port\"])[\"process_time\"].apply(lambda x: list(x)[0]).to_dict()\n\n    return table_dict\n\nif __name__ == \"__main__\":\n    test = get_pipelines()\n    print(test)\n","sub_path":"src/db/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":8620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"462463693","text":"import argparse\nimport json\nfrom pathlib import Path\nfrom collections import Counter\nfrom functools import partial\nimport concurrent\n\nfrom tqdm import tqdm\nimport numpy as np\nfrom gensim.models import word2vec, KeyedVectors, fasttext, Word2Vec, FastText\nfrom 
pyserini.analysis import Analyzer, get_lucene_analyzer\n\n\nGLOVE = \"glove\"\nW2V = \"w2v\"\nFAST_TEXT = \"fast_text\"\nFAST_TEXT_KV = \"fast_text_kv\"\n\n\ndef tokenize_process(text, st_tokenizer):\n t_doc = st_tokenizer([text.lower().split()])\n t_text = [token.text for sentence in t_doc.sentences for token in sentence.tokens]\n return t_text\n\n\ndef w2v_finetune(sentences_tokenized, model, model_path, epoch=3):\n # https://www.kaggle.com/rtatman/fine-tuning-word2vec\n d = model.vector_size\n model_2 = Word2Vec(vector_size=d, min_count=1)\n model_2.build_vocab(sentences_tokenized)\n total_examples = model_2.corpus_count\n model_2.build_vocab([list(model.key_to_index.keys())], update=True)\n print(len(model_2.wv))\n model_2.wv.vectors_lockf = np.ones(len(model_2.wv), dtype=np.float32)\n model_2.wv.intersect_word2vec_format(model_path, binary=True, lockf=1.0)\n model_2.train(sentences_tokenized, total_examples=total_examples, epochs=epoch)\n return model_2\n\n\ndef fasttext_finetune_ftkv(sentences_tokenized, model, model_path, epoch=3):\n # https://www.kaggle.com/rtatman/fine-tuning-word2vec\n d = model.vector_size\n model_2 = FastText(vector_size=d, min_count=1)\n model_2.build_vocab(sentences_tokenized)\n total_examples = model_2.corpus_count\n model_2.build_vocab([list(model.key_to_index.keys())], update=True)\n print(len(model_2.wv))\n model_2.wv.vectors_lockf = np.ones(len(model_2.wv), dtype=np.float32)\n model_2.wv.intersect_word2vec_format(model_path, lockf=1.0)\n model_2.train(sentences_tokenized, total_examples=total_examples, epochs=epoch)\n return model_2\n\n\ndef fasttext_finetune(sentences_tokenized, model, model_path, epoch=3):\n model.build_vocab(sentences_tokenized, update=True)\n total_examples = len(sentences_tokenized)\n print(\"start train\")\n model.train(sentences_tokenized, total_examples=total_examples, epochs=epoch)\n return model\n\n\ndef doc_tokenizer(doc_file, tokenizer):\n tokenized_sentences = []\n with doc_file.open() as f:\n for line in tqdm(f):\n sentence = json.loads(line)[\"contents\"]\n t_sentence = tokenizer(sentence)\n tokenized_sentences.append(t_sentence)\n\n return tokenized_sentences\n\n\ndef main(args):\n input_doc_path = Path(args.input)\n model_path = args.model_path\n output_model_path = args.output\n\n print(\"load model\")\n if args.pretrain_model.lower() == GLOVE:\n model = KeyedVectors.load_word2vec_format(model_path, no_header=True)\n finetune = w2v_finetune\n elif args.pretrain_model.lower() == W2V:\n model = KeyedVectors.load_word2vec_format(model_path, binary=True)\n finetune = w2v_finetune\n elif args.pretrain_model.lower() == FAST_TEXT_KV:\n model = KeyedVectors.load_word2vec_format(model_path)\n finetune = fasttext_finetune_ftkv\n elif args.pretrain_model.lower() == FAST_TEXT:\n model = fasttext.load_facebook_model(model_path)\n model.min_count = 1\n finetune = fasttext_finetune\n else:\n raise ValueError(f\"{args.pretrain_model} doesn't exist\")\n\n analyzer = Analyzer(get_lucene_analyzer())\n tokenizer = analyzer.analyze\n tokenized_sentences = []\n if input_doc_path.is_dir():\n doc_files = sorted(input_doc_path.glob(\"*.json\"))\n for doc_file in doc_files:\n print(doc_file)\n with doc_file.open() as f:\n for line in f:\n sentence = json.loads(line)[\"contents\"]\n t_sentence = tokenizer(sentence)\n tokenized_sentences.append(t_sentence)\n\n else:\n with input_doc_path.open() as f:\n for line in f:\n sentence = json.loads(line)[\"contents\"]\n t_sentence = tokenizer(sentence)\n tokenized_sentences.append(t_sentence)\n\n 
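# Editor's note (added): 'finetune' was bound above to the strategy matching the\n    # pretrained format (intersect_word2vec_format for w2v/glove, incremental vocab\n    # update for fasttext), so the call below is format-agnostic.\n    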
finetuned_model = finetune(tokenized_sentences, model, model_path)\n if args.pretrain_model.lower() not in {FAST_TEXT, FAST_TEXT_KV}:\n finetuned_model.wv.save_word2vec_format(output_model_path)\n else:\n finetuned_model.save(output_model_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-i\", dest=\"input\")\n parser.add_argument(\"-o\", dest=\"output\")\n parser.add_argument(\"-m\", dest=\"model_path\")\n parser.add_argument(\"-p\", dest=\"pretrain_model\", default=\"\")\n\n args = parser.parse_args()\n\n main(args)\n","sub_path":"utils/w2v_finetune.py","file_name":"w2v_finetune.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"528958672","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/cctyper/prodigal.py\n# Compiled at: 2020-04-19 11:01:09\n# Size of source mod 2**32: 1977 bytes\nimport os, subprocess, logging, sys, re, pandas as pd\n\nclass Prodigal(object):\n\n def __init__(self, obj):\n self.master = obj\n for key, val in vars(obj).items():\n setattr(self, key, val)\n\n def run_prod(self):\n if not self.redo:\n logging.info('Predicting ORFs with prodigal')\n with open(self.out + 'prodigal.log', 'w') as (prodigal_log):\n subprocess.run(['prodigal',\n '-i', self.fasta,\n '-a', self.out + 'proteins.faa',\n '-p', self.prod],\n stdout=(subprocess.DEVNULL),\n stderr=prodigal_log)\n self.check_rerun()\n self.get_genes()\n\n def check_rerun(self):\n if os.stat(self.prot_path).st_size == 0:\n if self.prod == 'single':\n logging.warning('Prodigal failed. Trying in meta mode')\n self.prod = 'meta'\n self.run_prod()\n else:\n logging.critical('Prodigal failed! 
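A hedged sketch of the continued-training pattern behind w2v_finetune/fasttext_finetune above (gensim >= 4 API assumed; both toy corpora are invented):

from gensim.models import Word2Vec

base = [["hello", "world"], ["hello", "there"]]          # made-up base corpus
extra = [["new", "domain", "tokens"], ["new", "text"]]   # made-up fine-tune corpus

model = Word2Vec(vector_size=32, min_count=1)
model.build_vocab(base)
model.train(base, total_examples=model.corpus_count, epochs=3)

model.build_vocab(extra, update=True)        # grow the vocabulary in place
model.train(extra, total_examples=len(extra), epochs=3)
print("new" in model.wv.key_to_index)        # -> True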
Check the log')\n sys.exit()\n\n def get_genes(self):\n with open(self.out + 'genes.tab', 'w') as (gene_tab):\n subprocess.run(['grep', '^>', self.out + 'proteins.faa'], stdout=gene_tab)\n genes = pd.read_csv((self.out + 'genes.tab'), sep='\\\\s+', header=None, usecols=(0,\n 2,\n 4,\n 6),\n names=('Contig', 'Start', 'End', 'Strand'))\n genes['Contig'] = [re.sub('^>', '', x) for x in genes['Contig']]\n genes['Pos'] = [int(re.sub('.*_', '', x)) for x in genes['Contig']]\n genes['Contig'] = [re.sub('_[0-9]*$', '', x) for x in genes['Contig']]\n self.genes = genes\n genes.to_csv((self.out + 'genes.tab'), index=False, sep='\\t')","sub_path":"pycfiles/cctyper-1.0.1-py3.8/prodigal.cpython-38.py","file_name":"prodigal.cpython-38.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"304318377","text":"\n\n#calss header\nclass _FULCRUM():\n\tdef __init__(self,): \n\t\tself.name = \"FULCRUM\"\n\t\tself.definitions = [u'the point at which a bar, or something that is balancing, is supported or balances: ', u'the main thing or person needed to support something or to make it work or happen: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_fulcrum.py","file_name":"_fulcrum.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"440133968","text":"#!/usr/bin/env python3\n\n# Requires PyAudio and PySpeech.\nimport speech_recognition as sr\nimport unidecode as normalize\nfrom threading import Thread\nfrom database import load\nfrom phrases import read\nfrom queue import Queue\nfrom time import ctime\n#from gtts import gTTS\nfrom time import sleep\nimport pyttsx\nimport os\n \n#def speak(audioString):\n#\tprint(audioString)\n#\ttts = gTTS(text=audioString, lang='es')\n#\ttts.save(\".record.mp3\")\n#\tos.system(\"mpg321 .record.mp3\")\n\ndef speak(audioString):\n\tspeech_engine = pyttsx.init('espeak')\n\tvoices = speech_engine.getProperty('voices')\n\tspeech_engine.setProperty('voice', voices[20].id)\n\tspeech_engine.setProperty('rate', 135)\n\tspeech_engine.say(audioString)\n\tspeech_engine.runAndWait()\n \ndef recordAudio():\n\t# Record Audio\n\tr = sr.Recognizer()\n\twith sr.Microphone() as source:\n\t\tr.adjust_for_ambient_noise(source)\n\t\tprint(\"Bienvenido señor!\")\n\t\taudio = r.listen(source)\n\n\t# Speech recognition using Google Speech Recognition\n\tdata = \"\"\n\ttry:\n\t\t# Uses the default API key\n\t\t# To use another API key: `r.recognize_google(audio, key=\"GOOGLE_SPEECH_RECOGNITION_API_KEY\")`\n\t\tdata = r.recognize_google(audio, language = \"es-PE\")\n\t\tprint(\"Dijiste: \" + data)\n\texcept sr.UnknownValueError:\n\t\tprint(\"No entiendi bien lo que dijiste!, repitelo por favor\")\n\texcept sr.RequestError as e:\n\t\tprint(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n\n\treturn data\n\ndef remove_accents(s):\n\treturn normalize.unidecode(s)\n\ndef jarvis(data):\n\tdata = data.lower()\n\tdata = remove_accents(data)\n\n\tq = Queue()\n\tsubRead = Thread(target=read, args=(data,q)).start()\n\tresult = q.get()\n\n\t#print(\"====>\", result)\n\n\tif result != '':\n\t\tsubSpeak = Thread(target=speak, args=(result,)).start()\n\t\n\t#if \"hola ferjarvis\" == 
data:\n\t#\tspeak(messages.bienvenida())\n\t#if \"dime la hora\" in data:\n\t#\tspeak(\"A sus órdenes señor\")\n\t#\tspeak(str(messages.nowHour()))\n\t#if \"donde esta\" in data:\n\t#\tdata = data.split(\" \")\n\t#\tlocation = data[2]\n\t#\tspeak(\"Espere señor, estoy trabajando para ubicar donde esta \" + location)\n\t#\tos.system(\"google-chrome https://www.google.nl/maps/place/\" + location + \"/&\")\n\ndef main():\n\tsubLoad = Thread(target=load, args=()).start()\n\tsleep(2)\n\tspeak(\"Cargando\")\n\twhile 1:\n\t\tdata = recordAudio()\n\t\t#print('===========>',data)\n\t\tjarvis(data)\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"446626916","text":"import numpy as np\nimport pandas as pd\nimport torch\nimport os\nimport sys\nfrom torchsummary import summary\nimport torch.nn as nn\nfrom collections import defaultdict\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nmatplotlib.style.use('ggplot')\nimport seaborn as sns\nsns.set_theme()\n\nimport math\n\nimport models\nimport random\n\nimport torch.optim\nimport torch\nimport argparse\nimport utils\nimport datetime\n\nfrom sklearn.linear_model import LogisticRegression\n\n#from torchvision import models, datasets, transforms\n\ntry:\n from tqdm import tqdm\nexcept:\n def tqdm(x): return x\n\n\nif __name__ == '__main__':\n\n torch.autograd.set_detect_anomaly(True)\n\n parser = argparse.ArgumentParser('Training a classifier to inspect the layers')\n parser.add_argument('--name', default='post-linear', type=str, help='the name of the experiment')\n parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='leraning rate')\n parser.add_argument('--save_model', action='store_true', default=True, help='stores the model after some epochs')\n parser.add_argument('--nepochs', type=int, default=200, help='the number of epochs to train for')\n parser.add_argument('--batch_size', '-bs', type=int, default=100, help='the dimension of the batch')\n parser.add_argument('--debug', action='store_true', help='debug')\n parser.add_argument('--size_max', type=int, default=None, help='maximum number of traning samples')\n parser.add_argument('--ntry', type=int, default=10, help='The number of permutations to test')\n parser_model = parser.add_mutually_exclusive_group(required=True)\n parser_model.add_argument('--model', help='path of the pretrained deep model trained')\n parser_model.add_argument('--checkpoint', help='path of the previous computation checkpoint')\n parser.add_argument('--gd_mode', '-gdm', default='stochastic', choices=['full', 'stochastic'], help='whether the gradient is computed full batch or stochastically')\n parser_device = parser.add_mutually_exclusive_group()\n parser_device.add_argument('--cpu', action='store_true', dest='cpu', help='force the cpu model')\n parser_device.add_argument('--cuda', action='store_false', dest='cpu')\n parser_device.add_argument('--Rs', nargs='*', type=int, default=[1600,1600], help='the number of removed neurons')\n parser.set_defaults(cpu=False)\n\n\n\n args = parser.parse_args()\n\n device = torch.device('cuda' if torch.cuda.is_available() and not args.cpu else 'cpu')\n #device = torch.device('cpu')\n\n\n dtype = torch.float\n num_gpus = torch.cuda.device_count()\n\n if args.checkpoint is not None:\n try:\n checkpoint = torch.load(args.checkpoint, map_location=device)\n #args = 
checkpoint['args']\n args.__dict__.update(checkpoint['args'].__dict__)\n cont = True # continue the computation\n except RuntimeError:\n print('Could not load the model')\n\n\n else: # new computation\n checkpoint = dict()\n\n try:\n checkpoint_model = torch.load(args.model, map_location=device) # checkpoint is a dictionnary with different keys\n except RuntimeError as e:\n print('Error loading the model at {}'.format(e))\n\n\n\n #if 'seed' in checkpoint.keys():\n # seed = checkpoint['seed']\n # torch.manual_seed(seed)\n #else:\n # seed = torch.random.seed()\n\n #if device.type == 'cuda':\n # torch.cuda.manual_seed(seed)\n #np.random.seed(seed)\n #random.seed(seed)\n\n args_model = checkpoint_model['args'] # restore the previous arguments\n path_output = os.path.join(args_model.output_root, args_model.name, args.name)\n\n #model = models.cnn.CNN(1)\n\n\n # Logs\n log_fname = os.path.join(args_model.output_root, args_model.name, 'logs.txt')\n\n\n os.makedirs(path_output, exist_ok=True)\n\n num_classes = utils.get_num_classes(args_model.dataset)\n\n model, input_size = models.pretrained.initialize_model(args_model.model, pretrained=False, freeze=True, num_classes=num_classes)\n model.load_state_dict(checkpoint_model['model'])\n model.to(device)\n\n imresize = input_size\n train_dataset, test_dataset, num_chs = utils.get_dataset(dataset=args_model.dataset,\n dataroot=args_model.dataroot,\n imresize =imresize)\n train_loader, size_train,\\\n val_loader, size_val,\\\n test_loader, size_test = utils.get_dataloader( train_dataset,\n test_dataset,\n batch_size =args.batch_size,\n ss_factor=1,\n size_max=args.size_max,\n collate_fn=None,\n pin_memory=True)\n\n\n\n\n\n\n num_classes = len(train_dataset.classes)\n imsize = next(iter(train_loader))[0].size()[1:]\n input_dim = imsize[0]*imsize[1]*imsize[2]\n\n if not args.debug:\n logs = open(os.path.join(path_output, 'logs_lin.txt'), 'w')\n else:\n logs = sys.stdout\n# logs = None\n logs_debug = open(os.path.join(path_output, 'debug.log'), 'w')\n\n print(os.sep.join((os.path.abspath(__file__).split(os.sep)[-2:])), file=logs) # folder + name of the script\n print('device= {}, num of gpus= {}'.format(device, num_gpus), file=logs)\n print('dtype= {}'.format(dtype), file=logs)\n\n for k, v in vars(args).items():\n print(\"%s= %s\" % (k, v), file=logs, flush=True)\n\n\n #imresize = (256, 256)\n #imresize=(64,64)\n\n\n\n #min_width = int(args.coefficient *math.sqrt(size_train)+0.5)\n #max_width = int(3*args.coefficient *math.sqrt(size_train)+0.5)\n #model = models.classifiers.FCN3(input_dim=input_dim, num_classes=num_classes, min_width=min_width, max_width=max_width)\n #archi = utils.parse_archi(log_fname)\n\n\n #Rs = [0, 0] # the neurons to remove from L-1, L-2 ... 
layers of the classifier\n    Rs = args.Rs\n    if len(Rs) == 1:\n        Rs += [Rs[0]]\n    linear_classifier = models.classifiers.ClassifierVGG(model, args.ntry, Rs)\n    linear_classifier.to(device)\n\n    if 'linear_classifier' in checkpoint.keys():\n        linear_classifier.load_state_dict(checkpoint['linear_classifier'])\n\n    num_parameters = utils.num_parameters(linear_classifier)\n    num_samples_train = size_train\n    num_samples_val = size_val\n    num_samples_test = size_test\n    print('Number of parameters: {}'.format(num_parameters), file=logs)\n    print('Number of training samples: {}'.format(num_samples_train), file=logs)\n    print('Number of testing samples: {}'.format(size_test), file=logs)\n    #print('Layer dimensions'.format(linear_classifier.neurons), file=logs)\n    print('Image dimension: {}'.format(imsize), file=logs)\n\n    #summary(model, [imsize, (1,)])\n    #model.apply(models.cnn.init_weights)\n\n\n\n\n\n\n    print('Linear classifier: {}'.format(str(linear_classifier)), file=logs)\n    #parameters = [ p for p in model.parameters() if not feature_extraction or p.requires_grad ]\n    parameters = list(linear_classifier.parameters())\n\n    #optimizer = torch.optim.AdamW(\n    #        parameters, lr=args.learning_rate, betas=(0.95, 0.999), weight_decay=0,\n    #        )\n    #optimizer = torch.optim.RMSprop(parameters, lr=args.learning_rate)\n\n    optimizer = torch.optim.SGD(\n        #parameters, lr=args.learning_rate, momentum=(args.gd_mode=='full') * 0 + (args.gd_mode =='stochastic')*0.95\n        parameters, lr=args.learning_rate, momentum=0.95\n    )\n    #lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')\n    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=500, gamma=0.9)\n\n    if 'optimizer' in checkpoint.keys():\n        optimizer.load_state_dict(checkpoint['optimizer'])\n\n    if 'lr_scheduler' in checkpoint.keys():\n        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n\n    names=['set', 'stat', 'layer', 'try']\n    tries = np.arange(args.ntry)\n    sets = ['train', 'test']\n    stats = ['loss', 'err']\n    layers = ['last', 'hidden']\n    columns=pd.MultiIndex.from_product([sets, stats, layers, tries], names=names)\n    index = pd.Index(np.arange(1, args.nepochs+1), name='epoch')\n    quant = pd.DataFrame(columns=columns, index=index, dtype=float)\n\n    quant.sort_index(axis=1, inplace=True) # sort for quicker access\n\n\n\n    if 'quant' in checkpoint.keys():\n        quant.update(checkpoint['quant'])\n\n    classes = torch.arange(num_classes).view(1, -1).to(device) # the different possible classes\n\n    def zero_one_loss(x, targets):\n        ''' x: TxBxC\n        targets: Bx1\n\n        returns: err of size T\n        '''\n        # compare against the targets argument (not a variable from the enclosing loop)\n        return (x.argmax(dim=2)!=targets).float().mean(dim=1)\n\n    #mse_loss = nn.MSELoss()\n    #ce_loss_check = nn.CrossEntropyLoss(reduction='none')\n\n    def ce_loss(input, target):\n        '''Batch cross entropy loss\n\n        input: TxBxC output of the linear model\n        target: Bx1: the target classes\n\n        output: TxB the loss for each try\n        '''\n\n\n        T, B, C = input.size()\n        # squeeze only the gathered class dim so T=1 or B=1 batches keep their shape\n        cond = input.gather(2,target.view(1, -1, 1).expand(T, -1, -1)).squeeze(2)\n        output = - cond + input.logsumexp(dim=2)\n        return output\n\n\n    def get_checkpoint():\n\n        global epoch\n        global model\n        global args\n        global optimizer\n        global quant\n\n        checkpoint = {\n            'linear_classifier': linear_classifier.state_dict(),\n            'quant': quant,\n            'args': args,\n            'optimizer': optimizer.state_dict(),\n            'epochs': epoch,\n            #'seed': seed,\n        }\n        return checkpoint\n\n    def save_checkpoint(fname=None, checkpoint=None):\n        '''Save checkpoint to disk'''\n\n        global path_output\n\n        if fname is None:\n            fname = os.path.join(path_output, 
'checkpoint_lin.pth')\n\n if checkpoint is None:\n checkpoint = get_checkpoint()\n\n torch.save(checkpoint, fname)\n\n\n start_epoch = 0\n DO_SANITY_CHECK = False\n\n if 'epochs' in checkpoint.keys():\n start_epoch = checkpoint['epochs']\n\n epoch = start_epoch - 1 if DO_SANITY_CHECK else start_epoch\n stop = False\n frozen = False # will freeze the update to check if data is separated\n\n while not stop:\n #for epoch in tqdm(range(start_epoch-1, start_epoch+args.nepochs)):\n\n\n\n if epoch == start_epoch-1: # init the error counting\n err = 0\n else:\n linear_classifier.train()\n loss_tot = np.zeros(args.ntry) # for the\n err_tot = np.zeros(args.ntry)\n err_train_hidden = np.zeros(args.ntry)\n loss_hidden_tot = np.zeros(args.ntry) # for the\n ones = torch.ones(args.ntry, device=device, dtype=dtype)\n\n for idx, (x, y) in enumerate(train_loader, 1):\n\n\n x = x.to(device)\n y = y.to(device)\n if epoch == start_epoch-1:\n out = model(x).unsqueeze(0) # 1xBxC\n loss = ce_loss(out, y).mean() # TxB\n err += zero_one_loss(out,y).mean().detach().cpu().numpy() # just check if the number of error is 0\n\n else:\n optimizer.zero_grad()\n out, out_hidden = linear_classifier(x) # TxBxC, LxBxC # each output for each layer\n loss = ce_loss(out, y) # TxB\n loss_hidden = ce_loss(out_hidden, y)\n err_tot += zero_one_loss(out, y).detach().cpu().numpy() #\n #err_tot = (idx * err_tot + err.detach().cpu().numpy()) / (idx+1)\n loss_tot = (idx * loss_tot + loss.mean(dim=1).detach().cpu().numpy()) / (idx+1)\n loss_hidden_tot = (idx * loss_hidden_tot + loss_hidden.mean(dim=1).detach().cpu().numpy()) / (idx+1)\n err_train_hidden += zero_one_loss(out_hidden, y).detach().cpu().numpy()\n if not frozen:\n for p in linear_classifier.parameters():\n p.grad = None\n loss.mean(dim=1).backward(ones)\n loss_hidden.mean(dim=1).backward(ones)\n optimizer.step()\n\n if epoch == start_epoch - 1: # check if we have null training error (sanity check)\n print('Error: ', err, file=logs, flush=True)\n assert err == 0\n epoch += 1\n continue\n\n epoch += 1 if not frozen else 0\n\n err_min = max(err_tot.min(), err_train_hidden.min())\n\n separated = frozen and err_min == 0\n frozen = err_min == 0 and not frozen # will test with frozen network next time, prevent from freezing twice in a row\n\n if frozen:\n print(\"Freezing the next iteration\", file=logs, flush=True)\n\n stop = (separated\n or epoch > start_epoch + args.nepochs\n )\n\n #quant_train.loc['err'] = err_tot, err_train_hidden\n #quant_train.loc['loss'] = loss_tot, loss_hidden_tot\n quant.loc[pd.IndexSlice[epoch, ('train', 'err', 'last')]] = err_tot/idx\n quant.loc[pd.IndexSlice[epoch, ('train', 'err', 'hidden')]] = err_train_hidden / idx\n quant.loc[pd.IndexSlice[epoch, ('train', 'loss', 'last')]] = loss_tot\n quant.loc[pd.IndexSlice[epoch, ('train', 'loss', 'hidden')]] = loss_hidden_tot\n\n err_test = np.zeros(args.ntry)\n err_hidden_test = np.zeros(args.ntry)\n loss_test = np.zeros(args.ntry)\n loss_hidden_test = np.zeros(args.ntry)\n\n #stats['err_hidden'].append(err_train_hidden/idx)\n\n with torch.no_grad():\n\n testloader_iter = iter(test_loader)\n #for idx, (x, y) in enumerate(train_loader, 1):\n for idx, (v, w) in enumerate(test_loader, 1):\n\n\n # x = x.to(device)\n # y = y.to(device)\n # out_train, out_train_hidden = linear_classifier(x) # TxBxC, LxBxC # each output for each layer\n # err_tot += zero_one_loss(out_train, y).detach().cpu().numpy()\n # err_train_hidden += zero_one_loss(out_train_hidden, y).detach().cpu().numpy()\n #if idx-1 < len(test_loader):\n v, w 
= next(testloader_iter)\n v = v.to(device)\n w = w.to(device)\n out_test, out_hidden_test = linear_classifier(v)\n loss_test = (idx * loss_test + ce_loss(out_test, w).mean(dim=1).detach().cpu().numpy())/(idx+1)\n loss_hidden_test = (idx * loss_hidden_test + ce_loss(out_hidden_test, w).mean(dim=1).detach().cpu().numpy())/(idx+1)\n err_test += zero_one_loss(out_test, w).detach().cpu().numpy()\n err_hidden_test += zero_one_loss(out_hidden_test, w).detach().cpu().numpy()\n #else:\n # if err_tot.max() >0:\n # break\n\n quant.loc[pd.IndexSlice[epoch, ('test', 'err', 'last')]] = err_test/idx\n quant.loc[pd.IndexSlice[epoch, ('test', 'err', 'hidden')]] = err_hidden_test / idx\n quant.loc[pd.IndexSlice[epoch, ('test', 'loss', 'last')]] = loss_test\n quant.loc[pd.IndexSlice[epoch, ('test', 'loss', 'hidden')]] = loss_hidden_test\n\n\n\n #print('ep {}, train loss (err) {:g} ({:g}), test loss (err) {:g} ({:g})'.format(\n print('ep {}, train loss (min/max): {:g} / {:g}, err (min/max): {:g}/{:g}'.format(\n epoch, quant.loc[epoch, ('train', 'loss', 'last')].min(), quant.loc[epoch, ('train', 'loss', 'last')].max(),\n quant.loc[epoch, ('train', 'err', 'last')].min(), quant.loc[epoch, ('train', 'err', 'last')].max()),\n file=logs, flush=True)\n\n utils.print_cuda_memory_usage(device, logs_debug, epoch)\n\n\n #fig, ax = plt.sub\n quant_reset = quant.reset_index()\n quant_plot = pd.melt(quant_reset, id_vars='epoch')\n g = sns.relplot(\n data = quant_plot,\n col='layer',\n hue='set',\n row='stat',\n x='epoch',\n y='value',\n kind='line',\n ci=100, # the whole spectrum of the data\n facet_kws={\n 'sharey': False,\n 'sharex': True\n }\n )\n\n g.set(yscale='log')\n\n plt.savefig(fname=os.path.join(path_output, 'losses.pdf'))\n\n plt.close('all')\n\n if args.save_model and (epoch) % 5 == 0: # we save every 5 epochs\n save_checkpoint()\n\n if stop:\n save_checkpoint()\n if separated:\n print(\"Data is separated.\", file=logs)\n sys.exit(0) # success\n else:\n print(\"Data is NOT separated.\", file=logs)\n sys.exit(1) # failure\n\n #if err_tot.min() == 0: # the data has been separated\n\n # checkpoint = {\n # 'linear_classifier': linear_classifier.state_dict(),\n # 'stats': stats,\n # 'args': args,\n # 'optimizer': optimizer.state_dict(),\n # 'epochs': epoch,\n # #'seed': seed,\n # }\n # torch.save(checkpoint, os.path.join(path_output, 'checkpoint_lin.pth'))\n # print('the data is separable!', file=logs)\n # sys.exit(1)\n\n","sub_path":"deep_lin.py","file_name":"deep_lin.py","file_ext":"py","file_size_in_byte":17305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"488623187","text":"# -*- coding:utf-8 -*-\n# __author__ = 'L'\n\nfrom multiprocessing import Queue, Process\nfrom threading import Thread\nfrom typing import Union\nfrom queue import Empty\nimport sys\nimport time\nimport os\n\n\nclass ProgressBar:\n def __init__(self, *, total: int, size: int=50,\n graph: str='=-', hide: bool=False):\n self.size = size\n self.graph = str(graph)\n self.hide = hide\n self.total = total\n self._time = time.time()\n self._starttime = time.time()\n self._updatetime = 0\n self._lefttime = 0\n self._progress = 0\n self._bar = '...'\n self._percent = ''\n self._processing = False\n self._kill = False\n self.fps = 4\n self._update_interval = 0.1\n self.q: Queue = Queue()\n self.p: Process = None\n self.t: Thread = None\n\n def _update(self):\n bar_size = int(self._progress * self.size // self.total)\n self._time = time.time()\n try:\n self._lefttime = (self._time - 
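A quick sanity sketch (all shapes and values invented) that the gather/logsumexp cross-entropy in ce_loss earlier in this record agrees with torch.nn.functional.cross_entropy for a single try (T=1):

import torch
import torch.nn.functional as F

T, B, C = 1, 4, 3                      # assumed toy dimensions
x = torch.randn(T, B, C)
y = torch.randint(0, C, (B,))

cond = x.gather(2, y.view(1, -1, 1).expand(T, -1, -1)).squeeze(2)
manual = (-cond + x.logsumexp(dim=2)).mean()
assert torch.allclose(manual, F.cross_entropy(x[0], y))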
self._starttime) * (self.total - self._progress) // self._progress\n except ZeroDivisionError:\n self._lefttime = 99 * 60\n self._bar = self.graph[0] * bar_size + self.graph[1] * (self.size - bar_size)\n self._percent = f'{self._progress / self.total:.2%}'\n\n def __call__(self, current, total=-1):\n if total is not -1:\n self.total = total\n self._progress = current\n self._update()\n sys.stderr.write('\\r')\n sys.stderr.write(f'[{self._bar}][{self._percent:^8}][{_timestr(self._lefttime):^7}]')\n sys.stderr.flush()\n if self._progress >= self.total:\n self._processing = False\n time.sleep(1 / self.fps)\n print(f'\\n{_timestr(time.time() - self._starttime)}\\n')\n\n def tick(self, current: Union[int, float] = -1):\n if not self._kill:\n if current is -1:\n self._progress += 1\n else:\n self._progress = current\n if not self._processing:\n self._processing = True\n self._starttime = time.time()\n self.q.put((self._bar, self._percent, self._lefttime))\n if not self.hide:\n self.p = Process(target=self._tick, args=(self.q, False))\n self.p.start()\n if time.time() - self._updatetime > self._update_interval:\n self._update()\n self._updatetime = time.time()\n try:\n self.q.get(False)\n except Empty:\n pass\n self.q.put((self._bar, self._percent, self._lefttime))\n if self._progress >= self.total:\n self._processing = False\n if not self.hide:\n try:\n self.p.terminate()\n except AttributeError:\n pass\n time.sleep(1 / self.fps)\n self.__call__(self.total)\n\n def start(self, *, proggetter=None):\n self._processing = True\n if proggetter is None and not self.hide:\n self.p = Process(target=self._tick, args=(None, True))\n self.p.start()\n else:\n self._starttime = time.time()\n self.q.put((self._bar, self._percent, self._lefttime))\n if not self.hide:\n self.p = Process(target=self._tick, args=(self.q, False))\n self.p.start()\n self.t = Thread(target=self._tack, args=(proggetter,))\n self.t.start()\n\n def _tack(self, proggetter):\n while not self._kill and self._processing:\n _progress = proggetter()\n self.tick(_progress)\n\n def _tick(self, q: Queue, stopwatch=False):\n if not stopwatch:\n print(f'\\rPID: {os.getpid()}')\n time.sleep(1 / self.fps)\n _time = time.time()\n _lefttime = 0\n _bar = '...'\n _percent = ''\n while True:\n _signal = ' '\n try:\n _bar, _percent, _lefttime = q.get(False)\n _time = time.time()\n _signal = '.'\n except Empty:\n _lefttime -= time.time() - _time\n _time = time.time()\n sys.stderr.write('\\r')\n sys.stderr.write(f'[{_bar}][{_percent:^8}][{_timestr(_lefttime):^7}]{_signal}')\n sys.stderr.flush()\n time.sleep(1 / self.fps)\n else:\n _time = time.time()\n _neon = ['> ',\n '>> ',\n ' >> ',\n ' >>',\n ' >',\n ' ',\n ' <',\n ' <<',\n ' << ',\n '<< ',\n '< ',\n ' ']\n _passedtime = 0\n while _passedtime < 6000:\n _passedtime = time.time() - _time\n _index = int(_passedtime) % len(_neon)\n sys.stderr.write('\\r')\n sys.stderr.write(f'[{_neon[_index]}][{_timestr(_passedtime):^7}]')\n sys.stderr.flush()\n time.sleep(1 / self.fps)\n\n def stop(self, msg: str=''):\n if msg is not '':\n msg = ': ' + msg\n time.sleep(1 / self.fps)\n self._kill = True\n try:\n self.t.join()\n self.t = None\n except AttributeError:\n pass\n try:\n self.p.terminate()\n self.p = None\n if self._processing:\n self._processing = False\n print(f'\\n{_timestr(time.time() - self._starttime)}')\n print(f'Terminated{msg}\\n')\n except AttributeError:\n pass\n finally:\n self._kill = False\n time.sleep(1 / self.fps)\n\n def refresh(self):\n self.stop()\n self.__init__(size=self.size, 
graph=self.graph,\n total=self.total, hide=self.hide)\n\n\ndef _timestr(sec: int):\n if sec > 60:\n return f'{int(sec // 60)}m{int(sec % 60):0>2}s'\n else:\n return f'{int(sec)}s'\n\n\nif __name__ == '__main__':\n import random\n progressbar = ProgressBar(total=100)\n s = 0\n\n def getter():\n global s\n return s\n progressbar.start(proggetter=getter)\n for i in range(100):\n s = i + 1\n # progressbar.tick()\n time.sleep(random.uniform(0.01, 0.1))\n # if i is 50:\n # progressbar.stop()\n # break\n progressbar.stop()\n","sub_path":"prgrbr.py","file_name":"prgrbr.py","file_ext":"py","file_size_in_byte":6688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"575994045","text":"\"\"\"\nCreated by donsky for www.donskytech.com\n\"\"\"\nimport os\nimport sqlite3\n\n# Create DB in current file\nDEFAULT_PATH = os.path.join(os.path.dirname(__file__), '3S_Retail.db')\nCREATE_SQL_FILE = os.path.join(os.path.dirname(__file__), 'MemberDB.sql')\n\n\ndef db_connect(db_path=DEFAULT_PATH):\n con = sqlite3.connect(db_path)\n return con\n\n\ndef create_table():\n db_conn = db_connect()\n\n with db_conn:\n try:\n db_conn = db_connect()\n cursor = db_conn.cursor()\n print(\"Successfully Connected to SQLite\")\n\n with open(CREATE_SQL_FILE, 'r') as sqlite_file:\n sql_script = sqlite_file.read()\n\n cursor.executescript(sql_script)\n print(\"SQLite script executed successfully\")\n cursor.close()\n\n except sqlite3.Error as error:\n print(\"Error while executing sqlite script\", error)\n\n print(\"Successfully created table!\")\n\n\ndef create_student_task(conn, student):\n \"\"\"\n Create a new student task\n :param conn:\n :param student:\n :return:\n \"\"\"\n\n sql = ''' INSERT INTO Member(UID, Name, Recently, Email, Redeem, Time)\n VALUES(?,?,?,?,?,?) 
'''\n cur = conn.cursor()\n cur.execute(sql, student)\n conn.commit()\n return cur.lastrowid\n\n\ndef create_members():\n # create a database connection\n db_conn = db_connect()\n\n with db_conn:\n # students\n Member1 = ('FC-C8-13-31', 'Adam Smith', 'Apple', 'Adam@gmail.com', '30', '13:00:45')\n Member2 = ('57-92-DB-C6', 'Steve Davidson', 'Orange', 'Steve@gmail.com', '10', '14:20:35')\n\n # create student\n create_student_task(db_conn, Member1)\n create_student_task(db_conn, Member2)\n\n\ndef main():\n create_table()\n create_members()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"LocalDbServer/dist/Database/previous version/db_utils(old).py","file_name":"db_utils(old).py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"453947455","text":"# -*- coding: utf-8 -*-\n\n\n# import datetime\nimport random\nfrom django.contrib.auth.models import User\nfrom who_is.models import UserInfo, GameRound, Game\nimport json\nfrom datetime import timezone, datetime\n\n\nclass GameProperty():\n def __init__(self, request):\n self.list_u_choice = json.loads(request.POST.get('list_ch_usr'))\n self.last_latter = request.POST.get('last_letter')\n self.game_id = request.POST.get('game_id')\n self.user = request.user\n\n def update_rounde(self):\n self.rounde = GameRound.objects.filter(game=self.game_id).count()\n\n def reg_game(self):\n game = Game(id=self.game_id, date=datetime.now(), user=self.user)\n game.save()\n\n def reg_game_round(self):\n if self.last_latter == 'e':\n result = 1\n else:\n result = 0\n game_round = GameRound(result=result, game_id=self.game_id)\n game_round.save()\n\n def get_variation(self): #(list_u_choice, rounde, game_id):\n roundeX = self.rounde - 1\n\n\n user_xr = User.objects.get(id=self.list_u_choice[roundeX])\n u_sex = UserInfo.objects.get(user_id=user_xr)\n a = [self.list_u_choice[roundeX]]\n l_f_s = UserInfo.objects.filter(sex=u_sex.sex).values('user_id')\n list_of_users = []\n for i in l_f_s:\n if i['user_id'] == self.user.id:\n pass\n else:\n list_of_users.append(i['user_id'])\n while True:\n var = random.choice(list_of_users)\n if var in a:\n pass\n else:\n a.append(var)\n if len(a) == 3:\n break\n context = {}\n print(a)\n name1 = User.objects.get(id=a[1])\n name2 = User.objects.get(id=a[2])\n image = UserInfo.objects.get(user_id=a[0])\n user_imag = 'user_img{}'.format(self.rounde)\n names = 'names{}'.format(self.rounde)\n rounde = GameRound.objects.filter(game=self.game_id).count()\n context['rounde'] = rounde\n context[user_imag] = {'avatar': image.avatar, 'position': image.position}\n user_test = User.objects.get(id=a[0])\n context[names] = [{'Test': 'True', 'Name': user_test.first_name + ' ' + user_test.last_name},\n {'Test': 'Truе', 'Name': name1.first_name + ' ' + name1.last_name},\n {'Test': 'Tru5', 'Name': name2.first_name + ' ' + name2.last_name}\n ]\n random.shuffle(context[names], random.random)\n return context\n\n def finish_game(self):\n result_sum = GameRound.objects.filter(game=self.game_id, result=1).count()\n confirm_result = Game.objects.get(id=self.game_id)\n confirm_result.result = result_sum\n confirm_result.save()\n infa = {'rounde': self.rounde}\n return infa\n\n\ndef get_r_list(user_id, user_list):\n a = []\n while True:\n choice = random.choice(user_list)\n if choice == user_id:\n pass\n elif choice in a:\n pass\n else:\n a.append(choice)\n if len(a) == 5:\n break\n return json.dumps(a)\n\n\ndef eng_str(g):\n g = str(g)\n g = g.replace('[', 
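An in-memory sketch (toy two-column table, not the full Member schema) of the '?'-placeholder INSERT used by create_student_task above; sqlite3 binds the value tuple safely instead of string formatting:

import sqlite3

con = sqlite3.connect(":memory:")  # throwaway DB, not 3S_Retail.db
con.execute("CREATE TABLE Member (UID TEXT, Name TEXT)")
cur = con.execute("INSERT INTO Member(UID, Name) VALUES(?, ?)",
                  ("FC-C8-13-31", "Adam Smith"))
con.commit()
print(cur.lastrowid)   # -> 1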
'')\n g = g.replace(']', '')\n g = g.replace(' ', '')\n return g\n\n\ndef dec_str(g):\n g = g.split(',')\n print(g)\n h = []\n for i in g:\n h.append(int(i))\n g = h\n return g\n\n\ndef get_last_u_res(user_id):\n results_l = []\n results = Game.objects.filter(user=user_id).order_by('-date')[:4]\n for res in results:\n results_l.append(dict(date=str(res.date)[:19], result=res.result))\n return results_l\n\n\nclass GetAllResult():\n def __init__(self):\n self.result_l = []\n self.results = Game.objects.all().order_by('-date')[:10]\n self.kuy = {'11': 'день', '12': 'дня', '13': 'дней', '21': 'час',\n '22': 'часа', '23': 'часов', '31': 'минута', '32': 'минуты',\n '33': 'минут', '41': 'секунда', '42': 'секунды', '43': 'секунд'}\n\n def get_all(self):\n for res in self.results:\n user_id = UserInfo.objects.get(user_id=res.user)\n self.result_l.append(dict(time=str(self.test_get_all_res(res.date)),\n result=res.result,\n name=str(res.user.first_name) + ' ' + str(res.user.last_name),\n position=user_id.position))\n\n def test_get_all_res(self, g):\n h = datetime.now(timezone.utc)\n timer = h - g\n day = timer.days\n hou = timer.seconds // 60 // 60\n min = timer.seconds // 60 % 60\n sec = timer.seconds % 60\n all = [dict(s='1', r=day), dict(s='2', r=hou), dict(s='3', r=min), dict(s='4', r=sec)]\n count = 0\n deletes = []\n for i in all:\n if i['r'] == 0:\n deletes.append(count)\n count += 1\n deletes.reverse()\n for dele in deletes:\n all.pop(dele)\n return self.get_str(all[0]['r'], all[0]['s'])\n\n def get_str(self, a, info):\n x = str(a)\n if len(x) == 1:\n result = self.get_get(a, info)\n else:\n x_x = str(x[-2])\n if x_x == '1':\n r = '3'\n result = '{} {} назад'.format(a, self.kuy[info + r])\n else:\n result = self.get_get(a, info)\n return result\n\n def get_get(self, a, info):\n if a == 1:\n r = '1'\n result = '{} {} назад'.format(a, self.kuy[info + r])\n elif a >= 5 or a == 0:\n r = '3'\n result = '{} {} назад'.format(a, self.kuy[info + r])\n else:\n r = '2'\n result = '{} {} назад'.format(a, self.kuy[info + r])\n return result","sub_path":"who_is/my_modules.py","file_name":"my_modules.py","file_ext":"py","file_size_in_byte":5889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"460406889","text":"from box.settings import * \n\n\nINSTALLED_APPS.extend([\n 'project',\n \n])\n\n\nMULTIPLE_CATEGORY = False\n# MULTIPLE_CATEGORY = True\n\n\n\nUSE_I18N = True\n# DEBUG = False #python manage.py runserver --insecure # for 404 page\nCURRENT_DOMEN = 'mottoex.com.ua'\n\n\n\nLIQPAY_PUBLIC_KEY = \"sandbox_i2664988593\"\nLIQPAY_PRIVATE_KEY = \"sandbox_IkGrxQZTtxmcFs45Igmq61M4UnMlq8nT4imQCgcO\"\n# LIQPAY_PUBLIC_KEY = \"i80896492433\"\n# LIQPAY_PRIVATE_KEY = \"5CsKNZtBfnT98EjLNuAsRZvGyOAQn0QFZHKclCIY\"\n\n\nLANGUAGES = [\n ('uk', ('uk')),\n]\nCMS_TEMPLATES = [\n 'temp1',\n 'temp2',\n 'temp3',\n 'temp4',\n]\nROOT_URLCONF = 'core.urls'\nWSGI_APPLICATION = 'core.wsgi.application'\nCURRENT_DOMEN = 'mottoex.com.ua'\n\n# EMAIL_HOST_USER = \"Mottoaction@gmail.com\"\n# EMAIL_HOST_PASSWORD = \"2109Qwerty\"\nEMAIL_HOST_USER = \"admin@mottoex.com.ua\"\nEMAIL_HOST_PASSWORD = \"mottoex69018\"\nSEND_TO_EMAIL = 'Mottoaction@gmail.com'\n# SEND_TO_EMAIL = 'jurgeon018@gmail.com'\n# EMAIL_HOST_USER = 'jurgeon018@gmail.com'\n# EMAIL_HOST_PASSWORD = 'yfpfhrj69001'\n\nEMAIL_HOST = 'mail.mottoex.com.ua'\n# EMAIL_PORT = 587\n# EMAIL_USE_TLS = True\n\nDEFAULT_FROM_EMAIL = EMAIL_HOST_USER\n\n\n \nTEMPLATES[0]['OPTIONS']['context_processors'].extend([\n 
'project.context_processors.context',\n])\n\n","sub_path":"core/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"490559485","text":"import os\nimport io\nimport csv\n\nfrom pokemongo_bot.base_task import BaseTask\nfrom pokemongo_bot.base_dir import _base_dir\n\ndef csv_export(payload):\n \"\"\"\n this function (and functions for all other file types)\n receives a payload object (see 'work') to create a st\n ring that will be written in export file.\n\n file-type functions should be included in 'exporters'\n member\n\n such functions' names should be like this _export\n \"\"\"\n out = io.BytesIO()\n writer = csv.writer(out)\n writer.writerow(['username', payload['username']])\n writer.writerow(['stardust', payload['stardust']])\n writer.writerow(['pokecoin', payload['pokecoin']])\n writer.writerow(['team', payload['team']])\n writer.writerow([])\n writer.writerow(['items'])\n for item, count in payload['inventory'].iteritems():\n writer.writerow([item, count])\n writer.writerow([])\n writer.writerow(['pokemons'])\n pokekeys = list(payload['pokemon_keys'])\n writer.writerow(pokekeys)\n for pokemon in payload['pokemons']:\n temp = []\n for key in pokekeys:\n temp.append(pokemon.get(key, ''))\n writer.writerow(temp)\n\n return out.getvalue()\n\nclass ExportFile(BaseTask):\n \"\"\"\n Exports files with useful infos.\n\n Events: file_exported\n \"\"\"\n SUPPORTED_TASK_API_VERSION = 1\n\n exporters = {\n 'csv_export': csv_export\n }\n\n def initialize(self):\n self.bot.event_manager.register_event('file_exported')\n\n self.export_extension = self.config.get('file_type', 'csv')\n\n self.turn = -3\n\n self.data = {}\n self.payload = {}\n self.text = ''\n self.pokemon_keys = set()\n\n if self.config.get('relative_path', False):\n self.file_path = os.path.join(_base_dir, self.config['relative_path'])\n elif self.config.get('absolute_path', False):\n self.file_path = self.config['absolute_path']\n else:\n self.file_path = os.path.join(_base_dir, 'export.{}'.format(self.export_extension))\n\n def work(self):\n self.turn += 1\n if self.turn % 25 != 0:\n return\n\n def mapper(pokemon):\n for k, v in pokemon.iteritems():\n self.pokemon_keys.add(k)\n return pokemon\n\n self._gatherData()\n\n self.payload = {\n 'pokemons': map(mapper, self.data['pokemons']),\n 'pokemon_keys': self.pokemon_keys,\n 'stardust': self.data['stardust'],\n 'pokecoin': self.data['pokecoin'],\n 'username': self.data['username'],\n 'team': self.data['team'],\n 'inventory': dict(zip(map(lambda x: self.bot.item_list[str(x['item_id'])], self.data['items']), map(lambda x: x.get('count', 0), self.data['items'])))\n }\n\n self._export()\n self._write()\n self.emit_event(\n 'file_exported',\n formatted='{} file exported to {}'.format(self.export_extension, self.file_path)\n )\n\n def _gatherData(self):\n inventory_response = self.bot.api.get_inventory()['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']\n\n self.data = {\n 'pokemons': map(lambda x: x['inventory_item_data']['pokemon_data'], filter(lambda x: x['inventory_item_data'].get('pokemon_data', False), inventory_response)),\n 'items': map(lambda x: x['inventory_item_data']['item'], filter(lambda x: x['inventory_item_data'].get('item', False), inventory_response)),\n 'stardust': self.bot._player['currencies'][1].get('amount', 0),\n 'pokecoin': self.bot._player['currencies'][0].get('amount', 0),\n 'username': 
self.bot._player['username'],\n 'team': self.bot._player['team']\n }\n\n def _export(self):\n self.text = self.exporters['{}_export'.format(self.export_extension)](self.payload)\n\n def _write(self):\n f = open(self.file_path, 'w+')\n f.write(self.text)\n f.truncate()\n f.close()\n","sub_path":"export_file.py","file_name":"export_file.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"581083617","text":"from django.db import transaction\nfrom django.db.models import signals\nfrom django.dispatch import receiver\n\nfrom froide.foirequest.models import FoiAttachment\n\nfrom .models import Document\nfrom .utils import update_document_index\n\n\n@receiver(signals.post_save, sender=Document,\n dispatch_uid=\"document_created\")\ndef document_created(instance=None, created=False, **kwargs):\n if created and kwargs.get('raw', False):\n return\n if not created:\n update_document_index(instance)\n return\n\n from filingcabinet.tasks import process_document_task\n process_document_task.delay(instance.pk)\n\n\n@receiver(signals.post_save, sender=FoiAttachment,\n dispatch_uid='reprocess_attachment_redaction')\ndef reprocess_attachment_redaction(instance, created=False, **kwargs):\n if created and kwargs.get('raw', False):\n return\n if not instance.document_id:\n return\n if not instance.redacted_id:\n return\n # If attachment has document, but also redacted version\n # move document reference to redacted version\n with transaction.atomic():\n doc_id = instance.document_id\n Document.objects.filter(id=doc_id).update(\n original_id=instance.redacted_id\n )\n instance.document = None\n instance.save()\n FoiAttachment.objects.filter(\n id=instance.redacted_id\n ).update(document_id=doc_id)\n\n d = Document.objects.get(id=doc_id)\n d.process_document()\n\n\n@receiver(FoiAttachment.attachment_redacted,\n dispatch_uid='was_redacted_reprocess_document')\ndef reprocess_document_after_redaction(sender, **kwargs):\n if sender.document:\n sender.document.process_document()\n","sub_path":"froide/document/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"406798322","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n First program\r\n\r\n Source:\r\n - http://zetcode.com/gui/pyqt4/firstprograms/\r\n\"\"\"\r\n\r\nimport sys\r\nfrom PyQt4 import QtGui\r\n\r\n\r\ndef main():\r\n\r\n # initialize Qt\r\n app = QtGui.QApplication(sys.argv)\r\n\r\n\r\n\r\n # Crreat a new qt widget. 
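The csv_export above is Python-2 era (io.BytesIO, iteritems); a hedged Python 3 equivalent of the same in-memory CSV pattern looks like this (payload values are invented):

import csv
import io

out = io.StringIO()                  # csv.writer needs text, not bytes, on Python 3
writer = csv.writer(out)
writer.writerow(["username", "ash"]) # toy payload rows
writer.writerow(["stardust", 100])
print(out.getvalue())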
Widget's without parents are simply \"windows\"\r\n w = QtGui.QWidget()\r\n\r\n w.resize(250, 150)\r\n w.move(300, 300)\r\n w.setWindowTitle('Hello World')\r\n w.show()\r\n\r\n\r\n # Start qt event loop, exit program when it does\r\n sys.exit(app.exec_())\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"python_presentation/AdvancedSource/pyqt/pyqt1.py","file_name":"pyqt1.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"627799392","text":"class BinTreeNode(object):\r\n \r\n def __init__(self, value):\r\n self.value=value\r\n self.left=None\r\n self.right=None\r\n### Stack is needed (or just is easier)\r\n### to store the values in it and then just print them out\r\nclass Stack():\r\n def __init__(self): #constructor\r\n self.stack = []\r\n \r\n def isEmpty(self): #function which returns true when the lenght of the stack is 0\r\n return len(self.stack) == 0\r\n\r\n def push(self,item): # function which appends an element into the stack\r\n self.stack.append(item)\r\n\r\n def pop(self): #function which which pops out an element (and deletes it afterwards)\r\n return self.stack.pop()\r\n \r\ndef tree_insert( tree, item):\r\n if tree==None:\r\n tree=BinTreeNode(item)\r\n else:\r\n if(item < tree.value):\r\n if(tree.left==None):\r\n tree.left=BinTreeNode(item)\r\n else:\r\n tree_insert(tree.left,item)\r\n else:\r\n if(tree.right==None):\r\n tree.right=BinTreeNode(item)\r\n else:\r\n tree_insert(tree.right,item)\r\n return tree\r\n \r\ndef postorder(tree):\r\n if(tree.left!=None):\r\n postorder(tree.left)\r\n if(tree.right!=None):\r\n postorder(tree.right)\r\n print (tree.value)\r\n \r\n### function to print out values in order from the Tree (iteratively) ### \r\ndef in_order(tree):\r\n stack = Stack() #create the stack\r\n t = tree #assign the Tree to a variable\r\n res = True\r\n while res: #while loop which iterates until \"res\" becomes False\r\n if t != None: # checks if there is a Tree ?\r\n stack.push(t) #if there is, appends the first value to the stack\r\n t = t.left # and then continues to the left (thats how it is supposed to be - left then right)\r\n continue #goes back to the beginning\r\n t=stack.pop() # otherwise (when the iterator goes through the entire left side)\r\n ## we start popping out elements and printing them\r\n print(t.value)\r\n t = t.right #directs the iterator so that it goes to the right side of the Tree\r\n \r\n if stack.isEmpty() and t==None: ## if the stack AND the Tree are empty\r\n res=False # break the iterator AND WE ARE DONEEEEE\r\n \r\n\r\n\r\n## Calling the function (first inserting values, then printing them in order ###\r\nif __name__ == '__main__':\r\n \r\n t=tree_insert(None,11);\r\n tree_insert(t,9)\r\n tree_insert(t,5)\r\n tree_insert(t,8)\r\n tree_insert(t,12)\r\n tree_insert(t,20)\r\n tree_insert(t,15)\r\n tree_insert(t,21)\r\n in_order(t)\r\n","sub_path":"task12.py","file_name":"task12.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"258822761","text":"import pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\nimport pickle\n\n# 3점 이상이면 1 / 미만이면 0\ndef check(grade) :\n if grade >= 3.0 :\n return 1\n else :\n return 0\n\ndef regressor() :\n df = 
pd.read_csv('https://raw.githubusercontent.com/taeyoonnoh/Job-Search-Service/main/jasoseol.csv')\n\n # 자기소개서 없는 데이터 제거하기\n remove_index = df[df['자기소개서']=='0'].index.tolist()\n df = df.drop(remove_index,axis=0)\n\n # label 값\n df['자기소개서 평점'] = df['자기소개서 평점'].apply(check)\n\n # X / y 데이터 나누기\n X = df.copy()\n y = X.pop('자기소개서 평점')\n\n # 데이터 Sampling\n X_train,X_test,y_train,y_test = train_test_split(X,y,stratify=y,train_size=0.80)\n\n # 자기소개서 vector 화 시키기\n vectorizer = TfidfVectorizer()\n\n # Train / Test 분리\n X_train = vectorizer.fit_transform(X_train['자기소개서']).toarray()\n X_test = vectorizer.transform(X_test['자기소개서']).toarray()\n\n\n # Label 값 int 로 바꿔주기\n def toint(df) :\n return int(df)\n\n y_train = y_train.apply(toint)\n y_test = y_test.apply(toint)\n \n logistic = LogisticRegression(random_state=42)\n\n logistic.fit(X_train,y_train)\n\n return logistic,vectorizer\n\nlogistic,vectorizer = regressor()\n\nPkl_Filename1 = \"best_classifier.pkl\" \n\nwith open(Pkl_Filename1, 'wb') as file: \n pickle.dump(logistic, file)\n\nPkl_Filename2 = \"best_vectorizer.pkl\" \n\nwith open(Pkl_Filename2, 'wb') as file: \n pickle.dump(vectorizer, file)","sub_path":"flask_app/classifiers/logistic_regression_tfidf_vectorizer/logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"153290381","text":"import glob\r\nimport numpy as np\r\nimport re\r\n#B1: đọc file tạo 1 dictionary\r\nfiles = glob.glob(\"./data/*.txt\", recursive=True)\r\nprint(files)\r\ndictionary=set()\r\nlist_content=[]\r\nfor f in files:\r\n file = open(f,'r',encoding=\"utf-8\")\r\n file_content = file.read()\r\n list_content.append(file_content)\r\n words = set(file_content.replace('. 
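A toy sketch (invented four-document corpus) of the fit-on-train / transform-only-on-test discipline that regressor() above follows with TfidfVectorizer:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression

X_train = ["good essay text", "weak essay", "strong writing", "poor text"]  # made up
y_train = [1, 0, 1, 0]

vec = TfidfVectorizer()
Xt = vec.fit_transform(X_train)      # vocabulary learned on training data only
clf = LogisticRegression().fit(Xt, y_train)
print(clf.predict(vec.transform(["strong essay text"])))  # transform, never refit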
',' ').replace('\"',' ').split())#xoá các kí tư ko liên quan\r\n dictionary.update(words)\r\n#tạo một term doc & khai báo 0 cho các phần tử\r\nterm_doc_mat = np.zeros((len(dictionary),len(list_content)))\r\n#print(term_doc_mat)\r\nidx = 0 \r\nfor doc in list_content:\r\n term_doc_mat[:,idx]=np.array(([int(words in doc) for words in dictionary]))\r\n idx+=1\r\n#print(\"Ma tran term_doc: \\n\",term_doc_mat)\r\nprint(\"Nhap cau truy van: \")\r\nquery = input()\r\ntokens = re.findall('\"(\\w+)\"',query)\r\nand_or_xor_not = re.findall(' (\\w+)',query)\r\n#print(and_or_xor_not)\r\n\r\n\r\n\r\ndictionary = list(dictionary)\r\n#print(dictionary)\r\narr_vec_tor = []\r\nvec_tor_query_1 = []\r\nvec_tor_query_2 = []\r\n\r\n\r\n\r\n\r\nfor token in tokens:\r\n if token in dictionary:\r\n idx = dictionary.index(token)\r\n #print(idx)\r\n vec_tok = term_doc_mat[idx]\r\n arr_vec_tor.append(vec_tok) \r\n #print('vec_to',token,\":\",vec_tok)\r\n#print(arr_vec_tor)\r\n\r\n#Hàm truy vấn dữ liệu\r\ndef Data_query(arr_vector):\r\n\r\n vec_tor_query_1 = arr_vec_tor [0]\r\n for i in range(0,len(and_or_xor_not)):\r\n#AND \r\n data_query = []\r\n vec_tor_query_2 = arr_vec_tor[i+1]\r\n if and_or_xor_not[i] == \"AND\" :\r\n for i in range(0,len(vec_tor_query_1)):\r\n data_query.append( vec_tor_query_1[i] and vec_tor_query_2[i]) \r\n\r\n#OR\r\n elif and_or_xor_not[i] == \"OR\" :\r\n for i in range(0,len(vec_tor_query_1)):\r\n data_query.append( vec_tor_query_1[i] or vec_tor_query_2[i])\r\n\r\n#XOR\r\n elif and_or_xor_not[i] == \"XOR\" :\r\n for i in range(0,len(vec_tor_query_1)):\r\n d = bool(vec_tor_query_1[i]) != bool(vec_tor_query_2[i])\r\n if d == True:\r\n d = 1\r\n data_query.append(d)\r\n else:\r\n d = 0\r\n data_query.append(d)\r\n\r\n vec_tor_query_1 = data_query\r\n return data_query\r\n\r\nprint(Data_query(arr_vec_tor))\r\n","sub_path":"Boolean_retrival/Boolean_Retrieval.py","file_name":"Boolean_Retrieval.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"212228781","text":"\nimport my_parser\nfrom parser_tree import CompilationError\n\n\nclass Generator():\n def __init__(self):\n self.procedure_declarations = []\n self.parameter_declarations = []\n self.program_identifier = ''\n self.program_date = []\n\n self.current_program_date = {'identifier': '', 'parameters': []}\n self.current_procedure_declaration = {'identifier': '', 'parameters': []}\n self.current_parameter_declaration = None\n self.current_program_identifier = ''\n\n def parse_node(self, node):\n\n node_val = node.value\n print(str(node_val.value))\n print(str(node_val))\n print(str(node_val.value_type))\n\n if node_val.value_type== 1:\n if str(node_val.text)==\"404 VAR\":\n self.parse_node(node.child_nodes[1])\n\n self.parse_node(node.child_nodes[2])\n self.current_program_date['identifier'] = self.current_program_identifier\n self.current_program_date['parameters'] = self.parameter_declarations\n self.program_date.append(self.current_procedure_declaration)\n\n self.current_program_date = {'identifier': '', 'parameters': []}\n self.program_date = []\n\n\n if node_val.value_type == 0:\n\n # for each declaration new\n\n if node_val.value == \"program\":\n\n self.parse_node(node.child_nodes[1])\n\n self.parse_node(node.child_nodes[2])\n\n self.current_procedure_declaration['identifier'] = self.current_program_identifier\n self.current_procedure_declaration['parameters'] = self.parameter_declarations\n 
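A condensed sketch (three toy documents) of the Boolean-retrieval core above: a 0/1 term-document incidence matrix, with an AND query as an element-wise operation on its rows:

import numpy as np

docs = ["cat sat", "dog sat", "cat dog"]    # assumed mini-corpus
vocab = sorted({w for d in docs for w in d.split()})
mat = np.array([[int(w in d.split()) for d in docs] for w in vocab])
row = {w: i for i, w in enumerate(vocab)}

and_query = mat[row["cat"]] & mat[row["dog"]]
print(and_query)    # -> [0 0 1]: only the third document contains both terms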
self.procedure_declarations.append(self.current_procedure_declaration)\n\n self.current_procedure_declaration = {'identifier': '', 'parameters': []}\n self.parameter_declarations = []\n\n\n # each declaration will add value\n\n elif node_val.value==\"variable-declarations\":\n self.parse_node(node.child_nodes[0])\n\n if node_val.value==\"declarations-list\":\n self.parse_node(node.child_nodes[0])\n\n elif node_val.value == 'procedure-identifier':\n if node.parent.value.value == 'program':\n self.program_identifier = node.child_nodes[0].value.value\n self.current_program_identifier = node.child_nodes[0].value.value\n\n elif node_val.value == 'declaration':\n self.current_parameter_declaration = {'identifiers': [], 'attributes': []}\n self.parameter_declarations.append(self.current_parameter_declaration)\n\n elif node_val.value == 'variable-identifier':\n self.current_parameter_declaration['identifiers'].append(\n node.child_nodes[0].value.value)\n\n elif node_val.value == 'attribute':\n self.current_parameter_declaration['attributes'].append(\n node.child_nodes[0].value.value)\n else:\n pass\n\n for node in node.child_nodes:\n\n self.parse_node(node)\n\n def translate(self, filename):\n global tmt\n tree = my_parser.parse(filename)\n\n self.parse_node(tree.root)\n\n print(self.program_date)\n result = ''\n\n errors = []\n\n code_section = 'codeSeg SEGMENT\\n \\t\\t ASSUME cs:code1, ds:dataSeg, ss:stackSeg\\n'\n data_section = 'dataSeg SEGMENT\\n'\n stack_section = 'stackSeg SEGMENT\\n\\tdb 4096 dup (?)\\nstackSeg ends\\n\\n'\n\n ext_params = []\n proc_identifiers = []\n for proc_decl in self.procedure_declarations:\n\n idn_lexem = proc_decl['identifier']\n if idn_lexem.text in proc_identifiers :\n errors.append(CompilationError('Generator', idn_lexem.line, idn_lexem.column,\n 'identifier \"{}\" already exists'.format(idn_lexem.text)))\n continue\n proc_identifiers.append(idn_lexem.text)\n code_section += idn_lexem.text + ' proc\\n'\n\n total_parameter_length = 4\n param_identifiers = []\n for parameter in proc_decl['parameters']:\n\n \n # determine type of parameters and its length\n\n basic_type = None\n compound_type = None\n contains_ext = False\n for attribute in parameter['attributes']:\n if attribute.text == 'INTEGER' or attribute.text == 'FLOAT' \\\n or attribute.text == 'BLOCKFLOAT':\n if basic_type is not None:\n errors.append(CompilationError('Generator', attribute.line, attribute.column,\n 'attribute \"{}\" can`t be used together with attribute '\n '\"{}\"'.format(attribute.text, basic_type)))\n basic_type = attribute.text\n elif attribute.text == 'COMPLEX' or attribute.text == 'SIGNAL':\n if compound_type is not None:\n errors.append(CompilationError('Generator', attribute.line, attribute.column,\n 'attribute \"{}\" can`t be used together with attribute '\n '\"{}\"'.format(attribute.text, compound_type)))\n compound_type = attribute.text\n elif attribute.text == 'EXT':\n contains_ext = True\n\n parameter_memory_size = 4\n if basic_type == 'INTEGER' or basic_type == 'FLOAT':\n parameter_memory_size = 4\n if compound_type == 'COMPLEX':\n parameter_memory_size *= 2\n\n\n for idn in parameter['identifiers']:\n if idn.text in param_identifiers:\n errors.append(CompilationError('Generator', idn.line, idn.column,\n 'parameter \"{}\" already defined'.format(idn.text)))\n param_identifiers.append(idn.text)\n\n if not contains_ext:\n code_section += '\\t@{}\\t equ \\t [bp+{}]\\n'.format(idn.text, total_parameter_length)\n total_parameter_length += parameter_memory_size\n else:\n if 
idn.text not in ext_params:\n ext_params.append(idn.text)\n data_section += '\\t{} \\t db\\t{} dup (0)\\n'.format(idn.text, parameter_memory_size)\n\n\n code_section += '\\t\\t push bp\\n'\n code_section += '\\t\\t mov bp, sp\\n\\n'\n\n code_section += '\\t\\t pop bp\\n'\n if total_parameter_length - 4 != 0:\n code_section += '\\t\\t retn ' + str(total_parameter_length - 4) + '\\n'\n else:\n code_section += '\\t\\t ret\\n'\n\n code_section += idn_lexem.text + ' endp\\n\\n'\n\n\n\n code_section += '\\t main:\\n'\n code_section += '\\t\\t mov ax, dataSeg\\n'\n code_section += '\\t\\t mov ds, ax\\n'\n code_section += '\\t\\t mov ax, stackSeg\\n'\n code_section += '\\t\\t mov ss, ax\\n'\n code_section += '\\t\\t mov ax, 0b800h\\n'\n code_section += '\\t\\t mov es, ax\\n\\n'\n\n code_section += '\\t\\t mov ax,4c00h\\n'\n code_section += '\\t\\t int 21h\\n\\n'\n\n code_section += 'codeSeg ends \\n\\tend main\\n'\n\n data_section += 'dataSeg ends\\n\\n'\n\n result += data_section\n result += stack_section\n result += code_section\n\n if len(errors) > 0:\n print('Errors:')\n for error in errors:\n print(error)\n return ''\n else:\n return result\n\n\ndef compile(filename):\n generator = Generator()\n return generator.translate(filename)\n\n\nif __name__ == '__main__':\n print('\\n')\n print('-' * 70)\n print('Final result:')\n print('-' * 70)\n print('\\n')\n print(compile('parser_true_test.txt'))","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":8196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"57556991","text":"import abc\n\nfrom src.command.order_item import OrderItemProcessor, ShipLabelCommand, EmailCommand, SubscriptionCommand, \\\n DiscountCommand\nfrom src.model.order import OrderItem\nfrom src.model.product import ProductType\n\n\nclass ProductTypeStrategy(abc.ABC):\n def __init__(self, order_item: OrderItem):\n self.order_item = order_item\n\n @abc.abstractmethod\n def execute(self):\n pass\n\n\nclass ProductPhysicalStrategy(ProductTypeStrategy):\n def execute(self):\n OrderItemProcessor(self.order_item).add(ShipLabelCommand()).process()\n\n\nclass ProductBookStrategy(ProductTypeStrategy):\n def execute(self):\n OrderItemProcessor(self.order_item).add(ShipLabelCommand('Notification')).process()\n\n\nclass ProductDigitalStrategy(ProductTypeStrategy):\n def execute(self):\n OrderItemProcessor(self.order_item).add(EmailCommand()).process()\n\n\nclass ProductMembershipStrategy(ProductTypeStrategy):\n def execute(self):\n OrderItemProcessor(self.order_item) \\\n .add(SubscriptionCommand()) \\\n .add(EmailCommand()) \\\n .add(DiscountCommand()) \\\n .process()\n\n\nclass ProductTypeStrategyFactory:\n @staticmethod\n def create(order_item: OrderItem):\n types = {\n ProductType.PHYSICAL: ProductPhysicalStrategy(order_item),\n ProductType.BOOK: ProductBookStrategy(order_item),\n ProductType.DIGITAL: ProductDigitalStrategy(order_item),\n ProductType.MEMBERSHIP: ProductMembershipStrategy(order_item)\n }\n strategy = types.get(order_item.product.product_type, None)\n\n if strategy is None:\n raise Exception('Invalid Product Type: {}'.format(order_item.product.product_type))\n\n return strategy\n","sub_path":"backend/shopping/src/factory/product_type_strategy.py","file_name":"product_type_strategy.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"560242594","text":"\n\nfrom wsgiref.simple_server import 
make_server\nfrom wiwi.core.service import api\n\ndef server(port):\n\tport = int(port)\n\thttpd = make_server('0.0.0.0', port, api)\n\tprint(\"Serving on port %s...\"%port)\n\ttry:\n\t\thttpd.serve_forever()\n\texcept KeyboardInterrupt:\n\t\tprint('Goodbye.')\n","sub_path":"wiwi/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"48881414","text":"# !/usr/bin/env python \n# -*- coding: utf-8 -*-\n# file_name: test_case_for_single_func.py\n# author: ScCcWe\n# time: 2020/5/20 10:58\n\n\nCSI = '\\033['\n\n\ndef code_to_chars(code):\n print(CSI + str(code) + 'm')\n return CSI + str(code) + 'm'\n\n\ncode_to_chars(40)\n","sub_path":"内置方法/有关属性的方法attr/test_case_for_single_func.py","file_name":"test_case_for_single_func.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"290087023","text":"import sys\n\nsys.stdin = open(\"반복문자지우기.txt\", \"r\")\n\ndef delete(s):\n for i in range(len(s)-1):\n if s[i] == s[i+1]:\n s = s[:i] + s[i+1+1:]\n delete(s)\n break\n result.append(s)\n\nT = int(input())\nfor test_case in range(1, T + 1):\n S = input()\n result = []\n delete(S)\n \n print(f'#{test_case} {len(result[0])}')","sub_path":"SW expert/python/반복문자지우기.py","file_name":"반복문자지우기.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"61365254","text":"#Autor Claudio Mayoral García\n#Descripción Se preguntará al usuario el numero de boletos de cada tipo y se desplegará el precio total\n\n\ndef calcularPago(numeroBoletosA, numeroBoletosB, numeroBoletosC):\n # Calcula y guarda en la variable totalPago el total a pagar\n # Regresa pagoTotal\n pagoTotal = numeroBoletosA * 925 + numeroBoletosB * 775 + numeroBoletosC * 360\n return pagoTotal\n\n\ndef main():\n # numeroBoletosA = Leer el número de asientos de clase A\n # numeroBoletosB = Leer el número de asientos de clase B\n # numeroBoletosC = Leer el número de asientos de clase C\n # Calcula el resultado con la función calcularPago, envía como argumentos los valores leídos. 
Guarda el resultado.\n # Imprimir el resultado\n numeroBoletosA = int(input(\"Número de boletos de clase A: \"))\n numeroBoletosB = int(input(\"Número de boletos de clase B: \"))\n numeroBoletosC = int(input(\"Número de boletos de clase C: \"))\n totalPago = calcularPago(numeroBoletosA, numeroBoletosB, numeroBoletosC)\n print(\"El costo total es: $%.2f\" % totalPago)\n\n\n# llama a la funcion \"main\"\nmain()\n","sub_path":"asientosDeUnEstadio.py","file_name":"asientosDeUnEstadio.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"350848607","text":"import pyfiglet\n#colors\nclass bcolors:\n OK = '\\033[92m' #GREEN\n WARNING = '\\033[93m' #YELLOW\n FAIL = '\\033[91m' #RED\n RESET = '\\033[0m' #RESET COLOR\n B = \"\\033[0;34;40m\" # Blue\n orange='\\033[43m' \n purple='\\033[45m'\n cyan='\\033[46m'\n lightgrey='\\033[47m'\n lightgrey='\\033[37m'\n darkgrey='\\033[90m'\n lightred='\\033[91m'\n pink='\\033[95m'\nascii_banner = pyfiglet.figlet_format(\"Similar Element Remover !!\")\nprint(ascii_banner)\nprint(f\"{bcolors.pink}Author: Viraj Vaishnav{bcolors.RESET}\")\nprint(f\"{bcolors.pink}Follow on: https://twitter.com/VirajVaishnav16{bcolors.RESET}\")\na = input(f\"{bcolors.lightred}Path of target File:{bcolors.RESET} \")\nfile = open(a,'r')\nLines = file.readlines()\ncount = 0\nlist_domain = list()\nfor line in Lines:\n list_domain.append(line.strip())\nresult = list(dict.fromkeys(list_domain)) # removing duplicating \nfor l in result:\n f = open(\"SR-\"+a, \"a\")\n f.writelines(l+\"\\n\")\n f.close()\nprint(f\"{bcolors.pink}New File is created with SR-target-file-name.txt name{bcolors.RESET}\")\n","sub_path":"Testing/same-remover.py","file_name":"same-remover.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"553848637","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\r\n\r\n\r\n# HTTPRequestHandler class\r\nclass SimpleHTTP(BaseHTTPRequestHandler):\r\n\r\n # Nhận GET request gửi lên.\r\n def do_GET(self):\r\n # SET http status trả về\r\n self.send_response(200)\r\n\r\n # Thiết lập header trả về\r\n self.send_header('Content-type', 'text/html')\r\n self.end_headers()\r\n # Data\r\n message = \"XIn chao \"\r\n # Write data dưới dạng utf8\r\n self.wfile.write(bytes(message, \"utf8\"))\r\n self.date_time_string()\r\n return\r\n\r\n\r\n\r\n# cấu hình host và cổng port cho server\r\nserver_address = ('127.0.0.1', 8000)\r\n\r\n# Khởi tạo server với thông số cấu hình ở trên.\r\nhttpd = HTTPServer(server_address, SimpleHTTP)\r\n\r\n# Tiến hành chạy server\r\nhttpd.serve_forever()","sub_path":"BTL_Chat_sv_cl/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"627789988","text":"from flask import Blueprint,render_template,redirect,flash,request,url_for\nfrom flask_login import login_user,current_user,logout_user,login_required\nfrom syblogpost import db\nfrom syblogpost.users.forms import Register,Login,updateform\nfrom syblogpost.models import Users,Blogpost\nfrom syblogpost.users.pics_handler import add_profile_pics\n\nusers = Blueprint('users',__name__,url_prefix='/users')\n\n@users.route('/register',methods=['GET','POST'])\ndef register():\n form = Register()\n if form.validate_on_submit():\n user = Users(email = form.email.data,\n username = 
form.username.data,\n password = form.password.data)\n db.session.add(user)\n db.session.commit()\n flash('Thanks for Registration!')\n return redirect(url_for('users.login'))\n return render_template('register.html', form = form)\n \n# login\n@users.route('/login', methods=['GET','POST'])\ndef login():\n #initializing the form created from the forms.py\n form = Login()\n if form.validate_on_submit():\n #querying the user from the database\n user = Users.query.filter_by(email = form.email.data).first()\n #check that the user exists before verifying the password (avoids AttributeError on None)\n if user is not None and user.check_password(form.password.data):\n # function to carry out login imported from flask login\n login_user(user)\n # return \"Log in Successful\"\n #to return user to any page he is before logging in \n next = request.args.get('next')\n #if not return user to home page \n if next is None or not next[0] == '/':\n next = url_for('core.index')\n return redirect(next)\n return render_template('login.html', form = form )\n\n#logout\n@users.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('core.index'))\n\n#account info\n@users.route('/account', methods=['POST','GET'])\n@login_required\ndef account():\n form = updateform()\n if form.validate_on_submit():\n if form.picture.data:\n username = current_user.username\n pic = add_profile_pics(form.picture.data,username)\n current_user.profile_image = pic \n current_user.username = form.username.data\n current_user.email = form.email.data\n db.session.commit()\n flash('User Account Updated Successfully')\n return redirect(url_for('users.account'))\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.email.data = current_user.email\n profile_image = url_for('static',filename='profile_pics/'+current_user.profile_image)\n return render_template('account.html',profile_image = profile_image,form = form)\n\n@users.route('/<username>')\ndef user_posts(username):\n page = request.args.get('page',1,type=int)\n user = Users.query.filter_by(username = username).first()\n # _or_404()\n blog_posts = Blogpost.query.filter_by(author=user).order_by(Blogpost.date.desc()).paginate(page=page,per_page=5)\n return render_template('user_post.html',blog_posts = blog_posts, user=user)","sub_path":"syblogpost/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"555592569","text":"from pyparams import PyParam\n\nstart_index: int = PyParam(1, scope=\"loop\", desc=\"summation start index\")\n\n\ndef sum_numbers():\n \"\"\"Sum numbers \"\"\"\n s = 0\n max_iters: int = PyParam(6, int, \"loop\", \"max number of iterations\")\n for i in range(start_index, max_iters):\n s += i\n return s\n\n\nprint(sum_numbers())\n","sub_path":"resources/code_samples/template1.py","file_name":"template1.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"95386855","text":"#!/usr/bin/env python\n# coding=utf-8\nimport matplotlib.pyplot as plt\nfrom multiprocessing import Pool\nimport numpy as np\nimport time\n\ndef multi_process_helper(opt):\n start = time.time()\n print(opt.get_name() + ' started')\n _ = opt.optimize()\n end = time.time()\n print(opt.get_name() + ' done, total {:.2f}s'.format(end-start))\n return opt\n\n\ndef multiprocess_run(n, exps):\n with Pool(n) as pool:\n exps = pool.map(multi_process_helper, exps)\n return 
exps\n\n\ndef run_exp(exps, kappa=None, max_iter=None, name=None, save=False, plot=True, legend=None):\n\n with Pool(6) as pool:\n res = pool.map(multi_process_helper, exps)\n\n for x in res:\n if x.verbose == True:\n x.plot_history()\n\n if plot == True:\n if legend is None:\n legend = [x.get_name() for x in res]\n plot_results(\n [x.get_results() for x in res],\n legend,\n kappa=kappa,\n max_iter=max_iter,\n name=name,\n save=save,\n )\n\n if save == True: # Save data files too\n # Save to txt file\n for x in res:\n\n y = x.get_results()\n tmp = [\n list(range(1, len(y['var_error'])+1)),\n y['var_error'],\n y['func_error'],\n y['n_comm'],\n y['n_grad']\n ]\n tmp = np.array(tmp).T\n\n fname = r'data/' + str(name) + '_kappa_' + str(int(kappa)) + '_' + x.get_name() + '.txt'\n np.savetxt(fname, tmp, delimiter=' ')\n with open(fname, 'r') as f:\n content = f.read()\n with open(fname, 'w') as f:\n f.write(\"iter var_error func_error n_comm n_grad\\n\" + content)\n return res\n\n\ndef plot_results(results, legend, kappa=None, max_iter=None, name=None, save=False):\n\n if max_iter == None:\n max_iter = max([len(res['var_error']) for res in results]) \n\n plot_iters(results, legend, kappa=kappa, max_iter=max_iter, name=name, save=save)\n plot_grads(results, legend, kappa=kappa, max_iter=max_iter, name=name, save=save)\n\n\ndef plot_iters(results, legend, kappa=None, max_iter=None, name=None, save=False):\n\n plt.figure()\n for res in results:\n plt.semilogy(range(1, len(res['var_error'][:max_iter])+1), res['var_error'][:max_iter])\n plt.title('Variable error vs. #outer iterations')\n plt.ylabel(r\"$\\frac{\\Vert {\\bar{\\mathbf{x}}}^{(t)} - {\\mathbf{x}}^\\star \\Vert}{\\Vert {\\mathbf{x}}^\\star \\Vert}$\")\n plt.xlabel('#outer iterations')\n if kappa is not None:\n plt.title(r\"$\\kappa$ = \" + str(int(kappa)))\n plt.legend(legend)\n if save == True:\n plt.savefig('figs/' + str(name) + '_kappa_' + str(int(kappa)) + '_var_iter.eps', format='eps')\n \n\n plt.figure()\n for res in results:\n plt.semilogy(range(1, len(res['func_error'][:max_iter])+1), res['func_error'][:max_iter])\n plt.title('Function value error vs. #outer iterations')\n plt.ylabel(r\"$\\frac{f({\\bar{\\mathbf{x}}}^{(t)}) - f({\\mathbf{x}}^\\star)}{f({\\mathbf{x}}^\\star)}$\")\n plt.xlabel('#outer iterations')\n if kappa is not None:\n plt.title(r\"$\\kappa$ = \" + str(int(kappa)))\n plt.legend(legend)\n if save == True:\n plt.savefig('figs/' + str(name) + '_kappa_' + str(int(kappa)) + '_fval_iter.eps', format='eps')\n\n\ndef plot_grads(results, legend, kappa=None, max_iter=None, name=None, save=False):\n plt.figure()\n for res in results:\n plt.loglog(res['n_grad'][1:], res['var_error'][1:])\n plt.title('Variable error vs. #gradient evaluations')\n plt.ylabel(r\"$\\frac{\\Vert {\\bar{\\mathbf{x}}}^{(t)} - {\\mathbf{x}}^\\star \\Vert}{\\Vert {\\mathbf{x}}^\\star \\Vert}$\")\n plt.xlabel('#gradient evaluations / #total samples')\n if kappa is not None:\n plt.title(r\"$\\kappa$ = \" + str(int(kappa)))\n plt.legend(legend)\n if save == True:\n plt.savefig('figs/' + str(name) + '_kappa_' + str(int(kappa)) + '_var_grads.eps', format='eps')\n\n\n plt.figure()\n for res in results:\n plt.loglog(res['n_grad'][1:], res['func_error'][1:])\n plt.title('Function value error vs. 
#gradient evaluations')\n plt.ylabel(r\"$\\frac{f({\\bar{\\mathbf{x}}}^{(t)}) - f({\\mathbf{x}}^\\star)}{f({\\mathbf{x}}^\\star)}$\")\n plt.xlabel('#gradient evaluations')\n if kappa is not None:\n plt.title(r\"$\\kappa$ = \" + str(int(kappa)))\n plt.legend(legend)\n if save == True:\n plt.savefig('figs/' + str(name) + '_kappa_' + str(int(kappa)) + '_fval_grads.eps', format='eps')\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"489981994","text":"import urllib.request\nimport json\nimport csv\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time\nimport csv\n# 获取页面源码\ndef get_html(url):\n browser = webdriver.PhantomJS(executable_path=r\"E:/spider/phantomjs-2.1.1-windows/bin/phantomjs.exe\") #使用无界面的phantomjs浏览器\n browser.get(url) #发送请求\n print(\"### Handling <\" + url + \">\")\n return browser.page_source\n\n\ndef get_info(url, city, file):\n with open(file, 'w') as csvfile:\n writer = csv.writer(csvfile)\n html = get_html(url) #获取html页面\n bsobj = BeautifulSoup(html, \"lxml\") #用lxml解析html\n tbody = bsobj.find(\"tbody\") #查找到显示线路信息的表格\n \n while tbody is None:\n print(\" retry ...\")\n html = get_html(url)\n bsobj = BeautifulSoup(html, \"lxml\")\n tbody = bsobj.find(\"tbody\")\n \n trs = tbody.findAll(\"tr\") #把所有行放入数组trs[]\n infos = []\n for tr in trs: #开始遍历列车行\n tds = tr.findAll(\"td\") #把一行中的所有列放入数组tds[]\n number = tds[0].get_text() #序号\n name = tds[1].get_text() #名称\n level = tds[2].get_text() #等级\n address = tds[4].get_text().split()[0] #地址\n \n info = [number, name, level, address,]\n print(info)\n with open(file, 'a') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(info)\n print(\" [success]\")\n\n\nout_file = '北京市景点.csv'\nurls = ['http://www.bjta.gov.cn/tsfwzt/qyml/394778.htm']\n\n###########30个城市的名字##########\ncity = ['BeiJing','ShangHai','GuangZhou','ShenZhen','WuHan','TianJin','NanJing',\n 'XiangGang','ChongQin','HangZhou','ShenYang','DaLian','ChengDu','ChangChun',\n 'SuZhou','FoShan','KunMing','XiAn','ZhengZhou','ChangSha','NingBo',\n 'WuXi','QingDao','NanChang','FuZhou','DongGuan','NanNing','HeFei',\n 'HaErbin','ShiJiazhuang'\n ]\nfor i in range(len(urls)):\n get_info(urls[i],city[i],out_file)\n","sub_path":"Lesson03/Subway_Data-master/景点数据/北京景点数据.py","file_name":"北京景点数据.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"345002387","text":"import argparse\nimport SpiroAnimator\nimport Spiro\nimport turtle\n\n\n# main() function\ndef main():\n # use sys.argv if needed\n print('generating spirograph...')\n # create parser\n descStr = \"\"\"This program draws Spirograpfs using the Turtle model.\n When run with no arguments, this program draws random Spirographs.\n \n Terminology:\n \n R: radius of outer circle\n r: radius of inner circle\n l: ratio of hole distance to r\n \"\"\"\n\n parser = argparse.ArgumentParser(description=descStr)\n\n # add expected arguments\n parser.add_argument('--sparams', nargs=3, dest='sparams', required=False,\n help=\"The three arguments in sparams: R, r, l.\")\n\n # parse args\n args = parser.parse_args()\n\n # set the width of the drawing window to 80 percent of the screen width\n turtle.setup(width=0.8)\n\n # set the cursor shape to turtle\n turtle.shape('turtle')\n\n # set the title to Spirographs!\n turtle.title(\"Spirographs!\")\n # add the key handler to save our 
drawings\n turtle.onkey(SpiroAnimator.SpiroAnimator.saveDrawing, \"s\")\n # start listening\n turtle.listen()\n\n # hide the main turtle cursor\n turtle.hideturtle()\n\n # check for any arguments sent to --sparams and draw the Spirograph\n if args.sparams:\n params = [float(x) for x in args.sparams]\n # draw the Spirograph with the given parameters\n col = (0.0, 0.0, 0.0)\n spiro = Spiro.Spiro(0, 0, col, *params)\n spiro.draw()\n else:\n # create the animator object\n spiroAnim = SpiroAnimator.SpiroAnimator(4)\n # add a key handler to toggle the turtle cursor\n turtle.onkey(spiroAnim.toggleTurtles, \"t\")\n # add a key handler to restart the animation\n turtle.onkey(spiroAnim.restart, \"space\")\n\n\n # start the turtle main loop\n turtle.mainloop()\n\n\n# call main\nif __name__ == '__main__':\n main()\n","sub_path":"MillionFlowersRuler/AlexMain.py","file_name":"AlexMain.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"384434360","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport random\nimport math\n\nimport pygame\n\npygame.init()\n\nsize = width, height = 640, 480\ncenter = offset_x, offset_y = width // 2, height // 2\nzoom = 20\nscreen = pygame.display.set_mode(size)\n\n\n\n# Colors\n\nblack = (0, 0 , 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (51, 102, 255)\nyellow = (255, 255, 0)\nsilver = (102, 102, 102)\n\ndef random_color():\n return pygame.Color(\n random.randint(0, 255),\n random.randint(0, 255),\n random.randint(0, 255),\n 255,\n )\n\n\nclass Point:\n\n def __init__(self, x=0, y=0, color=white):\n self.x = x\n self.y = y\n self.color = color\n\n def scale(self):\n x, y = self.x, self.y\n x = offset_x + x*zoom\n y = offset_y - y*zoom\n return int(round(x)), int(round(y))\n\n def move(self):\n self.x += random.random() / 20.0 - 0.025\n self.y += random.random() / 20.0 - 0.025\n return self\n\n def distance(self, x, y):\n return (self.x - x)**2 + (self.y - y)**2\n \n def __repr__(self):\n name = self.__class__.__name__\n return '{}(x={}, y={}, color={})'.format(\n name, self.x, self.y, self.color\n )\n\n def draw(self, canvas):\n x, y = self.scale()\n canvas.set_at((x, y), self.color) # The point itself\n canvas.set_at((x-1, y), self.color) # cross\n canvas.set_at((x+1, y), self.color)\n canvas.set_at((x, y-1), self.color)\n canvas.set_at((x, y+1), self.color)\n\n @classmethod\n def random(self):\n x = random.randint(0, width)\n y = random.randint(0, height)\n color = random_color()\n return Point(x, y, color)\n\n\nclass Triangle(Point):\n\n def draw(self, canvas):\n x, y = self.scale()\n vertices = [\n (x-4, y+4),\n (x, y-4),\n (x+4, y+4)\n ]\n pygame.draw.polygon(canvas, self.color, vertices, 0)\n\n\nclass Circle(Point):\n\n def draw(self, canvas):\n x, y = self.scale()\n pygame.draw.circle(canvas, self.color, (x,y), 6, 0)\n\n\nclass Square(Point):\n\n def draw(self, canvas):\n x, y = self.scale()\n pygame.draw.rect(canvas, self.color, (x-4, y-4, 9, 9))\n\n\n\npoints = [\n Circle(3, 4, red),\n Circle(5, -3, green),\n Circle(-2, 5, blue),\n Circle(-4, 2, yellow),\n \n Square(2, -2, red),\n Square(-1, -5, green),\n Square(-3, -2, blue),\n Square(4, 0, yellow),\n\n Triangle(-5, 0, red),\n Triangle(0, 6, green),\n Triangle(0, -3, blue),\n Triangle(0, 0, yellow),\n ]\n\ndef draw_axis(screen):\n pygame.draw.line(screen, silver, (0, offset_y), (width, offset_y)) \n for step in range(0, width, zoom):\n pygame.draw.line(screen, 
silver,\n (step, offset_y-2),\n (step, offset_y+2)\n ) \n for step in range(0, height, zoom):\n pygame.draw.line(screen, silver,\n (offset_x-2, step),\n (offset_x+2, step)\n ) \n\n pygame.draw.line(screen, silver, (offset_x, 0), (offset_x, height)) \n\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit(0)\n if event.type == pygame.MOUSEBUTTONUP:\n x, y = pygame.mouse.get_pos()\n x = int(round((x - offset_x) / zoom))\n y = -int(round((y - offset_y) / zoom))\n print(x, y)\n Shape = random.choice([Square, Triangle, Circle])\n points.append(Shape(x, y, random_color()))\n\n screen.fill(black)\n draw_axis(screen)\n for p in points:\n p.move()\n p.draw(screen)\n pygame.display.flip()\n\n","sub_path":"coords.py","file_name":"coords.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"484080663","text":"# _*_ coding: utf-8 _*_\n__author__ = \"dyq666\"\n__date__ = \"2018/7/15 14:23\"\n\nimport os\nimport sys\nimport django\nimport random\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(BASE_DIR)\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"backend.settings\")\n\ndjango.setup()\n\n\"\"\"开始模拟问题种类数据\"\"\"\nfrom problem.models import Problem, ProblemCategory\n\n# 基础的问题模板\nproblem = {\n 'name': '冒泡排序',\n 'degree': 'easy',\n 'category': ProblemCategory.objects.get(name='算法')\n}\ndegrees = ['easy', 'mid', 'diff']\n\n# 生成许多重复的问题\nproblems = [problem.copy() for _ in range(100)]\nfor i, problem in enumerate(problems):\n problem['name'] += str(i)\n problem['degree'] = random.choice(degrees)\n\n# 存入数据库\nfor problem in problems:\n Problem.objects.create(**problem)\n","sub_path":"utils/db_mock/mock_same_problems.py","file_name":"mock_same_problems.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"325724993","text":"# Unweighted Grapgh\nclass AdjNode():\n def __init__(self, value):\n self.data = value\n self.next= None\n\nclass Graph():\n def __init__(self, vertices):\n self.V = vertices\n self.graph = {} #[None]*self.V\n for i in range(self.V):\n self.graph[i] = None\n\n def addEdge(self, src, dest):\n node = AdjNode(dest)\n node.next = self.graph[src]\n self.graph[src] = node\n\n # node = AdjNode(src)\n # node.next = self.graph[dest]\n # self.graph[dest] = node\n\n def print(self):\n for i in range(self.V):\n print(i, end=' ')\n temp = self.graph[i]\n while temp:\n print(temp.data, end= ' ')\n temp = temp.next\n print('\\n')\n\n def bfs(self, start):\n print(start, end=' ')\n queue = []\n visited = [False]*self.V\n queue.append(start)\n visited[start] = True\n\n while queue:\n node = queue.pop(0)\n temp = self.graph[node]\n while temp:\n \n if visited[temp.data] == True:\n temp = temp.next\n continue\n print(temp.data, end = ' ')\n queue.append(temp.data)\n visited[temp.data] = True\n temp = temp.next\n\n def distance(self, start, find):\n # print(start, end=' ')\n queue = []\n visited = [False]*self.V\n queue.append(start)\n visited[start] = True\n dist = 0\n\n while queue:\n count = 0\n node = queue.pop(0)\n temp = self.graph[node]\n while temp:\n if temp.data == find:\n dist += 1\n return dist\n if visited[temp.data] == True:\n temp = temp.next\n continue\n # print(temp.data, end = ' ')\n queue.append(temp.data)\n visited[temp.data] = True\n temp = temp.next\n\n dist += 1\n\n return dist\n\n def 
topologicalSortUtil(self, i, visited, stack):\n visited[i] = True\n\n temp = self.graph[i]\n while temp:\n if not visited[temp.data]:\n self.topologicalSortUtil(temp.data, visited, stack)\n temp = temp.next\n\n # for j in range(len(self.graph[i])):\n # if not visited[j]:\n # self.topologicalSortUtil(j, visited, stack)\n\n stack.append(i)\n\n def topologicalSort(self):\n visited = [0]*self.V\n stack = []\n\n for i in range(len(visited)):\n if not visited[i]:\n self.topologicalSortUtil(i, visited, stack)\n\n for _ in range(len(stack)):\n print(stack.pop(), end = ' ')\n\n\n\ngra = Graph(7)\ngra.addEdge(0,1) \ngra.addEdge(0,4) \ngra.addEdge(1,2) \ngra.addEdge(1,3) \ngra.addEdge(1,4) \ngra.addEdge(2,3)\ngra.addEdge(2,5) \ngra.addEdge(3,4)\ngra.addEdge(5,6)\n# gra.addEdge(4,0)\n# gra.addEdge(4,3)\n# gra.print()\n# gra.bfs(0)\nprint(gra.distance(0,2))\ngra.topologicalSort()\n","sub_path":"dataStructuresFall/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"424817910","text":"import socket\r\nimport subprocess\r\nimport sys\r\nfrom datetime import datetime\r\n\r\ncommonly_dict = {\r\n 21: 'FTP',\r\n 22: 'SSH',\r\n 23: 'Telnet',\r\n 53: 'DNS(UDP)',\r\n 69: 'TFTP(cisco,类似FTP)',\r\n 79: 'Finger',\r\n 80: 'HTTP',\r\n 110: 'POP3',\r\n 111: 'RPC 远程过程调用',\r\n 113: 'windows 验证服务',\r\n 119: 'NNTP 网络新闻组传输协议',\r\n 135: 'RPC 远程过程调用',\r\n 137: 'NetBIOS',\r\n 139: 'windows文件和打印机共享,Unix中的samba服务',\r\n 161: 'SNMP 简单网络管理协议',\r\n 389: 'LDAP',\r\n 443: 'HTTPS',\r\n 445: 'SMB',\r\n 1080: 'socks代理服务',\r\n 2601: 'zebra路由',\r\n 2604: '默认密码zebra',\r\n 5900: 'vnc',\r\n 8080: '用户www代理服务',\r\n}\r\n\r\n# clear the screen\r\nsubprocess.call('cls', shell=True)\r\n\r\n# read the target host from the user\r\nremoteServer = input(\"输入要扫描的主机(默认本机IP):\")\r\nif remoteServer == '':\r\n remoteServerIP ='127.0.0.1'\r\nelse:\r\n remoteServerIP = socket.gethostbyname(remoteServer)\r\n\r\n# print the host IP\r\nprint (\"-\" * 60)\r\nprint (\"请稍后 正在扫描主机常用端口:\", remoteServerIP)\r\nprint (\"-\" * 60)\r\n\r\n# record the start time\r\nt1 = datetime.now()\r\n\r\n# wrap the scan in exception handling\r\n# iterate over the common-port range\r\n# initialize a socket\r\n# attempt the connection\r\ntry:\r\n for commonly in commonly_dict:\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n result = sock.connect_ex((remoteServerIP, commonly))\r\n if result == 0:\r\n print (\"[*]{} Open {}\".format(commonly,commonly_dict[commonly]))\r\n sock.close()\r\n\r\n# exit on Ctrl+C\r\nexcept KeyboardInterrupt:\r\n print (\"手动退出\")\r\n sys.exit()\r\n\r\n# invalid hostname\r\nexcept socket.gaierror:\r\n print ('无法解析主机名')\r\n sys.exit()\r\n\r\n# unable to connect to the host\r\nexcept socket.error:\r\n print (\"无法于主机建立连接\")\r\n sys.exit()\r\n\r\n# record the end time\r\nt2 = datetime.now()\r\n\r\n# compute the elapsed time\r\ntotal = t2 - t1\r\n\r\n# print the elapsed time\r\nprint ('扫描所用时间:',total)\r\n","sub_path":"端口扫描.py","file_name":"端口扫描.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"461848731","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu August 10 23:52:55 2022\n\n@author: jmmauricio\n\"\"\"\n\nimport numpy as np\nimport sympy as sym\n\n\ndef add_shunts(self):\n\n for shunt in self.shunts:\n node_j_str = str(shunt['bus_nodes'][0])\n node_j = '{:s}.{:s}'.format(shunt['bus'], node_j_str)\n col = self.nodes_list.index(node_j) \n row_j = self.it_branch\n self.A[row_j,col] = 1\n \n #node_k_str = str(shunt['bus_nodes'][1])\n #if not node_k_str == '0': # when connected to ground\n # node_k = '{:s}.{:s}'.format(shunt['bus'], str(shunt['bus_nodes'][1]))\n # row_k = self.nodes_list.index(node_k) \n # self.A[row_k,col] = -1\n shunt_name = f\"shunt_{shunt['bus']}_{node_j_str}\"\n g_jk = sym.Symbol(f\"g_{shunt_name}\", real=True) \n b_jk = sym.Symbol(f\"b_{shunt_name}\", real=True) \n self.G_primitive[self.it_branch,self.it_branch] = g_jk\n self.B_primitive[self.it_branch,self.it_branch] = b_jk\n\n Z = shunt['R'] + 1j*shunt['X']\n Y = 1/Z\n self.dae['params_dict'].update({str(g_jk):Y.real})\n self.dae['params_dict'].update({str(b_jk):Y.imag})\n \n self.it_branch += 1\n\n\n\ndef shunts_preprocess(self):\n\n for shunt in self.shunts:\n \n self.N_branches += 1\n\n ","sub_path":"src/pydae/urisi/shunts/shunts.py","file_name":"shunts.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"470622773","text":"import dace\nimport dace.graph.labeling\nimport sys\nimport time\n\nprint(time.time(), 'loading')\na = dace.SDFG.from_file(sys.argv[1])\nprint(time.time(), 'propagating')\ndace.graph.labeling.propagate_labels_sdfg(a)\nprint(time.time(), 'drawing')\na.draw_to_file()\nexit()\n\na.apply_strict_transformations()\n\na.apply_strict_transformations()\n\na.draw_to_file()\n","sub_path":"runsdfg.py","file_name":"runsdfg.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"570762577","text":"\"\"\" Package model views \"\"\"\n\n# From REST Framework\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\n# Serializers\nfrom packages.serializers import PackageModelSerializer, SetPlaceSerializer\n\n# Models\nfrom packages.models import Package\n\n# States\nfrom packages.models import states as package_state\n\n# Permissions\nfrom rest_framework.permissions import IsAuthenticated\nfrom packages.permissions import (\n\tIsClientUser, \n\tIsAdminOrOwner, \n\tIsAdminUser,\n\tIsInTransit,\n)\n\n\nclass PackageViewSet(viewsets.ModelViewSet):\n\t\"\"\" Package model viewset \"\"\"\n\n\tqueryset = Package.objects.all()\n\tserializer_class = PackageModelSerializer\n\n\tdef get_permissions(self):\n\t\tif self.action in ['list', 'create']:\n\t\t\tpermissions = [IsAuthenticated]\n\t\telif self.action in ['transit', 'storage', 'delivered']:\n\t\t\tpermissions = [IsAuthenticated, IsAdminUser]\n\t\telif self.action == 'place':\n\t\t\tpermissions = [IsAuthenticated, IsAdminUser]\n\t\telif self.action == 'destroy':\n\t\t\tpermissions = [IsAuthenticated, IsInTransit, IsAdminOrOwner]\n\t\telse:\n\t\t\tpermissions = [IsAuthenticated, IsClientUser, IsAdminOrOwner]\n\t\treturn [permission() for permission in permissions]\n\n\tdef perform_create(self, serializer):\n\t\tuser = self.request.user\n\t\tstate = package_state.STORAGE\n\t\tserializer.save(package_client=user, state=state, transit_place=\"not in transit\")\n\n\tdef list(self, request, *args, **kwargs):\n\t\tuser = request.user\n\t\tif user.is_admin:\n\t\t\tall_packages = Package.objects.all()\n\t\t\ttransit_packages_count = Package.objects.filter(\n\t\t\t\tstate=package_state.TRANSIT).count()\n\t\t\tdelivered_packages_count = Package.objects.filter(\n\t\t\t\tstate=package_state.DELIVERED).count()\n\t\t\tstorage_packages_count = Package.objects.filter(\n\t\t\t\tstate=package_state.STORAGE).count()\n\t\t\tdata = {\n\t\t\t\t\"in_storage\": storage_packages_count,\n\t\t\t\t\"in_transit\": transit_packages_count,\n\t\t\t\t\"delivered\": 
delivered_packages_count,\n\t\t\t\t\"packages\": PackageModelSerializer(all_packages, many=True).data\n\t\t\t}\n\t\telse:\n\t\t\tuser_packages = Package.objects.filter(package_client=user)\n\t\t\tdata = PackageModelSerializer(user_packages, many=True).data\n\t\treturn Response(data, status=status.HTTP_200_OK)\n\n\t@action(detail=True, methods=['post'])\n\tdef transit(self, request, *args, **kwargs):\n\t\tpackage = self.get_object()\n\t\tpackage.state = package_state.TRANSIT\n\t\tpackage.transit_place = 'Quetzaltenango'\n\t\tpackage.save()\n\t\tdata = PackageModelSerializer(package).data\n\t\treturn Response(data, status=status.HTTP_200_OK)\n\n\t@action(detail=True, methods=['post'])\n\tdef storage(self, request, *args, **kwargs):\n\t\tpackage = self.get_object()\n\t\tpackage.state = package_state.STORAGE\n\t\tpackage.transit_place = 'No en transito'\n\t\tpackage.save()\n\t\tdata = PackageModelSerializer(package).data\n\t\treturn Response(data, status=status.HTTP_200_OK)\n\n\t@action(detail=True, methods=['post'])\n\tdef delivered(self, request, *args, **kwargs):\n\t\tpackage = self.get_object()\n\t\tpackage.state = package_state.DELIVERED\n\t\tpackage.transit_place = 'Destino'\n\t\tpackage.save()\n\t\tdata = PackageModelSerializer(package).data\n\t\treturn Response(data, status=status.HTTP_200_OK)\n\n\t@action(detail=True, methods=['post'])\n\tdef place(self, request, *args, **kwargs):\n\t\tpackage = self.get_object()\n\t\tserializer = SetPlaceSerializer(\n\t\t\tdata=request.data,\n\t\t\tcontext={\n\t\t\t\t'package':package\n\t\t\t}\n\t\t)\n\t\tserializer.is_valid(raise_exception=True)\n\t\tpackage = serializer.save()\n\t\tdata = PackageModelSerializer(package).data\n\t\treturn Response(data, status=status.HTTP_200_OK)\n\n\n\n","sub_path":"packtracker/packages/views/packages.py","file_name":"packages.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"70468375","text":"file = open('bill.txt')\ntext = file.read()\nimport re\n\nprint('在莎士比亚全集中')\n\n\n#字符\nch = re.findall(r'\\d|\\D',text)\nprint(\"包含\",len(ch),\"字符\")\n\n#字母频率\nprint('各个字母出现次数和频率如下')\nletters = re.findall(r'[a-zA-Z]',text)\nb = len(letters)\n\nletters_list = []\nfor x in range(len(letters)):\n\tletters_list.append(letters[x][0].lower())\n\nletters_set = sorted(set(letters_list))\nfor i in range(len(letters_set)):\n\tletter = letters_set[i]\n\ta = letters_list.count(letter)\n\tprint('%-5s %-3s %10.3f' % (letter,a,a/b))\n\n#行数\n\nwith open('bill.txt','r') as file:\n count = 0\n for i in file:\n if len(i.strip()) == 0:\n count = count\n else:\n count += 1\n print(\"共有\",count,\"行\")\n\n\n\n#单词\nword_dict = {} \nword_list = []\nc = text.strip('- \\n')\nword_list = c.replace(',',' ').split()\nprint(\"单词数量为\",len(word_list))\n\nword_sets = list(set(word_list)) \nword_dict = {word: word_list.count(word) for word in word_sets if word}\nignore_list=['the','of','for','a','and','that','this','to','in','on','with','you','I','me','it','his','your','is','are']\nfor key in ignore_list:\n if key in word_dict.keys():\n del word_dict[key]\nresult = sorted(word_dict.items(), key=lambda d: d[1], reverse=True)[:10]\nprint(result)\n\n\n\n\n\n","sub_path":"project_final/project/18301020076.py","file_name":"18301020076.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"298011810","text":"from Customer import Customer\nfrom Inventory import Inventory\nfrom 
Rental import Rental\n\nC = Customer(701, 2, 2, 2, 2, 2, 2, 2)\nI = Inventory(2,2,2,'2')\nR = Rental(2,\"erter\",2,2,\"ertert\",2,\"ertert\")\n\n\ndef show_menu():\n user_input = input(\"\\n---\\n---\\nWhat do you want to do ? \\n\\n (a) - Search for a customer by email \\n (b) - List all customers \\n (c) - Search for a DVD \\n (d) - Return a DVD \\n (x) - Exit \\n\\n--> \").lower()\n while user_input not in [\"a\", \"b\", \"c\", \"d\", \"x\"]:\n user_input = input(\"What do you want to do ? Expected a,b,c or x\\n\\n--> \").lower()\n user_answer = []\n user_answer.append(user_input)\n if user_input == \"a\":\n print(\"Ok, you want to search a customer by email\\n\")\n email = input(\"Please enter an Email\\n\\n --> \")\n result_query = C.search_by_email(email)\n if result_query == None:\n print(\"No Idea of what are you talking about\")\n else:\n print(f\"First Name: {result_query[2]}\\nLast Name: {result_query[3]}\\nEmail: {result_query[4]}\")\n\n if user_input == \"b\":\n print(\"Ok, you want to have a look to all our customers\\n\")\n result_query = C.get_all()\n for x in result_query:\n print(f\"First Name: {x[2]}\\nLast Name: {x[3]}\\nEmail: {x[4]}\\n\")\n\n if user_input == \"c\":\n print(\"Ok, you chose to search for a DVD\\n\")\n text = input(\"Which movie ?\\n\\n--> \")\n store_id = input(\"which Store ? 1 or 2 ?\\n\\n--> \")\n result_query = I.search_by_text(store_id, text)\n print(result_query)\n\n if user_input == \"d\":\n print(\"Ok, you want to return a DVD\\n\")\n rental_id = input(\"what is the ID of your Rental ?\\n\\n--> \")\n R.return_rental(rental_id)\n print(\"Done thank you. C U Soon.\")\n\n if user_input == \"x\":\n print(\"Ok, you chose to exit\\n\")\n\n\ndef display():\n for _ in range(3):\n show_menu()\n print(\"\\n\\n---Bye\")\n\n\n\ndisplay()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"162030201","text":"import random\n\nsud = [[0 for col in range(1, 10)] for row in range(1, 10)]\nsud[0][0] = 2\n\n\ndef generarTabla():\n for cont in range(1, random.randint(10, 15)):\n x = random.randint(0, 8)\n y = random.randint(0, 8)\n\n limite = 0\n\n while limite < 50:\n num = random.randint(1, 9)\n sud[x][y] = num\n\n if isValid():\n break\n else:\n sud[x][y] = 0\n limite += 1\n\ndef printBoard():\n texto = \"\"\n for i in range(0, 9):\n for j in range(0,9):\n texto = texto + \" \" + str(sud[i][j])\n num = int(j%3)\n if num == 2:\n texto = texto + \" | \"\n linea = int(i%3)\n print(texto)\n if linea == 2:\n print(\"- - - - - - - - - - - - - \")\n texto = \"\"\n\ndef solve():\n global sud\n for rows in range(0, 9):\n for colums in range(0, 9):\n if sud[rows][colums] == 0:\n for k in range(1, 10):\n sud[rows][colums] = k\n if isValid() and solve():\n return True\n sud[rows][colums] = 0\n return False\n return True\n\n\n\n\n\ndef isValid():\n return verifyRows() and verifyColumns() and verifySquares()\n\n\ndef verifySquares():\n global sud\n for cx in range(0, 9):\n for cy in range(0, 9):\n ix = int(cx / 3) * 3\n iy = int(cy / 3) * 3\n for num in range(1, 10):\n contador = 0\n for j in range(iy, iy + 3):\n for i in range(ix, ix + 3):\n if sud[i][j] == num:\n contador += 1\n if contador > 1:\n return False\n return True\n\n\ndef verifyColumns():\n global sud\n for j in range(0, 9):\n for num in range(1, 10):\n contador = 0\n for i in range(0, 9):\n if sud[i][j] == num:\n contador += 1\n if contador > 1:\n return False\n return 
True\n\n\ndef verifyRows():\n global sud\n for i in range(0, 9):\n for num in range(1, 10):\n contador = 0\n for j in range(0, 9):\n if sud[i][j] == num:\n contador += 1\n if contador > 1:\n return False\n return True\n\nprint(\"Tablero\")\nprint(\" \")\ngenerarTabla()\n\nprintBoard()\n\nprint(\"Solucion Tablero\")\nprint(\" \")\nsolve()\n\nprintBoard()","sub_path":"2021-3/00.university/2022-1/00.sudoku/Sudoku Interpretado/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"231707825","text":"import numpy as np\nfrom numpy import linalg\nfrom functools import partial\nfrom numpy.linalg import inv\n#from scipy.sparse import csr_matrix\nimport time, os\n\nfrom verifications.basics import density_matrix, variance, f_neumann, runge_kutta, f_neumann_lindblad, sparse_variance\nfrom verifications.doping.helper import new_lindblad, conductivity, doping\n\ndef calculate(U, J, I, T, dim, wp_range, gamma_range, steps_per_period, driving_periods, data_path):\n\n h_0 = np.diagflat([(x-int(dim/2.0))**2*(U/2.0) for x in range(dim)], 0)\n h_0 += np.diagflat([-0.5*J]*(dim-1), 1) + np.diagflat([-0.5*J]*(dim-1), -1)\n\n eigv, eigs = linalg.eigh(h_0)\n\n t_b = eigs\n t_b_i = inv(t_b)\n\n sin_k = 1j*np.diagflat([-0.5]*(dim-1), 1) + 1j*np.diagflat([0.5]*(dim-1), -1)\n sin_k2 = np.diagflat([0.5]*dim, 0) + np.diagflat([-0.25]*(dim-2), 2) + np.diagflat([-0.25]*(dim-2), -2)\n\n\n sin_eig = np.dot(np.dot(t_b_i, sin_k), t_b)\n sin_eig2 = np.dot(np.dot(t_b_i, sin_k2), t_b)\n\n h_eigen = np.dot(np.dot(t_b_i, h_0), t_b)\n\n # h in igenstate basis\n h_eigen = np.diag(eigv)\n eigs = np.identity(dim)\n\n a_eig, ad_eig = new_lindblad(eigs)\n\n driving_shape_k = np.diagflat([(x-int(0.5*dim)) for x in range(dim)])\n driving_shape_eig = np.dot(np.dot(t_b_i, driving_shape_k), t_b)\n\n result = np.zeros((3, len(gamma_range), len(wp_range)), dtype=np.complex128)\n\n for gi, gamma in enumerate(gamma_range):\n for wpi, wp in enumerate(wp_range):\n start_time = time.time()\n\n h = (2*np.pi / wp) / steps_per_period\n if h > 0.05:\n print(\"h to big (%f)\" % h)\n h = 0.05\n print(\"wp: %f, h: %f, gamma: %f, T: %f\" % (wp, h, gamma, T))\n\n steps = int(driving_periods * 2 * np.pi / (wp*h))\n print(\"going to calculate %d steps\" % steps)\n if steps > 200000:\n raise ValueError(\"To many steps!\")\n\n p_eig, _ = density_matrix(T,eigv, eigs)\n\n t = 0.001\n obs = np.zeros((2, steps), dtype=np.complex128)\n for i in range(steps):\n p_eig = runge_kutta(p_eig, partial(doping, dim, I, wp, h_eigen, driving_shape_eig), t, h, partial(f_neumann_lindblad, a=a_eig, ad=ad_eig, gamma=gamma))\n\n obs[0][i] = variance(p_eig, sin_eig, sin_eig2)\n obs[1][i] = conductivity(p_eig, U, driving_shape_eig)\n\n t += h\n\n result[0][gi][wpi] = np.mean(obs[0][int(steps/2.0):])\n result[1][gi][wpi] = np.mean(obs[1][int(steps/2.0):])\n\n foo = np.sum([x*np.exp(1.0j*wp*h*z) for z, x in enumerate(obs[1][int(steps/2.0):])])\n N = len(obs[1][int(steps/2.0):])\n result[2][gi][wpi] = np.mean(foo / float(N))\n\n print(\"Driven %f times\" % (wp*t/(2*np.pi)))\n print(\"calc time %.2f\" % (time.time() - start_time))\n\n print(result)\n\n folder = 'gamma_range-%.2f-%.2f-wp_range-%.2f-%.2f-I-%.2f-U-%.2f-J-%.2f-dim-%d-drivings-%d-T-%.2f' % (\n gamma_range[0], gamma_range[-1],\n wp_range[0], wp_range[-1],\n I, U, J, dim, driving_periods, T\n )\n\n folder_path = os.path.join(data_path, folder)\n if not os.path.exists(folder_path):\n 
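# (editor note, hedged) a race-free alternative would be os.makedirs(folder_path, exist_ok=True)\n # with no prior exists() check; left as a comment so the record's behavior is unchanged.\n 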
os.makedirs(folder_path)\n\n np.save(os.path.join(folder_path, 'data'), result)\n\n np.save(os.path.join(folder_path, 'params'), [\n gamma_range,\n wp_range,\n I, U, J, dim, driving_periods, T\n ])\n\n return \"done\"\n","sub_path":"verifications/doping/gamma_wp.py","file_name":"gamma_wp.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"37143322","text":"#!/usr/bin/python3\n\"\"\" Contains unittests for HBNBCommand class \"\"\"\nfrom unittest import TestCase\nfrom unittest.mock import patch\nfrom console import HBNBCommand\nfrom io import StringIO\nfrom models import storage\nimport os\n\n\nclass TestHBNBCommandClass(TestCase):\n \"\"\" Tests HBNBCommand class \"\"\"\n\n def test_do_create(self):\n \"\"\" Tests create method \"\"\"\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"count State\")\n state_count = int(f.getvalue())\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"count Place\")\n place_count = int(f.getvalue())\n # checks error message if no class argument given\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"create\")\n self.assertEqual(f.getvalue(), \"** class name missing **\\n\")\n # checks error message if no class argument is invalid\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"create BadClassName\")\n self.assertEqual(f.getvalue(), \"** class doesn't exist **\\n\")\n # tests obj created successfully when first object\n HBNBCommand().onecmd(\"create State name=\\\"California\\\"\")\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"count State\")\n self.assertEqual(f.getvalue(), \"{}\\n\".format(state_count + 1))\n # tests obj created successfully when not first\n HBNBCommand().onecmd(\"create State name=\\\"Nevada\\\"\")\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"count State\")\n self.assertEqual(f.getvalue(), \"{}\\n\".format(state_count + 2))\n # tests obj created with different class, saves id\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\n \"create Place name=\\\"My_little_house\\\" number_rooms=4\")\n p_id = f.getvalue()\n p_id = p_id[:-1]\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"count Place\")\n self.assertEqual(f.getvalue(), \"{}\\n\".format(place_count + 1))\n # checks that after do_create, obj exists in dictionary\n dict_key = \"Place.\" + p_id\n __objects = storage.all()\n self.assertTrue(dict_key in __objects.keys())\n # tests that underscores in value were changed to spaces\n self.assertEqual(__objects[dict_key].name, \"My little house\")\n","sub_path":"tests/test_console.py","file_name":"test_console.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"277706175","text":"\"\"\"\r\n@author: Judyta Gogolewska\r\n\"\"\"\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport scipy as sc\r\nimport numpy as np\r\n\r\nfrom sklearn import datasets, svm\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression, Perceptron\r\nfrom sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier\r\nfrom sklearn.metrics import accuracy_score, auc, f1_score, precision_score, recall_score, roc_auc_score, roc_curve\r\nfrom sklearn import preprocessing\r\n\r\ndef generate_grid_points(xMin, xMax, 
yMin, yMax, LINSPACE_SIZE):\r\n xx = np.linspace(xMin, xMax, LINSPACE_SIZE)\r\n yy = np.linspace(yMin, yMax, LINSPACE_SIZE)\r\n XX, YY = np.meshgrid(xx, yy)\r\n points = np.vstack((XX.flatten(), YY.flatten())).transpose()\r\n return points\r\n\r\ndef match_labels(clusters, realLabels, predictedLabels):\r\n realLabels = np.array(realLabels)\r\n labels=[]\r\n for i in range(clusters):\r\n idx = predictedLabels == i\r\n newlabel=sc.stats.mode(realLabels[idx])[0][0]\r\n labels.append(newlabel)\r\n return np.array([labels[label] for label in predictedLabels])\r\n\r\ndef draw_roc_curve(X_train, X_test, y_train, y_test, y_score, classifier_name):\r\n\r\n \r\n plt.figure(figsize=(16,9))\r\n\r\n classes = list(range(0, 4))\r\n y_test_dummies = pd.get_dummies(y_test, drop_first=False).values\r\n for i in classes:\r\n fpr, tpr, thresholds = roc_curve(y_test_dummies[:, i], y_score[:, i])\r\n accuracy = auc(fpr, tpr)\r\n plt.plot(fpr, tpr, label=\"accuracy={:.4f}, class={}\".format(accuracy, i))\r\n\r\n plt.plot(fpr, fpr, c='r', linestyle='--')\r\n plt.title(\"ROC Curve of \" + classifier_name)\r\n plt.xlabel(\"False positive rate\")\r\n plt.ylabel(\"True positive rate\")\r\n\r\ndef draw_discrimination_curve(X, y, classifier, classifier_name):\r\n LINSPACE_SIZE = 100\r\n xMin, xMax = X['x'].min(), X['x'].max()\r\n yMin, yMax = X['y'].min(), X['y'].max()\r\n points = generate_grid_points(xMin, xMax, yMin, yMax, LINSPACE_SIZE)\r\n xLabels = classifier.predict(points).reshape(LINSPACE_SIZE, LINSPACE_SIZE)\r\n\r\n plt.figure(figsize=(8,8))\r\n plt.xlim(xMin, xMax)\r\n plt.ylim(yMin, yMax)\r\n plt.contourf(points[:,0].reshape(LINSPACE_SIZE, LINSPACE_SIZE), points[:,1].reshape(LINSPACE_SIZE, LINSPACE_SIZE), xLabels, cmap='viridis', alpha=0.2)\r\n plt.scatter(X['x'], X['y'], c=y, alpha=0.75)\r\n plt.title('Discimination curve of ' + classifier_name)\r\n\r\ndef generate_dataset():\r\n X, y = datasets.make_classification(\r\n n_samples=1500,\r\n n_features=2,\r\n n_informative=2,\r\n n_classes=4,\r\n n_clusters_per_class=1,\r\n n_redundant=0,\r\n n_repeated=0,\r\n random_state=3\r\n )\r\n X = pd.DataFrame(X, columns=[\"x\", \"y\"])\r\n y = pd.Series(y)\r\n return X, y\r\n\r\n\r\ndef get_classifiers():\r\n return {\r\n 'ovo_svc_linear': OneVsOneClassifier(svm.SVC(kernel='linear', probability=True), n_jobs=-1),\r\n 'ovr_svc_linear': OneVsRestClassifier(svm.SVC(kernel='linear', probability=True), n_jobs=-1),\r\n 'ovo_svc_rbf': OneVsOneClassifier(svm.SVC(kernel='rbf', probability=True), n_jobs=-1),\r\n 'ovr_svc_rbf': OneVsRestClassifier(svm.SVC(kernel='rbf', probability=True), n_jobs=-1),\r\n 'ovo_logistic_regression': OneVsOneClassifier(LogisticRegression(), n_jobs=-1),\r\n 'ovr_logistic_regression': OneVsRestClassifier(LogisticRegression(), n_jobs=-1),\r\n 'ovo_perceptron': OneVsOneClassifier(Perceptron(), n_jobs=-1),\r\n 'ovr_perceptron': OneVsRestClassifier(Perceptron(), n_jobs=-1),\r\n }\r\n\r\ndef draw_difference_plot(X_train, X_test, y_train, y_test, y_predicted, classifier_name):\r\n CLUSTER_COUNT = max(y_test)+1\r\n\r\n y_predicted = match_labels(CLUSTER_COUNT, y_test, y_predicted)\r\n prediction_difference = y_predicted != y_test\r\n\r\n plt.figure(figsize=(12,6))\r\n\r\n plt.scatter(X_train['x'], X_train['y'], c=y_train, cmap='viridis')\r\n plt.scatter(X_test['x'], X_test['y'], c=y_predicted, cmap='viridis')\r\n plt.title(classifier_name + ' classification')\r\n \r\n plt.figure(figsize=(12,6))\r\n \r\n plt.scatter(X_test['x'], X_test['y'], c=np.array(prediction_difference), cmap='viridis')\r\n 
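# (editor note, hedged) prediction_difference is a boolean mask, so this second figure\r\n # colors each test point by whether the matched prediction disagrees with y_test;\r\n # the train points below are drawn with a constant label via np.tile(1, len(y_train)).\r\n 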
plt.scatter(X_train['x'], X_train['y'], c=np.tile(1, len(y_train)), cmap='viridis')\r\n plt.title(classifier_name + ' difference with original')\r\n\r\ndef draw_results(results, classifiers_keys):\r\n WIDTH = 0.1\r\n POSITION = np.arange(5)\r\n\r\n plt.figure(figsize=(16, 9))\r\n\r\n for i, classifier_name in enumerate(classifiers_keys):\r\n plt.bar(POSITION + WIDTH * (i - 3.5), results.loc[classifier_name,:], WIDTH, label=classifier_name)\r\n plt.xticks(POSITION, results.columns)\r\n\r\n plt.legend()\r\n\r\ndef get_accuracy_scores(y_test, y_pred, classifier_name):\r\n accuracy = accuracy_score(y_test, y_pred)\r\n recall = recall_score(y_test, y_pred, average='macro')\r\n precision = precision_score(y_test, y_pred, average='macro')\r\n f1 = f1_score(y_test, y_pred, average='macro')\r\n\r\n y_pred = preprocessing.label_binarize(y_pred, classes=[0, 1, 2, 3])\r\n\r\n auc = roc_auc_score(y_test, y_pred, multi_class=classifier_name[0:3], average='macro')\r\n\r\n row = pd.Series(\r\n {\r\n \"accuracy\" : accuracy,\r\n \"recall\" : recall,\r\n \"precision\" : precision,\r\n \"f1\" : f1,\r\n \"auc\" : auc,\r\n }, name=classifier_name\r\n )\r\n return row\r\n\r\ndef draw_original_points(X, y):\r\n plt.figure(figsize=(7, 7))\r\n plt.scatter(X['x'], X['y'], c = y)\r\n plt.title(\"Original points\")\r\n\r\nX, y = generate_dataset()\r\nX_train, X_test, y_train, y_test = train_test_split(\r\n X, y, test_size=0.5, random_state=43)\r\nclassifiers = get_classifiers()\r\nresults = pd.DataFrame()\r\n\r\nfor name, classifier in classifiers.items():\r\n classifier.fit(X_train, y_train)\r\n y_pred = classifier.predict(X_test)\r\n draw_difference_plot(X_train, X_test, y_train, y_test, y_pred, name)\r\n\r\n results = results.append(get_accuracy_scores(y_test, y_pred, name))\r\n\r\n draw_discrimination_curve(X, y, classifier, name)\r\n y_score = classifier.decision_function(X_test)\r\n draw_roc_curve(X_train, X_test, y_train, y_test, y_score, name)\r\n\r\n plt.close('all')\r\ndraw_results(results, classifiers.keys())\r\ndraw_original_points(X, y)","sub_path":"Data Science 2020/lab12/lab12.py","file_name":"lab12.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"347167304","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# session usage example\nfrom flask import Flask, render_template, session, escape, redirect, url_for, request\nimport os\nimport time\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n if 'username' in session:\n return 'Login in as %s' % escape(session['username'])\n else:\n return render_template('login.html')\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n session['username'] = request.form['username']\n app.logger.debug(\n '%s login as %s' % (request.form['username'], time.localtime(time.time())))\n return redirect(url_for('index'))\n else:\n return render_template('login.html')\n\n\n@app.route('/logout')\ndef logout():\n session.pop('username', None)\n return redirect(url_for('login'))\n# generate the secret key\napp.secret_key = os.urandom(24)\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"flaskuse/sessionuse.py","file_name":"sessionuse.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"295136273","text":"# File: proj2.py\n# Author: Dane Magbuhos\n# Date: 11/03/17\n# Section: 20\n# Email: mag4@umbc.edu\n# Description: This program is a simple cellular game called 
Conway's Game of Life. \n# In this game, you have a grid where pixels can either be on or off (dead or alive).\n# In the game, as time marches on, there are simple rules that govern whether each \n# pixel will be on or off (dead or alive) at the next time step.\n\n\n# Used to establish that the value 0 and below are not acceptable inputs \nLOWER_BOUND = 0\n\n# Used to denote quit option for user\nQUIT = \"q\" \n\n# Used tp denote empty string\nNO_CELL = \"\"\n\n# Used to modify the length of lists, boundaries, or counters\nSUBTRACT_INDEX = -1 \nADD_INDEX = 1\nINDEX_INCREMENT = 2\nINCREMENT = 1\n\n# Used to denote initial even and odd index values\nEVEN_INDEX = 0\nODD_INDEX = 1\n\n# Used to denote which action is being executed during input validation\nCREATING_BOARD = \"CB\"\nTURNING_ON_CELL = \"TOC\"\nITERATION = \"I\"\n\n# Used to denote representations of dead and alive cells\nDEAD_CELL = \".\"\nALIVE_CELL = \"A\"\n\n# Used to denote small and big amount of neighbors\nSMALL_N = 2\nBIG_N = 3\n \n##################################################################\n# printBoard(): Used to output 2D list of the board\n# Input: board; 2D list that contains dead and alive cells\n# Output: None; Used only as a print function\n\ndef printBoard(board):\n\n row = EVEN_INDEX\n while row < len(board):\n col = EVEN_INDEX\n while col < len(board[row]):\n print(board[row][col], end= \"\")\n col += INCREMENT\n print()\n row += INCREMENT\n\n###########################################################################################\n# inputValidator(): Used to identify if user's inputs are valid and within specified range \n# Input: userInput; an integer that user provides\n# boundary; an integer that determines if userInputs are within range\n# action; a string that indicates how to treat boundary appropriately\n# Output: result; boolean value that determines if user input is valid\n\ndef inputValidator(userInput, boundary, action):\n\n result = False\n \n # Handles the row and column case \n if action == CREATING_BOARD:\n if userInput <= boundary:\n print(\"Invalid value; please enter a number greater than or equal to 1\",end= \"\\n\\n\")\n else:\n # Only returns true if user input is within range of lower bound\n result = True\n\n # Handles the cell row and cell column case\n elif action == TURNING_ON_CELL:\n if userInput < LOWER_BOUND or userInput >= boundary:\n print(\"Invalid value; please enter a number between \" + str(LOWER_BOUND) + \" and \" + str(boundary + SUBTRACT_INDEX) + \" inclusive\", end= \"\\n\\n\")\n else:\n # Only returns true if user input is within range of lower bound and boundary\n result = True\n \n # Handles the amount of iteration(s) case\n elif action == ITERATION:\n if userInput < boundary:\n print(\"Invalid value; please enter a number greater than or equal 0\", end= \"\\n\\n\")\n else:\n # Only returns true if user input is within range of lower bound\n result = True\n\n return result\n\n#####################################################################################################\n# populateCellBoard(): Used to create a new board that consists of newly acquired alive \n# cells based on provided cellList\n# Input: rowInput; an integer that user provided for row input \n# columnInput; an integer that user provided for column input\n# cellList; a 1D list that contains the coordinate points for all alive cells \n# Output: board; a 2D list that contains populated dead and alive cells\n\ndef populateCellBoard(rowInput, columnInput, cellList):\n board = []\n row = []\n 
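# (editor note, hedged worked example) with rowInput=2 and columnInput=3 the loops below\n # first build board = [['.','.','.'], ['.','.','.']]; cellList then flips listed cells,\n # e.g. cellList = [0, 1] turns on row 0, column 1 only.\n 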
rowIndex = EVEN_INDEX\n rowCell = EVEN_INDEX\n columnCell = ODD_INDEX\n\n # Creates board based on user's row and column input\n for i in range(columnInput):\n row.append(DEAD_CELL)\n\n for i in range (rowInput):\n board.append(row[:])\n\n while rowIndex < len(board):\n colIndex = EVEN_INDEX\n while colIndex < len(board[rowIndex]):\n # Traverses through 1D cell list and turns on alive cells at each specificed location\n while rowCell < len(cellList):\n if cellList[rowCell] == rowIndex:\n board[rowIndex][cellList[columnCell]] = ALIVE_CELL\n\n # rowcell conveys even indexes of cellList\n rowCell += INDEX_INCREMENT\n \n # columnCell conveys odd indexes of cellList\n columnCell += INDEX_INCREMENT\n colIndex += INCREMENT\n \n # Resets indexes to default value for next row index iteration\n rowCell = EVEN_INDEX\n columnCell = ODD_INDEX\n\n rowIndex += INCREMENT\n\n return board\n\n########################################################################################\n# aliveCellCounter(): Counts the amount of alive cell neighbors around \n# examined cell\n# Input: examinedCell; a string that represents a cell being examined\n# aliveCellList; a 1D list that contains all dead and alive cells\n# Output: result; a string that either is a dead or alive cell based on \n# amount of amount of alive cells present at the time\n\ndef aliveCellCounter(examinedCell, aliveCellList):\n \n index = EVEN_INDEX\n count = EVEN_INDEX\n result = NO_CELL\n\n while index < len(aliveCellList):\n # Counts for alive cell duplicates\n if aliveCellList[index] == ALIVE_CELL:\n count += INCREMENT\n index += INCREMENT\n\n # The count and examined cell determines if the examined cell lives or dies\n if count < SMALL_N and examinedCell == ALIVE_CELL or count > BIG_N and examinedCell == ALIVE_CELL:\n result = DEAD_CELL\n\n elif count == SMALL_N and examinedCell == ALIVE_CELL or count == BIG_N and examinedCell == ALIVE_CELL:\n result = ALIVE_CELL\n\n elif count == BIG_N and examinedCell == DEAD_CELL:\n result = ALIVE_CELL\n\n return result\n\n##############################################################################\n# nextIteration(): Used to identify the layout of what next cell board should\n# look like\n# Input: board; a 2D list that contains populated dead and alive cells\n# Output: newCellList; a 1D list that contains coordinate points\n# of all alive cells\n \ndef nextIteration(board):\n\n row = EVEN_INDEX\n newCellList = []\n aliveCellList = []\n boardLength = len(board[row])\n boardWidth = len(board)\n\n while row < len(board):\n col = EVEN_INDEX\n while col < len(board[row]):\n\n # Handles the top left corner of board\n if row == EVEN_INDEX and col == EVEN_INDEX:\n \n # Grabs cell from the right of examined cell\n aliveCellList.append(board[row][col + ADD_INDEX])\n \n # Grabs two cells below examined cell\n aliveCellList.append(board[row + ADD_INDEX][col])\n aliveCellList.append(board[row + ADD_INDEX][col + ADD_INDEX])\n\n # Handles the top middle section of board\n elif row == EVEN_INDEX and col >= col + ADD_INDEX or row == EVEN_INDEX and col < boardLength + SUBTRACT_INDEX:\n\n # Grabs two cells from the left and right of examined cell\n aliveCellList.append(board[row][col + SUBTRACT_INDEX])\n aliveCellList.append(board[row][col + ADD_INDEX])\n\n # Grabs three bottom cells below examined cell\n aliveCellList.append(board[row + ADD_INDEX][col + SUBTRACT_INDEX])\n aliveCellList.append(board[row + ADD_INDEX][col])\n aliveCellList.append(board[row + ADD_INDEX][col + ADD_INDEX])\n \n # Handles the top right corner of 
board\n elif row == EVEN_INDEX and col == boardLength + SUBTRACT_INDEX:\n \n # Grabs cell from the left of examined cell\n aliveCellList.append(board[row][col + SUBTRACT_INDEX])\n\n # Grabs two cells below examined cell\n aliveCellList.append(board[row + ADD_INDEX][col])\n aliveCellList.append(board[row + ADD_INDEX][col + SUBTRACT_INDEX])\n \n # Handles left edge section of board\n elif row > EVEN_INDEX and row < boardWidth + SUBTRACT_INDEX and col == EVEN_INDEX:\n\n # Grabs cell from the right of examined cell\n aliveCellList.append(board[row][col + ADD_INDEX])\n\n # Grabs two cells above examined cell\n aliveCellList.append(board[row + SUBTRACT_INDEX][col])\n aliveCellList.append(board[row + SUBTRACT_INDEX][col + ADD_INDEX])\n\n # Grabs two cells below examined cell\n aliveCellList.append(board[row + ADD_INDEX][col])\n aliveCellList.append(board[row + ADD_INDEX][col + ADD_INDEX])\n\n # Handles middle section of board\n elif row > EVEN_INDEX and row < boardWidth + SUBTRACT_INDEX and col > EVEN_INDEX and col < boardLength + SUBTRACT_INDEX:\n\n # Grabs cells from the left and right of examined cell\n aliveCellList.append(board[row][col + SUBTRACT_INDEX])\n aliveCellList.append(board[row][col + ADD_INDEX])\n \n # Grabs cells from the top of examined cell\n aliveCellList.append(board[row + SUBTRACT_INDEX][col + SUBTRACT_INDEX])\n aliveCellList.append(board[row + SUBTRACT_INDEX][col])\n aliveCellList.append(board[row + SUBTRACT_INDEX][col + ADD_INDEX])\n\n # Grabs cells from the bottom of examined cell\n aliveCellList.append(board[row + ADD_INDEX][col + SUBTRACT_INDEX])\n aliveCellList.append(board[row + ADD_INDEX][col])\n aliveCellList.append(board[row + ADD_INDEX][col + ADD_INDEX])\n \n # Handles right edge section of board\n elif row > EVEN_INDEX and row < boardWidth + SUBTRACT_INDEX and col == boardLength + SUBTRACT_INDEX:\n \n # Grabs cell from the left of examined cell\n aliveCellList.append(board[row][col + SUBTRACT_INDEX])\n\n # Grabs two cells from the top of examined cell\n aliveCellList.append(board[row + SUBTRACT_INDEX][col])\n aliveCellList.append(board[row + SUBTRACT_INDEX][col + SUBTRACT_INDEX])\n\n # Grabs two cells from the bottom of examined cell\n aliveCellList.append(board[row + ADD_INDEX][col])\n aliveCellList.append(board[row + ADD_INDEX][col + SUBTRACT_INDEX])\n\n # Handles bottom left corner of board\n elif row == boardWidth + SUBTRACT_INDEX and col == EVEN_INDEX:\n \n # Grabs cell from the right of examined cell\n aliveCellList.append(board[row][col + ADD_INDEX])\n\n # Grabs two cells above examined cell\n aliveCellList.append(board[row + SUBTRACT_INDEX][col])\n aliveCellList.append(board[row + SUBTRACT_INDEX][col + ADD_INDEX])\n\n # Handles bottom edge of board\n elif row == boardWidth + SUBTRACT_INDEX and col > EVEN_INDEX and col < boardLength + SUBTRACT_INDEX:\n \n # Grabs cells from the left and right of examined cell\n aliveCellList.append(board[row][col + SUBTRACT_INDEX])\n aliveCellList.append(board[row][col + ADD_INDEX])\n\n # Grabs three cells above examined cell\n aliveCellList.append(board[row + SUBTRACT_INDEX][col + SUBTRACT_INDEX])\n aliveCellList.append(board[row + SUBTRACT_INDEX][col])\n aliveCellList.append(board[row + SUBTRACT_INDEX][col + ADD_INDEX])\n\n # Handles bottom right corner of board\n elif row == boardWidth + SUBTRACT_INDEX and col == boardLength + SUBTRACT_INDEX:\n \n # Grabs cell from the left of examined cell\n aliveCellList.append(board[row][col + SUBTRACT_INDEX])\n\n # Grabs two cells on top of 
#######################################################################################\n# makeCellBoard(): Primarily used to call all cell-creation related functions\n#                  and keeps track of the number of iterations for each newly\n#                  created board\n# Input:  rowInput; an integer that user provided\n#         columnInput; an integer that user provided\n#         cellList; a 1D list that contains coordinate points for alive cells\n#         iteration; an integer that user provided \n# Output: None; Used only as a cohesive driver\n\ndef makeCellBoard(rowInput, columnInput, cellList, iteration):\n    \n    count = INCREMENT\n    print(\"Starting Board: \",end=\"\\n\\n\")\n\n    # Calls populateCellBoard to set up starting board\n    board = populateCellBoard(rowInput, columnInput, cellList)\n    printBoard(board)\n\n    # Calls nextIteration function to get next set of alive cells list\n    while count <= iteration:\n        print(\"\\n\\n\"\"Iteration \"+str(count)+\":\",end=\"\\n\\n\")\n        newCellList = nextIteration(board)\n        board = populateCellBoard(rowInput, columnInput, newCellList)\n        printBoard(board)\n        count += INCREMENT\n\n###################################################################################\n# cellInitiation(): Gathers up all coordinates at which the user wants\n#                   specific cells to be alive prior to creating the board\n# Input:  rowRange; an integer that represents the max width of board \n#         colRange; an integer that represents the max length of board\n# Output: cellList; a 1D list that contains coordinate points for \n#         alive cells \n\ndef cellInitiation(rowRange, colRange):\n\n    validCells = False\n    validRow = False\n    validCol = False\n    cellList = []\n\n    while validCells != True:\n        \n        while validRow != True:\n            rowCell = input(\"Please enter the row of a cell to turn on (or q to exit): \")\n            \n            # Checks to see if row cell is not equal to quit before validating row cell input\n            if rowCell != QUIT:\n                rowCell = int(rowCell)\n                result = inputValidator(rowCell, rowRange, TURNING_ON_CELL)\n                \n                # If row cell input is valid, the value is stored in cellList\n                if result == True:\n                    cellList.append(rowCell)\n                    validRow = True\n            else:\n                # Exits out of validRow and validCells while loops if user enters in QUIT value\n                validRow = True\n                validCells = True\n\n        # Loops until a valid column is given; only entered if row cell is not equal to QUIT value\n        while validCol != True and rowCell != QUIT:\n            colCell = input(\"Please enter the column for that cell: \")\n\n            # Checks to see if column cell is not equal to quit before validating column cell input\n            if colCell != QUIT:\n                colCell = int(colCell)\n                result = inputValidator(colCell, colRange, TURNING_ON_CELL)\n                \n                # If column input is valid, the value is stored in cellList\n                if result == True:\n                    cellList.append(colCell)\n                    validCol = True\n\n        # Resets boolean flags to default value prior to next iteration\n        validRow = False\n        validCol = False\n        \n    return cellList\n    \ndef main():\n\n    validInput = False\n    \n    # Continues to loop through until both row and column inputs are valid \n    while validInput != True:\n\n        rowInput = int(input(\"Please enter the number of rows: \"))\n\n        # Calls inputValidator to see if input is valid\n        result = inputValidator(rowInput, LOWER_BOUND, CREATING_BOARD)\n        \n        # Checks to see if row input is valid before asking user for column input\n        if result == True:\n            while validInput != True:\n\n                columnInput = int(input(\"Please enter the number of columns: \"))\n                result = inputValidator(columnInput, LOWER_BOUND, CREATING_BOARD)\n                \n                # Checks to see if column input is valid before exiting while loop\n                if result == True:\n                    validInput = True\n\n    # Calls cellInitiation function to gather the list of specified cells to turn on\n    aliveCells = cellInitiation(rowInput, columnInput)\n\n    validInput = False\n\n    while validInput != True:\n\n        iterationInput = int(input(\"How many iterations should I run? \"))\n        result = inputValidator(iterationInput, LOWER_BOUND, ITERATION)\n        \n        # Checks to see if iteration input is valid before exiting while loop\n        if result == True:\n            validInput = True\n\n    # Calls makeCellBoard function to generate starting board\n    makeCellBoard(rowInput, columnInput, aliveCells, iterationInput)\n\nmain()\n\n","sub_path":"Projects/proj2/proj2.py","file_name":"proj2.py","file_ext":"py","file_size_in_byte":18552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
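For an end-to-end check of proj2.py's seeding and stepping, the classic three-cell blinker is handy. This is a hypothetical non-interactive driver: it bypasses the prompts by passing the flat coordinate list directly (row, column pairs, exactly as cellInitiation produces them); printBoard and the inputValidator constants are defined earlier in the file, outside this excerpt.

```python
# Hypothetical non-interactive run: a 5x5 board with a horizontal blinker.
# cellList is flat [row, col, row, col, ...], matching cellInitiation's output.
blinker = [2, 1, 2, 2, 2, 3]
board = populateCellBoard(5, 5, blinker)
printBoard(board)                                   # row 2 lit: horizontal phase
board = populateCellBoard(5, 5, nextIteration(board))
printBoard(board)                                   # column 2 lit: vertical phase
```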
{"seq_id":"524836576","text":"import os\nimport sys\nimport random\nimport bisect\nimport subprocess\nimport jsonread as jread\n\n#python3 sy.py funcname.c sup inf iteration (consider also passing myfunc, the internal function name, to tell it apart from func.c; or keep func.c in a separate workdir so it does not collide with the .c file produced by opt)\n#loop until ? several times\n\n#average rand\ndef randinitcase(start, stop, length):\n    start, stop = (int(start), int(stop)) if start <= stop else (int(stop), int(start))\n    length = int(abs(length)) if length else 0\n    random_list = []\n    for i in range(length):\n        random_list.append(random.randint(start, stop))\n    '''\n    for i in range((int)(length/2)):\n        random_list.append(random.randint(start, (int)((stop+start)/2)))\n    for i in range((int)(length/2)):\n        random_list.append(random.randint((int)((stop+start)/2),stop) )\n    '''\n    return random_list\n    \n#TODO: seems unnecessary to add case to cases due to the redivide process \ndef sampling(reward:list,partition:list,cases:list,allcases:list,k):\n    idxes=[i for i in range(len(reward))]\n    for i in range(k):\n        idx = random.choices(idxes,weights=reward)[0]\n        new_case = random.randint(partition[idx],partition[idx+1])\n        cases[idx].append(new_case)\n        allcases.append(new_case)\n    \n\ndef divide_part(reward:list,partition:list):\n    if len(reward) == 0 and len(partition) == 2:\n        start = partition[0]\n        stop = partition[1]\n        mid = int((stop + start)/2)\n        partition.append(mid)\n        partition.sort()\n    else:\n        idx = reward.index(max(reward))\n        start = partition[idx]\n        stop = partition[idx+1]\n        mid = int((stop + start)/2)\n        partition.append(mid)\n        partition.sort()\n\n#redivide seems time consuming\ndef divide_cases(allcases:list, cases:list, partition:list):\n    cases.clear()\n    for case in allcases:\n        idx = bisect.bisect(partition,case)-1\n        if idx < 0:\n            print(\"idx error\")\n        while idx > len(cases)-1:\n            cases.append([])\n        cases[idx].append(case)\n    \n\n
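The helpers above implement a simple bandit-style loop: rewards weight which partition receives new test cases, and bisect re-buckets every case after each split. A small standalone illustration of those two primitives (the concrete values are made up):

```python
import bisect
import random

partition = [0, 50, 100]          # two buckets: [0, 50) and [50, 100]
reward = [0.2, 0.8]               # the right-hand bucket looks more promising

# Weighted pick of a bucket, then a fresh case inside its bounds,
# exactly as sampling() does above.
idx = random.choices(range(len(reward)), weights=reward)[0]
case = random.randint(partition[idx], partition[idx + 1])

# Which bucket does an arbitrary case land in? (divide_cases' core step.)
bucket = bisect.bisect(partition, case) - 1
```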
#also resets the gcov counters; TODO: can be modified\ndef recpile(cmdarg):\n    cmdstr = \"gcc -fprofile-arcs -ftest-coverage {filename} -o test \".format(filename = cmdarg)\n    #print(cmdstr)\n    os.system(cmdstr)\n\n#generate {targetfunction}.c , which describes the CFG of the target function TODO:single.ll.c\ndef mvgraph_gen(cmdarg):\n    cmdstr = \"clang {filename} -emit-llvm -c -o test.bc -g && opt -load libMyCFGPass.so -MyCFG test.bc \\\n    && mv myfunc.c graph_gen.c\".format(filename = cmdarg) #myfunc.c should be passed to this function as a parameter?\n    #print(cmdstr)\n    os.system(cmdstr)\n\n#loop of cases\ndef loop_cases(reward:list,allcases:list,cases:list,reached:list,funcname):\n    #refresh reached\n    print(\"q:\",reached)\n    recpile(funcname)\n    for case in allcases:\n        cmdstr = \"./test {inp}\".format(inp = case)\n        os.system(cmdstr)\n    cmdstr = \"gcov {filename} --json && gunzip -f {filename}.gcov.json.gz \".format(filename = funcname)\n    os.system(cmdstr)\n    jread.readall(funcname,reached)\n\n    #refresh reward\n    reward.clear()\n    for i in range(len(cases)): #cases[i] is the set of testcases in one partition\n        recpile(funcname)\n        for case in cases[i]:\n            cmdstr = \"./test {inp}\".format(inp = case)\n            #if case > 9900:\n            #    print(\"yyyyyyyyyyyyyyyyessssssssss\")\n            #print(cmdstr)\n            os.system(cmdstr)\n        cmdstr = \"gcov {filename} --json && gunzip -f {filename}.gcov.json.gz \".format(filename = funcname)\n        os.system(cmdstr)\n        #& python3 jsonread.py && make reward\n        jread.readjson(funcname,reached)\n        cmdstr = \"make reward\"\n        os.system(cmdstr)\n        micmd = \"./reward\"\n        res = subprocess.Popen(micmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)\n        mi = res.stdout.readlines()\n        reward.append(float(str(mi[0],encoding='utf-8')))\n\nif __name__ == '__main__':\n    args = sys.argv\n    if len(args) < 5:   # needs funcname, sup, inf and iteration\n        print('''error''')\n        exit()\n\n    one_sample_k = 10\n\n    reached = []\n\n    partition=[]\n    partition.append(int(args[2]))\n    partition.append(int(args[3]))\n\n    
iteration = int(args[4])\n\n #rand sample for k=200 times\n #average rand or fully rand\n allcases = randinitcase(args[2],args[3],200) #all cases\n #print(allcases)\n\n #init\n reward = [] #reward of different partition\n cases = [[]]\n\n #choose a partition and divide cases into different partition (half)\n divide_part(reward,partition)\n #print(partition)\n divide_cases(allcases,cases,partition)\n\n mvgraph_gen(args[1])\n for i in range(iteration):\n loop_cases(reward,allcases,cases,reached,args[1])\n print(\"iteration\",i,' partition:',partition,' reward:',reward)\n print(list(map(len,cases)))\n print(reached)\n sampling(reward,partition,cases,allcases,one_sample_k)\n divide_part(reward,partition)\n divide_cases(allcases,cases,partition)\n\n \n \n \n \n\n\n \n\n\n \n ","sub_path":"sy.py","file_name":"sy.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"635020094","text":"import ast\nimport io\nimport csv as pycsv\nfrom typing import Any, Callable, Optional, Mapping, Sequence, TextIO, cast\n\nclass Csv:\n '''\n Csv format with type inference.\n\n Load support is implemented by `csv.DictReader`.\n\n Dump functions are currently not implemented.\n\n Load-time type inference is implemented by `ast.literal_eval()` and can\n be disabled:\n\n 1. for all fields by passing infer=False to the load function\n 2. for a particular field by passing a type conversion callable for the\n field via `typemap` (e.g. `lambda _: _` to return the naked value).\n '''\n\n def loads(\n self,\n data: str,\n **kwargs: Any\n ) -> Sequence[Mapping[str, Any]]:\n\n with io.StringIO(data) as buf:\n return self.loadfd(buf, **kwargs)\n\n def loadf(\n self,\n path: str,\n encoding: Optional[str] = None,\n **kwargs: Any\n ) -> Sequence[Mapping[str, Any]]:\n\n with open(path, 'r', encoding=encoding) as fd:\n return self.loadfd(fd, **kwargs)\n\n def loadfd(\n self,\n fd: TextIO,\n **kwargs: Any\n ) -> Sequence[Mapping[str, Any]]:\n \n infer = cast(bool, kwargs.get('infer', True))\n typemap = cast(Mapping[str, Callable], kwargs.get('typemap', {}))\n fieldnames = cast(Sequence[str], kwargs.get('fieldnames', []))\n restkey = cast(Optional[str], kwargs.get('restkey'))\n restval = cast(Optional[str], kwargs.get('restval'))\n dialect = cast(str, kwargs.get('dialect', 'excel'))\n reader = pycsv.DictReader(\n fd, fieldnames=fieldnames, restkey=restkey, restval=restval, dialect=dialect)\n rows = []\n for row in reader:\n for field in row:\n row[field] = self._value(field, row[field], infer, typemap)\n rows.append(row)\n return rows\n\n def dumps(\n self,\n data: Any,\n **kwargs: Any,\n ) -> str:\n \n raise NotImplementedError()\n\n def dumpf(\n self,\n path: str,\n data: Any,\n encoding: Optional[str] = None,\n **kwargs: Any\n ) -> None:\n \n raise NotImplementedError()\n\n def dumpfd(\n self,\n fd: TextIO,\n data: Any,\n **kwargs: Any\n ) -> None:\n \n raise NotImplementedError()\n\n def _value(\n self,\n name: str,\n value: Any,\n infer: bool,\n typemap: Mapping[str, Callable]\n ) -> Any:\n\n if typemap and name in typemap:\n # always use the mapped type if present\n return typemap[name](value)\n if not infer:\n # return the naked value if not inferring\n return value\n try:\n # evaluate the value as a literal\n return ast.literal_eval(value)\n except (ValueError, SyntaxError):\n # return the naked value if it can't be eval'd\n return 
value\n","sub_path":"lura/formats/csv.py","file_name":"csv.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"405937920","text":"# -*- coding: utf-8 -*-\n\nimport sys\nfrom xml.dom.minidom import parse\nimport xml.dom.minidom\n\nATTR_TYPE_BOOL = 1\nATTR_TYPE_INT = 2\nATTR_TYPE_FLOAT = 3\nATTR_TYPE_STR = 4\nATTR_TYPE_VEC2 = 5\nATTR_TYPE_VEC3 = 6\nATTR_TYPE_RGBA = 7\nATTR_TYPE_RECT = 8\nATTR_TYPE_DYN = 9\nATTR_TYPE_SP = 10\n\ndef spValFunc_exclude_emitter(spDatas):\n ret = \"[\"\n for index in range(len(spDatas)):\n ret += '\"' + spDatas[index] + '\"'\n if index < len(spDatas) - 1:\n ret += \", \"\n ret += \"]\"\n return ret\n\ndef spValFunc_exclude_list(spDatas):\n ret = \"[\" + spDatas[0] + \"]\"\n ret = ret.replace(\" \", \", \")\n return ret\n \ndef spValFunc_time_colour(spDatas):\n ret = \"[\"\n for index in range(len(spDatas)):\n ret += spDatas[index]\n if index < len(spDatas) - 1:\n ret += \" \"\n ret += \"]\"\n ret = ret.replace(\" \", \", \")\n return ret\n\ndef spValFunc_time_colour_list(spDatas):\n ret = \"[\" + spDatas[0] + \"]\"\n ret = ret.replace(\" \", \", \")\n return ret\n\ndef spValFunc_pf_pos(spDatas):\n ret = \"[\"\n for index in range(len(spDatas)):\n ret += spDatas[index]\n if index < len(spDatas) - 1:\n ret += \" \"\n ret += \"]\"\n ret = ret.replace(\" \", \", \")\n return ret\n\ndef spValFunc_pf_pos_list(spDatas):\n ret = \"[\" + spDatas[0] + \"]\"\n ret = ret.replace(\" \", \", \")\n return ret\n \ndef spValFunc_amin_type(spDatas):\n if spDatas[0] == \"0\":\n return \"true\"\n else:\n return \"false\"\n\ndef spValFunc_billboard_origin(spDatas): \n if spDatas[0] == \"top_left\":\n return \"0\"\n elif spDatas[0] == \"top_center\":\n return \"1\"\n elif spDatas[0] == \"top_right\":\n return \"2\"\n elif spDatas[0] == \"center_left\":\n return \"3\"\n elif spDatas[0] == \"center\":\n return \"4\"\n elif spDatas[0] == \"center_right\":\n return \"5\"\n elif spDatas[0] == \"bottom_left\":\n return \"6\"\n elif spDatas[0] == \"bottom_center\":\n return \"7\"\n elif spDatas[0] == \"bottom_right\":\n return \"8\"\n else:\n return \"4\"\n \nclass AttrDef:\n def __init__(self, key, attrType, outputKey, spValFunc = None):\n self.key = key\n self.attrType = attrType\n self.outputKey = outputKey\n self.spValFunc = spValFunc\n \n def getOutputKey(self):\n if(len(self.outputKey) > 0):\n return self.outputKey\n else:\n return self.key\n \nRENDERER_BASE_ATTR_DEF = [\n AttrDef(\"texture_name\", ATTR_TYPE_STR, \"textureName\"),\n AttrDef(\"mat_type\", ATTR_TYPE_INT, \"matType\"),\n AttrDef(\"add_power_ratio\", ATTR_TYPE_FLOAT, \"enhanceAlpha\"),\n]\n\nRENDERER_BILLBOARD_ATTR_DEF = [\n AttrDef(\"billboard_origin\", ATTR_TYPE_SP, \"originType\", spValFunc_billboard_origin),\n]\n\nEMITTER_BASE_ATTR_DEF = [\n AttrDef(\"name\", ATTR_TYPE_STR, \"name\"),\n AttrDef(\"direction\", ATTR_TYPE_VEC3, \"direction\"),\n AttrDef(\"position\", ATTR_TYPE_VEC3, \"position\"),\n AttrDef(\"cycle_time\", ATTR_TYPE_VEC2, \"cycleTime\"),\n AttrDef(\"emitter_start_time\", ATTR_TYPE_FLOAT, \"startTime\"),\n AttrDef(\"emitter_end_time\", ATTR_TYPE_FLOAT, \"endTime\"),\n AttrDef(\"emission_rate\", ATTR_TYPE_FLOAT, \"emissionRate\"),\n AttrDef(\"force_emit\", ATTR_TYPE_BOOL, \"forceEmit\"),\n AttrDef(\"live_forever\", ATTR_TYPE_BOOL, \"liveForever\"),\n AttrDef(\"emitted_name\", ATTR_TYPE_STR, \"emittedName\"),\n AttrDef(\"is_cycle\", ATTR_TYPE_BOOL, \"cycle\"),\n AttrDef(\"start_color\", ATTR_TYPE_RGBA, 
\"startColor\"),\n AttrDef(\"end_color\", ATTR_TYPE_RGBA, \"endColor\"),\n AttrDef(\"emitter_color\", ATTR_TYPE_RGBA, \"emitterColor\"),\n #AttrDef(\"use_all_size\", ATTR_TYPE_BOOL, \"use_all_size\"),\n AttrDef(\"live_time\", ATTR_TYPE_DYN, \"liveTime\"),\n AttrDef(\"angle\", ATTR_TYPE_DYN, \"angle\"),\n AttrDef(\"width\", ATTR_TYPE_DYN, \"width\"),\n AttrDef(\"height\", ATTR_TYPE_DYN, \"height\"),\n AttrDef(\"depth\", ATTR_TYPE_DYN, \"depth\"),\n AttrDef(\"all_xyz\", ATTR_TYPE_DYN, \"size\"),\n AttrDef(\"velocity\", ATTR_TYPE_DYN, \"velocity\"),\n]\n\n\nEMITTER_BOX_ATTR_DEF = [\n AttrDef(\"size\", ATTR_TYPE_VEC3, \"boxSize\"),\n AttrDef(\"box_width\", ATTR_TYPE_FLOAT, \"boxWidth\"),\n AttrDef(\"box_height\", ATTR_TYPE_FLOAT, \"boxHeight\"),\n AttrDef(\"box_depth\", ATTR_TYPE_FLOAT, \"boxDepth\"),\n AttrDef(\"box_dir\", ATTR_TYPE_VEC3, \"boxDir\"),\n]\n\nEMITTER_CIRCLE_ATTR_DEF = [\n AttrDef(\"circle_random\", ATTR_TYPE_BOOL, \"circleRandom\"),\n AttrDef(\"circle_step\", ATTR_TYPE_FLOAT, \"circleStep\"),\n AttrDef(\"circle_angle\", ATTR_TYPE_FLOAT, \"circleAngle\"),\n #AttrDef(\"circle_normal\", ATTR_TYPE_VEC3, \"circleNormal\"),\n AttrDef(\"circle_x_radius\", ATTR_TYPE_FLOAT, \"circleXRadius\"),\n AttrDef(\"circle_z_radius\", ATTR_TYPE_FLOAT, \"circleZRadius\"),\n AttrDef(\"circle_x_width\", ATTR_TYPE_FLOAT, \"circleXWidth\"),\n AttrDef(\"circle_z_width\", ATTR_TYPE_FLOAT, \"circleZWidth\"),\n AttrDef(\"circle_radius\", ATTR_TYPE_FLOAT, \"circleRadius\"),\n AttrDef(\"circle_auto_dir\", ATTR_TYPE_BOOL, \"circleAutoDirection\"),\n #AttrDef(\"axis_type\", ATTR_TYPE_INT, \"axisType\"),\n AttrDef(\"fan_start_angle\", ATTR_TYPE_FLOAT, \"circleFanStartAngle\"),\n AttrDef(\"fan_end_angle\", ATTR_TYPE_FLOAT, \"circleFanEndAngle\"),\n]\n\nEMITTER_LINE_ATTR_DEF = [\n AttrDef(\"start_point\", ATTR_TYPE_VEC3, \"startPoint\"),\n AttrDef(\"line_direction\", ATTR_TYPE_VEC3, \"lineDir\"),\n AttrDef(\"is_randomized\", ATTR_TYPE_BOOL, \"randomized\"),\n AttrDef(\"increment\", ATTR_TYPE_FLOAT, \"increment\"),\n]\n\nEMITTER_POINT_ATTR_DEF = [\n]\n\nEMITTER_SPHERE_ATTR_DEF = [\n AttrDef(\"sphere_radius\", ATTR_TYPE_FLOAT, \"sphereRadius\"),\n AttrDef(\"sphere_aotu_dir\", ATTR_TYPE_BOOL, \"sphereAotuDir\"),\n]\n\nAFFECTOR_BASE_ATTR_DEF = [\n AttrDef(\"name\", ATTR_TYPE_STR, \"name\"),\n AttrDef(\"exclude_emitter\", ATTR_TYPE_SP, \"excludeEmitters\", spValFunc_exclude_emitter),\n AttrDef(\"affect_start\", ATTR_TYPE_FLOAT, \"affectStart\"),\n AttrDef(\"affect_end\", ATTR_TYPE_FLOAT, \"affectEnd\"),\n AttrDef(\"frist_state\", ATTR_TYPE_BOOL, \"fristState\"),\n AttrDef(\"affect_enable\", ATTR_TYPE_BOOL, \"enable\"),\n AttrDef(\"exclude_list\", ATTR_TYPE_SP, \"excludeEmitters\", spValFunc_exclude_list),\n]\n\nAFFECTOR_COLOR_ATTR_DEF = [\n AttrDef(\"time_colour\", ATTR_TYPE_SP, \"colors\", spValFunc_time_colour),\n AttrDef(\"time_colour_list\", ATTR_TYPE_SP, \"colors\", spValFunc_time_colour_list),\n]\n\nAFFECTOR_DEFLECTOR_ATTR_DEF = [\n AttrDef(\"plane_point\", ATTR_TYPE_VEC3, \"planePoint\"),\n AttrDef(\"plane_normal\", ATTR_TYPE_VEC3, \"planeNormal\"),\n AttrDef(\"plane_bounce\", ATTR_TYPE_DYN, \"bounce\"),\n]\n\nAFFECTOR_ELASTICITY_ATTR_DEF = [\n AttrDef(\"reverse_limit\", ATTR_TYPE_FLOAT, \"reverseLimit\"),\n AttrDef(\"distance_factor\", ATTR_TYPE_FLOAT, \"distanceFactor\"),\n AttrDef(\"time_start\", ATTR_TYPE_FLOAT, \"timeStart\"),\n AttrDef(\"reverse_factor\", ATTR_TYPE_FLOAT, \"reverseFactor\"),\n AttrDef(\"offset_radius\", ATTR_TYPE_FLOAT, \"offsetRadius\"),\n]\n\nAFFECTOR_LINEARFORCE_ATTR_DEF = [\n 
AttrDef(\"force_vector\", ATTR_TYPE_VEC3, \"forceVector\"),\n AttrDef(\"froce_app\", ATTR_TYPE_INT, \"froceApp\"),\n AttrDef(\"dyn_force\", ATTR_TYPE_DYN, \"force\"),\n]\n\nAFFECTOR_PATHFOLLOWER_ATTR_DEF = [\n AttrDef(\"pf_pos\", ATTR_TYPE_SP, \"points\", spValFunc_pf_pos),\n AttrDef(\"pf_pos_list\", ATTR_TYPE_SP, \"points\", spValFunc_pf_pos_list),\n]\n\nAFFECTOR_RANDOMISER_ATTR_DEF = [\n AttrDef(\"is_random_direction\", ATTR_TYPE_BOOL, \"randomDirection\"),\n AttrDef(\"max_deviation\", ATTR_TYPE_VEC3, \"maxDeviation\"),\n AttrDef(\"max_deviation_x\", ATTR_TYPE_FLOAT, \"maxDeviationX\"),\n AttrDef(\"max_deviation_y\", ATTR_TYPE_FLOAT, \"maxDeviationY\"),\n AttrDef(\"max_deviation_z\", ATTR_TYPE_FLOAT, \"maxDeviationZ\"),\n]\n\nAFFECTOR_ROTATION_ATTR_DEF = [\n AttrDef(\"rot_speed\", ATTR_TYPE_DYN, \"rotationSpeed\"),\n AttrDef(\"rot_start_angle\", ATTR_TYPE_DYN, \"rotationStartAngle\"),\n]\n\nAFFECTOR_SCALE_ATTR_DEF = [\n AttrDef(\"scale_x\", ATTR_TYPE_DYN, \"scaleX\"),\n AttrDef(\"scale_y\", ATTR_TYPE_DYN, \"scaleY\"),\n AttrDef(\"scale_z\", ATTR_TYPE_DYN, \"scaleZ\"),\n AttrDef(\"scale_xyz\", ATTR_TYPE_DYN, \"scaleSize\"),\n #AttrDef(\"is_all_scale\", ATTR_TYPE_BOOL, \"reverseLimit\"),\n AttrDef(\"scale_fixed\", ATTR_TYPE_BOOL, \"fixed\"),\n]\n\nAFFECTOR_SUCTION_ATTR_DEF = [\n AttrDef(\"suction_force\", ATTR_TYPE_DYN, \"suction\"),\n AttrDef(\"suction_pos\", ATTR_TYPE_VEC3, \"suctionPos\"),\n AttrDef(\"suction_cv\", ATTR_TYPE_BOOL, \"collideVisible\"),\n AttrDef(\"suction_type\", ATTR_TYPE_INT, \"suctionType\"),\n]\n\nAFFECTOR_TEXANIM_ATTR_DEF = [\n AttrDef(\"time_step\", ATTR_TYPE_FLOAT, \"timeStep\"),\n AttrDef(\"amin_type\", ATTR_TYPE_SP, \"cycle\", spValFunc_amin_type),\n #AttrDef(\"start_frame\", ATTR_TYPE_INT, \"startFrame\"),\n #AttrDef(\"random_start\", ATTR_TYPE_BOOL, \"random_start\"),\n AttrDef(\"row_num\", ATTR_TYPE_INT, \"row\"),\n AttrDef(\"col_num\", ATTR_TYPE_INT, \"col\"),\n #AttrDef(\"anim_num\", ATTR_TYPE_INT, \"anim_num\"),\n AttrDef(\"play_order\", ATTR_TYPE_BOOL, \"forward\"),\n]\n\nAFFECTOR_VORTEX_ATTR_DEF = [\n AttrDef(\"vortex_rot_speed\", ATTR_TYPE_DYN, \"rotationSpeed\"),\n AttrDef(\"vortex_vec\", ATTR_TYPE_VEC3, \"rotationVec\"),\n]\n\nTECHNIQUE_ATTR_DEF = [\n AttrDef(\"name\", ATTR_TYPE_STR, \"name\"),\n AttrDef(\"particle_quota\", ATTR_TYPE_INT, \"particleQuota\"),\n AttrDef(\"emitter_quota\", ATTR_TYPE_INT, \"emitterQuota\"),\n AttrDef(\"tech_pos\", ATTR_TYPE_VEC3, \"position\"),\n AttrDef(\"default_width\", ATTR_TYPE_FLOAT, \"defWidth\"),\n AttrDef(\"default_height\", ATTR_TYPE_FLOAT, \"defHeight\"),\n AttrDef(\"default_depth\", ATTR_TYPE_FLOAT, \"defDepth\"),\n AttrDef(\"tech_axis\", ATTR_TYPE_VEC3, \"axis\"),\n AttrDef(\"tech_angle\", ATTR_TYPE_FLOAT, \"angle\"),\n AttrDef(\"tech_enable\", ATTR_TYPE_BOOL, \"enable\"),\n AttrDef(\"is_local\", ATTR_TYPE_BOOL, \"local\"),\n]\n\nTECHNIQUE_ENABLE_ATTR_DEF = AttrDef(\"tech_enable\", ATTR_TYPE_BOOL, \"enable\")\n\nPARTICLESYSTEM_ATTR_DEF = [\n AttrDef(\"cycle_total_time\", ATTR_TYPE_FLOAT, \"cycleTotalTime\"),\n #AttrDef(\"is_cycle\", ATTR_TYPE_BOOL, \"cycle\"),\n AttrDef(\"template_name\", ATTR_TYPE_STR, \"templateName\"),\n AttrDef(\"ps_scale\", ATTR_TYPE_FLOAT, \"scale\"),\n AttrDef(\"scale_speed\", ATTR_TYPE_FLOAT, \"scaleSpeed\"),\n AttrDef(\"scale_time\", ATTR_TYPE_FLOAT, \"scaleTime\"),\n AttrDef(\"pre_time\", ATTR_TYPE_FLOAT, \"preTime\"),\n AttrDef(\"bound\", ATTR_TYPE_RECT, \"bound\"),\n AttrDef(\"clipper_width\", ATTR_TYPE_FLOAT, \"clipperWidth\"),\n AttrDef(\"clipper_height\", ATTR_TYPE_FLOAT, 
\"clipperHeight\"),\n AttrDef(\"clipper_pos\", ATTR_TYPE_VEC3, \"clipperPos\"),\n AttrDef(\"clipper_technique_list\", ATTR_TYPE_SP, \"clipperTechniqueList\"),\n AttrDef(\"clipper_inverted\", ATTR_TYPE_BOOL, \"clipperInverted\"),\n]\n\nPARTICLE_ISCYCLE_ATTR_DEF = AttrDef(\"is_cycle\", ATTR_TYPE_BOOL, \"cycle\")\n \nclass DynAttr:\n def __init__(self):\n self.type = \"\"\n self.datas = []\n\nclass Attr:\n def __init__(self, attrDef):\n self.attrDef = attrDef\n \n self.attrDef = attrDef\n self.val = \"\"\n self.spDatas = []\n self.dynAttr = None\n \n def getOutputKey(self):\n return self.attrDef.getOutputKey()\n \n def getOutputVal(self):\n attrType = self.attrDef.attrType\n if attrType == ATTR_TYPE_BOOL:\n return self.val\n elif attrType == ATTR_TYPE_INT:\n return self.val\n elif attrType == ATTR_TYPE_FLOAT:\n return self.val\n elif attrType == ATTR_TYPE_STR:\n return '\"' + self.val + '\"'\n elif attrType == ATTR_TYPE_VEC2:\n return '[' + self.val.replace(' ', ', ', 1) + ']'\n elif attrType == ATTR_TYPE_VEC3:\n return '[' + self.val.replace(' ', ', ', 2) + ']'\n elif attrType == ATTR_TYPE_RGBA:\n return '[' + self.val.replace(' ', ', ', 3) + ']'\n elif attrType == ATTR_TYPE_RECT:\n return '[' + self.val.replace(' ', ', ', 3) + ']'\n elif attrType == ATTR_TYPE_DYN:\n ret = ''\n if self.dynAttr.type == \"fixed\":\n ret = '{\"type\": \"fixed\", \"value\": ' + self.dynAttr.datas[0] + '}'\n elif self.dynAttr.type == \"random\":\n ret = '{\"type\": \"random\", \"value\": [' + self.dynAttr.datas[0] + ', ' + self.dynAttr.datas[1] + ']}'\n elif self.dynAttr.type == \"curve_spline\":\n ret = ''\n for index in range(len(self.dynAttr.datas)):\n ret += self.dynAttr.datas[index]\n if index < len(self.dynAttr.datas) - 1:\n ret += ' '\n \n ret = ret.replace(' ', ', ')\n \n ret = '{\"type\": \"curved\", \"value\": [' + ret + ']}'\n \n return ret\n \n elif attrType == ATTR_TYPE_SP:\n if self.attrDef.spValFunc:\n return self.attrDef.spValFunc(self.spDatas)\n \n return self.spDatas\n \nclass Renderer:\n def __init__(self):\n self.type = \"\"\n self.attrs = []\n \n def createAttr(self, attrDef):\n attr = Attr(attrDef)\n self.attrs.append(attr)\n return attr\n \n def getOutputType(self):\n return self.type\n\nclass Emitter:\n def __init__(self):\n self.type = \"\"\n self.attrs = []\n \n def createAttr(self, attrDef):\n attr = Attr(attrDef)\n self.attrs.append(attr)\n return attr\n \n def getOutputType(self):\n return self.type\n \nclass Affector:\n def __init__(self):\n self.type = \"\"\n self.attrs = []\n \n def createAttr(self, attrDef):\n attr = Attr(attrDef)\n self.attrs.append(attr)\n return attr\n \n def getOutputType(self):\n if self.type == \"TextureAnimator\":\n return \"TexAnim\"\n elif self.type == \"Colour\":\n return \"Color\"\n elif self.type == \"PathFollower\":\n return \"Spline\"\n \n return self.type\n\nclass Technique:\n def __init__(self):\n self.attrs = []\n self.renderer = None\n self.emitters = []\n self.affectors = []\n \n def createAttr(self, attrDef):\n attr = Attr(attrDef)\n self.attrs.append(attr)\n return attr\n \n def createRenderer(self):\n self.renderer = Renderer()\n return self.renderer\n \n def createEmitter(self):\n emitter = Emitter()\n self.emitters.append(emitter)\n return emitter\n \n def createAffector(self):\n affector = Affector()\n self.affectors.append(affector)\n return affector\n \nclass ParticleSystem:\n def __init__(self):\n self.attrs = []\n self.techniques = []\n \n def createAttr(self, attrDef):\n attr = Attr(attrDef)\n self.attrs.append(attr)\n return attr\n 
\n def createTechnique(self):\n technique = Technique()\n self.techniques.append(technique)\n return technique\n \ndef parseDynAttr(dynAttrElem, attr):\n attr.type = dynAttrElem.getAttribute(\"type\")\n \n dynAttr = DynAttr()\n dynAttr.type = dynAttrElem.getAttribute(\"dyn_type\")\n if(dynAttr.type == \"fixed\"):\n valElem = dynAttrElem.getElementsByTagName(\"value\")[0]\n dynAttr.datas.append(valElem.childNodes[0].data)\n elif(dynAttr.type == \"random\"):\n minElem = dynAttrElem.getElementsByTagName(\"min\")[0]\n maxElem = dynAttrElem.getElementsByTagName(\"max\")[0]\n dynAttr.datas.append(minElem.childNodes[0].data)\n dynAttr.datas.append(maxElem.childNodes[0].data)\n elif(dynAttr.type == \"curve_spline\"):\n pointsElems = dynAttrElem.getElementsByTagName(\"points\")\n for index in range(len(pointsElems)):\n pointsElem = pointsElems[index]\n dynAttr.datas.append(pointsElem.childNodes[0].data)\n \n attr.dynAttr = dynAttr\n \ndef parseRenderer(rendererElem, renderer):\n renderer.type = rendererElem.getAttribute(\"type\")\n \n attrDefList = []\n attrDefList.append(RENDERER_BASE_ATTR_DEF)\n \n if renderer.type == \"Billboard\":\n attrDefList.append(RENDERER_BILLBOARD_ATTR_DEF)\n \n for k in range(len(attrDefList)):\n adl = attrDefList[k]\n \n for index in range(len(adl)):\n attrDef = adl[index]\n if(attrDef.attrType == ATTR_TYPE_DYN):\n dynAttrElems = rendererElem.getElementsByTagName(\"dyn\")\n for dynAttrElemIdx in range(len(dynAttrElems)):\n dynAttrElem = dynAttrElems[dynAttrElemIdx]\n if dynAttrElem.getAttribute(\"type\") == attrDef.key:\n attr = renderer.createAttr(attrDef)\n parseDynAttr(dynAttrElem, attr)\n break\n else:\n attrElems = rendererElem.getElementsByTagName(attrDef.key)\n if(len(attrElems) > 0):\n attr = renderer.createAttr(attrDef)\n if attrDef.attrType == ATTR_TYPE_SP:\n for aei in range(len(attrElems)):\n attr.spDatas.append(attrElems[aei].childNodes[0].data)\n else:\n attr.val = attrElems[0].childNodes[0].data\n \n\ndef parseEmitter(emitterElem, emitter):\n emitter.type = emitterElem.getAttribute(\"type\")\n \n attrDefList = []\n attrDefList.append(EMITTER_BASE_ATTR_DEF)\n if(emitter.type == \"Box\"):\n attrDefList.append(EMITTER_BOX_ATTR_DEF)\n elif(emitter.type == \"Circle\"):\n attrDefList.append(EMITTER_CIRCLE_ATTR_DEF)\n elif(emitter.type == \"Line\"):\n attrDefList.append(EMITTER_LINE_ATTR_DEF)\n elif(emitter.type == \"Point\"):\n attrDefList.append(EMITTER_POINT_ATTR_DEF)\n elif(emitter.type == \"Sphere\"):\n attrDefList.append(EMITTER_SPHERE_ATTR_DEF)\n \n for k in range(len(attrDefList)):\n adl = attrDefList[k]\n \n for index in range(len(adl)):\n attrDef = adl[index]\n if(attrDef.attrType == ATTR_TYPE_DYN):\n dynAttrElems = emitterElem.getElementsByTagName(\"dyn\")\n for dynAttrElemIdx in range(len(dynAttrElems)):\n dynAttrElem = dynAttrElems[dynAttrElemIdx]\n if dynAttrElem.getAttribute(\"type\") == attrDef.key:\n attr = emitter.createAttr(attrDef)\n parseDynAttr(dynAttrElem, attr)\n break\n else:\n attrElems = emitterElem.getElementsByTagName(attrDef.key)\n if(len(attrElems) > 0):\n attr = emitter.createAttr(attrDef)\n if attrDef.attrType == ATTR_TYPE_SP:\n for aei in range(len(attrElems)):\n attr.spDatas.append(attrElems[aei].childNodes[0].data)\n else:\n attr.val = attrElems[0].childNodes[0].data\n \n \ndef parseAffector(affectorElem, affector):\n affector.type = affectorElem.getAttribute(\"type\")\n \n attrDefList = []\n attrDefList.append(AFFECTOR_BASE_ATTR_DEF)\n if(affector.type == \"Colour\"):\n attrDefList.append(AFFECTOR_COLOR_ATTR_DEF)\n 
elif(affector.type == \"Deflector\"):\n attrDefList.append(AFFECTOR_DEFLECTOR_ATTR_DEF)\n elif(affector.type == \"Elasticity\"):\n attrDefList.append(AFFECTOR_ELASTICITY_ATTR_DEF)\n elif(affector.type == \"LinearForce\"):\n attrDefList.append(AFFECTOR_LINEARFORCE_ATTR_DEF)\n elif(affector.type == \"PathFollower\"):\n attrDefList.append(AFFECTOR_PATHFOLLOWER_ATTR_DEF)\n elif(affector.type == \"Randomiser\"):\n attrDefList.append(AFFECTOR_RANDOMISER_ATTR_DEF)\n elif(affector.type == \"Rotation\"):\n attrDefList.append(AFFECTOR_ROTATION_ATTR_DEF)\n elif(affector.type == \"Scale\"):\n attrDefList.append(AFFECTOR_SCALE_ATTR_DEF)\n elif(affector.type == \"Suction\"):\n attrDefList.append(AFFECTOR_SUCTION_ATTR_DEF)\n elif(affector.type == \"TextureAnimator\"):\n attrDefList.append(AFFECTOR_TEXANIM_ATTR_DEF)\n elif(affector.type == \"Vortex\"):\n attrDefList.append(AFFECTOR_VORTEX_ATTR_DEF)\n \n for k in range(len(attrDefList)):\n adl = attrDefList[k]\n \n for index in range(len(adl)):\n attrDef = adl[index]\n if attrDef.attrType == ATTR_TYPE_DYN:\n dynAttrElems = affectorElem.getElementsByTagName(\"dyn\")\n for dynAttrElemIdx in range(len(dynAttrElems)):\n dynAttrElem = dynAttrElems[dynAttrElemIdx]\n if dynAttrElem.getAttribute(\"type\") == attrDef.key:\n attr = affector.createAttr(attrDef)\n parseDynAttr(dynAttrElem, attr)\n break\n else:\n attrElems = affectorElem.getElementsByTagName(attrDef.key)\n if(len(attrElems) > 0):\n attr = affector.createAttr(attrDef)\n if attrDef.attrType == ATTR_TYPE_SP:\n for aei in range(len(attrElems)):\n attr.spDatas.append(attrElems[aei].childNodes[0].data)\n else:\n attr.val = attrElems[0].childNodes[0].data\n \n \ndef parseTechnique(techniqueElem, technique):\n '''\n attr = technique.createAttr(TECHNIQUE_ENABLE_ATTR_DEF)\n attr.val = \"true\"\n '''\n \n for index in range(len(TECHNIQUE_ATTR_DEF)):\n attrDef = TECHNIQUE_ATTR_DEF[index]\n if(attrDef.attrType == ATTR_TYPE_DYN):\n dynAttrElems = techniqueElem.getElementsByTagName(\"dyn\")\n for dynAttrElemIdx in range(len(dynAttrElems)):\n dynAttrElem = dynAttrElems[dynAttrElemIdx]\n if dynAttrElem.getAttribute(\"type\") == attrDef.key:\n attr = technique.createAttr(attrDef)\n parseDynAttr(dynAttrElem, attr)\n break\n else:\n attrElems = techniqueElem.getElementsByTagName(attrDef.key)\n if(len(attrElems) > 0):\n attr = technique.createAttr(attrDef)\n if attrDef.attrType == ATTR_TYPE_SP:\n for aei in range(len(attrElems)):\n attr.spDatas.append(attrElems[aei].childNodes[0].data)\n else:\n attr.val = attrElems[0].childNodes[0].data\n \n \n rendererElem = techniqueElem.getElementsByTagName(\"render\")[0]\n renderer = technique.createRenderer()\n parseRenderer(rendererElem, renderer)\n \n emitterElemList = techniqueElem.getElementsByTagName(\"Emitter\")\n for emitterElem in emitterElemList:\n emitter = technique.createEmitter()\n parseEmitter(emitterElem, emitter)\n \n affectorElemList = techniqueElem.getElementsByTagName(\"Affector\")\n for affectorElem in affectorElemList:\n affector = technique.createAffector()\n parseAffector(affectorElem, affector)\n \n \ndef parseParticleSystem(particleSystemElem, particleSystem):\n if particleSystemElem.hasAttribute(\"is_cycle\"):\n attr = particleSystem.createAttr(PARTICLE_ISCYCLE_ATTR_DEF)\n attr.val = particleSystemElem.getAttribute(\"is_cycle\")\n \n for index in range(len(PARTICLESYSTEM_ATTR_DEF)):\n attrDef = PARTICLESYSTEM_ATTR_DEF[index]\n if(attrDef.attrType == ATTR_TYPE_DYN):\n dynAttrElems = particleSystemElem.getElementsByTagName(\"dyn\")\n for dynAttrElemIdx in 
range(len(dynAttrElems)):\n dynAttrElem = dynAttrElems[dynAttrElemIdx]\n if dynAttrElem.getAttribute(\"type\") == attrDef.key:\n attr = particleSystem.createAttr(attrDef)\n parseDynAttr(dynAttrElem, attr)\n break\n else:\n attrElems = particleSystemElem.getElementsByTagName(attrDef.key)\n if(len(attrElems) > 0):\n attr = particleSystem.createAttr(attrDef)\n if attrDef.attrType == ATTR_TYPE_SP:\n for aei in range(len(attrElems)):\n attr.spDatas.append(attrElems[aei].childNodes[0].data)\n else:\n attr.val = attrElems[0].childNodes[0].data\n \n techniqueElemList = particleSystemElem.getElementsByTagName(\"Technique\")\n for techniqueElem in techniqueElemList:\n rendererElem = techniqueElem.getElementsByTagName(\"render\")[0]\n if not rendererElem or rendererElem.getAttribute(\"type\") != \"Billboard\":\n rendererType = \"NONE\"\n if rendererElem:\n rendererType = rendererElem.getAttribute(\"type\")\n print(\"[WARNING]invalid renderer:\" + rendererType)\n continue\n \n technique = particleSystem.createTechnique()\n parseTechnique(techniqueElem, technique)\n \ndef parseXml(xmlPath):\n particleSystem = ParticleSystem()\n \n domtree = xml.dom.minidom.parse(xmlPath)\n particleSystemElem = domtree.documentElement\n \n parseParticleSystem(particleSystemElem, particleSystem)\n \n return particleSystem\n\nTAB_STR = ' '\nCR_STR = '\\n'\n\ndef exportJson_attr(attr, fo, tabCnt, comma):\n fo.write(TAB_STR * tabCnt)\n fo.write('\"' + attr.getOutputKey() + '\": ' + attr.getOutputVal())\n if comma:\n fo.write(',')\n fo.write(CR_STR)\n\ndef exportJson_Renderer(renderer, fo):\n fo.write(TAB_STR * 5 + '\"type\": \"' + renderer.getOutputType() + '\",' + CR_STR)\n for index in range(len(renderer.attrs)):\n attr = renderer.attrs[index]\n exportJson_attr(attr, fo, 5, index < len(renderer.attrs) - 1)\n\ndef exportJson_Emitter(emitter, fo, comma):\n fo.write(TAB_STR * 5 + '{' + CR_STR)\n fo.write(TAB_STR * 6 + '\"type\": \"' + emitter.getOutputType() + '\",' + CR_STR)\n for index in range(len(emitter.attrs)):\n attr = emitter.attrs[index]\n exportJson_attr(attr, fo, 6, index < len(emitter.attrs) - 1)\n fo.write(TAB_STR * 5 + '}')\n if comma:\n fo.write(',')\n fo.write(CR_STR)\n\ndef exportJson_Affector(affector, fo, comma):\n fo.write(TAB_STR * 5 + '{' + CR_STR)\n fo.write(TAB_STR * 6 + '\"type\": \"' + affector.getOutputType() + '\",' + CR_STR)\n for index in range(len(affector.attrs)):\n attr = affector.attrs[index]\n exportJson_attr(attr, fo, 6, index < len(affector.attrs) - 1)\n fo.write(TAB_STR * 5 + '}')\n if comma:\n fo.write(',')\n fo.write(CR_STR)\n \ndef exportJson_Technique(technique, fo, comma):\n fo.write(TAB_STR * 3 + '{' + CR_STR)\n \n for index in range(len(technique.attrs)):\n exportJson_attr(technique.attrs[index], fo, 4, True)\n \n #export renderer\n fo.write(TAB_STR * 4 + '\"Renderer\": {' + CR_STR)\n exportJson_Renderer(technique.renderer, fo)\n fo.write(TAB_STR * 4 + '},' + CR_STR)\n \n #export emitter\n fo.write(TAB_STR * 4 + '\"Emitters\": [' + CR_STR)\n for index in range(len(technique.emitters)):\n exportJson_Emitter(technique.emitters[index], fo, index < len(technique.emitters) - 1)\n fo.write(TAB_STR * 4 + '],' + CR_STR)\n \n #export affector\n fo.write(TAB_STR * 4 + '\"Affectors\": [' + CR_STR)\n for index in range(len(technique.affectors)):\n exportJson_Affector(technique.affectors[index], fo, index < len(technique.affectors) - 1)\n fo.write(TAB_STR * 4 + ']' + CR_STR)\n \n fo.write(TAB_STR * 3 + '}')\n if comma:\n fo.write(',')\n \n fo.write(CR_STR)\n \ndef 
exportJson_ParticleSystem(particleSystem, fo):\n    for index in range(len(particleSystem.attrs)):\n        attr = particleSystem.attrs[index]\n        exportJson_attr(attr, fo, 2, True)\n    \n    fo.write(TAB_STR * 2 + '\"Techniques\": [' + CR_STR)\n    \n    for index in range(len(particleSystem.techniques)):\n        technique = particleSystem.techniques[index]\n        exportJson_Technique(technique, fo, index < len(particleSystem.techniques) - 1)\n    \n    fo.write(TAB_STR * 2 + ']' + CR_STR)\n    \ndef exportJson(particleSystem, fileName):\n    exportPath = fileName + \".json\"\n    fo = open(exportPath, 'w')\n    \n    fo.write('{' + CR_STR + TAB_STR + '\"' + fileName + '\": {' + CR_STR)\n\n    exportJson_ParticleSystem(particleSystem, fo)\n    \n    fo.write(TAB_STR + '}' + CR_STR + '}')\n    \n    fo.close()\n    \nif __name__ == '__main__':\n    fileName = sys.argv[1]\n    srcDir = sys.argv[2]\n    xmlPath = srcDir + fileName + \".xml\"\n    print(\"start convert:\" + xmlPath)\n    particleSystem = parseXml(xmlPath)\n    exportPath = fileName + \".json\"\n    exportJson(particleSystem, fileName)\n    print(\"finish convert:\" + exportPath)","sub_path":"client/project/minitools/particleconvert.py","file_name":"particleconvert.py","file_ext":"py","file_size_in_byte":28529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"170419719","text":"from datetime import datetime\nfrom datetime import timedelta\n\n\nclass game():\n    def __init__(self, title: str, started_ts: datetime = None,\n                 beaten_ts: datetime = None, plat: str = \"\",\n                 comment: str = \"\"):\n        self.title = title\n        # datetime.now() is evaluated per call here; using it as a default\n        # argument would freeze the timestamp at import time.\n        self.started_timestamp = started_ts if started_ts is not None else datetime.now()\n        self.beaten_timestamp = beaten_ts if beaten_ts is not None else \"\"\n        self.platform = plat\n        self.comment = comment\n        self.in_game_play_time = \"\"  # Should be a timedelta object\n\n    def set_comment(self, comment: str):\n        self.comment = comment\n\n    def set_platform(self, platform: str):\n        self.platform = platform\n\n    def get_fields(self):\n        return [self.title, self.started_timestamp, self.beaten_timestamp,\n                self.in_game_play_time, self.platform, self.comment]\n\n    def __str__(self):\n        return (\n            str(self.title) + \"; \" +\n            str(self.started_timestamp.date()) + \"; \" +\n            str(self.beaten_timestamp) + \"; \" +\n            str(self.in_game_play_time) + \"; \" +\n            str(self.platform) + \"; \" +\n            str(self.comment) + \"; \" +\n            \"\\n\"\n        )\n\n\nif __name__ == \"__main__\":\n    print(timedelta(hours=157, minutes=14))\n    g = game(title=\"Boku no piccolo\", started_ts=datetime.now(),\n             plat=\"NintendomegaDS\")\n    print(g)\n","sub_path":"application/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"122471145","text":"import plotly.express as px\nimport networkx as nx\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\n\ndef degree_hist(G: nx.Graph, title: str) -> None:\n    G_nodes = pd.DataFrame(\n        [dict(node=node, degree=deg) for node, deg in G.degree()]\n    )\n    fig = px.histogram(\n        G_nodes,\n        x=\"degree\",\n        marginal=\"box\",\n        histnorm='probability density'\n    )\n    fig.update_layout(title=title)\n    fig.show()\n\n\ndef clust_hist(G: nx.Graph, title: str) -> None:\n    G_cc = pd.DataFrame(\n        [dict(node=node, clust_coef=cc) for node, cc in nx.algorithms.clustering(G).items()]\n    )\n    fig = px.histogram(\n        G_cc,\n        x=\"clust_coef\",\n        marginal=\"box\",\n        histnorm='probability density'\n    )\n    fig.update_layout(title=title)\n    fig.show()\n\n\ndef path_hist(G: nx.Graph, title, use_matplotlib=False):\n    shortest_paths = nx.shortest_path_length(G)\n    path_lengths = []\n    for l in tqdm(shortest_paths):\n        path_lengths.extend(l[1].values())\n\n    if not use_matplotlib:\n        G_sp = pd.DataFrame(\n            [dict(length=length) for length in path_lengths]\n        )\n        fig = px.histogram(\n            G_sp,\n            x=\"length\",\n            marginal=\"box\",\n            histnorm='probability density'\n        )\n        fig.update_layout(title=title)\n        fig.show()\n    else:\n        plt.hist(path_lengths)\n        plt.title(title)\n        plt.show()","sub_path":"problem_sheet2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
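The utils.py helpers above take any networkx graph; a short driver shows the intended use (the graph choice here is arbitrary, and the flag name matches the renamed matplotlib fallback parameter):

```python
import networkx as nx

# Arbitrary demo graph; any nx.Graph works with the helpers above.
G = nx.erdos_renyi_graph(200, 0.05, seed=42)

degree_hist(G, "Degree distribution (ER, n=200, p=0.05)")
clust_hist(G, "Clustering coefficients")
path_hist(G, "Shortest path lengths", use_matplotlib=True)
```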
{"seq_id":"168681281","text":"import configparser\nconfigServ = configparser.ConfigParser()\nconfigServ['ServerConfig'] = {'ServerAddress': '10.2.12.52',\n                              'ServerPort': '8081',\n                              'ReceivingAddress': '10.2.12.52',\n                              'ReceivingPort': '10000',\n                              'MulticastAddress': '239.255.42.99',\n                              'MulticastPort': '9000',\n                              'MulticastTTL': '1'\n                              }\nconfigServ['Messages'] = {'WaitMsg': 'CZEKAJ',\n                          'AbsenceMsg': 'NIKOGO NIE MA',\n                          'WelcomeMsg': 'ZAPRASZAMY'\n                          }\nwith open('config.ini', 'w') as configfile:\n    configServ.write(configfile)\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
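config.py only writes the file; reading it back is symmetric. A minimal sketch (section and option names as generated above; configparser stores option names lowercased but lookups are case-insensitive):

```python
import configparser

cfg = configparser.ConfigParser()
cfg.read('config.ini')

address = cfg['ServerConfig']['ServerAddress']    # '10.2.12.52'
port = cfg.getint('ServerConfig', 'ServerPort')   # 8081, converted to int
wait_msg = cfg['Messages']['WaitMsg']             # 'CZEKAJ'
```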
{"seq_id":"361756936","text":"from config.fun_api import *\nfrom config.config import *\nfrom plugins.ele_hbao.hongbao_cx import cx_hongbao\n\n\nclass my_thread(threading.Thread):\n    def __init__(self, bianhao, group_sn, alink, wxmsg):\n        threading.Thread.__init__(self)\n        self.th_id = bianhao\n        # self.th_name = th_name\n        self.group_sn = group_sn\n        self.alink = alink\n        self.wxmsg = wxmsg\n    def run(self):\n        jk_hongbao(self.group_sn, self.th_id, self.alink, self.wxmsg)\n\ndef wx_index(group_sn, bianhao, alink, wxmsg):\n    th = my_thread(bianhao, group_sn, alink, wxmsg)  # id, name\n    th.start()\n\n
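wx_index spins up one monitor thread per red packet. The subclass-with-run pattern above works; the equivalent and slightly more idiomatic form passes a target callable, sketched here for comparison (the `_alt` name is hypothetical):

```python
import threading

# Equivalent to the my_thread subclass above: pass jk_hongbao as the target.
# daemon=True keeps a stuck monitor from blocking interpreter shutdown.
def wx_index_alt(group_sn, bianhao, alink, wxmsg):
    th = threading.Thread(
        target=jk_hongbao,
        args=(group_sn, bianhao, alink, wxmsg),
        daemon=True,
    )
    th.start()
```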
# Red packet monitor: polls with the chosen account and exits once the lucky draw is hit or has already been claimed\n\ndef jk_hongbao(group_sn, bianhao, alink, wxmsg):\n    try:\n        hb_time = DEFAULT_HB_TIME\n        hongbaoMax = int(re.findall('第(.*?)个', wxmsg.text)[0])\n        k = True\n        if hongbaoMax != None:\n            write_log(1, '{} - 【红包{}】的最佳手气红包为第{}个'.format(wxmsg.sender, bianhao, hongbaoMax))\n            x = -1  # gates monitor log output: only print once someone has actually opened the packet\n            num = 1\n            z = True\n            values = get_eleid()\n            if values == '暂无可用账号':\n                fid = wxmsg.search('vip_大号')[0]\n                fid.send('当前已无饿了么可用账号,请赶紧添加')\n                write_log(1, '当前已无饿了么可用账号,请赶紧添加')\n            else:\n                phone, link, sign, sid, sms_url = values[1], values[2], values[3], values[4], values[5]\n                # poll in an endless loop; exit when the lucky packet is drawn, already taken, or the server throttles access (that case is retried 5 times)\n                begin_time = int(time.time())  # timestamp taken when monitoring starts\n                wx_beizhu = re.findall(':(.*?)>', str(wxmsg.sender))[0].strip()\n                SQL().add_ele_hb(bianhao, group_sn, hongbaoMax, alink, False, wx_beizhu)\n                strs = SQL().select_var_info(\"ELE_KL\")\n                if strs:\n                    wxmsg.reply('【红包{}】福利天天领,复制这条信息{},到[手机淘宝]立刻领红包'.format(bianhao, strs))\n                while True:\n                    result = cx_hongbao(phone, link, sign, sid, group_sn)\n                    if result['status'] == 0:\n                        if result['value']['promotion_records'] != None:\n                            hongbao = len(result['value']['promotion_records'])\n                            if hongbao < hongbaoMax - 1:\n                                if x == -1:\n                                    wxmsg.reply('【红包{}】监控中,该红包最佳手气为第{}个,当前已有{}人领取,请留意微信消息(注:红包监控周期为3小时,请记得将红包分享至人多的群聊中哦)'.format(bianhao, hongbaoMax, hongbao))\n                                if hongbao > x:\n                                    write_log(1, '{} - 【红包{}】使用了[{}]账号进行监控'.format(wxmsg.sender, bianhao, phone))\n                                    write_log(1, '{} - 【红包{}】监控中,当前已有{}人领取'.format(wxmsg.sender, bianhao, hongbao))\n                                    SQL().up_ele_hb_time(hongbao, group_sn, phone)\n                                    x = hongbao  # remember the latest claimed count\n                                num += 1\n                                t_run_time = int(time.time()) - begin_time\n                                if t_run_time // 60 >= 180:\n                                    SQL().up_ele_over_hb(group_sn)\n                                    write_log(1, '{} - 【红包{}】监控已达3小时,系统将自动关闭监控'.format(wxmsg.sender, bianhao))\n                                    k = False\n                                    break\n                                if hongbao <= hongbaoMax - 3:\n                                    time.sleep(17)\n                                    # write_log(1, '{} - 【红包{}】等待{}秒'.format(wxmsg.sender, bianhao, default_cxtime + 10))\n                            elif hongbao == hongbaoMax - 1:\n                                if z:\n                                    write_log(1, '{} - 【红包{}】监控中,当前已有{}人领取'.format(wxmsg.sender, bianhao, hongbao))\n                                    # msg = '【红包{}】下一个就是最佳手气红包,快去点开领取吧'.format(bianhao)\n                                    msg = '【红包{}】下一个就是最佳手气红包,请翻阅消息点击源红包领取'.format(bianhao)\n                                    wxmsg.reply(msg)\n                                    # wxmsg.reply(alink)\n                                    write_log(1, '{} - 【红包{}】下一个就是最佳手气红包,快去点开领取吧,{}'.format(wxmsg.sender, bianhao, alink))\n                                    z = False\n                                t_run_time = int(time.time()) - begin_time\n                                if t_run_time // 60 >= 180:\n                                    SQL().up_ele_over_hb(group_sn)\n                                    write_log(1, '{} - 【红包{}】监控已达3小时还未被领取,当前已领取{}个,系统将自动关闭监控'.format(wxmsg.sender, bianhao, hongbao))\n                                    # wxmsg.reply('【红包{}】监控已达3小时,系统将自动关闭监控'.format(bianhao))\n                                    k = False\n                                    break\n                                # break\n                            elif hongbao > hongbaoMax - 1:\n                                is_lucky = result['value']['promotion_records'][hongbaoMax - 1]['is_lucky']  # minus one because the list is zero-indexed\n                                if num == 1 and is_lucky and z:\n                                    write_log(1, '{} - 【红包{}】的最佳手气已经被领走了,请换个红包吧'.format(wxmsg.sender, bianhao))\n                                    wxmsg.reply('【红包{}】的最佳手气已经被领走了,请换个红包吧'.format(bianhao))\n                                    break\n                                if num == 1 and is_lucky == False and z:\n                                    write_log(1, '{} - 【红包{}】已领取{}个,但最佳手气还未产生,快去领取试试吧'.format(wxmsg.sender, bianhao, hongbao))\n                                    wxmsg.reply('【红包{}】已领取{}个,但最佳手气还未产生,快去领取试试吧'.format(bianhao, hongbao))\n                                    wxmsg.reply(alink)\n                                    break\n                                if num > 1 and z == False:\n                                    if is_lucky:\n                                        lucky_name = result['value']['promotion_records'][hongbaoMax - 1]['sns_username']\n                                        lucky_amount = result['value']['promotion_records'][hongbaoMax - 1]['amount']\n                                        lucky_msg = '【红包{}】被[{}]抢走啦,金额为{}元'.format(bianhao, lucky_name, lucky_amount)\n                                        wxmsg.reply(lucky_msg)\n                                        SQL().add_ele_hb_record(bianhao, lucky_name, lucky_amount, \"wx\")\n                                        reg = \"[^0-9A-Za-z\\\\u4e00-\\\\u9fa5]\"\n                                        lucky_msg_info = '【红包{}】被[{}]抢走啦,金额为{}元'.format(bianhao, re.sub(reg, '', lucky_name), lucky_amount)\n                                        write_log(1, lucky_msg_info)\n                                        break\n                                    else:\n                                        promotion_records = result['value']['promotion_records']\n                                        for p in promotion_records:\n                                            is_lucky = p['is_lucky']  # scan every record for the lucky one\n                                            if is_lucky:\n                                                lucky_name = p['sns_username']\n                                                lucky_amount = p['amount']\n                                                lucky_msg = '【红包{}】被[{}]抢走啦,金额为{}元'.format(bianhao, lucky_name, lucky_amount)\n                                                wxmsg.reply(lucky_msg)\n                                                SQL().add_ele_hb_record(bianhao, lucky_name, lucky_amount, \"wx\")\n                                                reg = \"[^0-9A-Za-z\\\\u4e00-\\\\u9fa5]\"\n                                                lucky_msg_info = '【红包{}】被[{}]抢走啦,金额为{}元'.format(bianhao, re.sub(reg, '', lucky_name), lucky_amount)\n                                                write_log(1, lucky_msg_info)\n                                                break\n                                t_run_time = int(time.time()) - begin_time\n                                if t_run_time // 60 >= 180:\n                                    SQL().up_ele_over_hb(group_sn)\n                                    write_log(1, '{} - 【红包{}】监控已达3小时还未被领取,当前已领取{}个,系统将自动关闭监控'.format(wxmsg.sender, bianhao, hongbao))\n                                    # wxmsg.reply('【红包{}】监控已达3小时,系统将自动关闭监控'.format(bianhao))\n                                    k = False\n                                    break\n                            time.sleep(hb_time)\n                        else:\n                            write_log(1, '{} - 【红包{}】[{}]查询该红包数量为空了,{}'.format(wxmsg.sender, bianhao, phone, alink))\n                            break\n                    elif result['status'] == 1:\n                        # if num == 1:\n                        #     wxmsg.reply('系统正在调度账号中,请稍等')\n                        write_log(1, '{} - 【红包{}】{}身份信息过期,需重新验证'.format(wxmsg.sender, bianhao, phone))\n                        SQL().up_ele_id_info(\"未登录\", phone)\n                        values = get_eleid()\n                        if values:\n                            phone, link, sign, sid, sms_url = values[1], values[2], values[3], values[4], values[5]\n                            write_log(1, '{} - 【红包{}】身份信息失效,现在更换手机号为{}监控'.format(wxmsg.sender, bianhao, phone))\n                        else:\n                            fid = wxmsg.search('vip_大号')[0]\n                            fid.send('当前已无饿了么可用账号,请赶紧添加')\n                            write_log(1, '{} - 【红包{}】当前已无饿了么可用账号,请赶紧添加'.format(wxmsg.sender, bianhao))\n                            break\n                    elif result['status'] == 2:\n                        write_log(3, '{} - 未知错误,{}'.format(wxmsg.sender, result['value']))\n                        if result['value']['message'] == '领取失败,请刷新再试。':\n                            values = get_eleid()\n                            if values:\n                                phone, link, sign, sid, sms_url = values[1], values[2], values[3], values[4], values[5]\n                                write_log(1, '{} - 【红包{}】领取失败,请刷新再试。现在更换手机号为{}监控'.format(wxmsg.sender, bianhao, phone))\n                            else:\n                                write_log(1, '{} - 【红包{}】当前已无饿了么可用账号,请赶紧添加'.format(wxmsg.sender, bianhao))\n                                break\n                        hb_time += 1\n                        time.sleep(hb_time)\n                        if hb_time > 15:\n                            wxmsg.reply('【红包{}】抱歉,系统出现异常,请重新分享试试'.format(bianhao))\n                            break\n                    elif result['status'] == -1:\n                        write_log(3, '{} - {}'.format(wxmsg.sender, result['value']))\n                        hb_time += 1\n                        time.sleep(hb_time)\n                        if hb_time > 15:\n                            wxmsg.reply('【红包{}】抱歉,系统出现异常,请重新分享试试'.format(bianhao))\n                            break\n                run_time = int(time.time()) - begin_time\n                write_log(1, '{} - 
【红包{}】监控完毕~用时{}分{}秒,共查询了{}次'.format(wxmsg.sender, bianhao, run_time//60, run_time%60, num))\n if k:\n SQL().del_ele_group_sn(group_sn)\n # wxmsg.reply('【红包{}】退出监控,用时{}分{}秒'.format(bianhao, run_time//60, run_time%60))\n else:\n write_log(1, '【红包{}】识别出错,已退出监控'.format(bianhao))\n wxmsg.reply('【红包{}】识别出错,已退出监控'.format(bianhao))\n except:\n write_log(1, '【红包{}】Error : {}'.format(bianhao, traceback.format_exc()))\n\ndef get_eleid():\n ele_ids = SQL().select_ele_id_info()\n if ele_ids:\n tup_id = ele_ids[0]\n t = time.time()\n SQL().up_ele_id_time_stamp(int(round(t * 1000)), tup_id[0])\n return tup_id\n else:\n return '暂无可用账号'","sub_path":"wxBot/plugins/ele_hbao/hongbao_jk.py","file_name":"hongbao_jk.py","file_ext":"py","file_size_in_byte":13834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"123807222","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom tests import base\nimport tempfile\nimport time\nimport os\nimport cherrypy\nimport json\nfrom httpserver import Server\n# oh, boy; you'd think we've learned from #include...\n# from plugins.wt_data_manager.server.constants import PluginSettings\n\n\nMB = 1024 * 1024\n\n\ndef setUpModule():\n base.enabledPlugins.append('wt_data_manager')\n base.startServer()\n\n\ndef tearDownModule():\n base.stopServer()\n\n\nclass IntegrationTestCase(base.TestCase):\n def setUp(self):\n base.TestCase.setUp(self)\n\n self.user = self.model('user').createUser('wt-dm-test-user', 'password', 'Joe', 'User',\n 'juser@example.com')\n self.testCollection = \\\n self.model('collection').createCollection('wt_dm_test_col', creator=self.user,\n public=False, reuseExisting=True)\n self.testFolder = \\\n self.model('folder').createFolder(self.testCollection, 'wt_dm_test_fldr',\n parentType='collection')\n\n self.tmpdir = tempfile.mkdtemp()\n self.files = [self.createFile(n, 1 * MB, self.tmpdir) for n in range(1, 5)]\n self.assetstore = list(self.model('assetstore').find({}))[0]\n self.model('assetstore').importData(self.assetstore, self.testFolder, 'folder',\n {'importPath': self.tmpdir}, {}, self.user,\n leafFoldersAsItems=False)\n self.gfiles = [self.model('item').findOne({'name': file}) for file in self.files]\n\n self.apiroot = cherrypy.tree.apps['/api'].root.v1\n\n self.transferredFiles = set()\n\n def createHttpFile(self):\n params = {\n 'parentType': 'folder',\n 'parentId': self.testFolder['_id'],\n 'name': 'httpitem1',\n 'linkUrl': self.testServer.getUrl() + '/1M',\n 'size': MB\n }\n resp = self.request(path='/file', method='POST', user=self.user, params=params)\n self.assertStatusOk(resp)\n self.httpItem = self.model('item').load(resp.json['itemId'], user=self.user)\n\n def createFile(self, suffix, size, dir):\n name = 'file' + str(suffix)\n path = dir + '/' + name\n f = open(path, 'w')\n s = ''.join([chr(x) for x in range(256)])\n for i in range(size // 256):\n f.write(s)\n f.close()\n return name\n\n def tearDown(self):\n base.TestCase.tearDown(self)\n\n def makeDataSet(self, items, objectids=True):\n if objectids:\n return [{'itemId': f['_id'], 'mountPoint': '/' + f['name']} for f in items]\n else:\n return [{'itemId': str(f['_id']), 'mountPoint': '/' + f['name']} for f in items]\n\n def test01LocalFile(self):\n dataSet = self.makeDataSet(self.gfiles)\n self._testItem(dataSet, self.gfiles[0], True)\n\n def test02HttpFile(self):\n self.testServer = Server()\n self.testServer.start()\n self.createHttpFile()\n dataSet = self.makeDataSet([self.httpItem])\n self._testItem(dataSet, self.httpItem)\n 
self.testServer.stop()\n\n def test03Caching(self):\n dataSet = self.makeDataSet(self.gfiles)\n self._testItem(dataSet, self.gfiles[0])\n self._testItem(dataSet, self.gfiles[0])\n item = self.reloadItem(self.gfiles[0])\n self.assertEqual(item['dm']['downloadCount'], 1)\n self._testItem(dataSet, self.gfiles[1])\n\n def test04SessionApi(self):\n dataSet = self.makeDataSet(self.gfiles)\n self._testSessionApi(dataSet, self.gfiles[0])\n\n def test05SessionDeleteById(self):\n dataSet = self.makeDataSet(self.gfiles)\n session = self.apiroot.dm.createSession(self.user, dataSet)\n self.apiroot.dm.deleteSession(self.user, sessionId=session['_id'])\n\n def _testSessionApi(self, dataSet, item):\n session = self.apiroot.dm.createSession(self.user, dataSet)\n sessions = list(self.model('session', 'wt_data_manager').list(self.user))\n self.assertEqual(len(sessions), 1)\n self._testItemWithSession(session, item)\n self.apiroot.dm.deleteSession(self.user, session=session)\n\n def _testItem(self, dataSet, item, download=False):\n session = self.model('session', 'wt_data_manager').createSession(self.user, dataSet=dataSet)\n self._testItemWithSession(session, item, download=download)\n self.model('session', 'wt_data_manager').deleteSession(self.user, session)\n\n def _testItemWithSession(self, session, item, download=False):\n self.assertNotEqual(session, None)\n lock = self.model('lock', 'wt_data_manager').acquireLock(self.user, session['_id'],\n item['_id'])\n\n locks = list(self.model('lock', 'wt_data_manager').listLocks(self.user, session['_id']))\n self.assertEqual(len(locks), 1)\n\n self.assertNotEqual(lock, None)\n\n item = self.reloadItem(item)\n self.assertHasKeys(item, ['dm'])\n\n psPath = self.waitForFile(item)\n self.transferredFiles.add(psPath)\n\n transfers = self.model('transfer', 'wt_data_manager').list(self.user, discardOld=False)\n transfers = list(transfers)\n self.assertEqual(len(transfers), len(self.transferredFiles))\n\n if download:\n self._downloadFile(lock, item)\n\n self.assertTrue(os.path.isfile(psPath))\n self.assertEqual(os.path.getsize(psPath), item['size'])\n\n self.model('lock', 'wt_data_manager').releaseLock(self.user, lock)\n\n item = self.reloadItem(item)\n self.assertEqual(item['dm']['lockCount'], 0)\n\n def _downloadFile(self, lock, item):\n stream = self.model('lock', 'wt_data_manager').downloadItem(lock)\n sz = 0\n for chunk in stream():\n sz += len(chunk)\n self.assertEqual(sz, item['size'])\n\n def reloadItem(self, item):\n return self.model('item').load(item['_id'], user=self.user)\n\n def waitForFile(self, item, rest=False, sessionId=None):\n max_iters = 300\n while max_iters > 0:\n if 'cached' in item['dm'] and item['dm']['cached']:\n self.assertHasKeys(item['dm'], ['psPath'])\n psPath = item['dm']['psPath']\n self.assertIsNotNone(psPath)\n return psPath\n time.sleep(0.1)\n max_iters -= 1\n if rest:\n item = self.reloadItemRest(sessionId, item)\n else:\n item = self.reloadItem(item)\n self.assertTrue(False, 'No file found after about 30s')\n\n def test06resources(self):\n dataSet = self.makeDataSet(self.gfiles, objectids=False)\n\n resp = self.request('/dm/session', method='POST', user=self.user, params={\n 'dataSet': json.dumps(dataSet)\n })\n self.assertStatusOk(resp)\n sessionId = resp.json['_id']\n\n # list sessions\n resp = self.request('/dm/session', method='GET', user=self.user)\n self.assertStatusOk(resp)\n\n # get session\n resp = self.request('/dm/session/%s' % sessionId, method='GET', user=self.user, params={\n 'loadObjects': 'true'\n })\n 
self.assertStatusOk(resp)\n self.assertEqual(sessionId, str(resp.json['_id']))\n\n\n item = self.gfiles[0]\n\n # This coverage business, as implemented, is wrong really. Both branches of\n # a condition should be tested, including a failing condition with no else block.\n resp = self.request('/dm/lock', method='POST', user=self.user, params={\n 'sessionId': sessionId,\n 'itemId': str(item['_id']),\n 'ownerId': str(self.user['_id'])\n })\n self.assertStatusOk(resp)\n lockId = resp.json['_id']\n\n resp = self.request('/dm/lock', method='GET', user=self.user, params={\n 'sessionId': sessionId\n })\n self.assertStatusOk(resp)\n locks = resp.json\n self.assertEqual(len(locks), 1)\n\n # test list locks with params\n resp = self.request('/dm/lock', method='GET', user=self.user, params={\n 'sessionId': sessionId,\n 'itemId': str(item['_id']),\n 'ownerId': str(self.user['_id'])\n })\n self.assertStatusOk(resp)\n\n # test list locks for session\n resp = self.request('/dm/session/%s/lock' % sessionId, method='GET', user=self.user)\n self.assertStatusOk(resp)\n\n # test get lock\n resp = self.request('/dm/lock/%s' % lockId, method='GET', user=self.user)\n self.assertStatusOk(resp)\n self.assertEqual(lockId, str(resp.json['_id']))\n\n item = self.reloadItemRest(sessionId, item)\n\n self.assertHasKeys(item, ['dm'])\n\n psPath = self.waitForFile(item, rest=True, sessionId=sessionId)\n shouldHaveBeenTransferred = psPath in self.transferredFiles\n self.transferredFiles.add(psPath)\n\n resp = self.request('/dm/transfer', method='GET', user=self.user, params={\n 'sessionId': sessionId,\n 'discardOld': 'false'\n })\n self.assertStatusOk(resp)\n transfers = resp.json\n self.assertEqual(len(transfers), len(self.transferredFiles))\n\n # test list transfers for session\n resp = self.request('/dm/session/%s/transfer' % sessionId, method='GET', user=self.user)\n self.assertStatusOk(resp)\n transfers = resp.json\n if shouldHaveBeenTransferred:\n self.assertEqual(len(transfers), 1)\n else:\n self.assertEqual(len(transfers), 0)\n\n self.assertTrue(os.path.isfile(psPath))\n self.assertEqual(os.path.getsize(psPath), item['size'])\n\n resp = self.request('/dm/lock/%s/download' % lockId, method='GET', user=self.user,\n isJson=False)\n self.assertStatusOk(resp)\n body = self.getBody(resp, text=False)\n self.assertEqual(len(body), item['size'])\n\n resp = self.request('/dm/lock/%s' % lockId, method='DELETE', user=self.user)\n self.assertStatusOk(resp)\n\n item = self.reloadItemRest(sessionId, item)\n self.assertEqual(item['dm']['lockCount'], 0)\n\n resp = self.request('/dm/session/%s' % sessionId, method='DELETE', user=self.user)\n self.assertStatusOk(resp)\n\n def reloadItemRest(self, sessionId, item):\n resp = self.request('/dm/session/%s/item/%s' % (sessionId, item['_id']), method='GET',\n user=self.user)\n self.assertStatusOk(resp)\n return resp.json\n\n def test07FileGC(self):\n gc = self.apiroot.dm.getFileGC()\n gc.pause()\n\n dataSet = self.makeDataSet(self.gfiles)\n self._testItem(dataSet, self.gfiles[0])\n self._testItem(dataSet, self.gfiles[1])\n\n cachedItems = self._getCachedItems()\n self.assertEqual(2, len(cachedItems))\n\n files = [x['dm']['psPath'] for x in cachedItems]\n\n self.model('setting').set('dm.private_storage_capacity', int(2.2 * MB))\n self.model('setting').set('dm.gc_collect_start_fraction', 0.5) # if over 1.1 MB\n self.model('setting').set('dm.gc_collect_end_fraction', 0.5) # if under 1.1 MB\n\n gc._collect()\n # should have cleaned one file\n remainingCount = 0\n for f in files:\n if 
os.path.exists(f):\n remainingCount += 1\n\n self.assertEqual(1, remainingCount)\n self.assertEqual(1, len(self._getCachedItems()))\n gc.resume()\n\n\n def _getCachedItems(self):\n return list(self.model('item').find({'dm.cached': True}, user=self.user))\n","sub_path":"plugin_tests/wt_data_manager_test.py","file_name":"wt_data_manager_test.py","file_ext":"py","file_size_in_byte":11721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"538775486","text":"import datetime as dt\nimport json\nfrom marshmallow import Schema, fields, post_load\n\nfrom cryptowatch.utils import log\nfrom cryptowatch.resources.allowance import AllowanceSchema, AllowanceResource\nfrom cryptowatch.resources.markets import MarketSchema, MarketResource\n\n\nclass Assets:\n def __init__(self, http_client):\n self.client = http_client\n\n def get(self, asset):\n log(\"Getting asset {}\".format(asset))\n data, http_resp = self.client.get_resource(\"/assets/{}\".format(asset))\n asset_resp = json.loads(data)\n schema = AssetAPIResponseSchema()\n asset_obj = schema.load(asset_resp)\n if asset_obj._allowance:\n log(\n \"API Allowance: cost={} remaining={}\".format(\n asset_obj._allowance.cost, asset_obj._allowance.remaining\n )\n )\n asset_obj._http_response = http_resp\n return asset_obj\n\n def list(self):\n log(\"Listing all assets\")\n data, http_resp = self.client.get_resource(\"/assets\")\n asset_resp = json.loads(data)\n schema = AssetListAPIResponseSchema()\n assets_obj = schema.load(asset_resp)\n if assets_obj._allowance:\n log(\n \"API Allowance: cost={} remaining={}\".format(\n assets_obj._allowance.cost, assets_obj._allowance.remaining\n )\n )\n assets_obj._http_response = http_resp\n return assets_obj\n\n\nclass AssetResource:\n def __init__(self, id, symbol, name, fiat, markets=None, route=None):\n self.id = id\n self.symbol = symbol\n self.name = name\n self.fiat = fiat\n if markets:\n self.markets = markets\n if route:\n self.route = route\n\n def __repr__(self):\n return \"\".format(self=self)\n\n\nclass AssetSchema(Schema):\n id = fields.Integer()\n symbol = fields.Str()\n name = fields.Str()\n fiat = fields.Boolean()\n route = fields.Url()\n markets = fields.Dict(\n keys=fields.Str(), values=fields.Nested(MarketSchema, many=True)\n )\n\n @post_load\n def make_asset(self, data, **kwargs):\n return AssetResource(**data)\n\n\nclass AssetAPIResponseSchema(Schema):\n result = fields.Nested(AssetSchema)\n allowance = fields.Nested(AllowanceSchema, partial=(\"account\",), missing=None)\n\n @post_load\n def make_asset(self, data, **kwargs):\n return AssetAPIResponse(**data)\n\n\nclass AssetListAPIResponseSchema(Schema):\n result = fields.Nested(AssetSchema, many=True)\n allowance = fields.Nested(AllowanceSchema, partial=(\"account\",), missing=None)\n\n @post_load\n def make_asset(self, data, **kwargs):\n return AssetListAPIResponse(**data)\n\n\nclass AssetAPIResponse:\n def __init__(self, result, allowance):\n self.asset = result\n self._allowance = allowance\n self._fetched_at = dt.datetime.now()\n\n def __repr__(self):\n return \"\".format(self=self)\n\n\nclass AssetListAPIResponse:\n def __init__(self, result, allowance):\n self.assets = result\n self._allowance = allowance\n self._fetched_at = dt.datetime.now()\n\n def __repr__(self):\n return 
\"\".format(self=self)\n","sub_path":"cryptowatch/resources/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"29225563","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : Sun Apr 28 03:23:10 2019\n# @Author : JRP - Ruipeng Jia\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QListView, QMessageBox, QMainWindow\nfrom PyQt5.QtCore import QStringListModel\nfrom PyQt5.QtGui import QStandardItemModel, QStandardItem\n\nclass MyWindow(QMainWindow):\n\n def __init__(self,parent=None):\n super(MyWindow, self).__init__(parent)\n self.wid = QWidget(self)\n self.setCentralWidget(self.wid)\n\n # self.list_string()\n self.list_item()\n\n self.resize(300, 270)\n self.setWindowTitle('Simple')\n self.show()\n\n def list_string(self):\n slm = QStringListModel() # Instantiate the list model and add data\n self.items = ['Item 1', 'Item 2', 'Item 3', 'Item 4']\n slm.setStringList(self.items) # Load the data list into the model for the list view\n\n list_view = QListView() # Instantiate the list view\n list_view.setModel(slm) # Set the model of the list view\n list_view.clicked.connect(self.list_clicked) # A single click triggers the custom slot\n\n layout = QVBoxLayout()\n layout.addWidget(list_view)\n self.wid.setLayout(layout)\n\n def list_item(self):\n sim = QStandardItemModel()\n self.items = ['Cookie dough', 'Hummus', 'Spaghetti', 'Dal makhani']\n self.stand_items = []\n for item in self.items:\n item = QStandardItem(item) # Create an item with a caption\n item.setCheckable(True) # Add a checkbox to it\n sim.appendRow(item) # Add the item to the model\n self.stand_items.append(item)\n\n for i in range(sim.rowCount()):\n print(sim.takeRow(i)[0].text())\n list_view = QListView()\n list_view.setModel(sim)\n list_view.clicked.connect(self.list_clicked)\n\n layout = QVBoxLayout()\n layout.addWidget(list_view)\n self.wid.setLayout(layout)\n\n def list_clicked(self, qModelIndex):\n QMessageBox.information(self, 'ListWidget', 'You selected: ' + self.items[qModelIndex.row()])\n raw_text = self.stand_items[qModelIndex.row()].text()\n self.stand_items[qModelIndex.row()].setText(raw_text + ' hello')\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n win = MyWindow()\n sys.exit(app.exec_())\n","sub_path":"bin/template/src/jptqt/l9_view.py","file_name":"l9_view.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"589353601","text":"\"\"\"\nModule providing Serializers\n\"\"\"\nimport pytz\nfrom datetime import datetime\nfrom django.contrib.auth.models import User, Group\nfrom django.db import transaction\nfrom rest_framework import serializers, status\nfrom api.models import Expense, Transfer, Currency\n\n\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n \"\"\"\n Serializer for User model objects.\n \"\"\"\n class Meta:\n model = User\n fields = ['url','username', 'email', 'groups']\n\n\nclass RegisterSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for Registration page data.\n \"\"\"\n class Meta:\n model = User\n fields = ('id', 'username', 'email', 'password')\n extra_kwargs = {'password' : {'write_only': True}}\n\n def create(self, validated_data):\n user = User.objects.create_user(\n validated_data['username'],\n validated_data['email'],\n validated_data['password']\n )\n return user\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for Group model objects.\n \"\"\"\n class Meta:\n model = Group\n fields 
= ['url', 'name']\n\n\n\nclass ExpenseSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for Expense model objects.\n \"\"\"\n class Meta:\n model = Expense\n fields = [\n 'id',\n 'currency',\n 'total_amount',\n 'to_settle',\n 'settled',\n 'vat',\n 'is_settled',\n 'owner'\n ]\n read_only_fields = ['id', 'to_settle', 'settled', 'is_settled']\n\n def create(self, validated_data):\n validated_data['to_settle'] = validated_data['total_amount']\n return Expense.objects.create(**validated_data)\n\nclass TransferSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for Transfer model objects.\n \"\"\"\n class Meta:\n model = Transfer\n fields = [\n 'id',\n 'netto',\n 'vat',\n 'brutto',\n 'currency',\n 'expense',\n 'is_vat',\n 'sent_date',\n 'is_settled',\n 'owner'\n ]\n read_only_fields = ['id', 'is_settled', 'brutto', 'sent_date','owner']\n\n def create(self, validated_data):\n \"\"\"\n Overrides the create method for:\n computing \"brutto\",\n checking that the provided outlay is valid.\n \"\"\"\n validated_data['brutto'] = (\n validated_data['netto'] + validated_data['vat']\n )\n validated_data['sent_date'] = datetime.now(pytz.utc)\n self.check_expense(validated_data)\n return Transfer.objects.create(**validated_data)\n\n def check_expense(self, data):\n \"\"\"\n Runs the checks verifying that the expense matches the\n transfer.\n Raises an error if there is no expense for the transfer.\n \"\"\"\n expense = Expense.objects.get(id = data['expense'].id)\n if expense:\n self.check_if_user_is_owner(expense.owner_id, data['owner'].id)\n self.check_if_expense_vat(expense.vat, data['is_vat'])\n self.check_if_is_settled(expense.is_settled)\n self.check_if_same_currency(expense.currency, data['currency'])\n else:\n raise serializers.ValidationError(\n \"There is no expense for this transfer.\"\n )\n\n @staticmethod\n def check_if_user_is_owner(expense_owner, transfer_owner):\n \"\"\"\n Checks if the user making the transfer is its owner,\n else raises a custom error message\n \"\"\"\n if expense_owner != transfer_owner:\n raise serializers.ValidationError(\"User is not owner of expense\")\n\n @staticmethod\n def check_if_is_settled(expense_is_settled):\n \"\"\"\n Checks that the expense is not already settled,\n else raises a custom error with 409 status.\n \"\"\"\n if expense_is_settled:\n res = serializers.ValidationError(\"This expense is settled.\")\n res.status_code = status.HTTP_409_CONFLICT\n raise res\n\n @staticmethod\n def check_if_expense_vat(expense_is_vat, transfer_is_vat):\n \"\"\"\n Checks if the transfer's field \"is_vat\" is set to \"True\" while\n the selected expense has \"is_vat\" set to \"False\",\n then raises a custom error message with status 409.\n \"\"\"\n if transfer_is_vat and not expense_is_vat:\n res = serializers.ValidationError(\n \"Vat transfer can't be done on this non-vat expense\"\n )\n res.status_code = status.HTTP_409_CONFLICT\n raise res\n\n @staticmethod\n def check_if_same_currency(expense_currency, transfer_currency):\n \"\"\"\n Checks if the transfer and the selected expense are in the same currency,\n else raises a custom error message with status 409.\n \"\"\"\n if expense_currency != transfer_currency:\n res = serializers.ValidationError(\n \"Expense is in a different currency than the transfer.\"\n )\n res.status_code = status.HTTP_409_CONFLICT\n raise res\n\nclass SettleTransferSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for Transfer model objects.\n Used for admin settle transfer functionality.\n \"\"\"\n class Meta:\n model = Transfer\n fields = [\n 'id',\n 'netto',\n 
'vat',\n 'brutto',\n 'currency',\n 'expense',\n 'is_vat',\n 'sent_date',\n 'is_settled'\n ]\n read_only_fields = [\n 'id',\n 'netto',\n 'vat',\n 'brutto',\n 'currency',\n 'expense',\n 'is_vat',\n 'sent_date'\n ]\n\n def update(self, instance, validated_data):\n \"\"\"\n Overrides the update method.\n If the transfer's 'is_settled' is true:\n updates the expense's 'settled' value with 'brutto' from the transfer.\n Updates the expense and transfer objects.\n If false, only the transfer object is updated.\n \"\"\"\n if validated_data['is_settled']:\n expense = Expense.objects.get(id = instance.expense.id)\n expense.settled += instance.brutto\n with transaction.atomic():\n expense.save()\n return (super(SettleTransferSerializer,self)\n .update(instance, validated_data))\n\n return (super(SettleTransferSerializer,self)\n .update(instance, validated_data))\n\n\nclass CurrencySerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for Currency model objects.\n \"\"\"\n class Meta:\n model = Currency\n fields = '__all__'\n","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":6684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"591764573","text":"from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nimport pandas as pd\nimport numpy as np\n\n# =================================================\n# read the data from csv file\n\ntweets_data_path = '../processed_tweets/twitter_airline/Tweets.csv'\ntweets = pd.read_csv(tweets_data_path, header=0)\ndf = tweets.copy()[['airline_sentiment', 'text']]\n\nnum_dataset = len(df) # number of total tweets\nnum_train_dataset = num_dataset * 0.8 # use 80% of dataset as training set\nnum_test_dataset = num_dataset * 0.2 # use 20% of dataset as testing set\n\ndf_pos = df.copy()[df.airline_sentiment == 'positive']\nnum_pos = len(df_pos)\ntrain_pos = int(num_pos*0.8)\npos_train = df_pos[:train_pos]\npos_test = df_pos[train_pos:]\n\n# pos_train['first_index'] = pd.Series(1, index=pos_train.index)\n# pos_train.loc[pos_train.index, 'first_index'] = 1\n# pos_train.loc[:, 'first_index'] = pd.Series(1, index=pos_train.index) # still get some warnings\n\n\n# adding new label to divide original analysis into two steps\npos_train = pos_train.assign(first_index=pd.Series('polar', index=pos_train.index))\npos_test = pos_test.assign(first_index=pd.Series('polar', index=pos_test.index))\n\n\ndf_neg = df.copy()[df.airline_sentiment == 'negative']\nnum_neg = len(df_neg)\ntrain_neg = int(num_neg*0.8)\nneg_train = df_neg[:train_neg]\nneg_test = df_neg[train_neg:]\n# adding new label to divide original analysis into two steps\nneg_train = neg_train.assign(first_index=pd.Series('polar', index=neg_train.index))\nneg_test = neg_test.assign(first_index=pd.Series('polar', index=neg_test.index))\n\ndf_neu = df.copy()[df.airline_sentiment == 'neutral']\nnum_neu = len(df_neu)\ntrain_neu = int(num_neu * 0.8)\nneu_train = df_neu[:train_neu]\nneu_test = df_neu[train_neu:]\n# adding new label to divide original analysis into two steps\nneu_train = neu_train.assign(first_index=pd.Series('neutral', index=neu_train.index))\nneu_test = neu_test.assign(first_index=pd.Series('neutral', index=neu_test.index))\n\ndf_train = pd.concat([pos_train, neg_train, neu_train], ignore_index=True)\ndf_test = pd.concat([pos_test, neg_test, neu_test], ignore_index=True).reset_index(drop=True)\n\n\n# ==================================\n# implementing 
Multinomial NB\n# start first step\n\nvectorizer_first = CountVectorizer(ngram_range=(1, 2), token_pattern=r'\\b\\w+\\b', min_df=1, stop_words='english')\n\nvectorised_df_first = vectorizer_first.fit_transform(df_train.text)\ntransformer_first = TfidfTransformer(smooth_idf=False)\ntfidf_df_first = transformer_first.fit_transform(vectorised_df_first)\n\n\n# #\n\n# bnb = BernoulliNB()\nmnb = MultinomialNB()\ntrained_model = mnb.fit(tfidf_df_first, df_train.first_index)\n\n# ===================================\n# second step\n\ndf_train_second = df_train.copy()[df_train.first_index == 'polar'] # retrieve dataset containing 'polar'\nvectorizer_second = CountVectorizer(ngram_range=(1, 3), token_pattern=r'\\b\\w+\\b', min_df=1, stop_words='english')\n\nvectorised_df_second = vectorizer_second.fit_transform(df_train_second.text)\ntransformer_second = TfidfTransformer(smooth_idf=False)\ntfidf_df_second = transformer_second.fit_transform(vectorised_df_second)\n\nmnb_second = MultinomialNB()\ntrained_model_second = mnb_second.fit(tfidf_df_second, df_train_second.airline_sentiment)\n\n\n# ===================================\n# evaluation\n\nvectorised_df_test = vectorizer_first.transform(df_test.text)\ntfidf_df_test = transformer_first.transform(vectorised_df_test)\npredicted = trained_model.predict(tfidf_df_test)\n\npredicted = predicted.tolist() # change np array to just list\n# print(df_test['text'].values[0]) # access the dataframe text\nfor i in range(0, len(predicted)):\n if predicted[i] == \"polar\":\n vectorised_df_test_second = vectorizer_second.transform([df_test['text'].values[i]])\n tfidf_df_test_second = transformer_second.transform(vectorised_df_test_second)\n predicted_second = trained_model_second.predict(tfidf_df_test_second)\n predicted_second = predicted_second.tolist()\n predicted[i] = predicted_second[0]\n\n\n\n# # #\n\n# predicted_second = trained_model_second.predict(tfidf_df_test_second)\n#\n\n\n# neutral - polar accuracy check\n# for i in range(0, len(df_test.airline_sentiment)):\n# if df_test.airline_sentiment[i] != 'neutral':\n# df_test.airline_sentiment[i] = 'polar'\n# print(predicted)\n# print(df_test.airline_sentiment)\n# print(np.mean(predicted == df_test.airline_sentiment))\n\n\n\n\n\n# predicted = predicted.tolist() # change numpy array to just list\n# predicted_second = predicted_second.tolist()\n# for i in range(0, len(predicted)):\n# if predicted[i] == \"polar\":\n# predicted[i] = predicted_second[i]\n# #\n# #\n# #\nprint(predicted)\nprint(df_test.airline_sentiment)\nprint(np.mean(predicted == df_test.airline_sentiment))\n\n\n\n","sub_path":"analysis/algorithm_nb/sklearn_nb_two_steps_airline.py","file_name":"sklearn_nb_two_steps_airline.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"521063191","text":"# SPDX-License-Identifier: MIT\n\nimport os\nimport sys\n\nfrom typing import Set\n\nimport packaging.requirements\nimport resolvelib\n\nimport resolver\n\n\ndef task() -> None:\n package_resolver = resolvelib.Resolver(\n resolver.Provider('/tmp/resolver-cache' if os.name == 'posix' else None),\n resolvelib.BaseReporter(),\n )\n result = package_resolver.resolve(\n packaging.requirements.Requirement(arg)\n for arg in sys.argv[1:]\n )\n\n seen: Set[str] = set()\n print('--- Pinned Candidates ---')\n for key, candidate in result.mapping.items():\n if key.name not in seen:\n print(f'{key.name}: {candidate.name} {candidate.version}')\n seen.add(key.name)\n\n 
print('\\n--- Dependency Graph ---')\n for key in result.graph:\n targets = ', '.join(str(child) for child in result.graph.iter_children(key))\n print('{} -> {}'.format(key or '(root)', targets))\n\n\ndef main() -> None:\n try:\n task()\n except KeyboardInterrupt:\n print('Exiting...')\n\n\ndef entrypoint() -> None:\n main()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"resolver/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"15123085","text":"import sys\nimport re\n#name=\"yagino_uta\"\n#name='02tsukini_hoeru'\n#name = 'aoneko'\n#name = 'haruto_shura'\nname=\"merge\"\ninpath = './text/' + name + '.txt'\noutpath = './data/data_' + name + '.txt'\nbindata = open(inpath,\"rb\")\nlines = bindata.readlines()\nfor line in lines:\n text = line.decode('Shift_JIS')\n text = re.split(r'\\r',text)[0]\n text = re.split(r'底本',text)[0]\n text = text.replace('|','')\n text = re.sub(r'《.+?》','',text)\n text = re.sub(r'[#.+?]','',text)\n #print(text)\n file = open(outpath,'a',encoding='utf-8').write(text)\n","sub_path":"PyPoem/PyPreproc.py","file_name":"PyPreproc.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"547447414","text":"import json\nimport numpy as np\nimport operator\nfrom functools import reduce\nfrom tqdm import tqdm\nimport pathlib\nimport os\nimport glob\nfrom natsort import natsorted\nimport codecs\n\n\n\n\ndef get_file_list(folder_path: str, p_postfix: list = None) -> list:\n \"\"\"\n Gets the files with the specified suffixes from the given directory. Reading the file list currently uses os.walk and os.listdir, which are both much faster than pathlib at the moment\n :param folder_path: folder name\n :param p_postfix: file suffix; if it is [.*] all files are returned\n :return: the list of files of the specified types that were found\n \"\"\"\n assert os.path.exists(folder_path) and os.path.isdir(folder_path)\n if p_postfix is None:\n p_postfix = ['.jpg']\n if isinstance(p_postfix, str):\n p_postfix = [p_postfix]\n file_list = [x for x in glob.glob(folder_path + '/**/*.*', recursive=True) if\n os.path.splitext(x)[-1] in p_postfix or '.*' in p_postfix]\n return natsorted(file_list)\n\n\nsave_path = r'E:\\数据集\\ic19ReCTS\\json_c_txt'\njson_path = r'E:\\数据集\\ic19ReCTS\\gt_unicode'\n\nfor file_path in tqdm(get_file_list(json_path, p_postfix=['.json'])):\n print('file_path----------------------------', file_path)\n # content = load(file_path)\n\n\n with open(file_path, 'r', encoding='utf-8')as fp:\n\n file_path = pathlib.Path(file_path)\n image_name = file_path.stem\n new_save_path = save_path + os.sep + image_name + '.txt'\n with codecs.open(new_save_path, mode='w', encoding='utf-8') as fw:\n\n json_data = json.load(fp)\n for piece_data in json_data['lines']:\n label = piece_data['transcription']\n points = piece_data['points']\n print('points', points)\n\n points_str = [str(i) + ',' for i in points]\n print('points_str', points_str)\n dataset_line = ''.join(points_str) + label\n print('dataset_line', dataset_line)\n\n fw.write(dataset_line + '\\n')\n\n\n# file_path = pathlib.Path(file_path)\n# image_name = file_path.stem\n# new_save_path = save_path + os.sep + image_name + '.txt'\n# with codecs.open(new_save_path, mode='w', encoding='utf-8') as fw:\n# fw.write(dataset_line + '\\n')\n#\n#\n#\n#\n#\n# with open(json_path, 'r', encoding='utf-8')as fp:\n# json_data = json.load(fp)\n# #print('json_data',json_data)\n#\n#\n# for file_path in tqdm(get_file_list(img_path, p_postfix=['.jpg'])):\n# 
print('file_path----------------------------',file_path)\n# #content = load(file_path)\n# file_path = pathlib.Path(file_path)\n# image_name = file_path.stem\n#\n# new_save_path = save_path + os.sep + image_name + '.txt'\n# with codecs.open(new_save_path, mode='w', encoding='utf-8') as fw:\n#\n# for piece_data in json_data[image_name]:\n# print('piece_data',piece_data)\n# label = piece_data['transcription']\n# points = piece_data['points']\n# #print('points', points)\n# points_str_temp = reduce(operator.add, points)\n# #print('points_str_temp', points_str_temp)\n# points_str = [str(i) + ',' for i in points_str_temp]\n#\n# dataset_line = ''.join(points_str) + label\n# print('dataset_line',dataset_line)\n#\n# #fw = open(new_save_path, 'w', encoding='utf-8')\n# print('new_save_path', new_save_path)\n# #with open(image_name + '.txt', 'a+') as fw:\n#\n# fw.write(dataset_line + '\\n')\n","sub_path":"revies_19ReCTS.py","file_name":"revies_19ReCTS.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"257624416","text":"import sqlite3\nimport os.path\nimport os\n\nclass DataBase():\n\n def __init__(self):\n '''Connects to the database.'''\n self.bd = sqlite3.connect(\"Reproductor.db\")\n self.cursor = self.bd.cursor()\n\n def getCursor(self):\n '''Function that returns the cursor'''\n return self.cursor\n\n def runDb(self):\n '''Function where all the tables are created; saves and closes the database'''\n self.creaTables()\n self.save()\n self.closeConnection()\n\n def creaTables(self):\n '''Function that calls the functions that create all the tables'''\n self.creatTable()\n self.insertValue()\n self.creatPerformesTable()\n self.creatPersonsTable()\n self.creatGroupsTable()\n self.creatAlbumsTable()\n self.creatRolasTable()\n self.creatInGroupTable()\n\n def save(self):\n '''Function to save the changes to the database'''\n self.bd.commit()\n\n def closeConnection(self):\n '''Function that closes the connection to the database and closes the cursor'''\n self.cursor.close()\n self.bd.close()\n\n def creatTable(self):\n '''Function that creates the types table.'''\n self.cursor.execute('''CREATE TABLE IF NOT EXISTS types(\n id_type INTEGER PRIMARY KEY,\n description TEXT)''')\n\n def insertValue(self):\n '''Function that inserts the person types into the types table.'''\n self.cursor.execute(\"INSERT INTO types VALUES(0,'Person')\")\n self.cursor.execute(\"INSERT INTO types VALUES(1,'Group')\")\n self.cursor.execute(\"INSERT INTO types VALUES(2,'Unknown')\")\n\n def creatPerformesTable(self):\n '''Function that creates the performers table.'''\n self.cursor.execute('''CREATE TABLE performers(\n id_performer INTEGER PRIMARY KEY,\n id_type INTEGER,\n name TEXT,\n FOREIGN KEY (id_type) REFERENCES types(id_type))''')\n\n def creatPersonsTable(self):\n '''Function that creates the persons table.'''\n self.cursor.execute('''CREATE TABLE persons(\n id_person INTEGER PRIMARY KEY,\n stage_name TEXT,\n real_name TEXT,\n birth_date TEXT,\n death_date TEXT)''')\n\n def creatGroupsTable(self):\n '''Function that creates the groups table.'''\n self.cursor.execute('''CREATE TABLE groups(\n id_group INTEGER PRIMARY KEY,\n name TEXT,\n start_date TEXT,\n end_date TEXT)''')\n\n def creatAlbumsTable(self):\n '''Function that creates the albums table.'''\n self.cursor.execute('''CREATE TABLE albums(\n id_album INTEGER PRIMARY KEY,\n path TEXT,\n name TEXT,\n year INTEGER)''')\n\n def creatRolasTable(self):\n '''Function that creates the rolas table.'''\n self.cursor.execute('''CREATE TABLE rolas(\n id_rola INTEGER PRIMARY KEY,\n id_performer INTEGER,\n id_album INTEGER,\n path TEXT,\n title TEXT,\n track INTEGER,\n year INTEGER,\n genre TEXT,\n FOREIGN KEY (id_performer) REFERENCES performers(id_performer),\n FOREIGN KEY (id_album) REFERENCES albums(id_album))''')\n\n def creatInGroupTable(self):\n '''Function that creates the in_group table.'''\n self.cursor.execute('''CREATE TABLE in_group(\n id_person INTEGER,\n id_group INTEGER,\n PRIMARY KEY (id_person, id_group),\n FOREIGN KEY (id_person) REFERENCES persons(id_person),\n FOREIGN KEY (id_group) REFERENCES groups(id_group))''')\n\n\n\nc = DataBase()\n","sub_path":"src/DataBase.py","file_name":"DataBase.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"446015993","text":"from django.db import models\nfrom django.utils import timezone\n\nfrom authentication.models import User\n\nGARMENTS_CHOICES = (\n (0, '---未登録---'),\n (1, '規定あり'),\n (2, '私服OK'),\n (3, '要相談'),\n)\n\nRESTROOM_CHOICES = (\n (0, '---未登録---'),\n (1, '男女別'),\n (2, '男女共用'),\n)\n\nUSE_CHOICES = (\n (0, '---未登録---'),\n (1, '利用可'),\n (2, '利用不可'),\n)\n\n\nclass CompanyBasicInfo(models.Model):\n \"\"\"Class that manages a company's basic information\n \"\"\"\n\n account = models.OneToOneField(User, on_delete=models.CASCADE, related_name='company',\n verbose_name=\"Companyのアカウント\", null=True, blank=True, unique=True)\n\n is_activated = models.BooleanField(default=False,\n verbose_name=\"入力が完了したか否か\")\n\n name = models.CharField(max_length=50, null=True, blank=True,\n verbose_name=\"企業名\")\n\n representative_last_name = models.CharField(max_length=10, null=True, blank=True,\n verbose_name=\"代表者氏名\")\n\n representative_first_name = models.CharField(max_length=10, null=True, blank=True,\n verbose_name=\"代表者氏名\")\n\n establishment_date = models.DateField(null=True, blank=True,\n verbose_name=\"設立日\")\n\n capital = models.IntegerField(null=True, blank=True,\n verbose_name=\"資本金\")\n\n sales = models.IntegerField(null=True, blank=True,\n verbose_name=\"売上高\")\n\n employee_number = models.IntegerField(null=True, blank=True,\n verbose_name=\"従業員数\")\n\n average_age = models.IntegerField(null=True, blank=True,\n verbose_name=\"平均年齢\")\n\n phone_number = models.CharField(max_length=20, null=True, blank=True,\n verbose_name=\"電話番号\")\n\n address = models.CharField(max_length=50, null=True, blank=True,\n verbose_name=\"企業住所\")\n\n closest_station_1 = models.CharField(max_length=20, null=True, blank=True,\n verbose_name=\"最寄り駅1\")\n\n closest_station_2 = models.CharField(max_length=20, null=True, blank=True,\n verbose_name=\"最寄り駅2\")\n\n corporate_url = models.URLField(null=True, blank=True,\n verbose_name=\"企業のコーポレートURL\")\n\n pr_comment = models.TextField(null=True, blank=True,\n verbose_name=\"企業のPRコメント\")\n\n pr_photo_1 = models.TextField(null=True, blank=True,\n verbose_name=\"企業のPR写真1(Base64)\")\n\n pr_photo_2 = models.TextField(null=True, blank=True,\n verbose_name=\"企業のPR写真2(Base64)\")\n\n needs_paper_invoice = models.BooleanField(default=False, null=True, blank=True,\n verbose_name=\"紙ベース請求書要否\")\n\n def __str__(self):\n return f\"{self.name}\"\n\n\nclass CompanyStaff(models.Model):\n \"\"\"Stores information, such as the company's contact staff, that is not searched by Workers\n \"\"\"\n company = models.ForeignKey(CompanyBasicInfo, on_delete=models.CASCADE, related_name='staff',\n verbose_name=\"担当者情報\")\n\n staff_last_name = models.CharField(max_length=10, null=True, blank=True,\n verbose_name=\"担当者:姓\")\n\n 
staff_first_name = models.CharField(max_length=10, null=True, blank=True,\n verbose_name=\"担当者:名\")\n\n staff_last_name_kana = models.CharField(max_length=10, null=True, blank=True,\n verbose_name=\"担当者:セイ\")\n\n staff_first_name_kana = models.CharField(max_length=10, null=True, blank=True,\n verbose_name=\"担当者:メイ\")\n\n staff_department = models.CharField(max_length=20, null=True, blank=True,\n verbose_name=\"担当者所属部署\")\n\n staff_mail_address = models.EmailField(null=True, blank=True,\n verbose_name=\"請求書宛先メールアドレス\")\n\n def __str__(self):\n return f\"{self.staff_last_name} {self.staff_first_name}\"\n\n\nclass Project(models.Model):\n \"\"\"案件を表すクラス\n \"\"\"\n\n company = models.ForeignKey(CompanyBasicInfo, on_delete=models.CASCADE, related_name='project',\n verbose_name=\"企業\")\n\n name = models.CharField(max_length=20,\n verbose_name=\"案件名\")\n\n is_open = models.BooleanField(default=True,\n verbose_name=\"募集しているか\")\n\n min_fee = models.IntegerField(verbose_name=\"最小単金\")\n\n max_fee = models.IntegerField(verbose_name=\"最大単金\")\n\n workplace = models.CharField(max_length=50,\n verbose_name=\"作業場所(住所/ビル名など)\")\n\n closest_station = models.CharField(max_length=20,\n verbose_name=\"最寄り駅\")\n\n start_term = models.DateField(verbose_name=\"開始時期(xxxx年mm月)\")\n\n end_term = models.DateField(null=True, blank=True,\n verbose_name=\"終了時間(xxxx年mm月)\")\n\n start_time = models.TimeField(verbose_name=\"就業開始時間\")\n\n end_time = models.TimeField(verbose_name=\"就業終了時間\")\n\n rest_time = models.TimeField(verbose_name=\"就業中の休憩時間\")\n\n garments = models.IntegerField(choices=GARMENTS_CHOICES, default=0,\n verbose_name=\"服装\")\n\n restroom = models.IntegerField(choices=RESTROOM_CHOICES, default=0,\n verbose_name=\"トイレ\")\n\n water_server = models.IntegerField(choices=USE_CHOICES, default=0,\n verbose_name=\"ウォーターサーバなどの利用\")\n\n warter_supply_room = models.IntegerField(choices=USE_CHOICES, default=0,\n verbose_name=\"給湯室などの利用\")\n\n conditions = models.CharField(max_length=50, null=True, blank=True,\n verbose_name=\"条件備考\")\n\n content = models.TextField(verbose_name=\"作業内容\")\n\n appeal = models.TextField(null=True, blank=True,\n verbose_name=\"作業のアピール点\")\n\n required_skills = models.TextField(null=True, blank=True,\n verbose_name=\"必須スキル\")\n\n preferred_skills = models.TextField(null=True, blank=True,\n verbose_name=\"尚可スキル\")\n\n def __str__(self):\n return f\"{self.name}\"\n","sub_path":"company/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"57541576","text":"#!/usr/bin/env python\n#\n# Copyright 2014 - 2015 The BCE Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the license.txt file.\n#\n\nimport os as _os\nimport json as _json\nimport importlib as _imp\nimport django.conf.urls as _urls\nimport bce_portal.base.queue as _q\n\n\ndef collect_views(base_path):\n # Initialize.\n work_queue = _q.Queue()\n work_queue.push({\n \"local_path\": base_path,\n \"import_path\": \"bce_portal.packages\"\n })\n ret = []\n module_cache = {}\n\n while len(work_queue) != 0:\n # Pop a module from the queue.\n cur = work_queue.pop()\n\n # Get local path.\n local_path = cur[\"local_path\"]\n\n # Read module information.\n fp = open(_os.path.join(local_path, \"package.json\"), \"r\")\n pkg_info = _json.loads(fp.read())\n fp.close()\n\n # Load views.\n if \"views\" in pkg_info:\n for view_item in pkg_info[\"views\"]:\n # Get route URL.\n route_url = view_item[0]\n\n # Get view path and check it.\n view_px = view_item[1].split(\":\")\n if len(view_px) != 2:\n raise ValueError(\"Invalid view descriptor.\")\n\n # Get module name.\n module_name = cur[\"import_path\"] + \".%s\" % view_px[0]\n\n # Import the module.\n if module_name in module_cache:\n module_hdl = module_cache[module_name]\n else:\n module_hdl = _imp.import_module(module_name)\n\n # Append the function to the result list.\n ret.append(_urls.url(route_url, getattr(module_hdl, view_px[1])))\n\n # Load sub directories.\n sub_dup = {}\n if \"sub_directories\" in pkg_info:\n for sub_dir in pkg_info[\"sub_directories\"]:\n # Ignore duplicated sub directories.\n if sub_dir in sub_dup:\n continue\n\n # Mark the duplicating-check mark.\n sub_dup[sub_dir] = True\n\n # Push the directory to the queue.\n work_queue.push({\n \"local_path\": _os.path.join(local_path, sub_dir),\n \"import_path\": cur[\"import_path\"] + \".%s\" % sub_dir\n })\n\n return ret\n\n# URL patterns.\nurl_patterns = collect_views(_os.path.abspath(_os.path.join(_os.path.dirname(__file__), _os.pardir, \"packages\")))","sub_path":"bce_portal/pkg_mgmt/view_url_collector.py","file_name":"view_url_collector.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"530948839","text":"from flask import Blueprint, g, request, current_app, jsonify, session as flask_session\nfrom werkzeug.exceptions import BadRequest, NotFound\n\nfrom ..models import User\nfrom .helpers import session\n\nbp = Blueprint(\"routes.session\", __name__, url_prefix=\"/session\")\n\n\n@bp.route(\"\", methods=[\"GET\"])\ndef show():\n return jsonify(g.current_user.repr() if g.current_user else None)\n\n\n@bp.route(\"\", methods=[\"POST\"])\n@session(checked_permissions=None)\ndef create():\n payload = request.get_json(force=True)\n if \"email\" not in payload:\n raise BadRequest\n user = g.session.query(User).filter_by(email=payload[\"email\"]).one_or_none()\n if user is None:\n flask_session.pop(\"current_user_id\", None)\n raise NotFound\n flask_session[\"current_user_id\"] = user.id\n return user.repr(), 201\n\n\n@bp.route(\"\", methods=[\"DELETE\"])\ndef delete():\n flask_session.pop(\"current_user_id\", None)\n return current_app.response_class(status=204, mimetype=\"application/json\")\n","sub_path":"backends/flask-sqlalchemy-oso/app/routes/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"593160115","text":"from autoslug import AutoSlugField\nfrom 
django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom origin.cms.resource.models import Resource, OwnerMixin, PublishableMixin\n\n\nclass Page(Resource, OwnerMixin, PublishableMixin):\n class Meta:\n db_table = 'origin_page'\n\n title = models.CharField(\n help_text=_('Title of the page'),\n max_length=128\n )\n\n slug = AutoSlugField(\n help_text=_('The slug is used to identify the page in the URL'),\n populate_from='title',\n unique=True\n )\n\n content = models.TextField(\n help_text=_('The content of the page'),\n null=True,\n blank=True\n )\n\n parent = models.ForeignKey(\n 'origin_cms_page.Page',\n help_text=_('The parent this page is a child of'),\n related_name='children',\n on_delete=models.CASCADE,\n null=True\n )\n\n def __str__(self):\n return self.title\n","sub_path":"origin/cms/page/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"119697198","text":"import torch\nfrom torch.utils import data\nimport numpy as np\nimport pickle\n\n\nclass UrbanSound8KDataset(data.Dataset):\n def __init__(self, dataset_path, mode):\n self.dataset = pickle.load(open(dataset_path, 'rb'))\n self.mode = mode\n\n def __getitem__(self, index):\n features = self.dataset[index]['features']\n cst = np.concatenate((features['chroma'], features['spectral_contrast'], features['tonnetz']))\n if self.mode == 'LMC':\n # create the LMC feature\n # combine LM & CST\n lm = features['logmelspec']\n feature = np.concatenate((lm,cst))\n feature = torch.from_numpy(feature.astype(np.float32)).unsqueeze(0)\n elif self.mode == 'MC':\n # create the MC feature\n # combine MFCC & CST\n mfcc = features['mfcc']\n feature = np.concatenate((mfcc,cst))\n feature = torch.from_numpy(feature.astype(np.float32)).unsqueeze(0)\n elif self.mode == 'MLMC':\n # create the MLMC feature\n # combine MFCC, LMC, & CST\n lm = features['logmelspec']\n mfcc = features['mfcc']\n feature = np.concatenate((mfcc,lm,cst))\n feature = torch.from_numpy(feature.astype(np.float32)).unsqueeze(0)\n\n label = self.dataset[index]['classID']\n fname = self.dataset[index]['filename']\n return feature, label, fname\n\n def __len__(self):\n return len(self.dataset)\n","sub_path":"source/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"549858197","text":"\r\nimport pandas as pd\r\nimport math\r\n\r\n#------------ Reading CSV files into dataframes-------------------------------\r\n\r\npath_books = \"C:/Users/moham/OneDrive/Documents/books_task/BX-Books.csv\"\r\npath_ratings = \"C:/Users/moham/OneDrive/Documents/books_task/BX-Book-Ratings.csv\"\r\npath_users = \"C:/Users/moham/OneDrive/Documents/books_task/BX-Users.csv\"\r\ndata_books = pd.read_csv(path_books, encoding='latin-1', error_bad_lines=False, low_memory=False, sep=\";\")\r\ndata_ratings = pd.read_csv(path_ratings, encoding='latin-1', error_bad_lines=False, sep=\";\", engine='python')\r\ndata_users = pd.read_csv(path_users, encoding='latin-1', error_bad_lines=False, sep=\";\", engine='python')\r\n\r\ndef get_info(id):\r\n #----- Returns the location and age of a user given the id------\r\n info = []\r\n df = pd.DataFrame(data_users)\r\n col_name = df.columns[0]\r\n df = df.rename(columns={col_name: 'USER'})\r\n df = data_users.loc[data_users['User-ID'].isin([id])]\r\n try:\r\n 
info.append(df.Location.values[0].split(\", \")[2])\r\n info.append(df.Age.values[0])\r\n return info\r\n except IndexError:\r\n return info\r\n\r\ndef recommend_by_same_group(id):\r\n #------- Returns a recommended book based on what other users of same age and country read-----\r\n user_info = get_info(int(id))\r\n if len(user_info) == 0 : return # cannot recommend a book by this method if the user's country is not specified\r\n user_country = user_info[0]\r\n user_age = user_info[1]\r\n if math.isnan(user_age): user_age = 35 # if the user's age is unknown, assume it is 35 (avg age of the world population)\r\n df_users = pd.DataFrame(data_users)\r\n #------- matching the user with another user having same nationality and somehow similar age --------------\r\n for row in df_users.values:\r\n try:\r\n if list(row)[1].split(\", \")[2] == user_country: #checking for users having the same nationality\r\n if math.isnan(list(row)[2]):continue\r\n if list(row)[2]-5 < user_age < list(row)[2]+5 : # checking if they are from the same generation\r\n rec_user_id = list(row)[0]\r\n books = books_read(rec_user_id) # returning the books read by the matched user\r\n return books[0][0] # returning the highest rated book among the books read by the matched user\r\n\r\n except IndexError :\r\n continue\r\n\r\n#------------returning a list of books read by a certain user given his ID ----------\r\n\r\ndef books_read(id):\r\n book_read= {}\r\n #------returning the list of books read into a dictionary----------\r\n df = data_ratings.loc[data_ratings['User-ID'].isin([id])]\r\n col_name = df.columns[2]\r\n df = df.rename(columns={col_name: 'RATINGS'})\r\n isbns = list(df.ISBN)\r\n ratings = list(df.RATINGS)\r\n #--------sorting the dict so that the maximum rating comes first--------\r\n for i in range(len(isbns)):\r\n if ratings[i] != 0:\r\n book_read[isbns[i]] = ratings[i]\r\n else:\r\n book_read[isbns[i]] = 5 # an implicit rating is given as the average rating (5)\r\n book_read = sorted(book_read.items(), key=lambda kv: kv[1], reverse=True)\r\n return book_read\r\n\r\ndef recommend_by_same_author(id):\r\n #-------getting the books read by the user---------\r\n book_read = books_read(id)\r\n #-------getting the highest rated book in the sorted dict-------------\r\n try:\r\n isbn = book_read[0][0]\r\n except IndexError: #This is the case where the user hasn't read any books yet\r\n return\r\n df_books = pd.DataFrame(data_books)\r\n col_name = df_books.columns[2]\r\n df_books = df_books.rename(columns={col_name: 'AUTHOR'})\r\n #---- returning a dataframe with rows only containing books with same author-----\r\n df= df_books.loc[df_books['ISBN'].isin([isbn])]\r\n author = df.AUTHOR.values[0]\r\n df = df_books.loc[df_books['AUTHOR'].isin([author])]\r\n recommended_books = list(df.ISBN)\r\n recommended_books.remove(isbn)\r\n return recommended_books\r\n\r\n\r\n\r\n\r\n\r\n\r\n#print(\"RECOMMENDED BOOKS : \")\r\nprint(recommend_by_same_group('2'))\r\nprint(recommend_by_same_author('2'))\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"books.py","file_name":"books.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"250818880","text":"import numpy as np\nimport torch\nimport gym\n\nfrom dril.a2c_ppo_acktr import utils\nfrom dril.a2c_ppo_acktr.envs import make_vec_envs\n\n\ndef evaluate(actor_critic, ob_rms, env_name, seed, num_processes, eval_log_dir,\n device, num_episodes=None, atari_max_steps=None):\n eval_envs = 
make_vec_envs(env_name, seed + num_processes, num_processes,\n None, eval_log_dir, device, True, atari_max_steps)\n\n vec_norm = utils.get_vec_normalize(eval_envs)\n if vec_norm is not None:\n vec_norm.eval()\n vec_norm.ob_rms = ob_rms\n\n eval_episode_rewards = []\n\n obs = eval_envs.reset()\n eval_recurrent_hidden_states = torch.zeros(\n num_processes, actor_critic.recurrent_hidden_state_size, device=device)\n eval_masks = torch.zeros(num_processes, 1, device=device)\n\n while len(eval_episode_rewards) < num_episodes:\n with torch.no_grad():\n _, action, _, eval_recurrent_hidden_states = actor_critic.act(\n obs,\n eval_recurrent_hidden_states,\n eval_masks,\n deterministic=True)\n\n # Observe reward and next obs\n if isinstance(eval_envs.action_space, gym.spaces.Box):\n clip_action = torch.clamp(action, float(eval_envs.action_space.low[0]),\\\n float(eval_envs.action_space.high[0]))\n else:\n clip_action = action\n\n # Observe reward and next obs\n obs, _, done, infos = eval_envs.step(clip_action)\n\n eval_masks = torch.tensor(\n [[0.0] if done_ else [1.0] for done_ in done],\n dtype=torch.float32,\n device=device)\n\n for info in infos:\n if 'episode' in info.keys():\n eval_episode_rewards.append(info['episode']['r'])\n\n eval_envs.close()\n\n print(\" Evaluation using {} episodes: mean reward {:.5f}\\n\".format(\n len(eval_episode_rewards), np.mean(eval_episode_rewards)))\n\n return np.mean(eval_episode_rewards)\n","sub_path":"dril/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"358704176","text":"# -*- coding:utf-8 -*-\nimport queue\nimport requests\nimport os\nimport threading\nimport time\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\nimport numpy as np\n\npicture_num=0\n\n# Download images with multiple threads, recursively; only start one program at a time\n# Another idea: use a separate python program to control launching the other python programs\nheaders = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Cache-Control': 'private',\n 'Connection': 'keep-alive',\n 'Host': 'www.logodashi.com',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',\n 'Content-Type': 'text/html; charset=utf-8',\n}\nclass Worker(threading.Thread): # handles work requests\n def __init__(self, workQueue, resultQueue, **kwds):\n threading.Thread.__init__(self, **kwds)\n self.setDaemon(True)\n self.workQueue = workQueue\n self.resultQueue = resultQueue\n\n def run(self):\n while 1:\n try:\n callable, args, kwds = self.workQueue.get(False) # get task\n res = callable(*args, **kwds)\n self.resultQueue.put(res) # put result\n except queue.Empty:\n break\n\n\nclass WorkManager: # thread pool management and creation\n def __init__(self, num_of_workers=10):\n self.workQueue = queue.Queue() # request queue\n self.resultQueue = queue.Queue() # queue for the output results\n self.workers = []\n self._recruitThreads(num_of_workers)\n\n def _recruitThreads(self, num_of_workers):\n for i in range(num_of_workers):\n worker = Worker(self.workQueue, self.resultQueue) # create a worker thread\n self.workers.append(worker) # add it to the thread list\n\n def start(self):\n for w in self.workers:\n w.start()\n\n def wait_for_complete(self):\n while len(self.workers):\n worker = self.workers.pop() # take a thread from the pool to handle requests\n worker.join()\n if worker.isAlive() and not self.workQueue.empty():\n self.workers.append(worker) # put it back into the thread pool\n print('All jobs were complete.')\n\n def add_job(self, callable, 
*args, **kwds):\n self.workQueue.put((callable, args, kwds)) # put the request into the work queue\n\n def get_result(self, *args, **kwds):\n return self.resultQueue.get(*args, **kwds)\n\n\ndef get_start_links(url):\n html = requests.get(url, headers=headers,timeout=10)\n time.sleep(0.1)\n #html.encoding = 'utf-8'\n html = html.text\n return html\n\ndef download_file(item):\n global picture_num\n pic=None\n pic = requests.get(item,headers=headers,timeout=10)\n string = './picture_place/'+str(picture_num)+'.jpg'\n try:\n tmp=pic.content\n with open(string, 'wb') as fp:\n fp.write(pic.content)\n except Exception as e:\n print(\"picture with no data\")\n picture_num += 1\n print(string, \"downloading\")\n\ndef main():\n if not os.path.exists('picture_place'):\n os.mkdir('picture_place')\n num_of_threads = 10\n _st = time.time()\n wm = WorkManager(num_of_threads)\n print(num_of_threads)\n num=0\n save_picture_list=[]\n for num in tqdm(range(1)):\n url=\"http://www.logodashi.com/Home/?type=1&CurrentPage=\"+str(num)+\"&SortID=0&KeyWord=\"\n print(url)\n main_page = get_start_links(url)\n soup = BeautifulSoup(main_page, 'lxml')\n A = soup.find('div',class_='body').find_all('img')\n for item in A:\n save_picture_list.append('http://www.logodashi.com/'+item.attrs['src'])\n np.save('save_picture_list.npy',save_picture_list)\n print(\"load url complete! and begin download\")\n for url in tqdm(save_picture_list):\n wm.add_job(download_file, url)\n wm.start()\n print(\"start\")\n wm.wait_for_complete()\n print(time.time() - _st)\nif __name__ == '__main__':\n main()\n\n","sub_path":"picture_similarity/logo spider/logo_duoxiancheng2.py","file_name":"logo_duoxiancheng2.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"56728277","text":"import random\n\ndef display_game_rules():\n print('''\\n\\n\\t\\t\\tROCK, PAPER, SCISSORS GAME RULES\n ---------------------------------------------------------\n Pick #1 for \"Rock\"\n Pick #2 for \"Paper\"\n Pick #3 for \"Scissors\"\n * If one picks \"Rock\" and the other picks \"Scissors\", --> \"Rock\" Wins\n * If one picks \"Scissors\" and the other picks \"Paper\", --> \"Scissors\" Wins\n * If one picks \"Paper\" and the other picks \"Rock\", --> \"Paper\" Wins\n * If both players make the same choice, --> It's a Tie \n * Game ends when either player gets to 5 wins...\n GOOD LUCK ! \\n\n ''')\n\ndef generate_rnd_number():\n '''Returns a random number for the computer pick...'''\n rnd_number = random.randint(1, 3)\n return rnd_number\n\ndisplay_game_rules()\n\ndef get_user_pick():\n '''Returns the player's selection...'''\n plyr_pick = 0\n while plyr_pick < 1 or plyr_pick > 3:\n try:\n plyr_pick = int(input('Enter your choice 1.for rock, 2.for paper, or 3.for scissors -->> '))\n except ValueError:\n print('ERROR! 
Please enter a valid selection...')\n return plyr_pick\n#-------------------------------------------------------------\npc_cnt = 0\nply_cnt = 0\n\ndef decide_winner(pc, plyr):\n global pc_cnt\n global ply_cnt\n if ((pc == 1) and (plyr == 1)):\n print('Computer: Rock ----- Player: Rock -->> It is a Tie !')\n elif ((pc == 1) and (plyr == 2)):\n print('Computer: Rock ----- Player: Paper -->> Player Wins !')\n ply_cnt = ply_cnt + 1\n elif ((pc == 1) and (plyr == 3)):\n print('Computer: Rock ----- Player: Scissors -->> Computer Wins !')\n pc_cnt = pc_cnt + 1\n elif ((pc == 2) and (plyr == 1)):\n print('Computer: Paper ----- Player: Rock -->> Computer Wins !')\n pc_cnt = pc_cnt + 1\n elif ((pc == 2) and (plyr == 2)):\n print('Computer: Paper ----- Player: Paper -->> It is a Tie !')\n elif ((pc == 2) and (plyr == 3)):\n print('Computer: Paper ----- Player: Scissors -->> Player Wins !')\n ply_cnt = ply_cnt + 1\n elif ((pc == 3) and (plyr == 1)):\n print('Computer: Scissors ----- Player: Rock -->> Player Wins !')\n ply_cnt = ply_cnt + 1\n elif ((pc == 3) and (plyr == 2)):\n print('Computer: Scissors ----- Player: Paper -->> Computer Wins !')\n pc_cnt = pc_cnt + 1\n elif ((pc == 3) and (plyr == 3)):\n print('Computer: Scissors ----- Player: Scissors -->> It is a Tie !')\n\n#----------------------------------------------------------------------\ngame_count = 0\n\nwhile pc_cnt <= 4 and ply_cnt <= 4:\n computer = generate_rnd_number() # Holds the random number for the computer..\n player = get_user_pick() # Holds the player's pick...\n decide_winner(computer, player)\n print('Total Computer Wins:', pc_cnt, '& Total Player Wins:', ply_cnt)\n print('-' * 40)\n game_count += 1\n\n\n\n\n\n\n\n","sub_path":"Rock_Paper_Scissors.py","file_name":"Rock_Paper_Scissors.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"263691696","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2019 RERO.\n#\n# Swiss Open Access Repository is free software; you can redistribute it\n# and/or modify it under the terms of the MIT License; see LICENSE file for\n# more details.\n\n\"\"\"Test documents views.\"\"\"\n\nimport pytest\nfrom flask import g, url_for\n\nimport sonar.modules.documents.views as views\nfrom sonar.modules.documents.api import DocumentRecord\n\n\ndef test_pull_ir(app):\n \"\"\"Test pull IR.\"\"\"\n views.pull_ir(None, {\"ir\": \"sonar\"})\n\n\ndef test_index(client):\n \"\"\"Test frontpage.\"\"\"\n assert isinstance(views.index(), str)\n assert client.get('/').status_code == 200\n\n\ndef test_search(app, client):\n \"\"\"Test search.\"\"\"\n assert isinstance(views.search(), str)\n assert client.get(\n url_for('invenio_search_ui.search')).status_code == 200\n\n\ndef test_detail(app, client):\n \"\"\"Test document detail page.\"\"\"\n record = DocumentRecord.create({\n \"title\": \"The title of the record\"\n }, dbcommit=True)\n\n # assert isinstance(views.detail('1', record, ir='sonar'), str)\n assert client.get('/organization/sonar/documents/1').status_code == 200\n\n\ndef test_authors_format():\n \"\"\"Test author format filter.\"\"\"\n authors = [{'name': 'John Newby'}, {'name': 'Kevin Doner'}]\n\n assert views.authors_format(authors) == 'John Newby ; Kevin Doner'\n\n\ndef test_nl2br():\n \"\"\"Test nl2br conversion.\"\"\"\n text = 'Multiline text\\nMultiline text'\n assert views.nl2br(text) == 'Multiline text
Multiline text'\n\n\ndef test_translate_content(app):\n \"\"\"Test content item translation.\"\"\"\n assert views.translate_content([], 'fr') is None\n\n records = [{\n 'language': 'eng',\n 'value': 'Summary of content'\n }, {\n 'language': 'fre',\n 'value': 'Résumé du contenu'\n }]\n assert views.translate_content(records, 'fr') == 'Résumé du contenu'\n assert views.translate_content(records, 'de') == 'Summary of content'\n assert views.translate_content(records, 'pt') == 'Summary of content'\n\n with pytest.raises(Exception) as e:\n views.translate_content(records, 'de', 'not_existing_key')\n assert str(\n e.value\n ) == 'Value key \"not_existing_key\" in {record} does not exist'.format(\n record=records[0])\n\n\ndef test_get_code_from_bibliographic_language(app):\n \"\"\"Test bibliographic language code to alpha 2 code conversion.\"\"\"\n assert views.get_language_from_bibliographic_code('ger') == 'de'\n\n with pytest.raises(Exception) as e:\n views.get_language_from_bibliographic_code('zzz')\n assert str(e.value) == 'Language code not found for \"zzz\"'\n\n\ndef test_get_bibliographic_code_from_language(app):\n \"\"\"Test bibliographic language code to alpha 2 code conversion.\"\"\"\n with pytest.raises(Exception) as e:\n views.get_bibliographic_code_from_language(\"zz\")\n assert str(e.value) == 'Language code not found for \"zz\"'\n\n assert views.get_bibliographic_code_from_language('de') == 'ger'\n","sub_path":"tests/ui/documents/test_documents_views.py","file_name":"test_documents_views.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"454584859","text":"'''\nCreated on Sep 21, 2014\n\n@author: Ben Greene\n'''\n\n'''\nparams is a tuple. params[0] is iterations, params[1] is threshold\n'''\ndef mandelbrotFractalFunction(x_coord, y_coord, params):\n initial = complex(x_coord, y_coord)\n worknum = complex(0)\n n = 0\n for _ in range(0, params[0]):\n worknum = worknum * worknum + initial\n n = n + 1\n if worknum.real > params[1] or worknum.imag > params[1]:\n break\n if n == params[0]:\n n = -1\n return n","sub_path":"fractal_viewer/Calculation/FractalFunctions.py","file_name":"FractalFunctions.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"512586793","text":"import warnings as _warnings\nimport pandas as _pd\nimport numpy as _np\nimport matplotlib.pylab as _plt\nfrom copy import deepcopy as _deepcopy\ntry:\n from netCDF4 import Dataset as _Dataset\nexcept ModuleNotFoundError:\n _warnings.warn('netCDF4 not installed. 
You might encounter some functionality limitations.')\n# from netCDF4 import date2num as _date2num\nimport atmPy.general.timeseries\nfrom atmPy.tools import plt_tools as _plt_tools\nfrom atmPy.tools import pandas_tools as _pandas_tools\nimport os as _os\nfrom atmPy.tools import git as _git_tools\nfrom atmPy.tools import array_tools as _array_tools\n\n# _unit_time = 'days since 1900-01-01'\n\ndef none2nan(var):\n if type(var).__name__ == 'NoneType':\n var = _np.nan\n return var\n\ndef save_netCDF(vp, fname, leave_open = False):\n\n # if ts._time_format == 'timedelta':\n # ts.timed\n\n file_mode = 'w'\n try:\n ni = _Dataset(fname, file_mode)\n except RuntimeError:\n if _os.path.isfile(fname):\n _os.remove(fname)\n ni = _Dataset(fname, file_mode)\n\n time_dim = ni.createDimension('altitude', vp.data.shape[0])\n dim_data_col = ni.createDimension('data_columns', vp.data.shape[1])\n\n # ts_time_num = _date2num(ts.data.index.to_pydatetime(), _unit_time)#.astype(float)\n altitude = vp.data.index\n altitude_var = ni.createVariable('altitude', altitude.dtype, 'altitude')\n altitude_var[:] = altitude.values\n altitude_var.units = 'meters'\n\n var_data = ni.createVariable('data', vp.data.values.dtype, ('altitude', 'data_columns'))\n var_data[:] = vp.data.values\n\n vp_columns = vp.data.columns.values.astype(str)\n var_data_collumns = ni.createVariable('data_columns', vp_columns.dtype, 'data_columns')\n var_data_collumns[:] = vp_columns\n\n ni._type = type(vp).__name__\n # ni._data_period = none2nan(vp._data_period)\n ni._x_label = none2nan(vp._x_label)\n ni._y_label = none2nan(vp._y_label)\n # ni.info = none2nan(vp.info)\n ni._atm_py_commit = _git_tools.current_commit()\n\n if leave_open:\n return ni\n else:\n ni.close()\n\ndef correlate(data,correlant, data_column = False, correlant_column = False, remove_zeros=True, data_lim = None, correlant_lim = None):\n data = data.copy()\n correlant = correlant.copy()\n\n if not _np.array_equal(data.data.index, correlant.data.index):\n raise ValueError('The indexes of the two columns are not identical; there is no align for vertical profiles yet ... program an align function')\n\n if data_column:\n data_values = data.data[data_column].values\n elif data.data.shape[1] > 1:\n raise ValueError('Data contains more than 1 column. Specify which to correlate. Options: %s'%(list(data.data.keys())))\n else:\n data_values = data.data.iloc[:,0].values\n\n if correlant_column:\n correlant_values = correlant.data[correlant_column].values\n elif correlant.data.shape[1] > 1:\n raise ValueError('''Correlant contains more than 1 column. Specify which to correlate. 
Options:\n%s'''%(list(correlant.data.keys())))\n else:\n correlant_values = correlant.data.iloc[:,0].values\n\n\n if data_lim:\n if data_lim[0]:\n data_values[data_values < data_lim[0]] = _np.nan\n if data_lim[1]:\n data_values[data_values > data_lim[1]] = _np.nan\n\n if correlant_lim:\n if correlant_lim[0]:\n correlant_values[correlant_values < correlant_lim[0]] = _np.nan\n if correlant_lim[1]:\n correlant_values[correlant_values > correlant_lim[1]] = _np.nan\n\n # import pdb\n # pdb.set_trace()\n out = _array_tools.Correlation(data_values, correlant_values, remove_zeros=remove_zeros, index = data.data.index)\n# out._x_label_orig = 'DataTime'\n return out\n\n\nclass VerticalProfile(object):\n def __init__(self, data):\n data.sort_index(inplace=True)\n data = data[~data.index.duplicated()]\n self.data = data\n self._x_label = None\n self._y_label = 'Altitude'\n\n ###########################################\n def __sub__(self, other):\n vp = self.copy()\n vp.data = _pd.DataFrame(vp.data.iloc[:, 0] - other.data.iloc[:, 0])\n return vp\n ###########################################\n\n def align_to(self, ts_other):\n return align_to(self, ts_other)\n\n def merge(self, ts):\n return merge(self,ts)\n\n def plot(self, ax=False, **kwargs):\n if not ax:\n f, a = _plt.subplots()\n else:\n a = ax\n\n for e,k in enumerate(self.data.keys()):\n kwt = kwargs.copy()\n if 'label' not in kwt.keys():\n kwt['label'] = k\n a.plot(self.data[k].values, self.data.index, **kwt)\n\n if len(self.data.keys()) > 1:\n a.legend(loc = 'best')\n a.set_ylabel(self._y_label)\n a.set_xlabel(self._x_label)\n a.set_ylim((self.data.index.min(), self.data.index.max()))\n return a\n\n save_netCDF = save_netCDF\n\n def save(self, fname):\n self.data.to_csv(fname)\n\n def copy(self):\n return _deepcopy(self)\n\n def convert2timeseries(self, ts):\n \"\"\"merges a vertical profile with a timeseries that contains height data\n and returns a time series where the data of the vertical profile is interpolated\n along the time of the timeseries. ...\n\n Arguments\n ---------\n ts: timeseries\"\"\"\n hk_tmp = ts.convert2verticalprofile()\n data = hk_tmp.data[['TimeUTC']]\n cat_sort_int = _pd.concat([data, self.data]).sort_index().interpolate()\n cat_sort_int = cat_sort_int.dropna()\n cat_sort_int.index = cat_sort_int.TimeUTC\n cat_sort_int = cat_sort_int.drop('TimeUTC', axis=1)\n return atmPy.general.timeseries.TimeSeries(cat_sort_int)\n\n correlate_to = correlate\n\n def drop_all_columns_but(self, keep, inplace = False):\n if inplace:\n ts = self\n else:\n ts = self.copy()\n all_keys = ts.data.keys()\n del_keys = all_keys.drop(keep)\n ts.data = ts.data.drop(labels=del_keys, axis=1)\n if inplace:\n return\n else:\n return ts\n\nclass VerticalProfile_2D(VerticalProfile):\n def plot(self, xaxis = 0, ax = None, autofmt_xdate = True, cb_kwargs = {}, pc_kwargs = {}, **kwargs):\n if 'cb_kwargs' in kwargs.keys():\n cb_kwargs = kwargs['cb_kwargs']\n if 'pc_kwargs' in kwargs.keys():\n pc_kwargs = kwargs['pc_kwargs']\n f, a, pc, cb = _pandas_tools.plot_dataframe_meshgrid(self.data, xaxis=xaxis, ax=ax, pc_kwargs=pc_kwargs, cb_kwargs=cb_kwargs)\n if autofmt_xdate:\n f.autofmt_xdate()\n return f, a, pc, cb\n\n#### Tools\ndef merge(ts, ts_other):\n \"\"\" Merges current with other timeseries. The returned timeseries has the same time-axes as the current\n one (as opposed to the one merged into it). 
Missing or offset data points are linearly interpolated.\n\n Arguments\n ---------\n ts: timeseries or one of its subclasses. The other series is merged into this one,\n so this timeseries defines the time stamps.\n ts_other: timeseries or one of its subclasses to merge in.\n\n Returns\n -------\n TimeSeries object or one of its subclasses\n\n \"\"\"\n ts_this = ts.copy()\n # ts_data_list = [ts_this.data, ts_other.data]\n # catsortinterp = _pd.concat(ts_data_list).sort_index().interpolate()\n # merged = catsortinterp.groupby(catsortinterp.index).mean().reindex(ts_data_list[0].index)\n # ts_this.data = merged\n\n ts_data_list = [ts_this.data, ts_other.data]\n bla = _pd.concat(ts_data_list).sort_index()\n catsortinterp = bla.interpolate().where(bla.bfill().notnull())\n merged = catsortinterp.groupby(catsortinterp.index).mean().reindex(ts_data_list[0].index)\n ts_this.data = merged\n return ts_this\n\ndef align_to(ts, ts_other):\n \"\"\"\n Align the TimeSeries ts to another time_series by interpolating (linearly).\n\n Parameters\n ----------\n ts: original time series\n ts_other: timeseries to align to\n\n Returns\n -------\n timeseries equivalent to the original but with an index aligned to the other\n \"\"\"\n ts = ts.copy()\n ts_other = ts_other.copy()\n ts_other.data = ts_other.data.loc[:,[]]\n ts_t = merge(ts_other, ts)\n ts.data = ts_t.data\n return ts\n","sub_path":"atmPy/general/vertical_profile.py","file_name":"vertical_profile.py","file_ext":"py","file_size_in_byte":8342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"304316327","text":"# A Tic-Tac-Toe board is given as a string array board. Return True if and only if it is possible to reach this board position during the course of a valid tic-tac-toe game.\n\n# The board is a 3 x 3 array, and consists of characters \" \", \"X\", and \"O\". 
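A note on the merge in the vertical_profile.py record above: the concat/sort/interpolate/groupby chain is terse. Below is a minimal standalone sketch of the same pattern, assuming only pandas; the index values and column names are illustrative, not taken from the record.

import pandas as pd

# 'left' defines the target index; 'right' is merged into it.
left = pd.DataFrame({'a': [1.0, 2.0, 3.0]}, index=[0.0, 10.0, 20.0])
right = pd.DataFrame({'b': [5.0, 7.0]}, index=[5.0, 15.0])

# Stack both frames, sort by index, linearly interpolate the gaps,
# mask anything before the first real value, then collapse duplicate
# index entries and keep only the rows of the target (left) index.
cat = pd.concat([left, right]).sort_index()
interp = cat.interpolate().where(cat.bfill().notnull())
merged = interp.groupby(interp.index).mean().reindex(left.index)
print(merged)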
The \" \" character represents an empty square.\n\n# Here are the rules of Tic-Tac-Toe:\n\n# Players take turns placing characters into empty squares (\" \").\n# The first player always places \"X\" characters, while the second player always places \"O\" characters.\n# \"X\" and \"O\" characters are always placed into empty squares, never filled ones.\n# The game ends when there are 3 of the same (non-empty) character filling any row, column, or diagonal.\n# The game also ends if all squares are non-empty.\n# No more moves can be played if the game is over.\n# Example 1:\n# Input: board = [\"O \", \" \", \" \"]\n# Output: false\n# Explanation: The first player always plays \"X\".\n\n# Example 2:\n# Input: board = [\"XOX\", \" X \", \" \"]\n# Output: false\n# Explanation: Players take turns making moves.\n\n# Example 3:\n# Input: board = [\"XXX\", \" \", \"OOO\"]\n# Output: false\n\n# Example 4:\n# Input: board = [\"XOX\", \"O O\", \"XOX\"]\n# Output: true\n# Note:\n\n# board is a length-3 array of strings, where each string board[i] has length 3.\n# Each board[i][j] is a character in the set {\" \", \"X\", \"O\"}.\nclass Solution(object):\n def validTicTacToe(self, board):\n \"\"\"\n :type board: List[str]\n :rtype: bool\n \"\"\"\n # Rules\n # Atmost 5 X and 4 Os\n # At any step we have either equal X and Os or 1 extra X than O\n # Either X or O can be in a winning position at any time\n\n game=''.join(board)\n winning_moves=[[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],[2,5,8],[0,4,8],[2,4,6]]\n\n # Check for X and O count\n x_count=game.count('X')\n o_count=game.count('O')\n\n if(o_count>x_count or x_count-o_count>1):\n return False\n \n x_win=False\n o_win=False\n \n for move in winning_moves:\n if(game[move[0]]==game[move[1]]==game[move[2]]=='X'):\n x_win=True\n elif(game[move[0]]==game[move[1]]==game[move[2]]=='O'):\n o_win=True\n \n if(x_win and o_win):\n return False\n if(x_win):\n return x_count-o_count==1\n if(o_win):\n return x_count==o_count\n\n\n return True\n \ns=Solution()\nprint(s.validTicTacToe([\"OOO\",\"OXX\",\"XX \"]))\n","sub_path":"valid-tic-tac-toe-state.py","file_name":"valid-tic-tac-toe-state.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"340339063","text":"\"\"\"Author: Jabari-Aman Delemore\"\"\"\n\nimport tkinter\nfrom tkinter import ttk\nimport mqtt_remote_method_calls as com\nimport robot_controller as robo\n\n\ndef main():\n mqtt_client = com.MqttClient()\n mqtt_client.connect_to_ev3()\n\n root = tkinter.Tk()\n root.title(\"MQTT Remote\")\n\n main_frame = ttk.Frame(root, padding=20, relief='raised')\n main_frame.grid()\n # Left Speed Value\n left_speed_label = ttk.Label(main_frame, text=\"Left\")\n left_speed_label.grid(row=0, column=0)\n left_speed_entry = ttk.Entry(main_frame, width=8)\n left_speed_entry.insert(0, \"800\")\n left_speed_entry.grid(row=1, column=0)\n # Right Speed Value\n right_speed_label = ttk.Label(main_frame, text=\"Right\")\n right_speed_label.grid(row=0, column=2)\n right_speed_entry = ttk.Entry(main_frame, width=8, justify=tkinter.RIGHT)\n right_speed_entry.insert(0, \"800\")\n right_speed_entry.grid(row=1, column=2)\n # Forward\n forward_button = ttk.Button(main_frame, text=\"Forward\")\n forward_button.grid(row=2, column=1)\n forward_button['command'] = lambda: forward(mqtt_client, left_speed_entry, right_speed_entry)\n root.bind('', lambda event: forward(mqtt_client, left_speed_entry, right_speed_entry))\n # Left\n left_button = 
ttk.Button(main_frame, text=\"Left\")\n left_button.grid(row=3, column=0)\n left_button['command'] = lambda: left(mqtt_client, left_speed_entry, right_speed_entry)\n root.bind('', lambda event: left(mqtt_client, left_speed_entry, right_speed_entry))\n # Stop\n stop_button = ttk.Button(main_frame, text=\"Stop\")\n stop_button.grid(row=3, column=1)\n stop_button['command'] = lambda: stop(mqtt_client, left_speed_entry, right_speed_entry)\n root.bind('', lambda event: stop(mqtt_client, left_speed_entry, right_speed_entry))\n # Right\n right_button = ttk.Button(main_frame, text=\"Right\")\n right_button.grid(row=3, column=2)\n right_button['command'] = lambda: right(mqtt_client, left_speed_entry, right_speed_entry)\n root.bind('', lambda event: right(mqtt_client, left_speed_entry, right_speed_entry))\n # Backward\n back_button = ttk.Button(main_frame, text=\"Back\")\n back_button.grid(row=4, column=1)\n back_button['command'] = lambda: backwards(mqtt_client, left_speed_entry, right_speed_entry)\n root.bind('', lambda event: backwards(mqtt_client, left_speed_entry, right_speed_entry))\n # Arm Up\n up_button = ttk.Button(main_frame, text=\"Up\")\n up_button.grid(row=5, column=0)\n up_button['command'] = lambda: send_up(mqtt_client)\n root.bind('', lambda event: send_up(mqtt_client))\n # Arm Down\n down_button = ttk.Button(main_frame, text=\"Down\")\n down_button.grid(row=6, column=0)\n down_button['command'] = lambda: send_down(mqtt_client)\n root.bind('', lambda event: send_down(mqtt_client))\n\n # Buttons for quit and exit\n q_button = ttk.Button(main_frame, text=\"Quit\")\n q_button.grid(row=5, column=2)\n q_button['command'] = (lambda: quit_program(mqtt_client, False))\n\n e_button = ttk.Button(main_frame, text=\"Exit\")\n e_button.grid(row=6, column=2)\n e_button['command'] = (lambda: quit_program(mqtt_client, True))\n\n # Find Ball\n f_button = ttk.Button(main_frame, text=\"Find Ball\")\n f_button.grid(row=7, column=2)\n f_button['command'] = (lambda: find_ball(mqtt_client))\n\n # Say Goal\n g_button = ttk.Button(main_frame, text=\"Goal\")\n g_button.grid(row=7, column=1)\n g_button['command'] = (lambda: goal(mqtt_client))\n\n g_button = ttk.Button(main_frame, text=\"Obtain Ball\")\n g_button.grid(row=7, column=0)\n g_button['command'] = (lambda: grab_ball(mqtt_client))\n\n root.mainloop()\n\n\n# Tkinter Functions\ndef forward(mqtt_client, left_speed_entry, right_speed_entry):\n left_speed_entry.get()\n print(left_speed_entry.get())\n right_speed_entry.get()\n print(right_speed_entry.get())\n print(\"I'm reving up!\")\n mqtt_client.send_message(\"forward_to_goal\", (int(left_speed_entry.get()), int(right_speed_entry.get())))\n\n\ndef left(mqtt_client, left_speed_entry, right_speed_entry):\n left_speed_entry.get()\n print(left_speed_entry.get())\n right_speed_entry.get()\n print(right_speed_entry.get())\n print(\"I'm reving left!\")\n mqtt_client.send_message(\"go_left\", (int((left_speed_entry.get())), int(right_speed_entry.get())))\n\n\ndef right(mqtt_client, left_speed_entry, right_speed_entry):\n left_speed_entry.get()\n print(left_speed_entry.get())\n right_speed_entry.get()\n print(right_speed_entry.get())\n print(\"I'm reving right!\")\n mqtt_client.send_message(\"go_right\", (int(left_speed_entry.get()), int((right_speed_entry.get()))))\n\n\ndef stop(mqtt_client, left_speed_entry, right_speed_entry):\n left_speed_entry.get()\n print(left_speed_entry.get())\n right_speed_entry.get()\n print(right_speed_entry.get())\n print(\"I'm stopping!\")\n mqtt_client.send_message(\"stop\", 
(int(left_speed_entry.get()), int(right_speed_entry.get())))\n\n\ndef backwards(mqtt_client, left_speed_entry, right_speed_entry):\n left_speed_entry.get()\n print(left_speed_entry.get())\n right_speed_entry.get()\n print(right_speed_entry.get())\n print(\"I'm backing up!\")\n mqtt_client.send_message(\"go_backward\", (int((left_speed_entry.get())), int((right_speed_entry.get()))))\n\n\ndef send_up(mqtt_client):\n print(\"arm_up\")\n mqtt_client.send_message(\"arm_up\")\n\n\ndef send_down(mqtt_client):\n print(\"arm_down\")\n mqtt_client.send_message(\"arm_down\")\n\n\ndef find_ball(mqtt_client):\n print(\"Finding Ball\")\n mqtt_client.send_message(\"ball_finder\")\n\n\ndef goal(mqtt_client):\n print(\"Score\")\n mqtt_client.send_message(\"say_goal\")\n\n\ndef grab_ball(mqtt_client):\n print(\"Obtaining the ball.\")\n mqtt_client.send_message(\"ball_obtain\")\n\n\n# Quit and Exit button callbacks\ndef quit_program(mqtt_client, shutdown_ev3):\n if shutdown_ev3:\n print(\"shutdown\")\n mqtt_client.send_message(\"shutdown\")\n mqtt_client.close()\n exit()\n\n\nmain()\n","sub_path":"projects/delemojw/pc_project.py","file_name":"pc_project.py","file_ext":"py","file_size_in_byte":5986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"280500936","text":"class Resume(object):\n def __init__(self,request):\n self.settings = request.registry.settings\n self.collection = request.db['resume']\n self.collection_school=request.db['school']\n self.collection_college=request.db['graduate']\n self.collection_project=request.db['project']\n self.collection_employment=request.db['employment']\n self.collection_skill_set=request.db['skill_set']\n self.collection_skill=request.db['skill']\n self.collection_school_set=request.db['school_set']\n self.collection_college_set=request.db['graduate_set']\n self.collection_employment_set=request.db['employment_set']\n self.collection_users=request.db['users']\n\n def resumeread(self,uname):\n user=self.collection_users.find_one({'username':uname})\n fname=user['first_name']\n lname=user['last_name']\n full_name=fname+\" \"+lname\n res=self.collection.find_one({'username':uname})\n #print res\n #check_address=Resume.get('address',None)\n try:\n\t email=res['email_id']\n except TypeError:\n email=\"enter your email_id\"\t\t\n except KeyError:\n email=\"enter your email_id\"\t\n try:\n Address=res['address']\n except TypeError:\n Address='Enter your address'\n except KeyError:\n Address='Enter your address'\n schools=[]\n colleges=[]\n name=[]\n d_o_j=[]\n d_o_l=[]\n place=[]\n m_s=[]\n o_f=[]\n no_of_p=0 #school_p_tag\n try:\n schools=res['school']\n except TypeError:\n schools=[]\n i=0\n for school in schools:\n school_detail=self.collection_school.find_one({'sid':school})\n schid=int(school_detail[\"schid\"])\n sch_detail=self.collection_school_set.find_one({'schid':schid})\n name.append(sch_detail['name_of_school'])\n d_o_j.append(school_detail['date_of_joining'])\n d_o_l.append(school_detail['date_of_leaving'])\n place.append(sch_detail['place'])\n #print place\n m_s.append(school_detail['marks_secured'])\n o_f.append(school_detail['out_of'])\n i=i+1\n no_of_p=i;\n degree=[]\n course=[]\n name_coll=[]\n place_coll=[]\n d_o_j_coll=[]\n d_o_l_coll=[]\n project_ids=[]\n m_s_coll=[]\n o_f_coll=[]\n no_of_pc=0 #college_p_tag\n try:\n colleges=res['college']\n except KeyError:\n colleges=[]\n except TypeError:\n colleges=[]\n i=0\n for college in colleges:\n 
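The MQTT remote record above routes every command through a course-specific com.MqttClient wrapper whose internals are not shown here. With the plain paho-mqtt package (1.x-style API) the publish step would look roughly like the sketch below; the broker host, topic, and payload format are placeholders, not taken from the record.

import paho.mqtt.client as mqtt

client = mqtt.Client()
client.connect("broker.example.com")               # placeholder broker
client.publish("robot/drive", "forward:800:800")   # placeholder topic/payload
client.disconnect()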
college_detail=self.collection_college.find_one({'gid':college})\n colid=int(college_detail[\"colid\"])\n colg_detail=self.collection_college_set.find_one({'colid':colid})\n degree.append(college_detail['degree'])\n course.append(college_detail['course'])\n name_coll.append(colg_detail['name_of_college'])\n place_coll.append(colg_detail['place'])\n d_o_j_coll.append(college_detail['date_of_joining'])\n d_o_l_coll.append(college_detail['date_of_leaving'])\n m_s_coll.append(college_detail['marks_secured'])\n o_f_coll.append(college_detail['out_of'])\n i=i+1\n no_of_pc=i\n projects=[]\n pro_title=[]\n pro_description=[]\n pro_members=[]\n pro_publications=[]\n pro_from=[]\n pro_to=[]\n pro_links=[]\n no_of_pro=0 #project_p_tag\n try:\n projects=res['project']\n except KeyError:\n projects=[]\n except TypeError:\n projects=[]\n i=0\n for project in projects:\n project_detail=self.collection_project.find_one({'pid':project})\n pro_title.append(project_detail['title'])\n pro_description.append(project_detail['description'])\n pro_members.append(project_detail['members'])\n pro_publications.append(project_detail['publications'])\n pro_from.append(project_detail['from'])\n pro_to.append(project_detail['to'])\n pro_links.append(project_detail['links'])\n i=i+1\n no_of_pro=i\n employments=[]\n name_company=[]\n place_company=[]\n from_company=[]\n to_company=[]\n pos_company=[]\n no_of_emp=0 #employment_p_tag\n try:\n employments=res['employment']\n except KeyError:\n employments=[]\n except TypeError:\n employments=[]\n i=0\n for employment in employments:\n employment_detail=self.collection_employment.find_one({'eid':employment})\n cmpid=int(employment_detail[\"cmpid\"])\n emp_detail=self.collection_employment_set.find_one({'cmpid':cmpid})\n name_company.append(emp_detail['name_of_company'])\n place_company.append(emp_detail['place'])\n from_company.append(employment_detail['from'])\n to_company.append(employment_detail['to'])\n pos_company.append(employment_detail['position'])\n i=i+1\n no_of_emp=i\n #print no_of_emp\n skills=[]\n name_skill=[]\n level_skill=[]\n no_of_skill=0\n i=0\n try: \n skills=res['skill']\n except KeyError:\n skills=[]\n except TypeError:\n skills=[]\n for skill in skills:\n skill_detail=self.collection_skill.find_one({'skl_id':skill})\n sk_id=int(skill_detail[\"sk_id\"])\n name_of_skill_dict=self.collection_skill_set.find_one({'skid':sk_id})\n name_of_skill=name_of_skill_dict[\"name_of_skill\"]\n name_skill.append(name_of_skill)\n level_skill.append(skill_detail[\"level_skill\"])\n i=i+1\n no_of_skill=i\n return {'email':email,'full_name':full_name,'address':Address,'username':uname,'no_of_p':no_of_p,'name':name,'d_o_j':d_o_j,'d_o_l':d_o_l,'place':place,'m_s':m_s,'o_f':o_f,\n 'no_of_pc':no_of_pc,'degree':degree,'course':course,'name_coll':name_coll,'place_coll':place_coll,'d_o_j_coll':d_o_j_coll,'d_o_l_coll':d_o_l_coll,\n 'm_s_coll':m_s_coll,'o_f_coll':o_f_coll,'no_of_pro':no_of_pro,'project_title':pro_title,'project_desc':pro_description,'project_mem':pro_members, 'project_pub':pro_publications,'project_from':pro_from,'project_to':pro_to,'project_link':pro_links,'name_company':name_company,'place_company':place_company,\n 
'from_company':from_company,'to_company':to_company,'pos_company':pos_company,'no_of_emp':no_of_emp,'no_of_skill':no_of_skill,'name_skill':name_skill,\"level_skill\":level_skill}\n","sub_path":"neuron/utilities/resume.py","file_name":"resume.py","file_ext":"py","file_size_in_byte":6146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"7589772","text":"from django.urls import path, include\nfrom voca.views import *\n\napp_name='voca'\n\nurlpatterns = [\n path('main/', mainview.as_view(), name='main'),\n path('upload/', uploadview.as_view(), name='upload'),\n path('upload/process/', process, name='process' )\n #path('', views.main, name='main'),\n]\n","sub_path":"voca/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"34464750","text":"# Uses python3\n\"\"\"\nTask. Compute the last digit of 𝐹_0^2 + 𝐹_1^2 + · · · + 𝐹_𝑛^2 .\n\nInput Format. Integer 𝑛.\n\nConstraints. 0 ≤ 𝑛 ≤ 10^14 .\n\nOutput Format. The last digit of 𝐹_0^2 + 𝐹_1^2 + · · · + 𝐹_𝑛^2 .\n\"\"\"\n\n\ndef get_pisano_period(m):\n a = 0\n b = 1\n for i in range(6*m): # the Pisano period for modulus m is at most 6 * m long\n c = (a + b) % m\n a = b\n b = c\n if a == 0 and b == 1:\n return i+1\n\n\ndef fibonacci_sum_squares(n):\n n = n % get_pisano_period(10)\n if n <= 1:\n return n\n\n previous1 = 0\n current1 = 1\n\n previous2 = 0\n current2 = 1\n\n for _ in range(n - 1):\n previous1, current1 = current1, (previous1 + current1) % 10\n\n for _ in range(n):\n previous2, current2 = current2, (previous2 + current2) % 10\n\n return current1 * current2 % 10\n\n\nif __name__ == '__main__':\n n = int(input())\n print(fibonacci_sum_squares(n))\n","sub_path":"C1W2 Algorithmic Warm-up/8_Last_Digit_of_the_Sum_of_Squares_of_Fibonacci_Numbers.py","file_name":"8_Last_Digit_of_the_Sum_of_Squares_of_Fibonacci_Numbers.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"90839328","text":"#!/usr/bin/env python3\nfrom command import addCommand\nfrom utility import compareValueInList\n\n\ndef mergeSortInPlaceCall(lst, commandList):\n \"\"\"\n Call the in place merge sort\n\n Input: @lst: List of integers that we need to sort.\n @commandList: List. The list of commands that the GUI will run on\n \"\"\"\n mergeSortInPlace(lst, 0, len(lst), commandList)\n\n\ndef mergeSortInPlace(lst, start, end, commandList):\n \"\"\"\n In-place Merge Sort\n\n Input: @lst: list. The list we need to sort.\n @start: integer. The starting index of the part.\n @end: integer. The ending index of the part.\n @commandList: List. The list of commands that the GUI will run on\n \"\"\"\n if start < end - 1:\n # Define the split point\n mid = start + (end - start) // 2\n addCommand(commandList, \"Split\", start, mid, end)\n\n # Using merge sort on the two halves.\n mergeSortInPlace(lst, start, mid, commandList)\n mergeSortInPlace(lst, mid, end, commandList)\n\n # After that, merge them\n mergeInPlace(lst, start, end, mid, commandList)\n print(*lst[start:end])\n\n\ndef mergeInPlace(lst, start, end, mid, commandList):\n \"\"\"\n Merge the two parts of the list.\n\n Input: @lst: list. The list we need to sort.\n @start: integer. The starting index of the part.\n @end: integer. The next index after the end of the part\n @mid: integer. The split point between the two parts.\n @commandList: List. 
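fibonacci_sum_squares in the record above reduces n modulo the Pisano period of 10 and then returns current1 * current2 % 10, that is, F_n * F_{n+1} mod 10. This works because of the identity F_0^2 + F_1^2 + ... + F_n^2 = F_n * F_{n+1}. A quick cross-check:

def fib(n):
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a

n = 7
lhs = sum(fib(i) ** 2 for i in range(n + 1)) % 10
rhs = fib(n) * fib(n + 1) % 10
assert lhs == rhs  # both are 3, since 273 = 13 * 21
print(lhs, rhs)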
The list of commands that the GUI will run on.\n \"\"\"\n # Return if this part of the list only has 1 element.\n if start >= end - 1:\n return\n\n addCommand(commandList, \"UpdateStatus\",\n [index for index in range(len(lst))\n if index < start or index >= end],\n \"locked\")\n\n # Mark the starting index of the second part. It will start at the split\n # point.\n start2 = mid\n if compareValueInList(lst, commandList, start2 - 1, start2,\n lst[start2 - 1], lst[start2],\n \"lst[mid - 1] <= lst[mid]?\"):\n addCommand(commandList, \"UpdateStatus\",\n [index for index in range(len(lst))\n if index < start or index >= end - 1],\n \"normal\")\n return\n addCommand(commandList, \"UpdateStatus\", [start2, start2 - 1], \"normal\")\n # Start rearranging the elements between the two parts until one of them\n # meet their end.\n while start < mid and start2 < end:\n start, start2, mid = rearrangeInPlace(lst, start, start2,\n mid, commandList)\n addCommand(commandList, \"UpdateStatus\",\n [index for index in range(len(lst))\n if index < start or index >= end - 1],\n \"normal\")\n\n\ndef rearrangeInPlace(lst, start, start2, mid, commandList):\n \"\"\"\n Rearrange the two elements inside different parts of the list\n\n Input: @start: integer. The marker for current index of the first part.\n Which is the index of the 1st element\n @start2: integer. The marker for current index of the second part.\n Which is the index of the 2nd element.\n @mid: integer. The split point of the two parts that these 2\n elements are in.\n @commandList: List. The list of commands that the GUI will run on.\n\n Ouput: @start: The next index that needs to be checked for the first part\n @start2: The next index that needs to be checked for the second part\n @mid: The new split point of the two parts\n \"\"\"\n # Compare the two elements, if the 1st one is greater, move to the next\n # element of the 1st part.\n if compareValueInList(lst, commandList, start, start2, lst[start],\n lst[start2], \"lst[left] <= lst[right]?\"):\n addCommand(commandList, \"UpdateStatus\", [start, start2],\n \"normal\")\n start += 1\n\n # Else\n else:\n # Store the current index and value of the 2nd element\n value, index = lst[start2], start2\n addCommand(commandList, \"UpdateStatus\", [start],\n \"normal\")\n addCommand(commandList, \"Shift\", start2, start)\n\n # Shift all elements between the 1st and the 2nd to the right.\n # (Including the 1st, but not the 2nd)\n while index > start:\n lst[index] = lst[index - 1]\n index -= 1\n\n # Set the index and value of the current 1st element to the stored\n # index and value\n lst[start] = value\n\n # Move the marker to the next element, as well as push the split point\n # between two halves to the right by 1.\n start += 1\n mid += 1\n start2 += 1\n return start, start2, mid\n","sub_path":"sort_merge_inplace.py","file_name":"sort_merge_inplace.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"572725120","text":"import numpy as np\n\n## define training data\n#X = np.array([[1.0,2104.0,5.0,1.0,45.0],[1.0,1416.0,3.0,2.0,40.0],[1.0,1534.0,3.0,2.0,30.0],[1.0,852.0,2.0,1.0,36.0]])\nX = np.array([[1.0,2104.0,5.0,1.0],[1.0,1416.0,3.0,2.0],[1.0,1534.0,3.0,2.0],[1.0,852.0,2.0,1.0]])\nY = np.array([460.0,232.0,315.0,178.0])\n\nprint(X)\n\n## scaling\nXT = X.T\nfor h in range(len(XT)):\n x = XT[h]\n mean = np.mean(x)\n max = x[np.where(np.amax(x) == x)[0][0]]\n min = x[np.where(np.amin(x) == x)[0][0]]\n r = (max - min) 
/ 2\n if r != 0:\n XT[h] = (XT[h] - mean) / r\n\n## gradient descent\nm = len(X)\nn = len(X[0])\nth = np.zeros(n)\n\nalpha = 0.01\nchange = 1.0\nsteps = 0\n\n#while change > 0.00001:\nfor k in range(300000):\n change = 0.0\n steps += 1\n\n for i in range(m):\n x = X[i]\n y = Y[i]\n for j in range(n):\n theta = th[j]\n j_th = (1/(2*m)) * (np.dot(x,th.T)-y) * x[j]\n new_theta = theta - alpha * j_th\n change += new_theta - theta\n th[j] = new_theta\n\n print(steps,change, end='\\r')\n\n# normal equation\nA = np.dot(X.T,X)\nth_n = np.dot(np.dot(np.linalg.inv(A),X.T),Y)\n\n# Gradient descent (estimation by gradient descent)\nprint(\"Closed test in GD:\",np.dot(X,th.T))\n# Normal Equation? (compute the coefficients from matrix products)\nprint(\"Closed test in NE:\",np.dot(X,th_n.T))\n# Gold (the correct answer?)\nprint(\"Closed test Gold: \",Y)\n\n# the coefficient vectors before taking the product\nprint(th)\nprint(th_n)\n\n","sub_path":"3_linear_regression/sample/linear-regression.py","file_name":"linear-regression.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"607206008","text":"ALL_POS = ['QB', 'RB', 'WR', 'TE', 'DST']\nALL_POS_TEAM = ['QB', 'RB1', 'RB2',\n 'WR1', 'WR2', 'WR3', 'FLEX',\n 'TE', 'DST']\n\nCOMBO_TEAM_LIMITS_NFL = []\nALL_NFL_TEAMS = [\n 'GB',\n 'MIN',\n 'MIA',\n 'CAR',\n 'ATL',\n 'OAK',\n 'CIN',\n 'NYJ',\n 'DEN',\n 'DET',\n 'BAL',\n 'NYG',\n 'TEN',\n 'NO',\n 'DAL',\n 'NE',\n 'SEA',\n 'CLE',\n 'TB',\n 'PIT',\n 'CHI',\n 'HOU',\n 'WAS',\n 'JAX',\n 'KC',\n 'PHI',\n 'BUF',\n 'IND',\n 'ARI',\n 'SF',\n 'LA',\n 'D/ST',\n 'SD']\n\nfor team in ALL_NFL_TEAMS:\n COMBO_TEAM_LIMITS_NFL.append([team, 0, 1])\n\nSALARY_CAP = 50000\n\nROSTER_SIZE = {\n 'NFL': 9,\n 'NBA': 8\n}\n\n\ndef get_nfl_positions(te_upper=2):\n return [\n [\"QB\", 1, 1],\n [\"RB\", 2, 3],\n [\"WR\", 3, 4],\n [\"TE\", 1, te_upper],\n [\"DST\", 1, 1]\n ]\n\nPOSITIONS = {\n 'NBA': [\n [\"PG\", 1, 3],\n [\"SG\", 1, 3],\n [\"SF\", 1, 3],\n [\"PF\", 1, 3],\n [\"C\", 1, 2]\n ],\n 'NFL': get_nfl_positions()\n}\n\nDUO_TYPE = {\n 'wr': [\n [\"QB\", 1, 1],\n [\"WR\", 1, 1]\n ],\n 'te': [\n [\"QB\", 1, 1],\n [\"TE\", 1, 1]\n ]\n}\n\nDK_TO_NFL_DRAFTKINGS = {\n 'Panthers': 'Carolina Panthers',\n 'Buccaneers': 'Tampa Bay Buccaneers',\n 'Dolphins': 'Miami Dolphins',\n 'Bears': 'Chicago Bears',\n 'Raiders': 'Oakland Raiders',\n 'Patriots': 'New England Patriots',\n 'Vikings': 'Minnesota Vikings',\n 'Eagles': 'Philadelphia Eagles',\n '49ers': 'San Francisco 49ers',\n 'Bengals': 'Cincinnati Bengals',\n 'Bills': 'Buffalo Bills',\n 'Broncos': 'Denver Broncos',\n 'Browns': 'Cleveland Browns',\n 'Cardinals': 'Arizona Cardinals',\n 'Chargers': 'San Diego Chargers',\n 'Chiefs': 'Kansas City Chiefs',\n 'Colts': 'Indianapolis Colts',\n 'Cowboys': 'Dallas Cowboys',\n 'Falcons': 'Atlanta Falcons',\n 'Giants': 'New York Giants',\n 'Jaguars': 'Jacksonville Jaguars',\n 'Jets': 'New York Jets',\n 'Lions': 'Detroit Lions',\n 'Packers': 'Green Bay Packers',\n 'Rams': 'Los Angeles Rams',\n 'Ravens': 'Baltimore Ravens',\n 'Redskins': 'Washington Redskins',\n 'Saints': 'New Orleans Saints',\n 'Seahawks': 'Seattle Seahawks',\n 'Steelers': 'Pittsburgh Steelers',\n 'Texans': 'Houston Texans',\n 'Titans': 'Tennessee Titans'\n}\n","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"282898227","text":"# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space 
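One caveat on the normal-equation branch of linear-regression.py above: forming np.linalg.inv(X.T @ X) explicitly is numerically fragile when the design matrix is ill-conditioned. np.linalg.lstsq solves the same least-squares problem without the explicit inverse. A minimal sketch on a two-column subset of the record's toy data:

import numpy as np

X = np.array([[1.0, 2104.0], [1.0, 1416.0], [1.0, 1534.0], [1.0, 852.0]])
Y = np.array([460.0, 232.0, 315.0, 178.0])

# Same minimiser as np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), Y),
# but computed via an orthogonal factorisation instead of an inverse.
theta, residuals, rank, singular_values = np.linalg.lstsq(X, Y, rcond=None)
print(theta)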
Administration.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nUnit Tests for AMQP-based remote procedure calls\n\"\"\"\n\nimport logging\n\nfrom eventlet import greenthread\nimport mock\nfrom oslo.config import cfg\n\nfrom openstack.common.rpc import amqp as rpc_amqp\nfrom openstack.common.rpc import common as rpc_common\nfrom tests.unit.rpc import common\n\n\nFLAGS = cfg.CONF\nLOG = logging.getLogger(__name__)\n\n\nclass MyException(Exception):\n pass\n\n\nclass BaseRpcAMQPTestCase(common.BaseRpcTestCase):\n \"\"\"Base test class for all AMQP-based RPC tests.\"\"\"\n def test_proxycallback_handles_exceptions(self):\n \"\"\"Make sure exceptions unpacking messages don't cause hangs.\"\"\"\n if not self.rpc:\n self.skipTest('rpc driver not available.')\n\n orig_unpack = rpc_amqp.unpack_context\n\n info = {'unpacked': False}\n\n def fake_unpack_context(*args, **kwargs):\n info['unpacked'] = True\n raise Exception('moo')\n\n self.stubs.Set(rpc_amqp, 'unpack_context', fake_unpack_context)\n\n value = 41\n self.rpc.cast(FLAGS, self.context, self.topic,\n {\"method\": \"echo\", \"args\": {\"value\": value}})\n\n # Wait for the cast to complete.\n for x in range(50):\n if info['unpacked']:\n break\n greenthread.sleep(0.1)\n else:\n self.fail(\"Timeout waiting for message to be consumed\")\n\n # Now see if we get a response even though we raised an\n # exception for the cast above.\n self.stubs.Set(rpc_amqp, 'unpack_context', orig_unpack)\n\n value = 42\n result = self.rpc.call(FLAGS, self.context, self.topic,\n {\"method\": \"echo\",\n \"args\": {\"value\": value}})\n self.assertEqual(value, result)\n\n def test_notification_envelope(self):\n raw_msg = {'a': 'b'}\n self.test_msg = None\n\n def fake_notify_send(_conn, topic, msg):\n self.test_msg = msg\n\n self.stubs.Set(self.rpc.Connection, 'notify_send', fake_notify_send)\n\n self.rpc.notify(FLAGS, self.context, 'notifications.info', raw_msg,\n envelope=False)\n self.assertEqual(self.test_msg, raw_msg)\n\n # Now turn it on for notifications\n self.rpc.notify(FLAGS, self.context, 'notifications.info', raw_msg,\n envelope=True)\n # Make sure the msg envelope was applied\n self.assertTrue('oslo.version' in self.test_msg)\n\n def test_single_reply_queue_caller_on(\n self, single_reply_queue_for_callee_off=False):\n if not self.rpc:\n self.skipTest('rpc driver not available.')\n\n self.orig_unpack_context = rpc_amqp.unpack_context\n\n def my_unpack_context(conf, msg):\n self.assertTrue('_reply_q' in msg)\n if single_reply_queue_for_callee_off:\n # Simulate a downlevel RPC callee by removing the reply_q.\n # This will make the callee think it got a request\n # from a downlevel caller and thus respond in a downlevel\n # way. 
In fact we are testing an uplevel caller.\n msg.pop('_reply_q')\n return self.orig_unpack_context(conf, msg)\n\n self.stubs.Set(rpc_amqp, 'unpack_context', my_unpack_context)\n\n self.ReplyProxy_was_called = False\n\n class MyReplyProxy(rpc_amqp.ReplyProxy):\n def _process_data(myself, message_data):\n # with open('mylog', 'a') as f:\n # f.write('my_process_data: ' + str(message_data) + '\\n')\n self.assertTrue('_msg_id' in message_data)\n self.ReplyProxy_was_called = True\n super(MyReplyProxy, myself)._process_data(message_data)\n\n self.orig_reply_proxy = self.conn.pool.reply_proxy\n self.conn.pool.reply_proxy = MyReplyProxy(FLAGS, self.conn.pool)\n\n value = 42\n result = None\n try:\n result = self.rpc.call(\n FLAGS, self.context, self.topic,\n {\"method\": \"echo\", \"args\": {\"value\": value}},\n timeout=1)\n except rpc_common.Timeout:\n # expect a timeout in this case\n if single_reply_queue_for_callee_off:\n result = 42\n\n self.assertEqual(value, result)\n if single_reply_queue_for_callee_off:\n self.assertFalse(self.ReplyProxy_was_called)\n else:\n self.assertTrue(self.ReplyProxy_was_called)\n\n self.stubs.UnsetAll()\n self.conn.pool.reply_proxy = self.orig_reply_proxy\n\n def test_single_reply_queue_caller_on_callee_off(self):\n self.test_single_reply_queue_caller_on(\n single_reply_queue_for_callee_off=True)\n\n def test_duplicate_message_check(self):\n \"\"\"Test sending *not-dict* to a topic exchange/queue.\"\"\"\n\n conn = self.rpc.create_connection(FLAGS)\n message = {'args': 'topic test message', '_unique_id': 'aaaabbbbcccc'}\n\n self.received_message = None\n cache = rpc_amqp._MsgIdCache()\n self.exc_raised = False\n\n def _callback(message):\n try:\n cache.check_duplicate_message(message)\n except rpc_common.DuplicateMessageError:\n self.exc_raised = True\n\n conn.declare_topic_consumer('a_topic', _callback)\n conn.topic_send('a_topic', rpc_common.serialize_msg(message))\n conn.topic_send('a_topic', rpc_common.serialize_msg(message))\n conn.consume(limit=2)\n conn.close()\n\n self.assertTrue(self.exc_raised)\n\n def test_context_dict_type_check(self):\n \"\"\"Test that context is handled properly depending on the type.\"\"\"\n fake_context = {'fake': 'context'}\n mock_msg = mock.MagicMock()\n rpc_amqp.pack_context(mock_msg, fake_context)\n\n # assert first arg in args was a dict type\n args = mock_msg.update.call_args[0]\n self.assertIsInstance(args[0], dict)\n\n def test_callback_wrapper_exception_no_wait(self):\n def my_callback(message, **kwargs):\n raise MyException(\"boom\")\n\n x = rpc_amqp.CallbackWrapper(FLAGS, my_callback, self.conn.pool,\n wait_for_consumers=False)\n try:\n x({'foo': 'blah'})\n except Exception:\n self.fail(\"Should not raise\")\n\n def test_callback_wrapper_exception_wait(self):\n def my_callback(message, **kwargs):\n raise MyException(\"boom\")\n\n x = rpc_amqp.CallbackWrapper(FLAGS, my_callback, self.conn.pool,\n wait_for_consumers=True)\n self.assertRaises(MyException, x, {'foo': 'blah'})\n\n def test_callback_wrapper_no_exception_wait(self):\n def my_callback(message, **kwargs):\n pass\n\n x = rpc_amqp.CallbackWrapper(FLAGS, my_callback, self.conn.pool,\n wait_for_consumers=True)\n try:\n x({'foo': 'blah'})\n except Exception:\n self.fail(\"Should not raise\")\n","sub_path":"tests/unit/rpc/amqp.py","file_name":"amqp.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"212239294","text":"'''\nQuickSort picks an element as pivot and partitions 
the given list around it.\nWe will choose our pivot randomly.\nWe will implement the quicksort function recursively. During each iteration our function will partition the array based on the randomly picked pivot:\n'''\n\n\nfrom random import randint\n\ndef quicksort(lst, start, end):\n if start < end:\n pivot = randint(start, end)\n # swap with the last element\n lst[end],lst[pivot] = lst[pivot],lst[end]\n # partition the list\n split = partition(lst, start, end)\n # sort both halves\n quicksort(lst, start, split-1)\n quicksort(lst, split+1, end)\n\ndef partition(lst, start, end):\n pivot_index = start-1\n for index in range(start, end):\n # compare with pivot\n if lst[index] < lst[end]:\n pivot_index = pivot_index + 1\n # swap\n lst[pivot_index],lst[index] = lst[index],lst[pivot_index]\n\n # swap with the last element\n lst[pivot_index+1],lst[end] = lst[end],lst[pivot_index+1]\n\n return pivot_index+1\n\n\nnums = [7,2,5,1,29,6,4,19,11]\n\nquicksort(nums,0,len(nums)-1)\n\nprint(nums)\n","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"69934782","text":"import random\nimport cocotb\nfrom cocotb.clock import Clock\nfrom cocotb.triggers import FallingEdge, RisingEdge, Timer\n\nimport png\nfrom datetime import datetime\n\ncodes_to_send = []\nasync def mouse(dut):\n while True:\n while len(codes_to_send) == 0:\n dut.mouse_clk_in = 1\n dut.mouse_data_in = 1\n await Timer(1, units=\"us\")\n\n code = codes_to_send.pop(0)\n \n start = 0\n parity = 0\n stop = 1\n\n num_ones = 0\n\n bits = []\n bits.append(start)\n for i in range(0, 8):\n bit = (code >> i) & 0x01\n bits.append(bit)\n num_ones += bit\n parity = num_ones % 2 == 0\n bits.append(parity)\n bits.append(stop)\n\n dut.mouse_clk_in = 1\n dut.mouse_data_in = 1\n await Timer(50, units=\"us\")\n\n i = 0\n for bit in bits:\n dut.mouse_clk_in = 1\n await Timer(20, units=\"us\")\n dut.mouse_data_in = bit\n await Timer(20, units=\"us\")\n dut.mouse_clk_in = 0\n await Timer(20, units=\"us\")\n await Timer(20, units=\"us\")\n\n dut.mouse_clk_in = 1\n dut.mouse_data_in = 1\n await Timer(200, units=\"us\")\n \nasync def mouse_tester(dut):\n cocotb.fork(mouse(dut))\n def send(code): codes_to_send.append(code)\n def move_mouse(xrel, yrel):\n yrel = -yrel;\n send(0b00001000 |\n ((xrel >> 31) << 4) |\n ((yrel >> 31) << 5));\n send(xrel & 0xFF)\n send(yrel & 0xFF)\n\n # await Timer(50, units=\"us\")\n move_mouse(5, 10)\n\nH_RES = 640\nV_RES = 480\n\ndef write_png(filename, screenbuffer):\n f = open(filename, 'wb')\n w = png.Writer(H_RES, V_RES, greyscale=False)\n w.write_array(f, screenbuffer)\n f.close()\n\n@cocotb.test()\nasync def test_wiggly_ic_1(dut):\n def info(fmt, *args):\n dut._log.info(\"%s\\t\" + fmt, datetime.now(), *args)\n\n # reset (need to trigger all clocks)\n dut.rst.value = 1\n dut.clk.value = 0\n dut.vga_clk_pix.value = 0\n await Timer(1, units=\"ns\")\n dut.clk.value = 1\n dut.vga_clk_pix.value = 1\n await Timer(1, units=\"ns\")\n dut.clk.value = 0\n dut.vga_clk_pix.value = 0\n await Timer(1, units=\"ns\")\n dut.rst.value = 0\n\n clk = Clock(dut.clk, 60, units=\"ns\") # 60ns period = 16.6MHz\n cocotb.fork(clk.start()) # Start the clock\n\n vga_clk_pix = Clock(dut.vga_clk_pix, 40, units=\"ns\") # 40ns period = 25MHz\n cocotb.fork(vga_clk_pix.start())\n\n info(\"hello\")\n\n await Timer(500, units=\"ns\") # wait a bit\n\n screenbuffer = [0]*(H_RES*V_RES*3)\n def index(x, y): return (y*H_RES 
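A property check for the quicksort record above, assuming its quicksort and partition functions are in scope: with a uniformly random pivot the expected recursion depth is O(log n), and for every input, including duplicates and the empty list, the result must agree with Python's built-in sorted.

import random

for _ in range(100):
    data = [random.randint(0, 50) for _ in range(random.randint(0, 30))]
    expected = sorted(data)
    quicksort(data, 0, len(data) - 1)
    assert data == expected
print("ok")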
+ x) * 3\n\n async def read_frame(frame_num):\n await RisingEdge(dut.vga_vsync)\n \n while True:\n await FallingEdge(dut.vga_clk_pix)\n if dut.vga_de.value == 1:\n i = (dut.vga_sy.value*H_RES + dut.vga_sx.value) * 3\n screenbuffer[i] = dut.vga_r.value << 6\n screenbuffer[i+1] = dut.vga_g.value << 6\n screenbuffer[i+2] = dut.vga_b.value << 6\n\n if dut.vga_vsync.value == 0:\n break\n\n info('frame %d', frame_num)\n write_png('frame' + str(frame_num) + '.png', screenbuffer)\n\n assert screenbuffer[index(2, 2)] == 0b11 << 6\n\n # blue cursor\n info(\"mouse_x=%d, mouse_y=%d\", dut.mouse_x.value, dut.mouse_y.value)\n cursor = index(dut.mouse_x.value + 2, dut.mouse_y.value + 2)\n assert screenbuffer[cursor] == 0b00 << 6\n assert screenbuffer[cursor + 1] == 0b00 << 6\n assert screenbuffer[cursor + 2] == 0b11 << 6\n\n await read_frame(0)\n\n cocotb.fork(mouse_tester(dut))\n # wait until mouse movement has registered\n await Timer(3000, units=\"us\")\n\n # now read next frame\n await read_frame(1)\n","sub_path":"test/test_wiggly_ic_1.py","file_name":"test_wiggly_ic_1.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"432540155","text":"# From\n# https://stackoverflow.com/questions/61626067/python-add-arbitrary-exif-data-to-image-usercomment-field\n\n# https://stackoverflow.com/a/63400376/54745\n\nfrom PIL import Image\nimport piexif\nimport pickle\n\nimage = Image.open(\"image.jpg\")\n\nexifData = image._getexif()\n\nif exifData is None:\n exifData = {}\n\n\ntags = {\n \"url_current\": \"https://stackoverflow.com/q/52729428/1846249\",\n \"contains_fish\": False,\n 3: 0.14159265358979323,\n}\n\ndata = pickle.dumps(tags)\nexifIfd = {\n piexif.ExifIFD.MakerNote: data,\n piexif.ExifIFD.UserComment: b\"UNICODE\\0\" + \"my message\".encode(\"utf-8\"),\n}\n\nexifDict = {\"0th\": {}, \"Exif\": exifIfd, \"1st\": {}, \"thumbnail\": None, \"GPS\": {}}\n\nexifDataBytes = piexif.dump(exifDict)\n\nimage.save(\"image_mod.jpg\", format=\"jpeg\", exif=exifDataBytes)\n","sub_path":"python/add-exif-data.py","file_name":"add-exif-data.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"171143230","text":"import sys\r\nimport warnings\r\nfrom math import log10\r\nimport random\r\nimport time\r\nimport numpy\r\nimport get_data_uai\r\nimport get_evidence_data\r\nimport get_pr\r\nimport helper\r\nimport sampling_VE\r\nimport sampling_VE_adaptive_proposal_distribution\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\narguments = list(sys.argv)\r\n\r\ntry:\r\n # Please give the actual directory of the files\r\n uai_file_name = str(arguments[1])\r\n evidence_file_name = str(arguments[2])\r\n pr_file_name = str(arguments[3])\r\n type_of_algorithm = str(arguments[4])\r\n w_cutset = int(arguments[5])\r\n num_samples = int(arguments[6])\r\nexcept:\r\n print(\"You have given less arguments\")\r\n print(\"The code to run the algorithm :-\")\r\n print(\r\n \"python main.py \")\r\n print(\"Example :-\")\r\n print(\"python main.py 1.uai 1.uai.evid 1.uai.PR -vec 3 50\")\r\n print(\"python main.py 1.uai 1.uai.evid 1.uai.PR -avec 3 50\")\r\n\r\ndef main():\r\n \"\"\"\r\n This is the main function which is used to run all the algorithms\r\n :return:\r\n \"\"\"\r\n log_actual_pr = log10(get_pr.get_pr(pr_file_name))\r\n if type_of_algorithm == \"-vec\":\r\n start_time_normal = time.time()\r\n num_of_var, cardinalities, num_of_cliques, 
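The piexif record above pickles a dict into MakerNote but never shows the read path. The reverse direction, under the same assumptions (and with the usual caveat that pickle.loads is only safe on data you wrote yourself), would look roughly like:

import pickle
import piexif

exif = piexif.load("image_mod.jpg")
raw = exif["Exif"][piexif.ExifIFD.MakerNote]
tags = pickle.loads(raw)  # safe here only because we produced this file
print(tags["url_current"], tags["contains_fish"])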
num_of_var_in_clique, var_in_clique, distribution_array = get_data_uai.get_uai_data(\r\n uai_file_name)\r\n evidence = get_evidence_data.get_evidence(evidence_file_name)\r\n var_in_clique, distribution_array = helper.instantiate(num_of_var_in_clique, evidence, cardinalities,\r\n var_in_clique, distribution_array)\r\n estimate = sampling_VE.sampling_VE(num_of_var, cardinalities, num_of_cliques, num_of_var_in_clique,\r\n var_in_clique, distribution_array, w_cutset, num_samples)\r\n time_normal = (time.time() - start_time_normal)\r\n print(\"The estimate for partition function or the probability of evidence is\", estimate)\r\n print(\"The time is\", time_normal)\r\n\r\n elif type_of_algorithm == \"-avec\":\r\n start_time_normal = time.time()\r\n num_of_var, cardinalities, num_of_cliques, num_of_var_in_clique, var_in_clique, distribution_array = get_data_uai.get_uai_data(\r\n uai_file_name)\r\n evidence = get_evidence_data.get_evidence(evidence_file_name)\r\n var_in_clique, distribution_array = helper.instantiate(num_of_var_in_clique, evidence, cardinalities,\r\n var_in_clique, distribution_array)\r\n estimate_2 = sampling_VE_adaptive_proposal_distribution.sampling_VE(num_of_var, cardinalities,\r\n num_of_cliques, num_of_var_in_clique,\r\n var_in_clique, distribution_array,\r\n w_cutset, num_samples)\r\n time_normal = (time.time() - start_time_normal)\r\n print(\"The estimate partition function or the probability of evidence is\", estimate_2)\r\n print(\"The time is\", time_normal)\r\n else:\r\n print(\"Please give correct algorithm name\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Sampling-based-Variable-Elimination-and-Conditioning/Sampling-based-Variable-Elimination-and-Conditioning-master/part_1.py","file_name":"part_1.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"133600111","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSistema Automatizado de Planificación Integral Comunal SAPIC\n\nCopyleft (@) 2017 CENDITEL nodo Mérida - Copyleft (@) 2017 CENDITEL nodo Mérida - https://planificacion.cenditel.gob.ve/trac/wiki/WikiStart#a5.-SistemaAutomatizadodePlanificaciónIntegralComunalSAPIC\n\"\"\"\n## @package explicacion_situacional.views\n#\n# Vistas correspondientes a la explicacion situacional\n# @author Ing. Erwin Paredes (eparedes at cenditel.gob.ve)\n# @author Centro Nacional de Desarrollo e Investigación en Tecnologías Libres\n# (CENDITEL) nodo Mérida - Venezuela\n# @copyright GNU Public License versión 2 (GPLv2)\n# @version 1.0\n\nimport time\nimport datetime\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse_lazy\n\nfrom explicacion_situacional.modelsEncuestas.modelsParticipacion import (\n RespuestaSino, RespuestaOpciones,\n RespuestaAbierta, RespuestaUbicacion\n )\n\nfrom django.views.generic import (\n TemplateView,\n UpdateView\n)\nfrom django.contrib import messages\nfrom utils.views import LoginRequeridoPerAuth\n\nfrom django.contrib.gis import forms\n\nfrom explicacion_situacional.modelsEncuestas.modelsConsultas import (\n Consulta,\n Opcion,\n)\n\n\n\nfrom .forms import (\n RespuestaSinoForm, RespuestaAbiertaForm ,\n RespuestaOpcionesForm\n)\n\n\n\ndef ParticipoCaracterizacionEconomica(request,pk):\n \"\"\"!\n Chequea la participacion del usuario en la caracterización economica de la comunidad\n\n @author Ing. Erwin Leonel P. 
(eparedes at cenditel.gob.ve)\n @copyright GNU Public License versión 2 (GPLv2)\n @date 30-005-2017\n @version 1.0.0\n \"\"\"\n user = request.user\n if(user and pk):\n respuesta_sino = RespuestaSino.objects.filter(pregunta__consulta=pk,user=user)\n respuesta_abierta = RespuestaAbierta.objects.filter(pregunta__consulta=pk,user=user)\n respuesta_opciones = RespuestaOpciones.objects.filter(opcion__pregunta__consulta=pk,user=user)\n if(respuesta_sino or respuesta_abierta or respuesta_opciones):\n return redirect('explicacion:caracterizacion_economica')\n return redirect('explicacion:participar_encuesta_economica',pk=2)\n else:\n return redirect('explicacion:explicacion_situacional')\n\n\ndef ModificarRespuesta(request):\n if request.method == \"POST\":\n id = request.POST[\"ID\"]\n respuesta = request.POST.get(\"respuesta\")\n registro = RespuestaSino.objects.get(id=id)\n registro.respuesta = respuesta\n registro.save()\n return redirect('explicacion:caracterizacion_economica')\n\n\nclass ModificarRespuestaView(LoginRequeridoPerAuth, TemplateView):\n \"\"\"!\n Clase que mustra el template y gestiona la vista para modificar una respuesta en una encuesta\n \n @author Manuel Zambrano \n @copyright GNU Public License versión 2 (GPLv2)\n @date 30-08-2018\n @version 1.0.0\n \"\"\"\n\n template_name = 'modificar.respuesta.html'\n group_required = [u\"Administradores\", u\"Voceros\", u\"Integrantes\"] \n\n\n def get(self,*arg,**kwargs): \n \"\"\"!\n Metodo que maneja las peticiones HTTP GET de la vista, Carga los valores iniciales al formulario\n\n @author Manuel Zambrano\n @copyright GNU Public License versión 2 (GPLv2)\n @date 30-08-2018\n @param self {object} Objeto que instancia la clase\n @param kwargs {object} Objeto que contiene las variables de la url\n @param arg {object} \n @return retorna los datos de contexto\n \"\"\"\n\n if kwargs['tipo'] == '1' :\n respuesta = RespuestaOpciones.objects.select_related().get(\n pk = kwargs['pk'],\n user=self.request.user\n )\n formulario = RespuestaOpcionesForm()\n formulario.fields['respuesta']._set_queryset(Opcion.objects.filter(\n pregunta=respuesta.opcion.pregunta\n ))\n formulario.fields['respuesta'].initial= respuesta.opcion\n \n\n if kwargs['tipo'] == '4' : #Hacer para cada tipo de respuesta8 \n respuesta = RespuestaSino.objects.select_related().get(\n pk = kwargs['pk'],\n user=self.request.user\n )\n if respuesta.respuesta == True:\n formulario = RespuestaSinoForm(initial={\n 'respuesta':respuesta.respuesta,\n })\n else:\n justificacion = RespuestaAbierta.objects.select_related().get(\n user = self.request.user, \n pregunta = respuesta.pregunta\n )\n formulario = RespuestaSinoForm(initial={\n 'respuesta':respuesta.respuesta,\n 'justificacion':justificacion.texto_respuesta,\n })\n\n if kwargs['tipo'] == '5' :\n respuesta = RespuestaAbierta.objects.select_related().get(\n pk = kwargs['pk'],\n user=self.request.user\n )\n formulario = RespuestaAbiertaForm(initial={\n 'respuesta':respuesta.texto_respuesta,\n })\n\n kwargs['formulario'] = formulario\n kwargs['o'] = respuesta\n\n\n return super(ModificarRespuestaView,self).get(self,**kwargs)\n\n def post(self,*arg,**kwargs):\n \"\"\"!\n Metodo que maneja las peticiones HTTP POST de la vista, Guarda la modificacion de la pregunta\n\n @author Manuel Zambrano\n @copyright GNU Public License versión 2 (GPLv2)\n @date 03-09-2018\n @param self {object} Objeto que instancia la clase\n @param kwargs {object} Objeto que contiene las variables de la url\n @param arg {object} \n @return Redirecciona a la tabla de 
ecuestas\n \"\"\"\n\n if kwargs['tipo'] == '1' :\n respuesta = RespuestaOpciones.objects.select_related().get(\n pk = kwargs['pk'],\n user=self.request.user\n )\n formulario = RespuestaOpcionesForm(self.request.POST,initial={\n 'respuesta':respuesta.opcion,\n })\n if formulario.is_valid() and formulario.has_changed():\n opcion = formulario.cleaned_data['respuesta']\n respuesta.opcion = opcion\n respuesta.save()\n\n if kwargs['tipo'] == '5':\n respuesta = RespuestaAbierta.objects.select_related().get(\n pk = kwargs['pk'], \n user=self.request.user\n )\n formulario = RespuestaAbiertaForm(self.request.POST, initial={\n 'respuesta':respuesta.texto_respuesta,\n })\n if formulario.is_valid() and formulario.has_changed():\n texto_respuesta = formulario.cleaned_data['respuesta']\n respuesta.texto_respuesta = texto_respuesta\n respuesta.save()\n\n if kwargs['tipo'] == '4':\n respuesta = RespuestaSino.objects.select_related().get(\n pk = kwargs['pk'],\n user = self.request.user\n )\n formulario = RespuestaSinoForm(self.request.POST) \n if formulario.is_valid():\n if formulario.cleaned_data['respuesta'] :\n respuesta.respuesta = formulario.cleaned_data['respuesta']\n respuesta.save()\n else: \n try:\n justificacion = RespuestaAbierta.objects.select_related().get(\n user = self.request.user, \n pregunta = respuesta.pregunta\n )\n texto_respuesta = formulario.cleaned_data['justificacion']\n justificacion.texto_respuesta = texto_respuesta\n justificacion.es_justificacion = True\n respuesta.respuesta = formulario.cleaned_data['respuesta']\n respuesta.save()\n justificacion.save()\n except:\n justificacion = RespuestaAbierta()\n justificacion.pregunta = respuesta.pregunta\n justificacion.texto_respuesta = formulario.cleaned_data['justificacion']\n justificacion.user = self.request.user\n justificacion.es_justificacion = True\n justificacion.save()\n respuesta.respuesta = formulario.cleaned_data['respuesta']\n respuesta.save()\n else : \n messages.error(self.request, 'Error el formulario \\\n No es Valido') #provicional, mientras se valida el formulario con js\n return redirect(self.request.META.get(\"HTTP_REFERER\"))\n\n","sub_path":"explicacion_situacional/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":10444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"433745893","text":"from cliente import Cliente\nfrom controller import *\n\n# Cria clientes\ncliente = Cliente(1550, '46618865859', 'Daniel', 4000.0)\ncliente2 = Cliente(2000, '12345678901234', 'Valemobi', 100000.0)\ncliente3 = Cliente(100, '43210987654321', 'Dev Inc', 50000.0)\n\nclientes = [cliente, cliente2, cliente3]\n\n# Conecta com banco\nconn = conecta_banco()\n\n# Insere clientes\nfor cliente in clientes:\n insere_cliente(conn, cliente)\n\n# Calcula média de acordo com o retorno da consulta\ncalcula_media(conn)\n\n# Imprime clientes\nimprime_clientes(conn)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"318951220","text":"import unittest\nfrom typing import List\nfrom pprint import pprint\nfrom collections import Counter\n\n\nclass Solution:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n # backtrack\n res = []\n counter = Counter(nums)\n\n def backtrack(cur):\n if len(cur) == len(nums):\n res.append(cur[:])\n\n for n in counter:\n if counter[n] == 0:\n continue\n counter[n] -= 1\n cur.append(n)\n 
backtrack(cur)\n cur.pop()\n counter[n] += 1\n\n backtrack([])\n return res\n\n def permuteUnique2(self, nums: List[int]) -> List[List[int]]:\n def sub(nums):\n if not nums:\n return {(), }\n\n res = set()\n for i, n in enumerate(nums):\n for perm in sub(nums[:i]+nums[i+1:]):\n res.add((n,)+perm)\n return res\n\n return list(list(num) for num in sub(nums))\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_case_1(self):\n sol = Solution()\n nums = [1, 1, 2]\n expected = [\n [1, 1, 2],\n [1, 2, 1],\n [2, 1, 1]\n ]\n self.assertCountEqual(sol.permuteUnique(nums), expected)\n\n # def test_edge_case_1(self):\n # s = Solution()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"Week_04/47_permutations_ii.py","file_name":"47_permutations_ii.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"123411403","text":"\"\"\"Some code to deal with atis data.\"\"\"\nimport collections\nimport glob\nimport os\nimport re\nimport sys\n\nIN_DIR = os.path.join(\n os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),\n 'data/atis/raw-oneshot')\nOUT_DIR = os.path.join(\n os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),\n 'data/atis/processed')\n\ndef read_examples(filename):\n examples = []\n utterance = None\n logical_form = None\n with open(filename) as f:\n for i, line in enumerate(f):\n if i % 3 == 0:\n utterance = line.strip()\n elif i % 3 == 1:\n logical_form = line.strip()\n examples.append((utterance, logical_form))\n return examples\n\ndef split_logical_form(lf):\n words = lf.split()\n\n # Insert spaces and obscure predicates\n replacements = [\n ('(', ' ( _'),\n (')', ' ) '),\n (':', ':_'),\n ]\n for r in replacements:\n lf = lf.replace(r[0], r[1])\n return ' '.join(lf.split())\n\ndef process(filename, stemmer=None, less_copy=False):\n print >> sys.stderr, 'Processing %s' % filename\n basename = os.path.basename(filename)\n domain = basename.split('.')[0]\n stage = basename.split('.')[1]\n \n in_data = read_examples(filename)\n out_data = []\n for (utterance, logical_form) in in_data:\n y = split_logical_form(logical_form)\n out_data.append((utterance, y))\n\n out_basename = '%s_%s.tsv' % (domain, stage)\n out_path = os.path.join(OUT_DIR, out_basename)\n with open(out_path, 'w') as f:\n for x, y in out_data:\n print >> f, '%s\\t%s' % (x, y)\n\ndef main():\n if not os.path.exists(OUT_DIR):\n os.makedirs(OUT_DIR)\n for filename in sorted(glob.glob(os.path.join(IN_DIR, 'atis.*'))):\n process(filename)\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/py/atis.py","file_name":"atis.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"47314913","text":"\"\"\" tests for the iptt processing class (in indicators.tests.iptt_sample_data.iptt_utility)\n\nYes, this is in fact a test suite for a test utility class, but it ensures we can abstract IPTT responses\nin a way that is faithful to the data they are representing, letting us test the view without digging inti the HTML.\nThis is currently written in such a way that a change to table structure will break this class, so front end changes\nwill necessitate updating this class.\n\"\"\"\n\nimport unittest\nfrom datetime import datetime\nimport os\nfrom iptt_sample_data import response_one, iptt_utility\nfrom bs4 import BeautifulSoup\n\nclass TestResponseClass(unittest.TestCase):\n\n def 
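A sanity check for permuteUnique in the record above: the number of distinct permutations of a multiset is n! divided by the product of the factorials of the element multiplicities, so [1, 1, 2] yields 3!/2! = 3, matching the test case.

from collections import Counter
from math import factorial

def distinct_permutation_count(nums):
    total = factorial(len(nums))
    for k in Counter(nums).values():
        total //= factorial(k)
    return total

print(distinct_permutation_count([1, 1, 2]))  # 3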
setUp(self):\n \"\"\"written in such a way that expanding to more test responses defined are easy to add\"\"\"\n self.expected = response_one.indicators\n self.responsedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"iptt_sample_data\")\n with open(os.path.join(self.responsedir, \"response1.html\")) as rawfile:\n self.responseraw = rawfile.read()\n self.soup = BeautifulSoup(self.responseraw, 'html.parser')\n self.processed = iptt_utility.IPTTResponse(self.responseraw)\n\n def test_indicator_count(self):\n self.assertEqual(len(self.processed.indicators),\n len(self.expected),\n u\"Expected {0} indicators, found {1} indicators from response:\\n{2}\".format(\n len(self.expected), len(self.processed.indicators),\n self.soup.prettify())\n )\n\n def test_indicator_values(self):\n for j, indicator in enumerate(self.expected):\n for key, value in indicator.items():\n if key != 'ranges':\n self.assertEqual(self.processed.indicators[j][key],\n value,\n u\"Expected {0} to have value {1}, got {2} instead.\".format(\n key, value, self.processed.indicators[j][key]\n ))\n\n def test_range_count(self):\n for j, indicator in enumerate(self.expected):\n self.assertEqual(len(self.processed.indicators[j]['ranges']),\n len(indicator['ranges']),\n u\"Expected {0} date ranges, got {1} instead.\".format(\n len(indicator['ranges']),\n len(self.processed.indicators[j]['ranges'])\n ))\n\n def test_range_values(self):\n for i, indicator in enumerate(self.expected):\n for j, daterange in enumerate(indicator['ranges']):\n for k, expected_value in daterange.items():\n if k in ['start_date', 'end_date'] and expected_value is not None:\n expected_value = datetime.strptime(expected_value, \"%Y-%m-%d\").strftime(\"%Y-%m-%d\")\n self.assertEqual(self.processed.indicators[i]['ranges'][j][k],\n expected_value,\n \"Expected indicator {0} range {1} key {2} to have value {3}, got {4}.\".format(\n i, j, k, expected_value, self.processed.indicators[i]['ranges'][j][k]\n ))\n ","sub_path":"indicators/tests/test_iptt_test_utilities.py","file_name":"test_iptt_test_utilities.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"580395115","text":"# The MIT License (MIT)\n#\n# Copyright (c) 2014-2015 WUSTL ZPLAB\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Authors: Erik Hvatum \n\nfrom PyQt5 import Qt\nimport ris_widget\nimport ris_widget.image\nimport ris_widget.layer\nimport ris_widget.ris_widget\nimport ris_widget.qwidgets.layer_table\nfrom .. import scope_client\n\n# Show layer name column in LayerTable\nris_widget.qwidgets.layer_table.LayerTableModel.PROPERTIES.insert(\n ris_widget.qwidgets.layer_table.LayerTableModel.PROPERTIES.index('opacity') + 1, 'name')\n\nclass ScopeViewerWidgetQtObject(ris_widget.ris_widget.RisWidgetQtObject):\n RW_LIVE_STREAM_BINDING_LIVE_UPDATE_EVENT = Qt.QEvent.registerEventType()\n OVEREXPOSURE_GETCOLOR_EXPRESSION = 's.r < 1.0f ? vec4(s.rrr, 1.0f) : vec4(1.0f, 0.0f, 0.0f, 1.0f)'\n\n def __init__(\n self,\n app_prefs_name,\n app_prefs_version,\n window_title,\n parent,\n window_flags,\n msaa_sample_count,\n layers,\n scope,\n scope_properties,\n **kw):\n \n super().__init__(\n app_prefs_name=app_prefs_name,\n app_prefs_version=app_prefs_version,\n window_title=window_title,\n parent=parent,\n window_flags=window_flags,\n msaa_sample_count=msaa_sample_count,\n layers=layers,\n **kw)\n hh = self.layer_table_view.horizontalHeader()\n col = ris_widget.qwidgets.layer_table.LayerTableModel.PROPERTIES.index('name')\n hh.resizeSection(col, hh.sectionSize(col) * 1.5)\n self.scope = scope\n self.scope_toolbar = self.addToolBar('Scope')\n self.live_streamer = scope_client.LiveStreamer(scope, scope_properties, self.post_live_update)\n import freeimage\n import pathlib\n if pathlib.Path('/home/zplab/vignette_mask.png').exists():\n self.layer_stack.imposed_image_mask = freeimage.read('/home/zplab/vignette_mask.png')\n self.show_over_exposed_action = Qt.QAction('Show Over-Exposed Live Pixels', self)\n self.show_over_exposed_action.setCheckable(True)\n self.show_over_exposed_action.setChecked(False)\n self.show_over_exposed_action.toggled.connect(self.on_show_over_exposed_action_toggled)\n self.show_over_exposed_action.setChecked(True)\n self.scope_toolbar.addAction(self.show_over_exposed_action)\n\n def event(self, e):\n # This is called by the main QT event loop to service the event posted in post_live_update().\n if e.type() == self.RW_LIVE_STREAM_BINDING_LIVE_UPDATE_EVENT:\n image_data, timestamp, frame_no = self.live_streamer.get_image()\n target_layer = self.get_live_target_layer()\n target_layer.image = ris_widget.image.Image(\n image_data,\n mask=self.layer_stack.imposed_image_mask,\n is_twelve_bit=self.live_streamer.bit_depth == '12 Bit',\n use_open_mp=True)\n if self.show_over_exposed_action.isChecked() and target_layer.image.type == 'G':\n target_layer.getcolor_expression = self.OVEREXPOSURE_GETCOLOR_EXPRESSION\n return True\n return super().event(e)\n\n def post_live_update(self):\n # posting an event does not require calling thread to have an event loop,\n # unlike sending a signal\n Qt.QCoreApplication.postEvent(self, Qt.QEvent(self.RW_LIVE_STREAM_BINDING_LIVE_UPDATE_EVENT))\n\n def get_live_target_layer(self):\n \"\"\"The first Layer in self.layers with name \"Live Target\" is returned. 
If self.layers contains no Layer with name\n \"Live Target\", one is created, inserted at index 0, and returned.\"\"\"\n if self.layers is None:\n self.layers = []\n else:\n for layer in self.layers:\n if layer.name == 'Live Target':\n return layer\n t = ris_widget.layer.Layer(name='Live Target')\n self.layers.insert(0, t)\n return t\n\n def embed_widget_flow_pop_button(self, pop_button):\n self.scope_toolbar.addWidget(pop_button)\n\n def on_show_over_exposed_action_toggled(self, show_over_exposed):\n layer = self.get_live_target_layer()\n if show_over_exposed:\n if layer.image is not None and layer.image.type == 'G':\n layer.getcolor_expression = self.OVEREXPOSURE_GETCOLOR_EXPRESSION\n else:\n # Revert to default getcolor_expression\n del layer.getcolor_expression\n\nclass ScopeViewerWidget(ris_widget.ris_widget.RisWidget):\n APP_PREFS_NAME = \"ScopeViewerWidget\"\n COPY_REFS = ris_widget.ris_widget.RisWidget.COPY_REFS + [\n #'something'\n ]\n QT_OBJECT_CLASS = ScopeViewerWidgetQtObject\n\n @staticmethod\n def can_run(scope):\n return hasattr(scope, 'camera')\n\n def __init__(\n self,\n host,\n scope,\n scope_properties,\n window_title='Scope Viewer',\n parent=None,\n window_flags=Qt.Qt.WindowFlags(0),\n msaa_sample_count=2,\n show=True,\n layers = tuple(),\n **kw):\n super().__init__(\n window_title=window_title,\n parent=parent,\n window_flags=window_flags,\n msaa_sample_count=msaa_sample_count,\n show=show,\n layers=layers,\n scope=scope,\n scope_properties=scope_properties,\n **kw)\n #fooprop = ProxyProperty('fooprop', 'qt_object', ScopeViewerWidgetQtObject)\n","sub_path":"scope/gui/scope_viewer_widget.py","file_name":"scope_viewer_widget.py","file_ext":"py","file_size_in_byte":6593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"42768130","text":"import os, re, pickle, json, matplotlib, time, socket\r\nimport numpy as np\r\nimport sympy as sp\r\nimport datetime\r\n#import anytree\r\nfrom itertools import chain\r\nfrom PyQt5.QtCore import (Qt, QObject, pyqtSignal, QTimer)\r\nfrom PyQt5.QtWidgets import (QApplication, QScrollArea, QFrame, QMenu, QGridLayout, QVBoxLayout, QHBoxLayout,\r\n QDialog, QLabel, QLineEdit, QPushButton, QWidget, QComboBox,QRadioButton, QCheckBox,\r\n QTabWidget, QFileDialog, QAction, QMessageBox, QDoubleSpinBox, QSpinBox,QSpacerItem,\r\n QMenuBar,QInputDialog)\r\nfrom copy import deepcopy\r\nimport pyqtgraph as pg\r\nfrom sympy.utilities.lambdify import lambdify\r\nfrom sympy.parsing.sympy_parser import parse_expr\r\ntry:\r\n from .device_lib import COMPortDevice\r\nexcept:\r\n from device_lib import COMPortDevice, COMPortWidget\r\n\r\nDEBUG = 1\r\n# from Devices.shutter import Shutter\r\n# from Devices.DAQ import DAQHandler\r\n\r\n\r\nmatplotlib.use('Qt5Agg',force=True)\r\nfrom collections import OrderedDict\r\n\r\nfrom Lib import *\r\n\r\nSCAN_PARAMS_STR = 'available_scan_params'\r\nNAME_IN_SCAN_PARAMS = 'DDS_function'\r\n\r\nvoltage_validator = QDoubleValidator(-10.0,10.0,1)\r\nn_avr_validator = QIntValidator(1,1000)\r\nAnalogInLineDict = OrderedDict([\r\n (\"Del\",['MPB','Del',40]),\r\n ('Channel',['CB','I0',['I%i'%i for i in range(8)]+['I23'],60]),\r\n ('Name', ['LE', 'New shutter', 120]),\r\n ('Vmin',['MDB',0,voltage_validator,60]),\r\n ('Vmax',['MDB',1,voltage_validator,60]),\r\n ('Navr',['MIB',1,n_avr_validator,50]),\r\n ('oOff',['MChB',False,20]),\r\n])\r\n\r\n\r\nclass AnalogInWidget(QScrollArea):\r\n class Line(QWidget):\r\n def __init__(self, parent,input_channels=[],data={}):\r\n # 
print('---- shutterLine construct, data',data)\r\n # print(other_channels)\r\n super().__init__(parent)\r\n self.parent = parent\r\n layout = QHBoxLayout()\r\n self.data = data\r\n self._update_from_scanner = False\r\n self.autoUpdate = QTimer()\r\n self.autoUpdate.setInterval(500)\r\n self.autoUpdate.timeout.connect(self.update)\r\n self.widgets = {}\r\n for key, val in AnalogInLineDict.items():\r\n # print(key,val)\r\n if key == 'Del':\r\n w = MyPushButton(name=key,handler=lambda: self.parent.delete(self), fixed_width=val[-1])\r\n layout.addWidget(w, val[-1])\r\n continue\r\n self.data[key] = data.get(key, val[1])\r\n if val[0] == 'CB':\r\n # create a combo box widget\r\n if key == 'Channel':\r\n items = input_channels if input_channels != [] else val[2]\r\n w = MyComboBox(items=items, current_text=data.get(key, val[1]),\r\n current_index_changed_handler=self.autoUpdate.start,\r\n max_width=val[-1])\r\n elif val[0] == 'LE':\r\n w = MyLineEdit(name=data.get(key, val[1]),\r\n text_changed_handler=self.textEdited,\r\n text_edited_handler=self.textEdited,\r\n max_width=val[-1])\r\n elif val[0] == 'MDB':\r\n w = MyDoubleBox(validator=val[2], value=data.get(key, val[1]),\r\n text_changed_handler=self.update,\r\n text_edited_handler=self.textEdited,\r\n max_width=val[-1])\r\n elif val[0] == 'MIB':\r\n w = MyIntBox(validator=val[2], value=data.get(key, val[1]),\r\n text_changed_handler=self.update,\r\n text_edited_handler=self.textEdited,\r\n max_width=val[-1])\r\n elif val[0] == 'MChB':\r\n w = MyCheckBox(is_checked=data.get(key, val[1]), handler=self.autoUpdate.start,\r\n max_width=val[-1])\r\n self.widgets[key] = w\r\n layout.addWidget(w, val[-1])\r\n # print('---- shutterLine - end construct')\r\n # layout.setSpacing(0)\r\n layout.addStretch(1)\r\n layout.setSpacing(10)\r\n layout.setContentsMargins(5, 2, 5, 2)\r\n self.main_layout = layout\r\n self.setLayout(layout)\r\n self.setMinimumHeight(20)\r\n self.setMaximumWidth(550)\r\n # self.setMinimumHeight(50)\r\n # self.update()\r\n\r\n def update(self):\r\n if DEBUG: print('---- AnalogInLine update')\r\n # print(str(self))\r\n self.autoUpdate.stop()\r\n # print('Here1')\r\n changed_item = {}\r\n for key, val in AnalogInLineDict.items():\r\n if val[0] == 'CB': # do a combo box widget\r\n if self.data[key] != self.widgets[key].currentText():\r\n # print(self.data[key])\r\n # print(self.widgets[key].currentText())\r\n changed_item[key] = (self.data[key], self.widgets[key].currentText())\r\n self.data[key] = self.widgets[key].currentText()\r\n elif val[0] == 'LE':\r\n if self.data[key] != self.widgets[key].text():\r\n changed_item[key] = (self.data[key], self.widgets[key].text())\r\n self.data[key] = self.widgets[key].text()\r\n elif val[0] in ['MDB', 'MIB']:\r\n if self.data[key] != self.widgets[key].value():\r\n changed_item[key] = (self.data[key], self.widgets[key].value())\r\n self.data[key] = self.widgets[key].value()\r\n elif val[0] in ['MChB']:\r\n if self.data[key] != self.widgets[key].isChecked():\r\n changed_item[key] = (self.data[key], self.widgets[key].isChecked())\r\n self.data[key] = self.widgets[key].isChecked()\r\n if DEBUG: print('AnalogIn line data changed: line:', self.data['Name'], changed_item)\r\n\r\n if not self._update_from_scanner:\r\n # autoSave.start() # do we need it here?\r\n if self.parent:\r\n self.parent.lineChanged(self, changed_item) # figure out how to handle this properly\r\n self._update_from_scanner = False\r\n\r\n def textEdited(self):\r\n # print('TextEdited')\r\n if self._update_from_scanner:\r\n 
self.update()\r\n else:\r\n self.autoUpdate.start()\r\n\r\n def constructData(self):\r\n return {key: self.widgets[key].getValue() for key in self.widgets}\r\n\r\n def getVmin(self):\r\n return self.widgets[\"Vmin\"].value()\r\n\r\n def getVmax(self):\r\n return self.widgets[\"Vmax\"].value()\r\n\r\n def getNavr(self):\r\n return self.widgets[\"Navr\"].value()\r\n\r\n def __init__(self,parent=None,globals={},signals={},port=None,input_channels=[],data={},config_file=None):\r\n super().__init__(parent)\r\n self.config_file = config_file\r\n self.globals = globals\r\n self.signals = signals\r\n self.parent = parent\r\n self.port = port # now it is arduino com-port\r\n self.data = data\r\n self.input_channels = input_channels\r\n self.rates = [\"1\",\"2\",\"5\",\"10\",\"20\",\"50\",\"100\"]\r\n self.current_rate = \"10\"\r\n self.lines = []\r\n self.load()\r\n self.plot_widget = PlotPulse(parent=self)\r\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n self.sock.settimeout(0.01)\r\n self.menuBar = QMenuBar(self)\r\n self.initUI()\r\n # self.sendScanParams()\r\n self.t_start = 0\r\n self.t_end = 1000\r\n if self.signals:\r\n self.signals.pulsesChanged.connect(self.constructPulses)\r\n\r\n def initUI(self):\r\n fileMenu = self.menuBar.addMenu('&File')\r\n # connect = QAction('&Connect', self)\r\n # connect.triggered.connect(self.connect)\r\n # fileMenu.addAction(connect)\r\n # load = QAction('&Load', self)\r\n # load.triggered.connect(self.loadDialog)\r\n # fileMenu.addAction(load)\r\n save = QAction('&Save', self)\r\n save.triggered.connect(self.saveClicked)\r\n fileMenu.addAction(save)\r\n\r\n self.setWindowTitle('AnalogIN')\r\n # self.setWindowIcon(QIcon('Devices\\dds.jpg'))\r\n main_widget = QWidget()\r\n top_layout = QHBoxLayout()\r\n\r\n rate_layout = QVBoxLayout()\r\n rate_layout.addWidget(QLabel(\"\"))\r\n rate_layout.addWidget(QLabel(\"dt, us\"))\r\n self.rate_box = MyComboBox(items=self.rates,current_text=self.current_rate,current_text_changed_handler=self.currentRateChanged)\r\n rate_layout.addWidget(self.rate_box)\r\n rate_layout.addStretch(1)\r\n top_layout.addLayout(rate_layout)\r\n\r\n mainLayout = QVBoxLayout()\r\n\r\n fields = QHBoxLayout()\r\n # fields.addSpacing(15)\r\n for key,val in AnalogInLineDict.items():\r\n lbl = QLabel(key)\r\n lbl.setMinimumWidth(val[-1])\r\n fields.addWidget(lbl)#, val[-1])\r\n # fields.addStretch(50)\r\n fields.setSpacing(10)\r\n fields.addStretch(1)\r\n mainLayout.addLayout(fields)\r\n\r\n for d in self.data:\r\n w = self.Line(self, data=d, input_channels=self.input_channels)\r\n self.lines.append(w)\r\n mainLayout.addWidget(w)\r\n\r\n updateAllBtn = MyPushButton(name=\"update all\", handler=lambda :self.constructPulses(None,None), max_width=550)\r\n mainLayout.addWidget(updateAllBtn)\r\n\r\n addLine = QPushButton('add channel')\r\n addLine.setMaximumWidth(550)\r\n addLine.clicked.connect(self.addLine)\r\n mainLayout.addWidget(addLine)\r\n mainLayout.addStretch(1)\r\n top_layout.addLayout(mainLayout)\r\n top_layout.addWidget(self.plot_widget)\r\n main_widget.setLayout(top_layout)\r\n main_widget.setMaximumWidth(1400)\r\n self.setWidgetResizable(True)\r\n self.setWidget(main_widget)\r\n self.setMinimumHeight(200)\r\n self.setMinimumWidth(550)\r\n self.main_layout = mainLayout\r\n\r\n def addLine(self):\r\n # do not call anything after since one should first set \"Channel to lock\"\r\n w = self.Line(parent=self, input_channels=self.input_channels, data={})\r\n self.lines.append(w)\r\n self.main_layout.insertWidget(len(self.lines), 
self.lines[-1])\r\n # self.save()\r\n return\r\n\r\n def delete(self, line):\r\n self.main_layout.removeWidget(line)\r\n line.deleteLater()\r\n self.lines.remove(line)\r\n # self.save()\r\n return\r\n\r\n def lineChanged(self,line, changes):\r\n if DEBUG: print('--- AnalogInWidget - lineChanged', changes)\r\n # self.sendScanParams()\r\n self.constructPulses(self.t_start,self.t_end)\r\n\r\n def currentRateChanged(self, new_rate):\r\n self.current_rate = new_rate\r\n self.constructPulses(None,None)\r\n\r\n def constructData(self):\r\n data = [line.constructData() for line in self.lines]\r\n return data\r\n\r\n def constructPulses(self,t_start,t_end):\r\n \"\"\"constructs and updates pulses produced by Shutters channels\"\"\"\r\n if t_start == None:\r\n update_all_command = True\r\n else:\r\n self.t_start = t_start\r\n self.t_end = t_end\r\n update_all_command = False\r\n lines_channeled = {line.data[\"Channel\"]:line for line in self.lines}\r\n if self.globals and \"pulses\" in self.globals:\r\n new_pulses = {channel: self.globals[\"pulses\"][channel] for channel in self.globals[\"pulses\"] if\r\n channel in sorted(self.available_channels)}\r\n # if DEBUG: print('current pulses', new_pulses)\r\n else:\r\n new_pulses = {}\r\n channels_data = {}\r\n for channel in sorted(lines_channeled):\r\n line = lines_channeled[channel]\r\n if line.data[\"oOff\"]: # if channel is turned off\r\n continue\r\n elif channel not in new_pulses or len(new_pulses[channel][0])==1: # if there are no pulses in this channel\r\n continue\r\n else:\r\n channels_data[channel] = []\r\n for pulse in new_pulses[channel]:\r\n channels_data[channel].append([pulse[0][0]-self.t_start,pulse[1][0]-self.t_start]) # add start and end time\r\n self.channels_data = deepcopy(channels_data)\r\n data_to_send = {\"rate\":1e6/int(self.current_rate),\r\n \"lines\":{},\r\n \"limits\":{},\r\n \"n_avrs\":{},\r\n \"samples\":0}\r\n t_max = 0\r\n for channel in channels_data:\r\n data_to_send[\"n_avrs\"][channel] = lines_channeled[channel].getNavr()\r\n data_to_send[\"limits\"][channel] = (lines_channeled[channel].getVmin(),lines_channeled[channel].getVmax())\r\n data_to_send[\"lines\"][channel] = [[int(pulse[0]*(data_to_send[\"rate\"]/1e3)),int(pulse[1]*(data_to_send[\"rate\"]/1e3))] for pulse in channels_data[channel]]\r\n t_pulse_max = max([pulse[1] for pulse in channels_data[channel]]) # compare ends of each pulse\r\n if t_pulse_max > t_max:\r\n t_max = t_pulse_max\r\n data_to_send[\"samples\"] = int(t_max*(data_to_send[\"rate\"]/1e3))\r\n # print(\"AnalogIn data\", data_to_send)\r\n # msg += final_msg\r\n # print(\"Message to shutters\")\r\n # print(msg)\r\n msg_to_server = \"Send \" + json.dumps({\"name\":\"DAQin\",\"msg\":data_to_send})\r\n print(msg_to_server)\r\n self.sock.sendto(bytes(msg_to_server, \"utf-8\"), self.globals[\"host_port\"])\r\n self.plot_widget.updatePlot(self.t_start,self.t_end)\r\n\r\n def load(self):\r\n if DEBUG: print('load AnalogIn', end='\\t')\r\n with open(self.config_file, 'r') as f:\r\n if DEBUG: print('config_load')\r\n config = json.load(f)\r\n print(config['Input'])\r\n self.__dict__.update(config['Input']) # here one should upload current_scheme and available_channels\r\n # print('other channels',self.other_channels)\r\n\r\n def saveClicked(self):\r\n # print('save shutters', self)\r\n self.data = self.constructData()\r\n with open(self.config_file, 'r') as f:\r\n if DEBUG: print('config_load_before_saving')\r\n all_config = json.load(f)\r\n config = all_config['Input']\r\n for key in config:\r\n 
config[key] = self.__dict__[key]\r\n with open(self.config_file, 'w') as f:\r\n if DEBUG: print('config_save')\r\n json.dump(all_config, f)\r\n\r\n # def sendScanParams(self):\r\n # params = {}\r\n # data = self.constructData()\r\n # for d in data:\r\n # key = d['Name']\r\n # params[key] = list(d.keys())\r\n # if self.globals != None:\r\n # if SCAN_PARAMS_STR not in self.globals:\r\n # self.globals[SCAN_PARAMS_STR] = {}\r\n # self.globals[SCAN_PARAMS_STR][NAME_IN_SCAN_PARAMS] = params\r\n # return\r\n\r\n\r\nclass PlotPulse(pg.GraphicsWindow):\r\n def __init__(self, parent=None, globals={}, signals=None, **argd):\r\n self.signals = signals\r\n self.parent = parent\r\n self.globals = globals\r\n super().__init__(title=\"AnalogInPlot\")\r\n # self.resize(600, 600)\r\n # self.signals.updateDigitalPlot.connect(self.updatePlot)\r\n self.setMinimumHeight(150)\r\n # self.updatePlot()\r\n\r\n def updatePlot(self,t_start,t_end):\r\n \"\"\"used as a slot called by Pulses class to redraw pulses\r\n CAN BE DONE IN THREAD\"\"\"\r\n self.plotPulses(t_start,t_end)\r\n\r\n def plotPulses(self,t_start,t_end):\r\n print('PlotAnalogIn')\r\n self.clear() # clear plot\r\n self.setBackground('w')\r\n # self.s\r\n d_plot = self.addPlot()\r\n d_plot.getAxis('left').setPen(pg.Color('k'))\r\n d_plot.getAxis('bottom').setPen(pg.Color('k'))\r\n d_plot.showGrid(x=True)\r\n digital_height = 1.2 # place for each curve of height=1graphic\r\n digital_counter = 0 # number of plotted channel\r\n digital_list = [] # list of active digital channels\r\n # print(self.globals[\"pulses\"])\r\n if self.parent.channels_data == {}:\r\n return\r\n t_first = min(min(x[0] for x in value if x[0] > 0) for name, value in\r\n self.parent.channels_data.items() if 'I' in name)\r\n t_last = t_end\r\n # t_last = max(max(x[0] for x in value if x[0] > 0) for name, value in\r\n # self.globals['pulses'].items() if 'D' in name and len(value)>1) + 10\r\n pulse_counter = 0\r\n for name in sorted(self.parent.channels_data.keys(),reverse=True):\r\n if 'I' not in name:\r\n continue\r\n digital_list.append(name)\r\n value = self.parent.channels_data[name]\r\n\r\n\r\n # construct points to show\r\n for i, pulse in enumerate(value):\r\n xx = [t_first-(100 if t_first > 100 else t_first),pulse[0],pulse[0],pulse[1],pulse[1],t_last-t_start]\r\n yy = [0,0,1,1,0,0]\r\n # xx.append(t_first-(100 if t_first > 100 else t_first))\r\n # yy.append(0)\r\n # xx.append(pulse[0])\r\n # yy.append(1)\r\n # xx.append(pulse[1])\r\n # yy.append(0)\r\n # xx.append(t_last-t_start)\r\n # yy.append(0)\r\n d_plot.plot(np.array(xx), np.array(yy)+digital_counter*digital_height,\r\n pen=pg.mkPen(pg.intColor(pulse_counter), width=2)) # plot data\r\n pulse_counter += 1\r\n d_plot.plot(np.array(xx), np.ones_like(xx)*digital_counter*digital_height,\r\n pen=pg.mkPen(width=0.5, style=Qt.DashLine)) # plot zero\r\n digital_counter += 1\r\n # set ticks names\r\n\r\n tick_names = digital_list\r\n d_plot.getAxis('left').setTicks([list(zip((np.arange(len(digital_list))+1/2) * digital_height, tick_names))])\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n # digital_pulses_folder = 'digital_schemes'\r\n # pulseGroup = PulseGroup\r\n app = QApplication(sys.argv)\r\n # mainWindow = PulseGroup(parent=None,name='New group',data=[])\r\n # mainWindow = PulseScheme()\r\n mainWindow = AnalogInWidget(config_file='config.json')\r\n mainWindow.show()\r\n 
sys.exit(app.exec_())\r\n","sub_path":"PyLab/DDSFunction.py","file_name":"DDSFunction.py","file_ext":"py","file_size_in_byte":18755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"249597634","text":"import csv\nimport pdb\n\nCOLUMNS_TO_COUNT = [\"Year\", \"INVEST : Independent\",\"INVEST : Negotiable\",\"INVEST : Valuable\",\"INVEST : Estimable\",\"INVEST : Small\",\"INVEST : Testable\",\"BABOK 3 : atomic\",\"BABOK 3 : complete\",\"BABOK 3 : consistent\",\"BABOK 3 : concise\",\"BABOK 3 : feasible\",\"BABOK 3 : unambiguous\",\"BABOK 3 : testable\",\"BABOK 3 : prioritized\",\"BABOK 3 : understandable\",\"BABOK 2 : cohesion\",\"BABOK 2 : completeness\",\"BABOK 2 : consistency\",\"BABOK 2 : correction\",\"BABOK 2 : viability\",\"BABOK 2 : adaptability\",\"BABOK 2 : unambiguity\",\"BABOK 2 : testability\",\"Test As Requirement\", \"BABOK\", \"BABOK 3\", \"BABOK 2\", \"INVEST\"]\n\ndef initialize_count_one_column_values_if_needed(value, column_values):\n if value not in column_values:\n column_values[value] = {}\n column_values[value][\"count\"] = 0\n column_values[value][\"details\"] = []\n\ndef count_partial_column_values(file, partial_column_to_count):\n column_values = {}\n with open(file) as csvfile:\n reader = csv.DictReader(csvfile, delimiter=\"\\t\")\n initialize_count_one_column_values_if_needed(\"Total\", column_values)\n for row in reader:\n discovered_column_to_count = [column for column in row.keys() if partial_column_to_count in column]\n for column_to_count in discovered_column_to_count:\n if len(row[column_to_count]) > 0:\n to_add = row[\"ID Paper\"] + \" \" + row[\"Title\"]\n initialize_count_one_column_values_if_needed(row[column_to_count], column_values)\n if to_add not in column_values[row[column_to_count]][\"details\"]:\n column_values[row[column_to_count]][\"count\"] += 1\n column_values[row[column_to_count]][\"details\"].append(to_add)\n column_values[\"Total\"][\"count\"] += 1\n \n return column_values\n \ndef print_each_row_data(file):\n with open(file) as csvfile:\n reader = csv.DictReader(csvfile, delimiter=\"\\t\")\n for row in reader:\n print()\n print(row[\"ID Paper\"], row[\"Title\"])\n for column in row.keys():\n if column not in [\"ID Paper\", \"Title\"] and len(row[column]) > 0:\n if row[column] == \"Y\":\n print(\">>>\", column)\n else:\n print(\">>>\", column, \"=\", row[column])\n \ndef print_dict_per_line(dict, details=False, header=None):\n if header:\n print(header)\n \n for k in sorted(dict.keys()):\n print(k, dict[k][\"count\"])\n if details:\n print (\" \".join([\"\\cite{\" + d.split()[0] + \"}\" for d in dict[k][\"details\"]]))\n\nif __name__ == \"__main__\":\n file = \"mono_data.csv\"\n for c in COLUMNS_TO_COUNT:\n print_dict_per_line(count_partial_column_values(file, c), True, c + \" Count Details\")\n print()\n print_each_row_data(file)","sub_path":"Mono/interpret_data.py","file_name":"interpret_data.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"619538326","text":"import numpy as np\n\nimport os\nimport glob\n\nimport cv2\nimport geoio\nimport tifffile as tiff\n\nflood_map_dir = \"D:\\\\Workspace\\\\results\\\\pisar\\\\scences\\\\flood_mask_post\"\ngeo_dir = \"D:\\\\Workspace\\\\data\\\\raw\\\\pi-sar2\\\\20110312\\\\tiff_all\"\ndem_path = \"D:\\\\Workspace\\\\data\\\\raw\\\\pi-sar2\\\\20110312\\\\dem.tif\"\nsave_dir = \"D:\\\\Workspace\\\\results\\\\pisar\\\\scences\\\\dem\"\n\ndem_img = 
geoio.GeoImage(dem_path)\ndem_data = dem_img.get_data() # (bands, rows, cols)\n\nfor filepath in glob.glob(os.path.join(flood_map_dir, \"*.png\")):\n\tbasename = os.path.basename(filepath).split(\".\")[0]\n\n\tflood_img = cv2.imread(filepath, 0)\n\n\tflood_dem_img = np.zeros(flood_img.shape + (1,))\n\t\n\tgeo_path = os.path.join(geo_dir, \"%s_sc.tif\" % basename)\n\tgeo_img = geoio.GeoImage(geo_path)\n\n\ty = np.arange(flood_img.shape[0])\n\tx = np.arange(flood_img.shape[1])\n\n\tyx = [[i, j] for i in y for j in x]\n\tyx = np.array(yx)\n\n\tgeo_x, geo_y = geo_img.raster_to_proj(yx[:, 1], yx[:, 0])\n\t\n\tdem_x, dem_y = dem_img.proj_to_raster(geo_x, geo_y)\n\tdem_x = [int(i) for i in dem_x]\n\tdem_y = [int(i) for i in dem_y]\n\t\n\tflood_dem_img[yx[:, 0], yx[:, 1], 0] = dem_data[0, dem_y, dem_x]\n\n\ttiff.imsave(os.path.join(save_dir, \"%s.tif\" % basename), flood_dem_img, planarconfig='contig')","sub_path":"flood-depth/gen_area_dem.py","file_name":"gen_area_dem.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"609624110","text":"#!/usr/bin/python3\nfrom DirOrFileToOSS import DirOrFileToOSS\nimport Config\nconfig = {\n 'accessKeyID': Config.accessKeyID,\n 'accessKeySecret': Config.accessKeySecret,\n 'endpoint': Config.endpoint,\n 'bucketName': Config.bucketName,\n 'baklist': [\n # mysql5 directory backup\n {\n # directory to back up (without trailing /) or full path of a single file\n 'path': Config.dnmpDirPath + '/data/mysql5',\n # local backup path\n 'locBakPath': Config.bakRootPath + '/dirbak',\n # OSS upload path, must end with /\n 'ossPath': 'DataAutoBak/gitbak/',\n # files or directories to ignore\n 'ignoreDirOrFile': [],\n # whether to delete the local backup; has no effect when isUploadOss is False\n 'isRemoveLocBak': False,\n # whether to upload to OSS\n 'isUploadOss':True\n\n },\n # git backup\n {\n # directory to back up (without trailing /) or full path of a single file\n 'path': Config.dnmpDirPath + '/data/scm',\n # local backup path\n 'locBakPath': Config.bakRootPath + '/dirbak',\n # OSS upload path, must end with /\n 'ossPath': 'DataAutoBak/gitbak/',\n # files or directories to ignore\n 'ignoreDirOrFile': [],\n # whether to delete the local backup; has no effect when isUploadOss is False\n 'isRemoveLocBak': False,\n # whether to upload to OSS\n 'isUploadOss':True\n\n },\n # web backup\n {\n # directory to back up (without trailing /) or full path of a single file\n 'path': Config.dnmpDirPath + '/www',\n # local backup path\n 'locBakPath': Config.bakRootPath + '/dirbak',\n # OSS upload path, must end with /\n 'ossPath': 'DataAutoBak/webbak/',\n # files or directories to ignore\n 'ignoreDirOrFile': ['.vscode', '.git', 'runtime', 'Data', 'aspnet_client', 'imagethumb'],\n # whether to delete the local backup; has no effect when isUploadOss is False\n 'isRemoveLocBak': False,\n # whether to upload to OSS\n 'isUploadOss':True\n\n },\n ]\n}\nif __name__ == '__main__':\n bak = DirOrFileToOSS(config)\n bak.run()\n","sub_path":"scripts/BakDir.py","file_name":"BakDir.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"166384682","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains the tool of zopyx.ecardsng\n\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\ndef read(*rnames):\n return open(os.path.join(os.path.dirname(__file__), *rnames)).read()\n\nversion = '0.3.3'\n\nlong_description = (\n read('README.txt')\n + '\\n\\n' +\n read(os.path.join('docs', 'HISTORY.txt'))\n )\n\ntests_require=['zope.testing']\n\nsetup(name='zopyx.ecardsng',\n version=version,\n description=\"An ECard implementation for Plone\",\n long_description=long_description,\n # Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Framework :: Plone',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'License :: 
OSI Approved :: Zope Public License',\n ],\n keywords='Plone Zope ECards',\n author='Andreas Jung',\n author_email='info@zopyx.com',\n url='http://svn.plone.org/svn/collective/zopyx.ecardsng/trunk',\n license='ZPL',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['zopyx', ],\n include_package_data=True,\n zip_safe=False,\n install_requires=['setuptools',\n 'uuid',\n # -*- Extra requirements: -*-\n ],\n tests_require=tests_require,\n extras_require=dict(tests=tests_require),\n test_suite = 'zopyx.ecardsng.tests.test_docs.test_suite',\n entry_points=\"\"\"\n # -*- entry_points -*- \n [distutils.setup_keywords]\n paster_plugins = setuptools.dist:assert_string_list\n\n [egg_info.writers]\n paster_plugins.txt = setuptools.command.egg_info:write_arg\n \"\"\",\n paster_plugins = [\"ZopeSkel\"],\n )\n","sub_path":"pypi_install_script/zopyx.ecardsng-0.3.3.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"356500221","text":"# neural network\nimport torch\nimport numpy as np\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\nimport matplotlib.pyplot as plt\n\ndef plot_decision_boundary(model, x, y):# a model here is just a lambda\n\t# Set min and max values and give it some padding\n\tx_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1\n\ty_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1\n\th = 0.01\n\t# Generate a grid of points with distance h between them\n\txx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))# generate the grid\n\tprint('xx is:',xx)\n\t# Predict the function value for the whole grid\n\tZ = model(np.c_[xx.ravel(), yy.ravel()])# apply the model lambda\n\tprint('Z looks like:\\n',Z)\n\tZ = Z.reshape(xx.shape)\n\t#print(Z.shape)# the output is 1s and 0s\n\t# Plot the contour and training examples\n\tplt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)# draw the filled contour\n\tplt.ylabel('x2')\n\tplt.xlabel('x1')\n\tplt.scatter(x[:, 0], x[:, 1], c=y.reshape(-1), s=40, cmap=plt.cm.Spectral)\n\n\nnp.random.seed(1) # seed the random number generator\nm = 400 # number of samples\nN = int(m/2) # number of points per class\nD = 2 # dimensionality\nx = np.zeros((m, D))\ny = np.zeros((m, 1), dtype='uint8') # label vector, 0 means red, 1 means blue\na = 4\n\nfor j in range(2):\n\tix = range(N*j,N*(j+1))\n\tt = np.linspace(j*3.12,(j+1)*3.12,N) + np.random.randn(N)*0.2 # theta\n\tr = a*np.sin(4*t) + np.random.randn(N)*0.2 # radius\n\tx[ix] = np.c_[r*np.sin(t), r*np.cos(t)]\n\ty[ix] = j\n\nplt.scatter(x[:, 0], x[:, 1], c=y.reshape(-1), s=40, cmap=plt.cm.Spectral)\n\n# try logistic regression first\nx = torch.from_numpy(x).float()\ny = torch.from_numpy(y).float()\nw = nn.Parameter(torch.randn(2, 1))\nb = nn.Parameter(torch.zeros(1))\n\noptimizer = torch.optim.SGD([w, b], 1e-1)\n\ndef logistic_regression(x):# a single-layer network\n return torch.mm(x, w) + b\n\ncriterion = nn.BCEWithLogitsLoss()\nfor e in range(100):\n out = logistic_regression(Variable(x))\n loss = criterion(out, Variable(y))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (e + 1) % 20 == 0:\n print('epoch: {}, loss: {}'.format(e+1, loss.data[0]))\n\ndef plot_logistic(x):\n\tx = Variable(torch.from_numpy(x).float())\n\tout = torch.sigmoid(logistic_regression(x))\n\tout = (out > .5) * 1\n\tprint('model output',out) # tensor of 1s and 0s\n\tprint('output:',out.data.numpy())\n\treturn out.data.numpy()\n\t\n\n\nplot_decision_boundary(lambda x: plot_logistic(x), x.numpy(), y.numpy())# (1/0 predictions, x coords, y coords)\nplt.title('logistic regression')\nplt.show()\n\t\n\n# neural network (MLP)\nprint('using a neural network\\n')\n# define the parameters of a two-layer network # multi-layer logistic regression\nw1 = nn.Parameter(torch.randn(2, 4) * 0.01) # input has 2 features, the hidden layer has 4 neurons\nb1 = nn.Parameter(torch.zeros(4))\n\nw2 = nn.Parameter(torch.randn(4, 1) * 0.01)\nb2 = nn.Parameter(torch.zeros(1))\n\nprint('shape of W1:',w1.shape)#[2,4]\n# define the model\ndef two_network(x):\n x1 = torch.mm(x, w1) + b1\n x1 = F.tanh(x1) # use PyTorch's built-in tanh activation\n x2 = torch.mm(x1, w2) + b2\n return x2\n\noptimizer = torch.optim.SGD([w1, w2, b1, b2], 1.)\n\ncriterion = nn.BCEWithLogitsLoss()\n\t\n# train for 10000 iterations\n\nprint('shape of x:',x.shape)#[400,2]\n\nfor e in range(10000):\n out = two_network(Variable(x))\n loss = criterion(out, Variable(y))\n optimizer.zero_grad()\n loss.backward()\n print(\"w1 before the step:\",w1)\n optimizer.step()\n print(\"w1 after the step:\",w1)\n if (e + 1) % 1000 == 0:\n print('epoch: {}, loss: {}'.format(e+1, loss.data[0]))\ndef plot_network(x):\n x = Variable(torch.from_numpy(x).float())\n x1 = torch.mm(x, w1) + b1\n x1 = torch.tanh(x1)\n x2 = torch.mm(x1, w2) + b2\n out = torch.sigmoid(x2)\n out = (out > 0.5) * 1\n return out.data.numpy()\nprint('shape of y.numpy():',y.numpy().shape)# a 400x1 matrix\n\n\n\nplot_decision_boundary(lambda x: plot_network(x), x.numpy(), y.numpy())\nplt.title('2 layer network')\nplt.show()\n\n# the situation in the plane:\n# logistic regression can only draw a single curve (linear, quadratic, cubic, etc.)\n# a neural network can apparently combine several such curves\n\n\n\n\n\n\t","sub_path":"神经网络(结构包在函数).py","file_name":"神经网络(结构包在函数).py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"314020949","text":"from Fitness import Fitness\nfrom AstIndividual import AstIndividual\n\n\nclass FindNumberFitness(Fitness):\n\n def __init__(self, target_number: float):\n super().__init__()\n self.target_number = target_number\n\n def eval(self, individual):\n assert isinstance(individual, AstIndividual), \"The individual to evaluate must be an Abstract Syntax Tree\"\n tree_to_eval = individual.get_gen()\n return -(abs(self.target_number - tree_to_eval.eval(feed_dict={'values':[]})))\n","sub_path":"src/genetic_programming/fitness/FindNumberFitness_v0.py","file_name":"FindNumberFitness_v0.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"605978351","text":"# coding:utf-8\n# binary tree traversal algorithms\n\nclass treeNode(object):\n def __init__(self,val=None,left=None,right=None):\n self.val = val\n self.left = left\n self.right = right\n\ndef midTraverse(root):\n '''\n in-order traversal\n :param root: the root node\n :return:\n '''\n if root == None:\n return\n midTraverse(root.left)\n print(root.val)\n midTraverse(root.right)\n\ndef preTraverse(root):\n '''\n pre-order traversal\n :param root: the root node\n :return:\n '''\n if root == None:\n return\n print(root.val)\n preTraverse(root.left)\n preTraverse(root.right)\n\ndef afterTraverse(root):\n '''\n post-order traversal\n :param root: the root node\n :return:\n '''\n if root == None:\n return\n afterTraverse(root.left)\n afterTraverse(root.right)\n print(root.val) # post-order visits the root last (left, right, root)\n\nif __name__ == '__main__':\n root = treeNode('A',treeNode('B'),treeNode('C'))\n # midTraverse(root)\n # preTraverse(root)\n afterTraverse(root)","sub_path":"BinaryTree/binaryTreeMethod.py","file_name":"binaryTreeMethod.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"297955013","text":"#!/usr/bin/python2.5\n# Copyright 2011 JatLeGo Inc. 
All Rights Reserved.\n# Author: andyzh1314@gmail.com (Andy Zhau)\n\nfrom sfdb.internal import model\nfrom sfdb.internal import list_model\nfrom sfdb.internal import dbtypes\nfrom sfdb.internal import property\nfrom sfdb.internal import validator\n\n# public symbols\n__all__ = [\"Model\", \"SFModel\", \"ListModel\", \"SFListModel\",\n\n \"Property\", \"IDProperty\", \"IntegerProperty\", \"StringProperty\",\n \"BooleanProperty\",\n \"FloatProperty\", \"TimestampProperty\", \"EmailProperty\",\n \"JSONProperty\", \"MetaProperty\", \"CounterProperty\",\n \"PasswordProperty\", \"ListRefProperty\",\n\n \"Validator\", \"PropertyValidator\", \"TypeValidator\",\n \"NotNoneValidator\", \"EmailValidator\", \"ModelValidator\",\n\n \"ValidatorFormatError\", \"StringLengthError\", \"EmailFormatError\",\n\n \"objid\", \"json\", \"dbref\", \"notset\", \"time\", \"list\",\n ]\n\n__version__ = \"1.0.1\"\n\n\ndef model_dict():\n \"\"\"Return a dict of (model name, model class) with all the known models.\"\"\"\n return model.Model.__model_dict__\n\ndef issfdbinstance(obj, typ=dbtypes.dbtype):\n return dbtypes.issfdbinstance(obj, typ)\n\nModel = model.Model\nListModel = list_model.ListModel\nSFModel = set\n\nProperty = property.Property\nBooleanProperty = property.BooleanProperty\nIntegerProperty = property.IntegerProperty\nFloatProperty = property.FloatProperty\nTimestampProperty = property.TimestampProperty\nListProperty = property.ListProperty\nStringProperty = property.StringProperty\nPasswordProperty = property.PasswordProperty\nEmailProperty = property.EmailProperty\nJSONProperty = property.JSONProperty\nIDProperty = property.IDProperty\nDBRefProperty = property.DBRefProperty\nCounterProperty = property.CounterProperty\nListRefProperty = property.ListRefProperty\n\nobjid = dbtypes.objid\nstring = dbtypes.string\njson = dbtypes.json\ndbref = dbtypes.dbref\ncounter = dbtypes.counter\nnotset = dbtypes.notset\ntime = dbtypes.time\nlist = dbtypes.list\n\nValidatorFormatError = validator.ValidatorFormatError\nStringLengthError = validator.StringLengthError\nEmailFormatError = validator.EmailFormatError\n","sub_path":"sfdb/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"218893370","text":"# Binary Search Tree (a must-know for interviews)\n# performance is quite good\n# Search, Delete, Insert : O(log n)\n# cf) LinkedList Search, Delete, Insert : O(n), which is quite slow\n# properties\n# 1) nodes in the left subtree are less than or equal to their parent node.\n# 2) nodes in the right subtree are greater than or equal to their parent node.\n#\n# tree: a structure made of nodes, with the nodes linked to each other\n\nclass Node:\n def __init__(self,item):\n self.val = item\n self.left = None\n self.right = None\n\nclass BinarySearchTree:\n\n def __init__(self):\n self.head=Node(None)\n self.preorder_list = []\n\n def search(self, item): # two cases: 1) the tree is empty 2) it is not\n if self.head.val is None:\n return False\n else:\n return self.__search_node(self.head, item)\n\n def __search_node(self, cur, item):\n if cur.val == item:\n return True\n else:\n if cur.val >= item:\n if cur.left is not None:\n return self.__search_node(cur.left, item) # recurse down toward the leaf nodes\n else:\n return False\n else:\n if cur.right is not None:\n return self.__search_node(cur.right, item) # recurse down toward the leaf nodes\n else:\n return False\n\n def add(self,item): # two cases: 1) the tree is empty 2) it is not\n if self.head.val is None:\n self.head.val = item\n else :\n self.__add_node(self.head,item)\n\n def __add_node(self, cur,item):\n if cur.val >= item:\n if cur.left is not None:\n self.__add_node(cur.left,item) # recurse down toward the leaf nodes\n else:\n cur.left=Node(item)\n else :\n if cur.right is not None:\n self.__add_node(cur.right,item) # recurse down toward the leaf nodes\n else :\n cur.right = Node(item)\n\n# Remove 1 : Node to be removed has no child -> just remove it\n# Remove 2 : Node to be removed has one child -> splice the node out and link its child to its parent\n# Remove 3 : Node to be removed has two children\n# -> swap it with the leftmost node of the right subtree\n\n def remove(self,item):\n if self.head.val is None:\n print(\"there is no item in BST:\",item)\n return # nothing to remove from an empty tree\n if self.head.val==item:\n # 1) Node to be removed has no children\n if self.head.left is None and self.head.right is None:\n self.head=None\n # 2) Node to be removed has one child\n elif self.head.left is None and self.head.right is not None:\n # self.head.val = self.head.right.val\n # self.head.right=None\n self.head=self.head.right\n ######################### question to raise later\n # 2) Node to be removed has one child\n elif self.head.left is not None and self.head.right is None:\n # self.head.val=self.head.left.val\n # self.head.left=None\n self.head=self.head.left\n ######################### question to raise later\n # 3) Node to be removed has two children\n else:\n self.head.val=self.__most_left_val_from_right_node(self.head.right).val\n self.__removeitem(self.head, self.head.right, self.head.val)\n # delete that value from the right subtree of self.head\n else:\n if self.head.val > item:\n self.__remove(self.head, self.head.left, item)\n else :\n self.__remove(self.head, self.head.right, item)\n\n def __remove(self, parent, cur, item):\n if cur is None:\n print (\"There is no item: \", item)\n return # reached a dead end without finding the item\n if cur.val == item:\n # 1) Node to be removed has no children.\n if cur.left is None and cur.right is None:\n if parent.left == cur:\n parent.left = None\n else:\n parent.right = None\n # 2) Node to be removed has one child.\n elif cur.left is None and cur.right is not None:\n if parent.left == cur:\n parent.left = cur.right\n else:\n parent.right = cur.right\n # 2) Node to be removed has one child.\n elif cur.left is not None and cur.right is None:\n if parent.left == cur:\n parent.left = cur.left\n else:\n parent.right = cur.left\n # 3) Node to be removed has two children.\n else:\n cur.val = self.__most_left_val_from_right_node(cur.right).val\n self.__removeitem(cur, cur.right, cur.val)\n else:\n if cur.val > item: # compare against the current node, not the root\n self.__remove(cur, cur.left, item)\n else:\n self.__remove(cur, cur.right, item)\n\n def __most_left_val_from_right_node(self,cur): # cur: the root of the right subtree is passed in\n if cur.left is None:\n return cur\n else:\n return self.__most_left_val_from_right_node(cur.left) # recursion\n\n def __removeitem(self,parent,cur,item):\n if cur.val==item: # when the direct child of parent holds the item\n if parent.left==cur:\n parent.left=None\n else:\n parent.right=None\n else:\n if cur.val>item:\n self.__removeitem(cur,cur.left,item) # recursion\n else :\n self.__removeitem(cur,cur.right,item) # recursion\n\n def preorder_traverse(self):\n if self.head is not None:\n self.__preorder(self.head)\n\n def __preorder(self, cur):\n self.preorder_list.append(cur.val)\n print (cur.val)\n if cur.left is not None:\n self.__preorder(cur.left)\n if cur.right is not None:\n self.__preorder(cur.right)\n\nbt = BinarySearchTree()\n# bt.add(5)\n# bt.add(3)\n# bt.add(4)\n# bt.add(1)\n# bt.add(7)\n# # 5\n# # 3 7\n# # 1 4\n\nbt.add(1)\nbt.add(2)\nbt.add(3)\nbt.add(4)\nbt.preorder_traverse()\nprint(\"pre order\")\nbt.remove(1)\nbt.preorder_traverse()\nprint(\"pre order\")\n\nprint(bt.search(3))\nprint(bt.search(999))\n\n\n\n\n\n\n\n\n","sub_path":"dataStructure/binarySearchTree.py","file_name":"binarySearchTree.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"511751422","text":"# This file is a part of the IncludeOS unikernel - www.includeos.org\n#\n# Copyright 2017-2018 IncludeOS AS, Oslo, Norway\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n# To avoid: <...>/NaCl/type_processors/iface.py:1: RuntimeWarning: 
Parent module '<...>/NaCl/type_processors' not found while handling absolute import\n\nfrom NaCl import exit_NaCl, NaCl_exception, Typed, BASE_TYPE_TYPED_INIT, BASE_TYPE_FUNCTION, DOT\nfrom shared import *\n# TYPE_IFACE, TYPE_NAT, TEMPLATE_KEY_IFACE_PUSHES, TEMPLATE_KEY_ENABLE_CT_IFACES, TEMPLATE_KEY_HAS_NATS,\n# TRUE, FALSE\n\n# -------------------- CONSTANTS Iface --------------------\n\n# Moved to shared.py: TYPE_IFACE = \"iface\"\n\n# Iface keys\n\nIFACE_KEY_ADDRESS \t\t= \"address\"\nIFACE_KEY_NETMASK \t\t= \"netmask\"\nIFACE_KEY_GATEWAY\t\t= \"gateway\"\nIFACE_KEY_DNS \t\t\t= \"dns\"\nIFACE_KEY_INDEX \t\t= \"index\"\nIFACE_KEY_VLAN \t\t\t= \"vlan\"\nIFACE_KEY_MASQUERADE \t= \"masquerade\"\nIFACE_KEY_CONFIG \t\t= \"config\"\n\nIFACE_KEY_PREROUTING \t= \"prerouting\"\nIFACE_KEY_INPUT \t\t= \"input\"\nIFACE_KEY_OUTPUT \t\t= \"output\"\nIFACE_KEY_POSTROUTING \t= \"postrouting\"\n\nCHAIN_NAMES = [\n\tIFACE_KEY_PREROUTING,\n\tIFACE_KEY_INPUT,\n\tIFACE_KEY_OUTPUT,\n\tIFACE_KEY_POSTROUTING\n]\n\nPREDEFINED_IFACE_KEYS = [\n\tIFACE_KEY_ADDRESS,\n\tIFACE_KEY_NETMASK,\n\tIFACE_KEY_GATEWAY,\n\tIFACE_KEY_DNS,\n\tIFACE_KEY_INDEX,\n\tIFACE_KEY_VLAN,\n\tIFACE_KEY_MASQUERADE,\n\tIFACE_KEY_CONFIG\n]\nPREDEFINED_IFACE_KEYS.extend(CHAIN_NAMES)\n\nDHCP_CONFIG \t\t\t= \"dhcp\"\nDHCP_FALLBACK_CONFIG \t= \"dhcp-with-fallback\"\nSTATIC_CONFIG \t\t\t= \"static\"\n\nPREDEFINED_CONFIG_TYPES = [\n\tDHCP_CONFIG,\n\tDHCP_FALLBACK_CONFIG,\n\tSTATIC_CONFIG\n]\n\n# -------------------- CONSTANTS Vlan --------------------\n\nTYPE_VLAN \t= \"vlan\"\n\n# Vlan keys\n\nVLAN_KEY_ADDRESS \t= IFACE_KEY_ADDRESS\nVLAN_KEY_NETMASK \t= IFACE_KEY_NETMASK\nVLAN_KEY_GATEWAY \t= IFACE_KEY_GATEWAY\nVLAN_KEY_DNS \t\t= IFACE_KEY_DNS\nVLAN_KEY_INDEX \t\t= IFACE_KEY_INDEX\n\nPREDEFINED_VLAN_KEYS = [\n\tVLAN_KEY_ADDRESS,\n\tVLAN_KEY_NETMASK,\n\tVLAN_KEY_GATEWAY,\n\tVLAN_KEY_INDEX\n]\n\n# -------------------- TEMPLATE KEYS (pystache) --------------------\n\n# Template keys Vlan\n\nTEMPLATE_KEY_VLANS \t\t\t= \"vlans\"\n\n# Template keys Iface\n\nTEMPLATE_KEY_IFACES \t\t\t\t\t= \"ifaces\"\nTEMPLATE_KEY_IFACES_WITH_VLANS\t\t\t= \"ifaces_with_vlans\"\n# Moved to shared.py: TEMPLATE_KEY_IFACE_PUSHES = \"pushes_iface\"\nTEMPLATE_KEY_AUTO_NATTING_IFACES \t\t= \"auto_natting_ifaces\"\nTEMPLATE_KEY_MASQUERADES \t\t\t\t= \"masquerades\"\n\n# Moved to shared.py: TEMPLATE_KEY_ENABLE_CT_IFACES = \"enable_ct_ifaces\"\n\nTEMPLATE_KEY_HAS_AUTO_NATTING_IFACES \t= \"has_auto_natting_ifaces\"\nTEMPLATE_KEY_HAS_VLANS \t\t\t\t\t= \"has_vlans\"\nTEMPLATE_KEY_HAS_MASQUERADES \t\t\t= \"has_masquerades\"\n\nTEMPLATE_KEY_CONFIG_IS_DHCP \t\t\t= \"config_is_dhcp\"\nTEMPLATE_KEY_CONFIG_IS_DHCP_FALLBACK \t= \"config_is_dhcp_fallback\"\nTEMPLATE_KEY_CONFIG_IS_STATIC \t\t\t= \"config_is_static\"\n\nTEMPLATE_KEY_INDEX \t\t\t\t\t\t= \"index\"\nTEMPLATE_KEY_ADDRESS \t\t\t\t\t= \"address\"\nTEMPLATE_KEY_NETMASK \t\t\t\t\t= \"netmask\"\nTEMPLATE_KEY_GATEWAY \t\t\t\t\t= \"gateway\"\nTEMPLATE_KEY_DNS \t\t\t\t\t\t= \"dns\"\n\nTEMPLATE_KEY_IFACE_INDEX \t\t\t\t= \"iface_index\"\n\n# -------------------- CLASSES --------------------\n\n# ---- class Common (base class to the below classes Vlan and Iface) ----\n\nclass Common(Typed):\n\tdef __init__(self, nacl_state, idx, name, ctx, base_type, type_t):\n\t\tsuper(Common, self).__init__(nacl_state, idx, name, ctx, base_type, type_t)\n\n\tdef process_assignment(self, element_key):\n\t\t# print \"process assignment Common\"\n\n\t\telement = self.nacl_state.elements.get(element_key)\n\n\t\tname_parts = 
element_key.split(DOT)\n\t\torig_member = name_parts[1]\n\t\tmember = orig_member.lower()\n\n\t\tif len(name_parts) != 2:\n\t\t\texit_NaCl(element.ctx, \"Invalid \" + self.get_class_name() + \" member \" + element.name)\n\n\t\ttry:\n\t\t\tself.validate_key(orig_member)\n\t\texcept NaCl_exception as e:\n\t\t\texit_NaCl(element.ctx, e.value)\n\n\t\tif self.members.get(member) is not None:\n\t\t\texit_NaCl(element.ctx, \"Member \" + member + \" has already been set\")\n\n\t\tfound_element_value = element.ctx.value()\n\t\ttry:\n\t\t\tself.add_member(member, found_element_value)\n\t\texcept NaCl_exception as e:\n\t\t\texit_NaCl(element.ctx, e.value)\n\n# < class Common\n\n# ---- class Vlan ----\n\nclass Vlan(Common):\n\tdef __init__(self, nacl_state, idx, name, ctx, base_type, type_t):\n\t\tsuper(Vlan, self).__init__(nacl_state, idx, name, ctx, base_type, type_t)\n\n\t\tself.handle_as_untyped = False\n\n\t\t# Vlan keys/members:\n\t\t# address\n\t\t# netmask\n\t\t# gateway\n\t\t# index\n\n\t# Overriding\n\tdef validate_key(self, key):\n\t\tkey_lower = key.lower()\n\t\tif key_lower not in PREDEFINED_VLAN_KEYS:\n\t\t\traise NaCl_exception(\"Invalid Vlan member \" + key)\n\n\tdef validate_members(self):\n\t\tvlan_index = self.members.get(VLAN_KEY_INDEX)\n\t\tvlan_address = self.members.get(VLAN_KEY_ADDRESS)\n\t\tvlan_netmask = self.members.get(VLAN_KEY_NETMASK)\n\n\t\tif vlan_index is None or vlan_address is None or vlan_netmask is None:\n\t\t\texit_NaCl(self.ctx, \"The members index, address and netmask must be set for every Vlan\")\n\n\t\tif vlan_index.obj() is not None or vlan_index.list_t() is not None:\n\t\t\texit_NaCl(vlan_index, \"The Vlan member \" + VLAN_KEY_INDEX + \" can not be an object\")\n\t\tif vlan_address.obj() is not None or vlan_address.list_t() is not None:\n\t\t\texit_NaCl(vlan_address, \"The Vlan member \" + VLAN_KEY_ADDRESS + \" can not be an object\")\n\t\tif vlan_netmask.obj() is not None or vlan_netmask.list_t() is not None:\n\t\t\texit_NaCl(vlan_netmask, \"The Vlan member \" + VLAN_KEY_NETMASK + \" can not be an object\")\n\n\tdef process_members(self):\n\t\t# Transpile values\n\t\tfor key, member in self.members.iteritems():\n\t\t\tself.members[key] = self.nacl_state.transpile_value(member)\n\n\t# Main processing method\n\tdef process(self):\n\t\tif self.res is None:\n\t\t\t# Then process\n\n\t\t\tself.process_ctx()\n\t\t\tself.process_assignments()\n\t\t\tself.validate_members()\n\t\t\tself.process_members()\n\n\t\t\tself.res = self.members\n\n\t\treturn self.res\n\n# < class Vlan\n\n# ---- class Iface ----\n\nclass Iface(Common):\n\tdef __init__(self, nacl_state, idx, name, ctx, base_type, type_t):\n\t\tsuper(Iface, self).__init__(nacl_state, idx, name, ctx, base_type, type_t)\n\n\t\tself.handle_as_untyped = False\n\n\t\tself.config_is_dhcp \t\t\t= False\n\t\tself.config_is_dhcp_fallback \t= False\n\t\tself.config_is_static \t\t\t= False\n\n\t\tself.chains = {}\t# To handle setting of a chain multiple times in the same Iface\n\t\t \t# Should not be handled as the ctx objects in self.members\n\n\t\t# Iface keys/members:\n\t\t# address\n\t\t# netmask\n\t\t# gateway\n\t\t# dns\n\t\t# index\n\t\t# vlan\n\t\t# config\n\t\t# masquerade\n\t\t# prerouting\n\t\t# input\n\t\t# forward\n\t\t# output\n\t\t# postrouting\n\n\t# Overriding\n\tdef validate_key(self, key):\n\t\tkey_lower = key.lower()\n\t\tif key_lower not in PREDEFINED_IFACE_KEYS:\n\t\t\traise NaCl_exception(\"Invalid Iface member \" + key)\n\n\t# Overriding\n\tdef add_member(self, key, value):\n\t\tif key in 
CHAIN_NAMES:\n\t\t\tself.process_push(key, value)\n\t\telse:\n\t\t\tsuper(Iface, self).add_member(key, value) # self.members[key] = pair_value\n\n\t# Overriding\n\t# TODO: Naming...\n\tdef add_not_obj_value(self, value_ctx):\n\t\tif value_ctx.value_name() is not None:\n\t\t\t# configuration type (dhcp, dhcp-with-fallback, static)\n\t\t\tconfig = value_ctx.value_name().getText().lower()\n\t\t\tif config in PREDEFINED_CONFIG_TYPES:\n\t\t\t\tself.members[IFACE_KEY_CONFIG] = value_ctx\n\t\t\telse:\n\t\t\t\traise NaCl_exception(\"Invalid Iface value \" + value_ctx.value_name().getText())\n\t\telse:\n\t\t\traise NaCl_exception(\"An Iface has to contain key value pairs, or be set to a configuration type (\" + \\\n\t\t\t\t\", \".join(PREDEFINED_CONFIG_TYPES) + \")\")\n\n\tdef process_push(self, chain, value_ctx):\n\t\tif self.chains.get(chain) is not None:\n\t\t\texit_NaCl(value_ctx, \"Iface chain \" + chain + \" has already been set\")\n\n\t\tfunctions = []\n\t\tif value_ctx.list_t() is not None:\n\t\t\t# More than one function pushed onto chain\n\t\t\tfor list_value in value_ctx.list_t().value_list().value():\n\t\t\t\tif list_value.value_name() is None:\n\t\t\t\t\texit_NaCl(list_value, \"This is not supported: \" + value_ctx.getText())\n\t\t\t\tfunctions.append(list_value.value_name())\n\t\telif value_ctx.value_name() is not None:\n\t\t\t# Only one function pushed onto chain\n\t\t\tfunctions = [ value_ctx.value_name() ]\n\t\telse:\n\t\t\texit_NaCl(value_ctx, \"This is not supported: \" + value_ctx.getText())\n\n\t\tself.chains[chain] = chain # Mark as set\n\t\tself.add_push(chain, functions)\n\n\tdef validate_members(self):\n\t\tif self.members.get(IFACE_KEY_INDEX) is None:\n\t\t\texit_NaCl(self.ctx.value(), \"An index needs to be specified for all Ifaces\")\n\n\t\tconfig = self.members.get(IFACE_KEY_CONFIG)\n\t\tif config is not None and (config.value_name() is None or config.value_name().getText().lower() not in PREDEFINED_CONFIG_TYPES):\n\t\t\texit_NaCl(config, \"Invalid config value \" + config.getText())\n\n\t\tif (config is None or config.value_name().getText().lower() != DHCP_CONFIG) and \\\n\t\t\t(self.members.get(IFACE_KEY_ADDRESS) is None or \\\n\t\t\t\tself.members.get(IFACE_KEY_NETMASK) is None):\n\t\t\texit_NaCl(self.ctx.value(), \"The members \" + IFACE_KEY_ADDRESS + \" and \" + IFACE_KEY_NETMASK + \\\n\t\t\t\t\" must be set for every Iface if the Iface configuration hasn't been set to \" + DHCP_CONFIG)\n\t\telif config is not None and config.value_name().getText().lower() == DHCP_CONFIG and \\\n\t\t\t(self.members.get(IFACE_KEY_ADDRESS) is not None or \\\n\t\t\t\tself.members.get(IFACE_KEY_NETMASK) is not None or \\\n\t\t\t\tself.members.get(IFACE_KEY_GATEWAY) is not None or \\\n\t\t\t\tself.members.get(IFACE_KEY_DNS) is not None):\n\t\t\texit_NaCl(config.value_name(), \"An Iface with config set to dhcp should not specify \" + IFACE_KEY_ADDRESS + \\\n\t\t\t\t\", \" + IFACE_KEY_NETMASK + \", \" + IFACE_KEY_GATEWAY + \" or \" + IFACE_KEY_DNS)\n\n\tdef is_vlan(self, element):\n\t\tif element is None or not hasattr(element, 'type_t') or element.type_t.lower() != TYPE_VLAN:\n\t\t\treturn False\n\t\treturn True\n\n\tdef process_members(self):\n\t\t# Vlans\n\t\tvlans = []\n\t\tif self.members.get(IFACE_KEY_VLAN) is not None:\n\t\t\tvlan_ctx = self.members.get(IFACE_KEY_VLAN)\n\n\t\t\tif vlan_ctx.obj() is not None and any(pair.key().getText().lower() in PREDEFINED_VLAN_KEYS for pair in vlan_ctx.obj().key_value_list().key_value_pair()):\n\t\t\t\t# Then handle this as a vlan object in 
itself, not an obj of vlans\n\t\t\t\tvlan_element = Vlan(self.nacl_state, 0, \"\", vlan_ctx, BASE_TYPE_TYPED_INIT, TYPE_VLAN)\n\t\t\t\tvlans.append(vlan_element)\n\t\t\telif vlan_ctx.obj() is not None:\n\t\t\t\t# If this is a dictionary/map/obj of vlans\n\t\t\t\t# Add each Vlan in obj to the vlans list\n\t\t\t\t# Each element in the obj needs to be a valid Vlan\n\t\t\t\tfor pair in vlan_ctx.obj().key_value_list().key_value_pair():\n\t\t\t\t\t# Key: Name of Vlan\n\t\t\t\t\t# Value: Actual Vlan object/value (containing address, netmask, gateway, index)\n\t\t\t\t\tvlan_element = Vlan(self.nacl_state, 0, pair.key().getText(), pair.value(), BASE_TYPE_TYPED_INIT, TYPE_VLAN)\n\t\t\t\t\tvlans.append(vlan_element)\n\t\t\telif vlan_ctx.list_t() is not None:\n\t\t\t\t# Add each Vlan in list_t to the vlans list\n\t\t\t\t# Each element in the list_t needs to be a valid Vlan\n\t\t\t\tfor _, v in enumerate(vlan_ctx.list_t().value_list().value()):\n\t\t\t\t\tvlan_element = None\n\n\t\t\t\t\tif v.value_name() is not None:\n\t\t\t\t\t\tvlan_name = v.value_name().getText()\n\t\t\t\t\t\tvlan_element = self.nacl_state.elements.get(vlan_name)\n\t\t\t\t\t\tif not self.is_vlan(vlan_element):\n\t\t\t\t\t\t\texit_NaCl(v.value_name(), \"Undefined Vlan \" + vlan_name)\n\t\t\t\t\telif v.obj() is not None:\n\t\t\t\t\t\tvlan_element = Vlan(self.nacl_state, 0, \"\", v, BASE_TYPE_TYPED_INIT, TYPE_VLAN)\n\t\t\t\t\telse:\n\t\t\t\t\t\texit_NaCl(v, \"A Vlan list must either contain Vlan objects (key value pairs) or names of Vlans\")\n\n\t\t\t\t\tvlans.append(vlan_element)\n\t\t\telif vlan_ctx.value_name() is not None:\n\t\t\t\tvlan_name = vlan_ctx.value_name().getText()\n\t\t\t\tvlan_element = self.nacl_state.elements.get(vlan_name)\n\t\t\t\tif not self.is_vlan(vlan_element):\n\t\t\t\t\texit_NaCl(vlan_ctx.value_name(), \"Undefined Vlan \" + vlan_name)\n\t\t\t\tvlans.append(vlan_element)\n\t\t\telse:\n\t\t\t\texit_NaCl(vlan_ctx, \"An Iface's vlan needs to be a list of Vlans\")\n\n\t\t# Loop through self.members and transpile the values\n\t\tfor key, member in self.members.iteritems():\n\t\t\tif key != IFACE_KEY_CONFIG and key != IFACE_KEY_MASQUERADE:\n\t\t\t\tself.members[key] = self.nacl_state.transpile_value(member)\n\n\t\t\t\t# Validate that an Iface with this Iface's index has not already been defined\n\t\t\t\tif key == IFACE_KEY_INDEX:\n\t\t\t\t\tfor key, el in self.nacl_state.elements.iteritems():\n\t\t\t\t\t\tif isinstance(el, Iface) and key != self.name:\n\t\t\t\t\t\t\tel_idx = el.members.get(IFACE_KEY_INDEX)\n\t\t\t\t\t\t\tif el_idx is not None and el_idx == self.members.get(IFACE_KEY_INDEX):\n\t\t\t\t\t\t\t\texit_NaCl(member, \"Another Iface has been defined with index \" + el_idx)\n\t\t\telif key == IFACE_KEY_CONFIG:\n\t\t\t\tself.members[IFACE_KEY_CONFIG] = member.value_name().getText().lower()\n\t\t\telse:\n\t\t\t\tmasq_val = self.nacl_state.transpile_value(member)\n\n\t\t\t\tif not isinstance(masq_val, basestring) or (masq_val.lower() != TRUE and masq_val.lower() != FALSE):\n\t\t\t\t\texit_NaCl(member, \"Invalid masquerade value. 
Must be set to true or false\")\n\n\t\t\t\tif masq_val == TRUE:\n\t\t\t\t\tself.nacl_state.append_to_pystache_data_list(TEMPLATE_KEY_MASQUERADES, {\n\t\t\t\t\t\tTEMPLATE_KEY_IFACE: self.name\n\t\t\t\t\t})\n\n\t\t# Update config members\n\t\tconfig = self.members.get(IFACE_KEY_CONFIG)\n\t\tif config == DHCP_CONFIG:\n\t\t\tself.config_is_dhcp = True\n\t\telif config == DHCP_FALLBACK_CONFIG:\n\t\t\tself.config_is_dhcp_fallback = True\n\t\telse:\n\t\t\tself.config_is_static = True\n\n\t\tif len(vlans) > 0:\n\t\t\t# Process and add vlans found\n\t\t\tself.add_iface_vlans(vlans)\n\n\tdef add_push(self, chain, functions):\n\t\t# chain: string with name of chain\n\t\t# functions: list containing value_name ctxs, where each name corresponds to the name of a NaCl function\n\n\t\tadd_auto_natting = False\n\t\tfunction_names = []\n\t\tnum_functions = len(functions)\n\t\tfor i, function in enumerate(functions):\n\t\t\tname = function.getText()\n\t\t\telement = self.nacl_state.elements.get(name)\n\t\t\tif element is None or element.base_type != BASE_TYPE_FUNCTION:\n\t\t\t\texit_NaCl(function, \"No function with the name \" + name + \" exists\")\n\n\t\t\t# If a Nat function is pushed onto an Iface's chain,\n\t\t\t# push the snat_translate lambda in cpp_template.mustache\n\t\t\t# onto the same Iface's postrouting chain\n\t\t\t# and push the dnat_translate lambda in cpp_template.mustache\n\t\t\t# onto the same Iface's prerouting chain\n\t\t\tif element.type_t.lower() == TYPE_NAT:\n\t\t\t\tadd_auto_natting = True\n\n\t\t\tfunction_names.append({TEMPLATE_KEY_FUNCTION_NAME: name, TEMPLATE_KEY_COMMA: (i < (num_functions - 1))})\n\n\t\tif add_auto_natting:\n\t\t\tself.nacl_state.append_to_pystache_data_list(TEMPLATE_KEY_AUTO_NATTING_IFACES, {\n\t\t\t\tTEMPLATE_KEY_IFACE: self.name\n\t\t\t})\n\n\t\tself.nacl_state.append_to_pystache_data_list(TEMPLATE_KEY_IFACE_PUSHES, {\n\t\t\tTEMPLATE_KEY_NAME:\t\t\t\tself.name,\n\t\t\tTEMPLATE_KEY_CHAIN: \t\t\tchain,\n\t\t\tTEMPLATE_KEY_FUNCTION_NAMES: \tfunction_names\n\t\t})\n\n\tdef add_iface_vlans(self, vlans):\n\t\t# Called once per Iface\n\n\t\tpystache_vlans = []\n\t\tfor vlan in vlans:\n\t\t\tvlan.process() # Make sure the Vlan has been processed\n\n\t\t\tgateway = vlan.members.get(VLAN_KEY_GATEWAY)\n\t\t\tif gateway is None:\n\t\t\t\tgateway = self.members.get(IFACE_KEY_GATEWAY)\n\n\t\t\tpystache_vlans.append({\n\t\t\t\tTEMPLATE_KEY_INDEX: \tvlan.members.get(VLAN_KEY_INDEX),\n\t\t\t\tTEMPLATE_KEY_ADDRESS: \tvlan.members.get(VLAN_KEY_ADDRESS),\n\t\t\t\tTEMPLATE_KEY_NETMASK: \tvlan.members.get(VLAN_KEY_NETMASK),\n\t\t\t\tTEMPLATE_KEY_GATEWAY: \tgateway\n\t\t\t})\n\n\t\tself.nacl_state.append_to_pystache_data_list(TEMPLATE_KEY_IFACES_WITH_VLANS, {\n\t\t\tTEMPLATE_KEY_IFACE: \t\tself.name,\n\t\t\tTEMPLATE_KEY_IFACE_INDEX: \tself.members.get(IFACE_KEY_INDEX),\n\t\t\tTEMPLATE_KEY_VLANS: \t\tpystache_vlans\n\t\t})\n\n\tdef add_iface(self):\n\t\t# Append iface object to pystache ifaces list\n\n\t\t# Create object containing key value pairs with the data we have collected\n\t\t# Append this object to the ifaces list\n\t\t# Is to be sent to pystache renderer in handle_input function\n\t\tself.nacl_state.append_to_pystache_data_list(TEMPLATE_KEY_IFACES, {\n\t\t\tTEMPLATE_KEY_NAME: \t\tself.name,\n\t\t\tTEMPLATE_KEY_TITLE: \tself.name.title(),\n\t\t\tTEMPLATE_KEY_INDEX: \tself.members.get(IFACE_KEY_INDEX),\n\n\t\t\tTEMPLATE_KEY_CONFIG_IS_STATIC: \t\t\tself.config_is_static,\n\t\t\tTEMPLATE_KEY_CONFIG_IS_DHCP: 
\t\t\tself.config_is_dhcp,\n\t\t\tTEMPLATE_KEY_CONFIG_IS_DHCP_FALLBACK: \tself.config_is_dhcp_fallback,\n\n\t\t\tTEMPLATE_KEY_ADDRESS: \tself.members.get(IFACE_KEY_ADDRESS),\n\t\t\tTEMPLATE_KEY_NETMASK:\tself.members.get(IFACE_KEY_NETMASK),\n\t\t\tTEMPLATE_KEY_GATEWAY: \tself.members.get(IFACE_KEY_GATEWAY),\n\t\t\tTEMPLATE_KEY_DNS: \t\tself.members.get(IFACE_KEY_DNS)\n\t\t})\n\n\tdef enable_ct(self):\n\t\t# Add this Iface's name to enable_ct_ifaces pystache list if it is not in the list already\n\t\tif not self.nacl_state.exists_in_pystache_list(TEMPLATE_KEY_ENABLE_CT_IFACES, TEMPLATE_KEY_IFACE, self.name):\n\t\t\tfor chain in CHAIN_NAMES:\n\t\t\t\tif self.chains.get(chain) is not None:\n\t\t\t\t\tself.nacl_state.append_to_pystache_data_list(TEMPLATE_KEY_ENABLE_CT_IFACES, {\n\t\t\t\t\t\tTEMPLATE_KEY_IFACE: self.name\n\t\t\t\t\t})\n\t\t\t\t\treturn # Only one entry in enable_ct_ifaces list for each Iface\n\n\t# Main processing method\n\tdef process(self):\n\t\tif self.res is None:\n\t\t\t# Then process\n\n\t\t\tself.process_ctx()\n\t\t\tself.process_assignments()\n\t\t\tself.validate_members()\n\t\t\tself.process_members()\n\t\t\tself.add_iface()\n\t\t\tself.enable_ct()\n\n\t\t\tself.res = self.members\n\n\t\treturn self.res\n\n\t# Called from handle_input (NaCl.py) right before rendering, after the NaCl file has been processed\n\t# Register the last data here that can not be registered before this (set has-values f.ex.)\n\t@staticmethod\n\tdef final_registration(nacl_state):\n\t\tif not nacl_state.pystache_list_is_empty(TEMPLATE_KEY_AUTO_NATTING_IFACES):\n\t\t\tnacl_state.register_pystache_data_object(TEMPLATE_KEY_HAS_AUTO_NATTING_IFACES, True)\n\n\t\tif not nacl_state.pystache_list_is_empty(TEMPLATE_KEY_IFACES_WITH_VLANS):\n\t\t\tnacl_state.register_pystache_data_object(TEMPLATE_KEY_HAS_VLANS, True)\n\n\t\tif not nacl_state.pystache_list_is_empty(TEMPLATE_KEY_MASQUERADES):\n\t\t\tnacl_state.register_pystache_data_object(TEMPLATE_KEY_HAS_MASQUERADES, True)\n\t\t\tnacl_state.register_pystache_data_object(TEMPLATE_KEY_HAS_NATS, True)\n\n# < class Iface\n\n# -------------------- INIT --------------------\n\n# Dictionary of lists in NaCl_state\n# pystache_data{}\n# pystache_data[TEMPLATE_KEY] = []\n\ndef create_iface_pystache_lists(nacl_state):\n\tnacl_state.create_pystache_data_lists([ \\\n\t\tTEMPLATE_KEY_IFACES, \\\n\t\tTEMPLATE_KEY_AUTO_NATTING_IFACES, \\\n\t\tTEMPLATE_KEY_IFACE_PUSHES, \\\n\t\tTEMPLATE_KEY_IFACES_WITH_VLANS, \\\n\t\tTEMPLATE_KEY_MASQUERADES, \\\n\t\tTEMPLATE_KEY_ENABLE_CT_IFACES\n\t\t# TEMPLATE_KEY_HAS_MASQUERADES, \\\n\t\t# TEMPLATE_KEY_HAS_AUTO_NATTING_IFACES, \\\n\t\t# TEMPLATE_KEY_HAS_VLANS \\\n\t\t# These three are added in the final_registration method\n\t])\n\n# def create_vlan_pystache_lists(nacl_state):\n#\tnacl_state.create_pystache_data_lists(...)\n\ndef init(nacl_state):\n\t# print \"Init iface: Iface and Vlan\"\n\n\tnacl_state.add_type_processor(TYPE_IFACE, Iface)\n\tnacl_state.add_type_processor(TYPE_VLAN, Vlan)\n\n\tcreate_iface_pystache_lists(nacl_state)\n\t# create_vlan_pystache_lists(nacl_state) # No lists to create\n","sub_path":"type_processors/iface.py","file_name":"iface.py","file_ext":"py","file_size_in_byte":19267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"320086866","text":"import re\n#back reference\n\nrand_str = \"The cat cat fell out the window\"\nregex = re.compile(r\"(\\b\\w+)\\s+\\1\")\nmatches = re.findall(regex,rand_str)\nprint(matches)\n\n# back reference 
substitutions\nrand_str = \"<p>So am I</p>\"\nregex = re.compile(r\"(?<=<p>)\\w+\")\nregex2 = re.compile(r\"\\w+(?=</p>)\")\ncombined = re.compile(r\"(?<=<p>).+?(?=</p>
)\")\nprint(re.findall(combined, rand_str))\n","sub_path":"basics/regex_4.py","file_name":"regex_4.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"611160316","text":"#\n# For licensing see accompanying LICENSE.txt file.\n# Copyright (C) 2019-2020 Apple Inc. All Rights Reserved.\n#\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass View(nn.Module):\n def __init__(self, shape):\n \"\"\" Simple helper module to reshape a tensor\n\n :param shape: the desired shape, -1 for ignored dimensions\n :returns: reshaped tensor\n :rtype: torch.Tensor\n\n \"\"\"\n super(View, self).__init__()\n self.shape = shape\n\n def forward(self, input):\n return input.contiguous().view(*self.shape)\n\n\nclass DAB(nn.Module):\n def __init__(self, approximator, hard_layer):\n \"\"\" DAB layer simply accepts an approximator model, a hard layer\n and adds syntatic sugar to return the hard output while caching\n the soft version. It also adds a helper fn loss_function() to\n return the DAB loss.\n\n :param approximator: the approximator nn.Module\n :param hard_layer: the hard layer nn.Module\n :returns: DAB Object\n :rtype: nn.Module\n\n \"\"\"\n super(DAB, self).__init__()\n self.loss_fn = F.mse_loss\n self.hard_layer = hard_layer.apply\n self.approximator = approximator\n\n def loss_function(self):\n \"\"\" Simple helper to return the cached loss\n\n :returns: loss reduced across feature dimension\n :rtype: torch.Tensor\n\n \"\"\"\n assert self.true_output.shape[0] == self.approximator_output.shape[0], \"batch mismatch\"\n batch_size = self.true_output.shape[0]\n return torch.sum(self.loss_fn(self.approximator_output.view(batch_size, -1),\n self.true_output.view(batch_size, -1),\n reduction='none'), dim=-1)\n\n def forward(self, x, **kwargs):\n \"\"\" DAB layer simply caches the true and approximator outputs\n and returns the hard output.\n\n :param x: the input to the DAB / hard fn\n :returns: hard output\n :rtype: torch.Tensor\n\n \"\"\"\n self.approximator_output = self.approximator(x, **kwargs)\n self.true_output = self.hard_layer(x, self.approximator_output)\n\n # sanity check and return\n assert self.approximator_output.shape == self.true_output.shape, \\\n \"proxy output {} doesn't match size of hard output [{}]\".format(\n self.approximator_output.shape, self.true_output.shape\n )\n\n return self.true_output\n\n\nclass BaseHardFn(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x, soft_y, hard_fn, *args):\n \"\"\" Runs the hard function for forward, cache the output and returns.\n All hard functions should inherit from this, it implements the autograd override.\n\n :param ctx: pytorch context, automatically passed in.\n :param x: input tensor.\n :param soft_y: forward pass output (logits) of DAB approximator network.\n :param hard_fn: to be passed in from derived class.\n :param args: list of args to pass to hard function.\n :returns: hard_fn(tensor), backward pass using DAB.\n :rtype: torch.Tensor\n\n \"\"\"\n hard = hard_fn(x, *args)\n saveable_args = list([a for a in args if isinstance(a, torch.Tensor)])\n ctx.save_for_backward(x, soft_y, *saveable_args)\n return hard\n\n @staticmethod\n def _hard_fn(x, *args):\n raise NotImplementedError(\"implement _hard_fn in derived class\")\n\n @staticmethod\n def backward(ctx, grad_out):\n \"\"\" Returns DAB derivative.\n\n :param ctx: pytorch context, automatically passed in.\n :param grad_out: grads coming into layer\n 
:returns: dab_grad(tensor)\n :rtype: torch.Tensor\n\n \"\"\"\n x, soft_y, *args = ctx.saved_tensors\n with torch.enable_grad():\n grad = torch.autograd.grad(outputs=soft_y, inputs=x,\n grad_outputs=grad_out,\n # allow_unused=True,\n retain_graph=True)\n return grad[0], None, None, None\n\n\nclass SignumWithMargin(BaseHardFn):\n @staticmethod\n def _hard_fn(x, *args):\n \"\"\" x[x < -eps] = -1\n x[x > +eps] = 1\n else x = 0\n\n :param x: input tensor\n :param args: list of args with 0th element being eps\n :returns: signum(tensor)\n :rtype: torch.Tensor\n\n \"\"\"\n eps = args[0] if len(args) > 0 else 0.5\n sig = torch.zeros_like(x)\n sig[x < -eps] = -1\n sig[x > eps] = 1\n return sig\n\n @staticmethod\n def forward(ctx, x, soft_y, *args):\n return BaseHardFn.forward(ctx, x, soft_y, SignumWithMargin._hard_fn, *args)\n","sub_path":"models/dab.py","file_name":"dab.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"142388543","text":"'''\nCreated on Dec 8, 2015\n\n@author: rajeev.kumar\n\nDescription:Verify that on the Firmware page, the Read-only is unable to add, remove, set, import or configure settings.\n'''\nfrom tests.globalImports import *\n\ntc_id=utility.get_tc_data(__file__)\n\nclass Testcase(Manager.Manager): \n \"\"\"\n Firmware page, the Read-only is unable to add, remove, set, import or configure settings.\n \"\"\"\n \n def __init__(self, *args, **kwargs):\n \"\"\"\n Initialization\n \"\"\"\n Manager.Manager.__init__(self, tc_id, *args, **kwargs)\n \n \n @BaseClass.TestBase.func_exec\n def test_functionality(self): \n \"\"\"\n This is the execution starting function\n \"\"\"\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Read only', loginAsUser=True)\n \n #Navigate to Repositories Page\n self.get_RepositoriesPage(\"Firmware\")\n \n self.logout()\n \n #Verify Options\n# self.verifyOptions(optionList=[\"Add\", \"Delete\"], pageName=\"Repositories\")","sub_path":"GUI/gui-automation-ASMvNext84UI/tests/rbac/Testcase_NGI_TC_4174.py","file_name":"Testcase_NGI_TC_4174.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"156685993","text":"#Ques 1.\nNexaCars=['Ignis','Baleno','Ciaz','S-cross']\nprint(NexaCars)\n\n#Ques 2.\nNewEntry=['google','apple','facebook','microsoft','tesla']\nAdd=NewEntry + NexaCars\nprint(Add)\n\n#Ques 3.\nNum=[1,0,1,5,2,6,2,5,3,0,3]\nprint(Num.count(0))\n\n#Ques 4.\nNumbers=[30,26,25,20,15,10,3]\nNumbers.sort()\nprint(Numbers)\n\n#Ques 5.\nA=[3,10,15,20]\nB=[25,26,30]\nC=A+B\nC.sort()\nprint(C)\n\n#Ques 6.\nNumbers=[3,10,15,20,25,26,30]\ncount1=0\ncount2=0\nfor i in Numbers:\n if not i % 2:\n count1=count1+1\n else:\n count2=count2+1\nprint(\"Even Numbers=\",count1)\nprint(\"Odd Numbers=\",count2)\n\n\n#TUPLE QUES\n\n#Ques 1.\nA=(\"10,15,20\")\nB=reversed(A)\nprint(tuple(B))\n\n#Ques 2.\nA=[3,10,15,20,25,26,30]\nprint('Maximum is=',max(A))\nprint('Minimum is=',min(A))\n\n\n#STRING QUES\n\n#Ques 1.\nstring=\"ujjwal agnihotri\"\nprint(string.upper())\n\n#Ques 2.\nstr=\"ujjwal9292\"\nprint(str.isnumeric())\n\n#Ques 3.\nstring='Hello World'\nprint(string.replace('World','Ujjwal Agnihotri'))\n","sub_path":"assingment 3.py","file_name":"assingment 3.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"508137083","text":"#測試 1 : 偵測到移動使 LED 閃爍 \nfrom machine import Pin\nimport time\np0=Pin(33, Pin.IN) #接 PIR 感測器信號輸出 (中間腳)\np2=Pin(17, Pin.OUT) #接 LED + 220 歐姆電阻\n\ndef LED_blink(): #閃爍 LED 一次的函數\n p2.value(1)\n time.sleep_ms(50) #暫停 50 ms\n p2.value(0)\n time.sleep_ms(50) #暫停 50 ms\n\nwhile True:\n if p0.value()==1: #PIR 偵測到人體移動時輸出 High (3.3V)\n LED_blink() #讓 LED 閃爍\n else: #移動停止時 PIR 輸出 Low\n p2.value(0) #讓 LED 熄滅","sub_path":"source/mvDetect.py","file_name":"mvDetect.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"648576911","text":"import json\nimport csv\nimport os\n\n\ndef get_json(filename, default_value=None):\n if os.path.isfile(filename):\n with open(filename) as f:\n return json.load(f)\n return default_value\n\n\ndef save_json(filename, values):\n with open(filename, 'w') as outfile:\n json.dump(values, outfile, indent=4, sort_keys=True)\n\n\ndef get_mastostats():\n mastostats_csv = \"mastostats.csv\"\n masto_array = [['timestamp', 'usercount', 'instancecount', 'tootscount']]\n if os.path.isfile(mastostats_csv):\n with open(mastostats_csv, 'r') as csvfile:\n reader = csv.reader(csvfile)\n masto_array = [row for row in reader]\n csvfile.close()\n return masto_array\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"87403515","text":"import numpy as np\nimport pandas as pd\n\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.feature_selection import mutual_info_classif\nimport sklearn_relief as relief\nfrom sklearn.feature_selection import GenericUnivariateSelect\n\n\n\n\nclass Preprocess:\n\n def preprocess_method(self, data):\n features_del = []\n\n for feature in range(data.shape[1]):\n\n # Numerical Features\n if type(data[0, feature]) in [float, np.float64]:\n\n # Calculate the mean of this feature of feed NaNs with it\n mean_v = np.nanmean(data[:, feature], dtype=float)\n\n # Calculate the max and min to normalize numerical data between 0 and 1\n max_v = np.nanmax(data[:, feature])\n min_v = np.nanmin(data[:, feature])\n\n for sample in range(data.shape[0]):\n if np.isnan(data[sample, feature]):\n data[sample, feature] = mean_v\n if max_v != 0:\n data[sample, feature] = (data[sample, feature] - min_v) / (max_v - min_v)\n\n # Categorical Features\n if type(data[0, feature]) is bytes:\n # Calculate the mode of this feature\n cat_values = np.unique(data[:, feature])\n moda = max(cat_values, key=lambda x: data[:, feature].tolist().count(x))\n\n # Assign the mode to NaNs\n cond_nan = np.where(data[:, feature] == '?'.encode('utf8'))\n data[cond_nan, feature] = moda\n\n # OneHotEncoding\n data1 = np.array(pd.get_dummies(data[:, feature]))\n data = np.concatenate((data, data1), axis=1)\n\n features_del.append(feature)\n\n # Delete categorical feature\n data = np.delete(data, features_del, 1)\n\n return data","sub_path":"preproc/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"590555233","text":"# -*- coding:utf-8 -*-\n__author__ = 'Anderson'\n\ndef rightStrip(tmpStr, splitStr=' '):\n endindex = tmpStr.rfind(splitStr)\n while endindex != -1 and endindex == len(tmpStr)-1:\n tmpStr = tmpStr[:endindex]\n endindex = 
tmpStr.rfind(splitStr)\n return tmpStr\n\ndef leftStrip(tmpStr, splitStr=' '):\n index = tmpStr.find(splitStr)\n while index == 0:\n tmpStr = tmpStr[1:]\n index = tmpStr.find(splitStr)\n return tmpStr\n\ntmpString = '###hello world###anderson#################'\n\nprint(tmpString)\nprint(rightStrip(tmpString, '#'))\nprint(leftStrip(tmpString, '#'))\n","sub_path":"python_interview/strip.py","file_name":"strip.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"235101318","text":"from flask import Flask, render_template, url_for, request, redirect, flash, Response,jsonify\r\nfrom datetime import datetime\r\nfrom flask_cors import CORS, cross_origin\r\nfrom io import StringIO\r\n#import cStringIO as StringIO\r\nfrom PIL import Image, ImageFont, ImageDraw\r\nimport urllib3\r\nimport numpy as np\r\nimport dlib\r\nimport io\r\nimport pandas as pd\r\nimport json\r\n\r\napp = Flask(__name__)\r\nCORS(app)\r\n\r\ndef cat_uncat():\r\n\txls = pd.ExcelFile('/home/inbox/Desktop/media/Development Data.xlsx')\r\n\t# Data = pd.read_csv('/home/inbox/Desktop/media/Development Data.csv')\r\n\tData = pd.read_excel(xls, 'Source data')\r\n\tData = Data.rename(columns={'Sub Category': 'Sub_Category'})\r\n\tData = Data.rename(columns={'Esp/levers': 'Competetors'})\r\n\tData['SOS'] = (Data['COST W GST'] / Data['Total Net Cost'])\r\n\tData['SOV'] = Data['Brand Grps'] / sum(Data['Index Grps PBs 30 Sec'] + Data['Pb Grps'])\r\n\tCat_Data = Data[(Data['Cat / non cat'] == 'Cat') | (Data['Cat / non cat'] == 'CAT')]\r\n\tNonCat_Data = Data[(Data['Cat / non cat'] == 'Non Cat') | (Data['Cat / non cat'] == 'NON CAT')]\r\n\tDetergents_Cat_lever = Cat_Data[(Cat_Data['Sub_Category'] == 'Detergents') & (Cat_Data['Competetors'] == 'Lever')]\r\n\treturn Detergents_Cat_lever\r\n\r\ndef report():\r\n\t# xls = pd.ExcelFile('/home/inbox/Desktop/media/Development Data.xlsx')\r\n\t# Data = pd.read_csv('/home/inbox/Desktop/media/Development Data.csv')\r\n\t# Data = pd.read_excel(xls, 'Source data')\r\n\t# column_name=Data.columns.values\r\n\t# column_name= json.dumps(column_name)\r\n\txls = pd.ExcelFile('/home/inbox/Desktop/media/Development Data.xlsx')\r\n\t# Data = pd.read_csv('/home/inbox/Desktop/media/Development Data.csv')\r\n\tData = pd.read_excel(xls, 'Source data')\r\n\tData = Data.rename(columns={'Sub Category': 'Sub_Category'})\r\n\tData = Data.rename(columns={'Esp/levers': 'Competetors'})\r\n\r\n\t# Data['SOS'] = (Data[' COST W GST '] / Data[' Total Net Cost ']) * 0.1\r\n\t# Data['SOV'] = Data[' Brand Grps '] / sum(Data[' Index Grps PBs 30 Sec '] + Data[' Pb Grps '])\r\n\r\n\t# Data['SOS'] = (Data['COST W GST'] / Data['Total Net Cost']) * 0.1\r\n\t# Data['SOV'] = Data['Brand Grps'] / sum(Data['Index Grps PBs 30 Sec'] + Data['Pb Grps'])\r\n\r\n\tData['SOS'] = (Data['COST W GST'] / Data['Total Net Cost'])\r\n\tData['SOV'] = Data['Brand Grps'] / sum(Data['Index Grps PBs 30 Sec'] + Data['Pb Grps'])\r\n\r\n\tCat_Data = Data[(Data['Cat / non cat'] == 'Cat') | (Data['Cat / non cat'] == 'CAT')]\r\n\tNonCat_Data = Data[(Data['Cat / non cat'] == 'Non Cat') | (Data['Cat / non cat'] == 'NON CAT')]\r\n\r\n\tCompetetors = Data[Data['Competetors'] == 'Comp']\r\n\tUnilever = Data[Data['Competetors'] == 'Lever']\r\n\thair = Data[(Data.Sub_Category == 'Hair Care')]\r\n\tDetegents = Data[(Data.Sub_Category == 'Detergents')]\r\n\tPersonal_Wash = Data[(Data.Sub_Category == 'Personal Wash')]\r\n\tIce_Cream = Data[(Data.Sub_Category == 'Ice 
Cream')]\r\n\tFace_Care = Data[(Data.Sub_Category == 'Face Care')]\r\n\tNoodles = Data[(Data.Sub_Category == 'Boullion (Noodles)')]\r\n\tTea = Data[(Data.Sub_Category == 'Tea')]\r\n\tCooking_Aids = Data[(Data.Sub_Category == 'Boullion (Cooking Aids)')]\r\n\tSCC_Margarine = Data[(Data.Sub_Category == 'SCC-Margarine')]\r\n\tSauces = Data[(Data.Sub_Category == 'Sauces')]\r\n\tWater = Data[(Data.Sub_Category == 'Water')]\r\n\t# hair.groupby(['BRAND',hair.BRAND.unique()]).sum()\r\n\thair_SOS = hair.groupby(['BRAND', 'SOS']).sum( )\r\n\thair_SOV = hair.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tDetegents_SOS = Detegents.groupby(['BRAND', 'SOS']).sum( )\r\n\tDetegents_SOV = Detegents.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tPersonal_Wash_SOS = Personal_Wash.groupby(['BRAND', 'SOS']).sum( )\r\n\tPersonal_Wash_SOV = Personal_Wash.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tIce_Cream_SOS = Ice_Cream.groupby(['BRAND', 'SOS']).sum( )\r\n\tIce_Cream_SOV = Ice_Cream.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tFace_Care_SOS = Face_Care.groupby(['BRAND', 'SOS']).sum( )\r\n\tFace_Care_SOV = Face_Care.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tNoodles_SOS = Noodles.groupby(['BRAND', 'SOS']).sum( )\r\n\tNoodles_SOV = Noodles.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tTea_SOS = Tea.groupby(['BRAND', 'SOS']).sum( )\r\n\tTea_SOV = Tea.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tCooking_Aids_SOS = Cooking_Aids.groupby(['BRAND', 'SOS']).sum( )\r\n\tCooking_Aids_SOV = Cooking_Aids.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tSCC_Margarine_SOS = SCC_Margarine.groupby(['BRAND', 'SOS']).sum( )\r\n\tSCC_Margarine_SOV = SCC_Margarine.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tSauces_SOS = Sauces.groupby(['BRAND', 'SOS']).sum( )\r\n\tSauces_SOV = Sauces.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tWater_SOS = Water.groupby(['BRAND', 'SOS']).sum( )\r\n\tWater_SOV = Water.groupby(['BRAND', 'SOV']).sum( )\r\n\r\n\tCom_hair = Competetors[(Competetors.Sub_Category == 'Hair Care')]\r\n\tCom_Detegents = Competetors[(Competetors.Sub_Category == 'Detergents')]\r\n\tCom_Personal_Wash = Competetors[(Competetors.Sub_Category == 'Personal Wash')]\r\n\tCom_Ice_Cream = Competetors[(Competetors.Sub_Category == 'Ice Cream')]\r\n\tCom_Face_Care = Competetors[(Competetors.Sub_Category == 'Face Care')]\r\n\tCom_Noodles = Competetors[(Competetors.Sub_Category == 'Boullion (Noodles)')]\r\n\tCom_Tea = Competetors[(Competetors.Sub_Category == 'Tea')]\r\n\tCom_Cooking_Aids = Competetors[(Competetors.Sub_Category == 'Boullion (Cooking Aids)')]\r\n\tCom_SCC_Margarine = Competetors[(Competetors.Sub_Category == 'SCC-Margarine')]\r\n\tCom_Sauces = Competetors[(Competetors.Sub_Category == 'Sauces')]\r\n\tCom_Water = Competetors[(Competetors.Sub_Category == 'Water')]\r\n\r\n\tCom_hair_SOS = Com_hair.groupby(['BRAND', 'SOS']).sum( )\r\n\tCom_hair_SOV = Com_hair.groupby(['BRAND', 'SOV']).sum( )\r\n\t# 
--------------------------------------------------------------\r\n\tCom_Detegents_SOS = Com_Detegents.groupby(['BRAND', 'SOS']).sum( )\r\n\tCom_Detegents_SOV = Com_Detegents.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tCom_Personal_Wash_SOS = Com_Personal_Wash.groupby(['BRAND', 'SOS']).sum( )\r\n\tCom_Personal_Wash_SOV = Com_Personal_Wash.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tCom_Ice_Cream_SOS = Com_Ice_Cream.groupby(['BRAND', 'SOS']).sum( )\r\n\tCom_Ice_Cream_SOV = Com_Ice_Cream.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tCom_Face_Care_SOS = Com_Face_Care.groupby(['BRAND', 'SOS']).sum( )\r\n\tCom_Face_Care_SOV = Com_Face_Care.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tCom_Noodles_SOS = Com_Noodles.groupby(['BRAND', 'SOS']).sum( )\r\n\tCom_Noodles_SOV = Com_Noodles.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tCom_Tea_SOS = Com_Tea.groupby(['BRAND', 'SOS']).sum( )\r\n\tCom_Tea_SOV = Com_Tea.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tCom_Cooking_Aids_SOS = Com_Cooking_Aids.groupby(['BRAND', 'SOS']).sum( )\r\n\tCom_Cooking_Aids_SOV = Com_Cooking_Aids.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tCom_SCC_Margarine_SOS = Com_SCC_Margarine.groupby(['BRAND', 'SOS']).sum( )\r\n\tCom_SCC_Margarine_SOV = Com_SCC_Margarine.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tCom_Sauces_SOS = Com_Sauces.groupby(['BRAND', 'SOS']).sum( )\r\n\tCom_Sauces_SOV = Com_Sauces.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tCom_Water_SOS = Com_Water.groupby(['BRAND', 'SOS']).sum( )\r\n\tCom_Water_SOV = Com_Water.groupby(['BRAND', 'SOV']).sum( )\r\n\r\n\tLever_hair = Unilever[(Unilever.Sub_Category == 'Hair Care')]\r\n\tLever_Detegents = Unilever[(Unilever.Sub_Category == 'Detergents')]\r\n\tLever_Personal_Wash = Unilever[(Unilever.Sub_Category == 'Personal Wash')]\r\n\tLever_Ice_Cream = Unilever[(Unilever.Sub_Category == 'Ice Cream')]\r\n\tLever_Face_Care = Unilever[(Unilever.Sub_Category == 'Face Care')]\r\n\tLever_Noodles = Unilever[(Unilever.Sub_Category == 'Boullion (Noodles)')]\r\n\tLever_Tea = Unilever[(Unilever.Sub_Category == 'Tea')]\r\n\tLever_Cooking_Aids = Unilever[(Unilever.Sub_Category == 'Boullion (Cooking Aids)')]\r\n\tLever_SCC_Margarine = Unilever[(Unilever.Sub_Category == 'SCC-Margarine')]\r\n\tLever_Sauces = Unilever[(Unilever.Sub_Category == 'Sauces')]\r\n\tLever_Water = Unilever[(Unilever.Sub_Category == 'Water')]\r\n\r\n\tLever_hair_SOS = Lever_hair.groupby(['BRAND', 'SOS']).sum( )\r\n\tLever_hair_SOV = Lever_hair.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tLever_Detegents_SOS = Lever_Detegents.groupby(['BRAND', 'SOS']).sum( )\r\n\tLever_Detegents_SOV = Lever_Detegents.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tLever_Personal_Wash_SOS = Lever_Personal_Wash.groupby(['BRAND', 'SOS']).sum( )\r\n\tLever_Personal_Wash_SOV = Lever_Personal_Wash.groupby(['BRAND', 'SOV']).sum( )\r\n\t# 
--------------------------------------------------------------\r\n\tLever_Ice_Cream_SOS = Lever_Ice_Cream.groupby(['BRAND', 'SOS']).sum( )\r\n\tLever_Ice_Cream_SOV = Lever_Ice_Cream.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tLever_Face_Care_SOS = Lever_Face_Care.groupby(['BRAND', 'SOS']).sum( )\r\n\tLever_Face_Care_SOV = Lever_Face_Care.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tLever_Noodles_SOS = Lever_Noodles.groupby(['BRAND', 'SOS']).sum( )\r\n\tLever_Noodles_SOV = Lever_Noodles.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tLever_Tea_SOS = Lever_Tea.groupby(['BRAND', 'SOS']).sum( )\r\n\tLever_Tea_SOV = Lever_Tea.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tLever_Cooking_Aids_SOS = Lever_Cooking_Aids.groupby(['BRAND', 'SOS']).sum( )\r\n\tLever_Cooking_Aids_SOV = Lever_Cooking_Aids.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tLever_SCC_Margarine_SOS = Lever_SCC_Margarine.groupby(['BRAND', 'SOS']).sum( )\r\n\tLever_SCC_Margarine_SOV = Lever_SCC_Margarine.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tLever_Sauces_SOS = Lever_Sauces.groupby(['BRAND', 'SOS']).sum( )\r\n\tLever_Sauces_SOV = Lever_Sauces.groupby(['BRAND', 'SOV']).sum( )\r\n\t# --------------------------------------------------------------\r\n\tLever_Water_SOS = Lever_Water.groupby(['BRAND', 'SOS']).sum( )\r\n\tLever_Water_SOV = Lever_Water.groupby(['BRAND', 'SOV']).sum( )\r\n\r\n\r\n\tcolumn_name=Data.columns.values\r\n\treturn \tLever_hair_SOS['SOV']\r\n\r\ndef sub_categories():\r\n\tData = pd.read_csv('/home/inbox/Desktop/media/Development Data.csv')\r\n\tData = Data.rename(columns={'Sub Category': 'Sub_Category'})\r\n\treturn Data['Sub_Category'].unique()\r\n\r\n\r\n@app.route(\"/\")\r\ndef landingpage():\r\n return render_template(\"elements_cards.html\")\r\n\r\n@app.route(\"/charts\")\r\ndef emcharts():\r\n return render_template(\"charts.html\")\r\n\r\n\r\n@app.route(\"/emcharts\")\r\ndef emmcharts():\r\n return render_template(\"emcharts.html\")\r\n\r\n\r\n\r\n@app.route('/dashboard', methods=['GET','POST'])\r\ndef dashboard():\r\n return render_template(\"dashboard.html\")\r\n\r\n@app.route('/elements_cards', methods=['GET','POST'])\r\ndef elements_cards():\r\n return render_template('elements_cards.html')\r\n\r\n# @app.route('/charts_index', methods=['GET','POST'])\r\n# def charts_index():\r\n# return render_template('index1.html')\r\n\r\n@app.route('/index1', methods=['GET','POST'])\r\ndef index1():\r\n return render_template('index1.html')\r\n\r\n@app.route('/pie1.html', methods=['GET','POST'])\r\ndef pie():\r\n return render_template('pie1.html')\r\n\r\n@app.route('/pie2.html', methods=['GET','POST'])\r\ndef pie1():\r\n return render_template('pie2.html')\r\n\r\n@app.route('/pie3.html', methods=['GET','POST'])\r\ndef pie2():\r\n return render_template('pie3.html')\r\n\r\n@app.route('/pie4.html', methods=['GET','POST'])\r\ndef pie3():\r\n return render_template('pie4.html')\r\n\r\n@app.route('/serial2.html', methods=['GET','POST'])\r\ndef serial2():\r\n return render_template('serial2.html')\r\n\r\n@app.route('/serial1.html', methods=['GET','POST'])\r\ndef serial1():\r\n return render_template('serial1.html')\r\n\r\n@app.route('/serial3.html', 
methods=['GET','POST'])\r\ndef serial3():\r\n return render_template('serial3.html')\r\n\r\n@app.route('/xy.html', methods=['GET','POST'])\r\ndef xy():\r\n return render_template('xy.html')\r\n\r\n@app.route('/radar.html', methods=['GET','POST'])\r\ndef radar():\r\n return render_template('radar.html')\r\n\r\n\r\n@app.route('/funnel.html', methods=['GET','POST'])\r\ndef funnel():\r\n return render_template('funnel.html')\r\n\r\n@app.route('/stock.html', methods=['GET','POST'])\r\ndef stock():\r\n return render_template('stock.html')\r\n\r\n\r\n@app.route('/exports.css', methods=['GET','POST'])\r\ndef exports():\r\n return render_template('exports.css')\r\n\r\n\r\n@app.route('/export.js', methods=['GET','POST'])\r\ndef js():\r\n return render_template('exports.js')\r\n\r\n\r\n# @app.route('/stock.html', methods=['GET','POST'])\r\n# def stock():\r\n# return render_template('stock.html')\r\n''\r\n#======================================================================\r\n#report processing\r\n# def report():\r\n# \t# xls = pd.ExcelFile('/home/inbox/Desktop/media/Development Data.xlsx')\r\n# \tData = pd.read_csv('/home/inbox/Desktop/media/Development Data.csv')\r\n# \t# Data = pd.read_excel(xls, 'Source data')\r\n# \tcolumn_name=Data.columns.values\r\n# \treturn column_name\r\n\r\n#======================================================================\r\n\r\n#======================================================================\r\n#Dlib\r\n@app.route('/index', methods=['GET','POST'])\r\ndef index():\r\n return render_template('index.html')\r\n\r\ndef gen(anom_type):\r\n\tif anom_type==\"mobilephones\":\r\n\t\tdetector=dlib.simple_object_detector(\"detector.svm\")\r\n\telif anom_type==\"ciggarette\":\r\n\t\tdetector=dlib.simple_object_detector(\"cigg_detector.svm\")\r\n\telif anom_type==\"id\":\r\n\t\tdetector=dlib.simple_object_detector(\"ID_detector.svm\")\r\n\t\r\n\ttry:\r\n\t\thost = \"10.15.2.7:8080/video\"\r\n\t\thoststr = 'http://' + host\r\n\r\n\t\tstream=urllib2.urlopen(hoststr)\r\n\r\n\t\tbytes=''\r\n\r\n\t\twhile True:\r\n\t\t\tbytes+=stream.read(1024)\r\n\t\t\ta = bytes.find('\\xff\\xd8')\r\n\t\t\tb = bytes.find('\\xff\\xd9')\r\n\t\t\tif a!=-1 and b!=-1:\r\n\t\t\t\tjpg = bytes[a:b+2]\r\n\t\t\t\tbytes= bytes[b+2:]\r\n\t\t\t\tstreamline = StringIO.StringIO(jpg)\r\n\t\t\t\timg = Image.open(streamline)\r\n\t\t\t\t\r\n\r\n\r\n\t\t\t\t#basewidth = 300\r\n\t\t\t\t#wpercent = (basewidth/float(img.size[0]))\r\n\t\t\t\t#hsize = int((float(img.size[1])*float(wpercent)))\r\n\t\t\t\t#img = img.resize((basewidth,hsize), PIL.Image.ANTIALIAS)\r\n\r\n\t\t\t\tframe=np.array(img)\t\t\r\n\t\t\t\t\r\n\t\t\t\tcolor = np.array([0, 255, 0], dtype=np.uint8)\r\n\t\t\t\tdets = detector(frame)\r\n\t\t\t\tfor k, d in enumerate(dets):\r\n\t\t\t\t\tprint(\"Mobile Detected\")\r\n\t\t\t\t\tboundingbox=(d.left(), d.top()), (d.right(), d.bottom())\r\n\t\t\t\t\tim = Image.fromarray(frame)\r\n\t\t\t\t\tdr = ImageDraw.Draw(im)\r\n\t\t\t\t\tdr.rectangle(((d.left(),d.top()),(d.right(),d.bottom())), outline = \"blue\")\r\n\t\t\t\t\tframe=np.array(im)\r\n\t\t\t\tconvjpg = Image.fromarray(frame)\r\n\t\t\t\timgByteArr=io.BytesIO()\r\n\t\t\t\tconvjpg.save(imgByteArr,format=\"jpeg\")\r\n\t\t\t\timgByteArr=imgByteArr.getvalue()\t\t\t\t\r\n\t\t\t\t#print(\"-------------\")\r\n\t\t\t\t#print(convjpg)\r\n\t\t\t\t#print(frame)\r\n\t\t\t\tyield (b'--frame\\r\\n'b'Content-Type: image/jpeg\\r\\n\\r\\n' + imgByteArr + b'\\r\\n')\r\n\texcept Exception as e:\r\n\t\tpass\r\n\r\n@app.route('/raspberry/')\r\ndef 
raspberry(input_str):\r\n\treturn Response(gen(input_str),\r\n\t\tmimetype='multipart/x-mixed-replace; boundary=frame')\r\n\r\n\r\n@app.route('/component', methods=['GET','POST'])\r\ndef component():\r\n\t# print(report())\r\n\t# a={1:'test',2:'testing'}\r\n\t# print(jsonify(a))\r\n\treturn render_template('component.html')\r\n\t# return render_template('component.html',data=report().to_html(),categories=sub_categories())\r\n\r\n@app.route('/component1', methods=['GET','POST'])\r\ndef component1():\r\n\t# print(cat_uncat())\r\n\t# a={1:'test',2:'testing'}\r\n\t# print(jsonify(a))\r\n\treturn render_template('component1.html')\r\n\r\n\r\n\r\n#========================================================================\r\n#gis\r\n@app.route('/gis', methods=['GET','POST'])\r\ndef gis():\r\n return render_template('gis.html')\r\n\r\n@app.route('/mapWindow', methods=['GET','POST'])\r\ndef mapWindow():\r\n return render_template('mapWindow.html')\r\n#========================================================================\t\r\n#@app.route('/singlepage', methods=['GET','POST'])\r\n#def singlepage():\r\n# return render_template('singlepage.html')\r\n#========================================================================\r\n#Main Starts Here\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":17301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"635394376","text":"# Write your solutions for 1.5 here!\nclass Superheros:\n\tdef __init__(self, name, superpower, strength):\n\t\tself.name=name\n\t\tself.superpower=superpower\n\t\tself.strength=strength\n\n\tdef PrintNameAndStrength(self):\n\t\tprint(\"The superhero's name is : \" + self.name + \" and his strength is: \" + self.strength)\n\n\tdef save_civilian(self,work):\n\t\tif self.strength>self.work:\n\t\t\tprint(self.strength-self.work)\n\t\telse:\n\t\t\tprint(\"Superhero is not strong enough! 
:(\")\n\t\t\t\ns=Superheros(\"Superman\",\"Flying\",100)\ns.PrintNameAndStrength()\ns.save_civilian(70)","sub_path":"exercises/superheros.py","file_name":"superheros.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"9836058","text":"\nimport numpy as np\nimport argparse\nfrom collections import OrderedDict\nimport os\nimport time\nimport copy\nimport matplotlib.pyplot as plt\nimport random\nfrom rdkit import Chem\nimport torch\nfrom rdkit.Chem.QED import qed\nfrom tqdm import tqdm\nplt.style.use('ggplot')\nimport ipdb\nfrom utils.helpers import (argmax,stable_normalizer)\nfrom environment.molecule_state import MolState\nfrom time import time\nBOND_TYPES = 3\nMAX_NODES = 9\ncalled = 0\ntotal_time = []\ncache = {}\n##### MCTS functions #####\n\ndef get_select_indexes(state):\n arr = []\n nodes = len(state.mol_graph.ndata['x'])\n if nodes not in cache:\n for i in range(nodes):\n arr.append(np.arange(i*nodes*BOND_TYPES,(i+1)*nodes*BOND_TYPES))\n arr.append(np.zeros((MAX_NODES-nodes)*BOND_TYPES))\n arr = np.hstack(arr)\n arr = np.pad(arr,(0,state.na-len(arr)),constant_values=0)\n\n cache[nodes] = arr.copy()\n\n return cache[nodes]\n \ndef actiontolist(action):\n \"\"\"\n \n \"\"\"\n return [ (action//(3*MAX_NODES)) , (action//3)%MAX_NODES , action%3] \nclass Action():\n ''' Action object '''\n def __init__(self,index,parent_state,Q_init=0.0):\n self.index = index\n self.parent_state = parent_state\n self.W = 0.0\n self.n = 0\n self.Q = Q_init\n \n def add_child_state(self,s1,r,terminal,model,valuemodel):\n # print(terminal)\n self.child_state = State(s1,r,terminal,self,self.parent_state.na,model,valuemodel)\n return self.child_state\n \n def update(self,R):\n self.n += 1\n self.W += R\n self.Q = self.W/self.n\n\nclass State():\n ''' State object '''\n\n def __init__(self,mol_state,r,terminal,parent_action,na,model,valuemodel):\n ''' Initialize a new state '''\n \n self.mol_state = copy.deepcopy(mol_state)\n self.mol_graph = self.mol_state.molGraph # state (constructed molecule) + individual atoms\n \n self.r = r # reward upon arriving in this state\n self.terminal = terminal # whether the domain terminated in this state\n self.parent_action = parent_action\n self.n = 0\n self.model = model\n\n self.valuemodel = valuemodel\n \n if not self.terminal:\n self.calc_rollout_reward()\n else:\n self.rollout_reward = 0.0\n # Child actions\n self.na = na # will be fixed for us\n self.action_mask = self.mol_state.action_mask.copy() #better to have a standalone function also.\n self.index_mask = get_select_indexes(self)*self.action_mask\n self.action_1_mask = self.action_mask.copy()\n\n self.N_value = torch.zeros(na)\n self.W_value = torch.zeros(na)\n self.Q_value = torch.zeros(na)\n self.Q_value[self.action_mask==0] = -100\n\n self.child_actions = list(np.zeros(na))\n \n for i in np.where(self.action_mask)[0]:\n self.child_actions[i] = (Action(i,parent_state=self,Q_init=self.Q_value[i]))\n \n\n self.model = model\n self.priors = model.predict_pi(self.mol_graph,self.action_mask,self.action_1_mask,self.index_mask)\n self.priors[(self.action_mask==0).nonzero()] = -1e8\n self.priors = (torch.softmax(self.priors,dim=0)*torch.FloatTensor(self.action_mask)).flatten()\n self.addPriorNoise()\n\n def update_Q(self,R,index):\n self.N_value[index] += 1\n self.W_value[index] += R\n self.Q_value[index] = self.W_value[index]/self.N_value[index]\n\n \n\n def select(self,c=0.01):\n ''' Select one of the child actions based 
on UCT rule '''\n random.seed()\n UCT = self.Q_value + (self.priors*c*np.sqrt(self.n+1)/(self.N_value+1))\n # winner = argmax(UCT)\n winner = random.choice(torch.where(UCT==torch.max(UCT))[0])\n\n return self.child_actions[winner]\n\n def addPriorNoise(self):\n \"\"\"\n Adds dirichlet noise to priors.\n Called when the state is the root node\n \"\"\"\n np.random.seed()\n alpha = 2/10\n e = 0.25\n noise = np.random.dirichlet([alpha] * int(sum(self.action_mask)))\n noiseReshape = np.zeros(self.action_mask.shape)\n noiseIdx = 0\n for i in range(len(self.action_mask)):\n if self.action_mask[i]:\n noiseReshape[i] = noise[noiseIdx]\n noiseIdx += 1\n\n self.priors = (1-e) * self.priors + e * noiseReshape\n\n def step(self, action):\n # print(action,\"Action\")\n ret = self.env.step(self.env._actionIntToList(int(action)))\n # print(ret,\"Ret\")\n self.env, self.r , self.terminal = ret\n\n def calc_rollout_reward(self):\n \"\"\"\n performs R random rollout, the total reward in each rollout is computed.\n returns: mean across the R random rollouts.\n \"\"\"\n self.rollout_reward = self.model.predict_V(self.mol_state.molGraph)[0]\n return self.rollout_reward\n\n def do_rollout(self,k):\n raise \"This Function is deprecated. Use approximator instead.\"\n\n def update(self):\n ''' update count on backward pass '''\n self.n += 1\n \nclass MCTS():\n ''' MCTS object '''\n\n def __init__(self,root,root_molstate,model,valuemodel,na,gamma):\n self.root = root\n self.root_molstate = root_molstate\n self.model = model\n self.valuemodel = valuemodel\n self.na = na\n self.gamma = gamma\n \n def search(self,n_mcts,c,Env):\n ''' Perform the MCTS search from the root '''\n if self.root is None:\n self.root = State(self.root_molstate,r=0.0,terminal=False,parent_action=None,na=self.na,model=self.model,valuemodel=self.valuemodel) # initialize new root\n else:\n self.root.parent_action = None # continue from current root\n \n if self.root.terminal:\n raise(ValueError(\"Can't do tree search from a terminal state\"))\n for i in range(n_mcts):\n state = self.root # reset to root for new trace\n mcts_env = copy.deepcopy(Env) \n\n flag = 0\n while not state.terminal:\n action = state.select(c=c)\n return_step = mcts_env.step(int(action.index))\n s1,r,t = return_step\n if hasattr(action,'child_state'):\n state = action.child_state # select\n continue\n else:\n \n state = action.add_child_state(s1,r,t,self.model,self.valuemodel) # expand\n \n break\n\n # Back-up \n R = state.rollout_reward\n while state.parent_action is not None: # loop back-up until root is reached\n R = state.r + self.gamma * R \n action = state.parent_action\n state = action.parent_state\n state.update_Q(R,action.index)\n state.update() \n del mcts_env\n\n def return_results(self,temp):\n ''' Process the output at the root node '''\n counts = self.root.N_value\n Q = self.root.Q_value\n pi_target = stable_normalizer(counts,temp)\n action_1 = np.expand_dims(np.array(pi_target),-1).reshape(MAX_NODES,-1)\n action_1 = np.sum(action_1,1)\n V_target = torch.sum((counts/torch.sum(counts))*Q)\n return self.root, pi_target.numpy(), torch.FloatTensor([V_target]), action_1\n \n def forward(self,a,s1):\n ''' Move the root forward '''\n if not hasattr(self.root.child_actions[a],'child_state'):\n print(\"ERROR\")\n self.root = None\n self.env = s1\n else:\n self.root = 
self.root.child_actions[a].child_state\n","sub_path":"source/test/mcts.py","file_name":"mcts.py","file_ext":"py","file_size_in_byte":7777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"6676971","text":"\"\"\"InnoClubs URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\n\nfrom . import views\n\n\nurlpatterns = [\n\n path('get_auth_url/', views.get_auth_url, name='get-auth-url'),\n path('microsoft/login/', views.OutlookLogin.as_view(), name='user-login'),\n\n path('user_profile/', views.UserProfileRUDView.as_view(), name='user-profile'),\n\n path('get_clubs/', views.ListClubsView.as_view(), name='clubs-view'),\n path('club_profile/', views.RUDClubView.as_view(), name='club-view'),\n\n path('create_club/', views.CreateClubView.as_view(), name='club-create'),\n path('join_club/', views.JoinClubView.as_view(), name='join-club'),\n path('leave_club/', views.LeaveClubView.as_view(), name='leave-club'),\n path('change_club_header/', views.ChangeClubHeaderView.as_view(), name='change-club-header')\n\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"309686039","text":"# import gmplot package\nimport gmplot\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\n# Use a service account\ncred = credentials.Certificate(\n r'C:\\Users\\skukr\\Downloads\\sihhackathon-99c65-firebase-adminsdk-jggbr-4dc0ace0d6.json')\nfirebase_admin.initialize_app(cred)\n\ndb = firestore.client()\ndocs = db.collection(u'accident').stream()\nlatitude_list = []\nlongitude_list = []\n\nfor doc in docs:\n doc = doc.to_dict()\n latitude_list.append(doc['Latitude'])\n longitude_list.append(doc['Longitude'])\n\n\ngmap3 = gmplot.GoogleMapPlotter(18.45747,\n 73.85, 13)\n\n# scatter method of map object\n# scatter points on the google map\ngmap3.scatter(latitude_list, longitude_list, '#ff0000',\n size=40, marker=False)\n\n# Plot method Draw a line in\n# between given coordinates\ngmap3.apikey = \"AIzaSyDne1iEolZXx1fbNHTyIMpRl2dsanKKqS8\"\n\ngmap3.draw(\"map.html\")\n","sub_path":"Map/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"436348978","text":"\n\n#calss header\nclass _DECREPIT():\n\tdef __init__(self,): \n\t\tself.name = \"DECREPIT\"\n\t\tself.definitions = [u'in very bad condition because of being old, or not having been cared for, or having been used a lot: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn 
self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_decrepit.py","file_name":"_decrepit.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"609919420","text":"\"\"\"\nSetting up settings, living under XDG paths.\n\nMore information: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html\n\n\"\"\"\n\nfrom pathlib import Path\n\nfrom dynaconf import Dynaconf\nfrom xdg import xdg_config_home\n\n# logging and application settings files\nLOGGING_CONF_FILE = str(xdg_config_home().joinpath(\"labe/logging.ini\"))\nCONFIG_FILE = str(xdg_config_home().joinpath(\"labe/settings.ini\"))\nENVVAR_PREFIX_FOR_DYNACONF = \"LABE\"\nENV_FOR_DYNACONF = \"default\"\n\nsettings = Dynaconf(\n ENVIRONMENTS=True,\n ENV_FOR_DYNACONF=ENV_FOR_DYNACONF,\n ENVVAR_PREFIX_FOR_DYNACONF=ENVVAR_PREFIX_FOR_DYNACONF,\n SETTINGS_FILE_FOR_DYNACONF=CONFIG_FILE,\n)\n","sub_path":"labe/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"620145986","text":"from util import tracker, result\nimport os\n\ndef take_root(x):\n if x >= 0:\n return x**0.5\n else:\n return -((-x)**0.5)\n\ndef iq_process(prn, ac, tr, decode_period, reset_integ_period, acquired,new_dir_name):\n if acquired:\n tr.medium_estimate_freq(0, 1)\n Iss = []\n Qss = []\n power_monitor = []\n phis = []\n i_pass = []\n iq_pow_diffs = []\n phi_bs = []\n filtered_lags = []\n lags=[]\n for i in range(0,decode_period):\n #if i % reset_integ_period == 0: tr.filter_integ_reset()\n Is_1, Qs_1, phi_1, phi_b1,filter_lag1,delta_lag1 = tr.step(i, i+tr.tc*1000)\n tr.shift_filters()\n qi_power_ratio_1 = (Qs_1 ** 2 / Is_1 ** 2) ** 0.5\n iq_pow_diff_1 = take_root(Is_1 ** 2 - Qs_1 ** 2)\n\n if iq_pow_diff_1 < 100000:\n print(\"lost track, qi power too low\", iq_pow_diff_1)\n lag, coarse, X_plot, Y_plot, global_result, acquired, peak_above_mean = ac.acquire(i, i + 1,\n prn=prn,\n quiet=True)\n # tr.reacquire_reset()\n if peak_above_mean <= 3:\n print('satellite gone:', prn)\n break\n\n tr.code_shift_out = lag\n tr.medium_estimate_freq(i, i + tr.tc*1000)\n print('new acquisition: code:', tr.code_shift_out, ' new freq:', tr.carrier_freq_out,\n 'peak above mean:',\n peak_above_mean)\n\n Is_2, Qs_2, phi_2, phi_b2,filter_lag2,delta_lag2 = tr.step(i, i + tr.tc*1000)\n\n qi_power_ratio_2 = (Qs_2 ** 2 / Is_2 ** 2) ** 0.5\n iq_pow_diff_2 = take_root(Is_2 ** 2 - Qs_2 ** 2)\n\n if iq_pow_diff_1 > iq_pow_diff_2:\n print('NEW QI difference', iq_pow_diff_2, 'DO NOT TAKE REQCIAITION')\n Is = Is_1\n Qs = Qs_1\n phi = phi_1\n phi_b = phi_b1\n qi_power_ratio = qi_power_ratio_1\n filtered_lag = filter_lag1\n lag = delta_lag1\n iq_pow_diff = iq_pow_diff_1\n else:\n Is = Is_2\n Qs = Qs_2\n phi = phi_2\n phi_b = phi_b2\n qi_power_ratio = qi_power_ratio_2\n filtered_lag = filter_lag2\n lag = delta_lag2\n iq_pow_diff = iq_pow_diff_2\n else:\n Is = Is_1\n Qs = Qs_1\n phi = phi_1\n phi_b = phi_b1\n qi_power_ratio = qi_power_ratio_1\n filtered_lag = filter_lag1\n lag = delta_lag1\n iq_pow_diff = iq_pow_diff_1\n\n print('Is',Is,'Qs',Qs,'pow diff',iq_pow_diff)\n iq_pow_diffs.append(iq_pow_diff)\n i_pass.append(i)\n phis.append(phi)\n Iss.append(Is)\n Qss.append(Qs)\n phi_bs.append(phi_b)\n power_monitor.append(qi_power_ratio)\n filtered_lags.append(filtered_lag)\n lags.append(lag)\n print('iteration i:', i, ', q/i power', qi_power_ratio, 'freq:', 
tr.carrier_freq_out, 'code:',\n tr.code_shift_out)\n print(' ')\n r = result.Result(Iss, Qss, i_pass, power_monitor, phis, iq_pow_diffs,phi_bs,filtered_lags,lags)\n sub_dir_name = 'reset_period' + str(reset_integ_period) + '_decoded_period_' + str(\n decode_period) + '_dll_filter_para_' + str(\n tr.get_dll_filter_para()) + '_pll_filter_para_' + str(tr.get_pll_filter_para())\n dir_name = new_dir_name + '/' + sub_dir_name\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n\n print('SAVING RESULT TO', dir_name)\n r.save_result(dir_name)\n print('')\n","sub_path":"util/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"578153027","text":"from Stack import Stack\nfrom Node import Node\n\n\nclass AnimalStack:\n\n def __init__(self):\n self.dog = Stack()\n self.cat = Stack()\n\n def push(self, type, name, age):\n if type == 'cat':\n ncat = Node(name)\n ncat.age = age\n self.cat.push(ncat)\n elif type == 'dog':\n ndog = Node(name)\n ndog.age = age\n self.dog.push(ndog)\n","sub_path":"3-StacksQueues/AnimalStack.py","file_name":"AnimalStack.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"213539393","text":"#\r\n# @lc app=leetcode id=438 lang=python3\r\n#\r\n# [438] Find All Anagrams in a String\r\n#\r\n\r\n# @lc code=start\r\nclass Solution:\r\n def findAnagrams(self, s: str, p: str) -> List[int]:\r\n countP, countS = [0]*256, [0]*256\r\n lenP, res = len(p), []\r\n length = start = end = 0\r\n\r\n for c in p:\r\n countP[ord(c)] += 1\r\n\r\n while end < len(s):\r\n countS[ord(s[end])] += 1\r\n if countS[ord(s[end])] <= countP[ord(s[end])]:\r\n length += 1\r\n if length == lenP:\r\n while countS[ord(s[start])] > countP[ord(s[start])] or countP[ord(s[start])] == 0:\r\n if countP[ord(s[start])] != 0:\r\n countS[ord(s[start])] -= 1\r\n start += 1\r\n if end - start + 1 == lenP:\r\n res.append(start)\r\n end += 1\r\n \r\n return res\r\n# @lc code=end\r\n\r\n","sub_path":"java/leetcode/438.find-all-anagrams-in-a-string.py","file_name":"438.find-all-anagrams-in-a-string.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"601235614","text":"#!/usr/bin/env python\n\"\"\"docstring for entire document\"\"\"\nfrom __future__ import print_function\nimport sys\nimport os\n\nimport numpy as np\nimport pylab as plt\nimport pandas as pd\n\n\nsys.path.append('/Users/luke/GitProjects/OceanLearnCO2/')\nos.chdir('/Users/luke/GoogleDrive/PhD/Scripts')\n\nimport main_LoadData as load_data\nimport main_DataPrep as data_prep\nimport main_Figures as figures\nfrom sklearn.metrics import r2_score, mean_squared_error\n\n\ndef construct_model(config_filename):\n \"\"\"\n This function is split into three sections:\n 1. Data import and preperation\n 2. Model setup and prediction\n 3. 
Saving data and figures\n Each of these sections is commented\n \"\"\"\n\n params = load_data.get_config_file(config_filename)\n params['results_dir'] = '/'.join(config_filename.split('/')[:-1]) + '/'\n\n # a failsafe for ANNs: Ensures that data is not scaled\n if params.get('model').__name__ == 'MATLAB_ffn':\n params.pop('scaling')\n params['train_size'] = 0.999\n\n # =============================================================================\n # DATA IMPORT AND PREPERATION\n # =============================================================================\n # Here the data is imported and modified according to the config file\n # 1. Importing data from from training file as pandas dataframe\n # 2. Adding certain features, removing nans and creating a location index\n # 3. In the parameter length scale estimation use only support vectors\n # 4. A scaler is created before data is split into training and test\n # 5. Data is split into test and training. The output is pandas dataframe\n # with the index being defined in step 2. THas to be done before scaling\n # 6. Scale the data once it has been transformed. This will become more\n # complex as Gaussian Process regression is implimented\n # =============================================================================\n # 1. Importing data\n print(\"Importing Data\")\n # data_orig = load_data.training(params.get('training_filename'),\n # params.get('headers_filename'))\n data_orig = pd.read_csv(params.get('training_filename'))\n # 2. Add features and remove unwanted columns and rows (NaNs)\n data = data_prep.add_features(data_orig, params)\n # 3. Apply gp to only support vectors\n support_fname = params.get('support_vector_config', None)\n if support_fname:\n data = data_prep.only_support_vectors(data_orig, data, support_fname)\n # 4. Creating data scaler\n scaler = data_prep.generate_scaler(data)\n # 5. Split data into training and testing\n train_x, test_x, train_y, test_y, x_names = data_prep.data_split(data, params.get('train_size'))\n train_i = train_x.index.values # has to be done before scaling\n # 6. Scaling x axes\n if 'scaling' in params:\n print('Scaling data ', end='')\n train_x = scaler.transform(train_x)\n test_x = scaler.transform(test_x)\n if 'gp_scaling' in params:\n print('to Gaussian Processes estimates', end='')\n gp_scale = pd.read_csv(params['gp_scaling'])\n assert gp_scale.shape[-1] == train_x.shape[-1], 'GP scaling params != train data shape'\n assert np.all(gp_scale.columns == x_names), 'GP scaling params != train data columns'\n train_x = gp_scale.values.squeeze() * train_x\n test_x = gp_scale.values.squeeze() * test_x\n print('')\n print('Size of the array:', data.shape)\n # end of section\n\n # =============================================================================\n # MODEL SETUP AND PREDICTION\n # =============================================================================\n # The following lines impliment the model itself. In this step the following\n # is done:\n # 1. Creating a estimator object from sklearn object defined in config\n # 2. Training the model using training data\n # 3. Estimating the fCO2 for the training and test data\n # =============================================================================\n # 1. 
Setting up model estimator\n\n if False: # testing\n np.savez('/Users/luke/Desktop/construction_data', {'x': train_x, 'y': train_y,\n 'x_test': test_x, 'y_test':test_y})\n\n print('Setting up model and fitting data')\n estimator = params.get('model')(**params.get('grid_params'))\n estimator.config_file = config_filename\n # 2. Fit data using the input X and output y\n estimator.fit(train_x, train_y)\n # 3. Estimating training data\n train_y_hat = estimator.predict(train_x)\n test_y_hat = estimator.predict(test_x)\n # end of section\n\n # =============================================================================\n # SAVING DATA AND FIGURES\n # =============================================================================\n # Now save the model and the figures of the data that has been created in the\n # script above. This has been changed to store the data as a dictionary instead\n # of an object (avoids some import intricacies).\n # 1. Writing dictionary and saving data to saved_model\n # 2. Making and saving plots\n # =============================================================================\n # 1. Saving objects to file\n save_data = {'model': estimator,\n 'score_mse_test': mean_squared_error(test_y, test_y_hat),\n 'score_mse_train': mean_squared_error(train_y, train_y_hat),\n 'score_r2_test': r2_score(test_y, test_y_hat),\n 'score_r2_train': r2_score(train_y, train_y_hat),\n 'index_training_points': train_i,\n 'input_names': x_names,\n 'scaler': scaler}\n np.save(params.get('results_dir') + 'saved_model', save_data)\n # 2. Making and saving plots\n if params['residual_plots']:\n fig1 = figures.residuals_plot(estimator, train_y, train_y_hat, 'Training Data')\n fig2 = figures.residuals_plot(estimator, test_y, test_y_hat, 'Test Data')\n\n fig1.savefig(params.get('results_dir') + 'plot_residuals_train.png')\n fig2.savefig(params.get('results_dir') + 'plot_residuals_test.png')\n\n if params['residual_plots_show']:\n plt.show()\n","sub_path":"main_CONSTRUCT.py","file_name":"main_CONSTRUCT.py","file_ext":"py","file_size_in_byte":6386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"583566809","text":"# !/usr/bin/env python\n# -*- coding:utf8 -*-\n# @TIME :2019/4/20 18:05\n# @Author : 洪松\n# @File : AnotherWay1.py\n\n\nimport csv\nimport time\nimport term\n\n\nf1, f2, f3, f4 = 0, 0, 0, 0\nyuanyin = ['\\u0F72', '\\u0F74', '\\u0F7A', '\\u0F7C']\nyuanyindic = {'\\u0F72': f1, '\\u0F74': f2, '\\u0F7A': f3, '\\u0F7C': f4}\n\nd1, d2, d3, d4 = 0, 0, 0, 0\nxiajia = ['\\u0FAD', '\\u0FB1', '\\u0FB2', '\\u0FB3']\nxiajiadic = {'\\u0FAD': d1, '\\u0FB1': d2, '\\u0FB2': d3, '\\u0FB3': d4}\n\ne = 0\nzaixiajia = ['\\u0FAD']\nzaixiajiadic = {'\\u0FAD': e}\n\nh1, h2, h3, h4, h5, h6, h7, h8, h9, h10 = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\nhoujia = ['ག', 'ང', 'ད', 'ན', 'བ', 'མ', 'ར', 'ལ', 'ས', 'འ']\nhoujiadic = {'ག': h1, 'ང': h2, 'ད': h3, 'ན': h4, 'བ': h5, 'མ': h6, 'ར': h7, 'ལ': h8, 'ས': h9, 'འ': h10}\n\na1, a2, a3, a4, a5 = 0, 0, 0, 0, 0\nqianjia = ['ག', 'ད', 'བ', 'མ', 'འ']\nqianjiadic = {'ག': a1, 'ད': a2, 'བ': a3, 'མ': a4, 'འ': a5}\n\nb1, b2, b3 = 0, 0, 0\nshangjia = ['ར', 'ལ', 'ས']\nshangjiadic = {'ར': b1, 'ལ': b2, 'ས': b3}\n\nl1, l2 = 0, 0\nzaihoujia = ['ད', 'ས']\nzaihoujiadic = {'ད': l1, 'ས': l2}\n\n\njizi = ['ཀ', 'ཁ', 'ག', 'ང', 'ཅ', 'ཆ', 'ཇ', 'ཉ', 'ཏ', 'ཐ', 'ད', 'ན', 'པ', 'ཕ',\n 'བ', 'མ', 'ཙ', 'ཚ', 'ཛ', 'ཝ', 'ཞ', 'ཟ', 'འ', 'ཡ', 'ར', 'ལ', 'ཤ', 'ས', 'ཧ', 'ཨ']\nc1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, 
c23, c24, c25, c26, c27, c28, c29, c30 = 0, 0, \\\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\njizidic = {\n 'ཀ': c1, 'ཁ': c2, 'ག': c3, 'ང': c4, 'ཅ': c5, 'ཆ': c6, 'ཇ': c7, 'ཉ': c8, 'ཏ': c9, 'ཐ': c10, 'ད': c11, 'ན': c12, 'པ': c13, 'ཕ': c14,\n 'བ': c15, 'མ': c16, 'ཙ': c17, 'ཚ': c18, 'ཛ': c19, 'ཝ': c20, 'ཞ': c21, 'ཟ': c22, 'འ': c23, 'ཡ': c24, 'ར': c25, 'ལ': c26, 'ཤ': c27,\n 'ས': c28, 'ཧ': c29, 'ཨ': c30\n}\n\n\ndef MoveCircle(q):\n x = ord(q) - 80\n x_removecircle = chr(x)\n return x_removecircle\n\n\ndef qianjiasort(A):\n qianjiadic[qianjia[0]] = A.count(qianjia[0])\n qianjiadic[qianjia[1]] = A.count(qianjia[1])\n qianjiadic[qianjia[2]] = A.count(qianjia[2])\n qianjiadic[qianjia[3]] = A.count(qianjia[3])\n qianjiadic[qianjia[4]] = A.count(qianjia[4])\n\n return qianjiadic\n\n\ndef shangjiasort(B):\n shangjiadic[shangjia[0]] = B.count(shangjia[0])\n shangjiadic[shangjia[1]] = B.count(shangjia[1])\n shangjiadic[shangjia[2]] = B.count(shangjia[2])\n\n return shangjiadic\n\n\ndef jizisort(C):\n jizidic[jizi[0]] = C.count(jizi[0])\n jizidic[jizi[1]] = C.count(jizi[1])\n jizidic[jizi[2]] = C.count(jizi[2])\n jizidic[jizi[3]] = C.count(jizi[3])\n jizidic[jizi[4]] = C.count(jizi[4])\n jizidic[jizi[5]] = C.count(jizi[5])\n jizidic[jizi[6]] = C.count(jizi[6])\n jizidic[jizi[7]] = C.count(jizi[7])\n jizidic[jizi[8]] = C.count(jizi[8])\n jizidic[jizi[9]] = C.count(jizi[9])\n jizidic[jizi[10]] = C.count(jizi[10])\n jizidic[jizi[11]] = C.count(jizi[11])\n jizidic[jizi[12]] = C.count(jizi[12])\n jizidic[jizi[13]] = C.count(jizi[13])\n jizidic[jizi[14]] = C.count(jizi[14])\n jizidic[jizi[15]] = C.count(jizi[15])\n jizidic[jizi[16]] = C.count(jizi[16])\n jizidic[jizi[17]] = C.count(jizi[17])\n jizidic[jizi[18]] = C.count(jizi[18])\n jizidic[jizi[19]] = C.count(jizi[19])\n jizidic[jizi[20]] = C.count(jizi[20])\n jizidic[jizi[21]] = C.count(jizi[21])\n jizidic[jizi[22]] = C.count(jizi[22])\n jizidic[jizi[23]] = C.count(jizi[23])\n jizidic[jizi[24]] = C.count(jizi[24])\n jizidic[jizi[25]] = C.count(jizi[25])\n jizidic[jizi[26]] = C.count(jizi[26])\n jizidic[jizi[27]] = C.count(jizi[27])\n jizidic[jizi[28]] = C.count(jizi[28])\n jizidic[jizi[29]] = C.count(jizi[29])\n\n return jizidic\n\n\ndef xiajiasort(D):\n xiajiadic[xiajia[0]] = D.count(xiajia[0])\n xiajiadic[xiajia[1]] = D.count(xiajia[1])\n xiajiadic[xiajia[2]] = D.count(xiajia[2])\n xiajiadic[xiajia[3]] = D.count(xiajia[3])\n\n return xiajiadic\n\n\ndef zaixiajiasort(E):\n zaixiajiadic[zaixiajia[0]] = E.count(zaixiajia[0])\n return zaixiajiadic\n\n\ndef yuanyinsort(F):\n yuanyindic[yuanyin[0]] = F.count(yuanyin[0])\n yuanyindic[yuanyin[1]] = F.count(yuanyin[1])\n yuanyindic[yuanyin[2]] = F.count(yuanyin[2])\n yuanyindic[yuanyin[3]] = F.count(yuanyin[3])\n\n return yuanyindic\n\n\ndef houjiasort(G):\n houjiadic[houjia[0]] = G.count(houjia[0])\n houjiadic[houjia[1]] = G.count(houjia[1])\n houjiadic[houjia[2]] = G.count(houjia[2])\n houjiadic[houjia[3]] = G.count(houjia[3])\n houjiadic[houjia[4]] = G.count(houjia[4])\n houjiadic[houjia[5]] = G.count(houjia[5])\n houjiadic[houjia[6]] = G.count(houjia[6])\n houjiadic[houjia[7]] = G.count(houjia[7])\n houjiadic[houjia[8]] = G.count(houjia[8])\n houjiadic[houjia[9]] = G.count(houjia[9])\n\n return houjiadic\n\n\ndef zaihoujiasort(H):\n zaihoujiadic[zaihoujia[0]] = H.count(zaihoujia[0])\n zaihoujiadic[zaihoujia[1]] = H.count(zaihoujia[1])\n\n return zaihoujiadic\n\n\nif __name__ == '__main__':\n start = time.time()\n num = 0\n q_list = []\n s_list = []\n j_list = []\n 
x_list = []\n zx_list = []\n y_list = []\n h_list = []\n zh_list = []\n with open(r'D:\\藏字结构识别--正确\\排序后结构识别.csv', newline='', encoding='utf-8') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n num += 1\n if num > 1:\n if row[1]:\n q_list.append(row[1])\n q = qianjiasort(q_list)\n if row[2]:\n s_list.append(row[2])\n s = shangjiasort(s_list)\n if row[3]:\n if '\\u0f90' <= row[3] <= '\\u0fbc':\n withoutcircle = MoveCircle(row[3])\n j_list.append(withoutcircle)\n else:\n j_list.append(row[3])\n j = jizisort(j_list)\n if row[4]:\n x_list.append(row[4])\n x = xiajiasort(x_list)\n if row[5]:\n zx_list.append(row[5])\n zxj = zaixiajiasort(zx_list)\n if row[6]:\n y_list.append(row[6])\n y = yuanyinsort(y_list)\n if row[7]:\n h_list.append(row[7])\n h = houjiasort(h_list)\n if row[8]:\n zh_list.append(row[8])\n zhj = zaihoujiasort(zh_list)\n\n print('前加字出现次数:', q)\n print('上加字出现的次数:', s)\n print('基字出现的次数:', j)\n print('下加字出现的次数:', x)\n print('再下加字出现的次数:', zxj)\n print('元音出现的次数:', y)\n print('后加字出现的次数:', h)\n print('再后加字出现的次数:', zhj)\n end = time.time()\n\nterm.writeLine(str(end - start) + '秒', term.green)\n","sub_path":"Task_5/AnotherWay1.py","file_name":"AnotherWay1.py","file_ext":"py","file_size_in_byte":7047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"140118527","text":"import os, sys\nimport datetime\nfrom netCDF4 import Dataset #read netcdf\nimport urllib.request\nimport numpy as np\nimport multiprocessing as mp\n\nsSystem_paths = os.environ['PATH'].split(os.pathsep)\nsys.path.extend(sSystem_paths)\n\nfrom pyes.system.define_global_variables import *\nfrom pyes.toolbox.reader.read_configuration_file import read_configuration_file\n\nsPath_pye3sm = sWorkspace_code + slash + 'python' + slash + 'e3sm' + slash + 'e3sm_python'\nsys.path.append(sPath_pye3sm)\n\nfrom e3sm.shared import oE3SM\n\nnrow = 180 * 2\nncolumn = 360 * 2\n#read surface data \nsWorkspace_out = '/compyfs/liao313/04model/h2sc/global/usgs_site2'\nsFilename_surface_map = '/compyfs/inputdata/lnd/clm2/surfdata_map' + slash + 'surfdata_0.5x0.5_simyr2010_c191025.nc'\naDatasets = Dataset(sFilename_surface_map)\nnetcdf_format = aDatasets.file_format\nprint(netcdf_format)\nprint(\"Print dimensions:\")\nprint(aDatasets.dimensions.keys())\nprint(\"Print variables:\")\nprint(aDatasets.variables.keys())\nfor sKey, aValue in aDatasets.variables.items():\n if (sKey == 'LONGXY'):\n #print(aValue.datatype)\n #print(aValue.dimensions)\n aLongitude = (aValue[:]).data\n continue\n if (sKey == 'LATIXY'):\n #print(aValue.datatype)\n #print(aValue.dimensions)\n aLatitude = (aValue[:]).data\n aLatitude = np.flip(aLatitude, 0) \n continue\ndResolution = 0.5\nsString1 = 'http://waterservices.usgs.gov/nwis/site/?format=rdb&bBox='\nsString2 = '&startDT=1980-01-01&endDT=2010-12-31&siteType=GW&siteStatus=all&hasDataTypeCd=gw'\n\nsUrl_test = 'http://waterservices.usgs.gov/nwis/site/?format=rdb&bBox=-83.000000,36.500000,-81.000000,38.500000&startDT=1980-01-01&endDT=2010-12-31&siteType=GW&siteStatus=all&hasDataTypeCd=gw'\ni = int((90-36.5) /0.5)\nj = int((-83-(-180)) / 0.5)\n\ndef prepare_usgs_groundwater_site_list_parallel(iRow):\n\n \n #response = urllib.request.urlopen(sUrl_test)\n #html = response.read()\n #print(html)\n ##save as a rdb file\n #sFilename_test = 'usgs_site_text.txt'\n #pFile = open(sFilename_test,\"w\")#write mode \n #pFile.write(html.decode(\"utf-8\") ) \n #pFile.close() \n\n \n \n sRow = \"{:03d}\".format(iRow) \n \n for iColumn in np.arange(1, ncolumn+1, 1): \n 
sColumn = \"{:03d}\".format(iColumn)\n #define the lower and upper boundary\n x = aLongitude[ iRow -1, iColumn-1 ]\n y = aLatitude[iRow-1, iColumn-1] \n \n \n dLongitude_left = x -0.5 * dResolution \n dLongitude_right = x + 0.5 * dResolution \n dLatitude_bottom = y - 0.5 * dResolution \n dLatitude_top = y + 0.5 * dResolution\n sLongitude_left = \"{:0f}\".format( dLongitude_left)\n sLongitude_right = \"{:0f}\".format( dLongitude_right)\n sLatitude_bottom = \"{:0f}\".format( dLatitude_bottom)\n sLatitude_top = \"{:0f}\".format( dLatitude_top)\n sBox = sLongitude_left + ',' + sLatitude_bottom + ',' + sLongitude_right + ',' + sLatitude_top\n #an example\n #\n sUrl = sString1 + sBox + sString2\n dummy = sColumn+ ',' + sRow+ ','+sUrl + '\\n'\n print(dummy)\n \n \n try: \n \n pResponse = urllib.request.urlopen(sUrl)\n bHtml = pResponse.read()\n #save as a rdb file \n sFilename_out = sWorkspace_out + slash + 'usgs_site_' + sRow + '_' + sColumn + sExtension_txt\n print(sFilename_out)\n pFile = open(sFilename_out,\"w\") #write mode \n pFile.write(bHtml.decode(\"utf-8\") ) \n pFile.close() \n except urllib.error.URLError as e:\n #print(e.code)\n #print(e.read())\n pass\n \n \n return\nif __name__ == '__main__':\n #prepare_usgs_groundwater_site_list()\n\n pool = mp.Pool(mp.cpu_count())\n \n num_cores = 5\n \n results = pool.map(prepare_usgs_groundwater_site_list_parallel, [row for row in np.arange(1, nrow+1, 1)])\n\n pool.close()\n \n \n \n \n","sub_path":"pye3sm/tools/usgs/site/prepare_usgs_groundwater_site_list_multiplethread.py","file_name":"prepare_usgs_groundwater_site_list_multiplethread.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"357129640","text":"#!/usr/bin/python3\nimport sqlite3\nimport createTables as ct\nimport insertValues as iv\nimport insData as ins\nfrom tkinter import *\ndatabase = \"data.db\"\nroot = Tk()\nroot.title(\"Database Entry System\")\nroot.geometry(\"1000x800\")\n\n### Pull from imported modules\nct.createTables()\nins.InsertTableData()\n\ndef increaseStatus():\n conn = sqlite3.connect(database)\n c = conn.cursor()\n c.execute(\"UPDATE SUPPLIER set Status = Status * 1.1\")\n c.execute(\"SELECT * FROM SUPPLIER\")\n print(c.fetchall())\n conn.commit()\n conn.close()\n\ndef shipments_entry():\n conn = sqlite3.connect(database)\n c = conn.cursor()\n mesg, fetch = iv.insertValues('SHIPMENT', shipment_sno_entry.get(), shipment_pno_entry.get(),\n shipment_qty_entry.get(),shipment_price_entry.get() )\n\n shipment_message.config(text = mesg)\n\n shipment_sno_entry.delete(0,END)\n shipment_pno_entry.delete(0,END)\n shipment_qty_entry.delete(0,END)\n shipment_price_entry.delete(0,END)\n#############################################\ndef suppliers_entry():\n conn = sqlite3.connect(database)\n c = conn.cursor()\n mesg,fetch = iv.insertValues('SUPPLIER', supplier_sno_entry.get(), supplier_sname_entry.get(),\n supplier_status_entry.get(),supplier_city_entry.get() )\n\n supplier_message.config(text = mesg)\n supplier_info.delete('1.0', END)\n supplier_info.insert(INSERT,fetch)\n\n supplier_sno_entry.delete(0,END)\n supplier_sname_entry.delete(0,END)\n supplier_status_entry.delete(0,END)\n supplier_city_entry.delete(0,END)\n##########################################################\ndef parts_entry():\n conn = sqlite3.connect(database)\n c = conn.cursor()\n mesg, fetch = iv.insertValues('PART', part_pno_entry.get(), part_pname_entry.get(),\n part_color_entry.get(), 
part_weight_entry.get() ,\n part_city_entry.get() )\n\n part_message.config(text = mesg)\n\n part_pno_entry.delete(0,END)\n part_pname_entry.delete(0,END)\n part_color_entry.delete(0,END)\n part_weight_entry.delete(0,END)\n part_city_entry.delete(0,END)\n#################################################\n\nsupplier_info = Text(root, height = 6, width = 60)\nsupplier_info.grid(row=23, column=0, columnspan = 5, padx=0)\nsupplier_label = Label(root, text=\"Table Info\")\nsupplier_label.grid(row=22, column=0, padx=0)\n\ndef populateText(mesg):\n supplier_info.delete('1.0', END)\n supplier_info.insert(INSERT,mesg)\n\ndef fetchSupplier():\n fetch = iv.getVal(\"SUPPLIER\")\n print(fetch)\n populateText(fetch)\nsup_button = Button(root, text=\"show Suppliers\", command=fetchSupplier)\nsup_button.grid(row = 30, column = 0, padx = (100,0))\n\ndef fetchPart():\n fetch = iv.getVal(\"PART\")\n print(fetch)\n populateText(fetch)\npart_button = Button(root, text=\"show Parts\", command=fetchPart)\npart_button.grid(row = 30, column = 1, padx = 0)\n\n\ndef fetchShip():\n fetch = iv.getVal(\"SHIPMENT\")\n print(fetch)\n populateText(fetch)\nship_button = Button(root, text=\"show Shipments\", command=fetchShip)\nship_button.grid(row = 30, column = 2, padx = 0)\n\nstatus_button = Button(root, text=\"status x 10%\", command=increaseStatus)\nstatus_button.grid(row = 1, column = 6, padx = 20)\n\ndef getPart():\n matches = iv.getPartShipped(\"SHIPMENT\", part_no_entry.get())\n part_no_info.delete('1.0', END)\n part_no_info.insert(INSERT,matches)\n part_no_entry.delete(0,END)\n\n################# prompt user for part no:\npart_no_button = Button(root, text=\"Enter Part No:\", command=getPart)\npart_no_button.grid(row = 2, column = 6, padx = 20, pady = 20)\n\npart_no_entry = Entry(root, width=10)\npart_no_entry.grid(row = 2, column = 7, padx = 5, pady = 20)\n\npart_no_info= Text(root, height = 6, width = 30)\npart_no_info.grid(row=3, column=6, columnspan = 3, padx=10)\n\n\n################ SUPPLIER ###########################################\nsup_row=8\nsupplier_table = Label(root, text=\"Supplier Table\").grid(row=(sup_row-1), column=0, padx=5)\n# sno\nsupplier_sno_entry = Entry(root, width=10)\nsupplier_sno_entry.grid(row=sup_row, column=0, padx=5)\nsupplier_sno_label = Label(root, text=\"Sno\").grid(row=(2+sup_row), column=0, padx=5)\n# sname\nsupplier_sname_entry = Entry(root, width=10)\nsupplier_sname_entry.grid(row=sup_row, column=1, padx=5)\nsupplier_sname_label = Label(root, text=\"Sname\").grid(row=(2+sup_row), column=1, padx=5)\n# status\nsupplier_status_entry = Entry(root, width=10)\nsupplier_status_entry.grid(row=sup_row, column=2, padx=5)\nsupplier_status_label = Label(root, text=\"Status\").grid(row=(2+sup_row), column=2, padx=5)\n# city\nsupplier_city_entry = Entry(root, width=10)\nsupplier_city_entry.grid(row=sup_row, column=3, padx=5)\nsupplier_city_label = Label(root, text=\"City\").grid(row=(2+sup_row), column=3, padx=5)\n\n# supplier submit button\nsupplier_submit_button = Button(root, text=\"Supplier Entry Submit\", command=suppliers_entry)\nsupplier_submit_button.grid(row=(sup_row+3), column=0, columnspan=4, padx=5, pady=5, ipadx=100)\n# supplier message\nsupplier_message = Label(root, text=\"Normal\")\nsupplier_message.grid(row=(4+sup_row), columnspan=6, padx=5)\n#####################################################################################\n\n\n################ SHIPMENT ###########################################\nship_row=14\nshipment_table = Label(root, text=\"Shipment 
Table\").grid(row=(ship_row-1), column=0, padx=5)\n#sno\nshipment_sno_entry = Entry(root, width=10)\nshipment_sno_entry.grid(row=ship_row, column=0, padx=5)\nshipment_sno_label = Label(root, text=\"Sno\").grid(row=(2+ship_row), column=0, padx=5)\n# pno\nshipment_pno_entry = Entry(root, width=10)\nshipment_pno_entry.grid(row=ship_row, column=1, padx=5)\nshipment_pno_label = Label(root, text=\"Pno\").grid(row=(2+ship_row), column=1, padx=5)\n# QTY\nshipment_qty_entry = Entry(root, width=10)\nshipment_qty_entry.grid(row=ship_row, column=2, padx=5)\nshipment_qty_label = Label(root, text=\"Qty\").grid(row=(2+ship_row), column=2, padx=5)\n# Price\nshipment_price_entry = Entry(root, width=10)\nshipment_price_entry.grid(row=ship_row, column=3, padx=5)\nshipment_price_label = Label(root, text=\"Price\").grid(row=(2+ship_row), column=3, padx=5)\n\n# shipment submit button\nshipment_submit_button = Button(root, text=\"Shipment Entry Submit\", command=shipments_entry)\nshipment_submit_button.grid(row=(ship_row+3), column=0, columnspan=4, padx=5, pady=5, ipadx=100)\n\nshipment_message = Label(root, text=\"Normal\")\nshipment_message.grid(row=(4+ship_row), columnspan=6, padx=5)\n#####################################################################################\n\n\n\n################ PARTS ###########################################\nparts_table = Label(root, text=\"Parts Table\").grid(row=0, column=0, padx=5)\n\npart_pno_entry = Entry(root, width=10)\npart_pno_entry.grid(row=1, column=0, padx=5)\npart_pno_label = Label(root, text=\"Pno\").grid(row=2, column=0, padx=5)\n\npart_pname_entry = Entry(root, width=10)\npart_pname_entry.grid(row=1, column=1, padx=5)\npart_pname_label = Label(root, text=\"Pname\").grid(row=2, column=1, padx=5)\n\npart_color_entry = Entry(root, width=10)\npart_color_entry.grid(row=1, column=2, padx=5)\npart_color_label = Label(root, text=\"Color\").grid(row=2, column=2, padx=5)\n\npart_weight_entry = Entry(root, width=10)\npart_weight_entry.grid(row=1, column=3, padx=5)\npart_weight_label = Label(root, text=\"Weight\").grid(row=2, column=3, padx=5)\n\npart_city_entry = Entry(root, width=10)\npart_city_entry.grid(row=1, column=4, padx=5)\npart_city_label = Label(root, text=\"City\").grid(row=2, column=4, padx=5)\n\n# parts submit button\nparts_submit_button = Button(root, text=\"Parts Entry Submit\", command=parts_entry)\nparts_submit_button.grid(row=3, column=0, columnspan=4, padx=5, pady=5, ipadx=100)\n\npart_message = Label(root, text=\"Normal\")\npart_message.grid(row=4, columnspan=6, padx=5)\n###########################################################################################\n\n\nroot.mainloop()\n\n\n ## creates tables\n ## insert into\n","sub_path":"hw5.py","file_name":"hw5.py","file_ext":"py","file_size_in_byte":7915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"35437823","text":"#!/usr/bin/env python3\n\n\"\"\"\nGood performance module\n\n\"\"\"\n\n# pylint: disable= C0103\n\nimport datetime\nimport csv\nimport time\n\n\ndef analyze(filename):\n \"\"\"\n This function will take a csv file as an input and return the year count and ao count\n within the file\n :param filename: CSV file path.\n :return: A dictionary showing the year count and a print statement of the ao count.\n \"\"\"\n start = datetime.datetime.now()\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n\n year_count = {\n \"2013\": 0,\n \"2014\": 0,\n \"2015\": 0,\n \"2016\": 0,\n \"2017\": 
0,\n \"2018\": 0\n }\n\n found = 0\n for row in reader:\n year_count[row[5][6:]] += 1\n if \"ao\" in row[6]:\n found += 1\n\n print(year_count)\n\n print(f\"'ao' was found {found} times\")\n end = datetime.datetime.now()\n\n return start, end, year_count, found\n\n\ndef main():\n \"\"\"\n This is the main function that calls the analyze function.\n :return:\n \"\"\"\n filename = \"data/exercise.csv\"\n analyze(filename)\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n main()\n print(\"{}\".format(time.time() - start_time))\n","sub_path":"students/matt_casali/lesson06/Assignment/good_perf.py","file_name":"good_perf.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"647619668","text":"# chutesladdersmarkov.py\n\n# A Markov chain simulation of Chutes and Ladders.\n# As described by Nick Berry at http://www.datagenetics.com/blog/november12011/index.html\n\nimport pylab as pl\nimport numpy as np\nimport json\n\n# Location of chutes and ladders\n# start space : end space\nchutes = {\n 98:78,\n 95:75,\n 93:73,\n 87:24,\n 64:60,\n 62:19,\n 56:53,\n 49:11,\n 48:26,\n 16:6}\n\nladders = {\n 1:38,\n 4:14,\n 9:31,\n 21:42,\n 28:84,\n 36:44,\n 51:67,\n 71:91,\n 80:100}\n\n# Transition matrix. 101 x 101, to in starting position off board.\nboardsize = 10*10 + 1\n\n# Probability to roll an arbitrary number with single die.\np = 1./6\n\n# Generate generic transition matrix, then modify\nT = []\n\nfor k in range(boardsize):\n T += [(k+1)*[0.]+[p,p,p,p,p,p]+(boardsize-7-k)*[0.]]\n\n# Modify last element to account for landing on last square (winning)\nfor k in range(6):\n if k < 5:\n T[k-6][-2-k:] = [(2+k)*p]\n else:\n # Last row handled differently\n T[k-6][-2-k:] = [1.0]\n\n# Modify transition matrix based on presence of chutes and ladders\ndef addobstacles(obstacles):\n for j in obstacles:\n # Modify row with obstacle and proceeding 5 rows (if on board)\n for k in [x for x in (j - np.arange(6)) if x >= 0]:\n T[k][j] = T[k][j] - p\n T[k][obstacles[j]] = T[k][obstacles[j]] + p\n\naddobstacles(chutes)\naddobstacles(ladders)\n\nT = np.array(T)\n\n# Vector of game piece location probabilities (101 spaces)\nL = np.zeros(10*10+1)\n# Intial position. 
Probability = 1.0\nL[0] = 1.0\n\n# Moves to simulate\nmoves = 50\n# Probability of landing in square 100+\ngamefinish = []\n# Save boards (L) for each subsequent move\nboards = []\n\n# Propagate game piece via Markov process!\nfor k in range(moves):\n L = np.dot(L,T)\n boards += [L.tolist()]\n gamefinish += [L[100]]\n\n# Calculate differential winning probability distribution from cumulative distribution\ndiff = []\nfor k in range(len(gamefinish)-1):\n diff += [gamefinish[k+1] - gamefinish[k]]\n\n# Export boards to JSON file for further analysis\njson.dump(boards,open('markovboards.json','w'))\n\n# Plotting\n# Display board with probability of being on a given square\n#board = pl.matshow(L[1:].reshape(10,10),cmap='Reds', origin='lower')\n#board.get_axes().axis('off')\n\n# Plot differential winning probability of each move\n#pl.plot(gamefinish)\n#pl.plot(diff)\n\n#pl.show()\n","sub_path":"chutesladdersmarkov.py","file_name":"chutesladdersmarkov.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"478010210","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .models import Task\nfrom .forms import TaskForm\n# Create your views here.\n\n\ndef index(request):\n tasks = Task.objects.all()\n return render(request, 'django_app/index.html',\n {'form': TaskForm})\n\n\ndef create_task(request):\n if request.method == 'POST':\n task = Task(\n text=request.POST.get('text'),\n checked=bool(request.POST.get('checked', False))\n )\n task.save()\n return redirect('/tasks')\n\n\ndef detail(request, pk):\n try:\n task = Task.objects.get(id=pk)\n except Task.DoesNotExist:\n print('Exception 404')\n # without this early return, the render below would hit an unbound 'task'\n return HttpResponse('Task not found', status=404)\n return render(request, 'django_app/details.html',\n {'task': task})\n","sub_path":"django_proj/django_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"229718133","text":"from tkinter import *\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter.filedialog import asksaveasfilename\nimport os\n\nclass GuiMain(Frame):\n\n # This initializes the GUI\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.master = master\n self.init_window()\n\n # Function called by the Open menu\n # Uses askopenfilename to select the file to be opened\n # Opens the file and reads the content\n # Writes the content into the listbox and adds the listbox to the main window\n def open_file(self):\n # This will call a tkinter widget and give us an interface to select a file\n # This will return the file name as a string which we can pass to the open() function later\n filename = askopenfilename(initialdir=os.getcwd(), title=\"Choose a file.\")\n # Using try in case the user types in an unknown file or closes without choosing a file.\n try:\n file = open(filename, 'r')\n count = 0\n # Delete the placeholder entry first\n self.listBox.delete(0)\n for line in file:\n # In my data file, I used the first line to explain the format\n # The first line starts with #, so we need to keep it from being printed\n if not line.startswith(\"#\") and line is not None:\n self.listBox.insert(count, line)\n count+=1\n self.listBox.pack()\n except:\n print(\"No file exists\")\n else:\n file.close()\n self.fileMenu.entryconfigure('New', state='normal')\n self.fileMenu.entryconfigure('Modify', state='normal')\n self.fileMenu.entryconfigure('Save', state='normal')\n\n # Function called by New Menu\n def new(self):\n 
print('NEW')\n self.createWindows('New')\n\n # Function called by Modify Menu\n def modify(self):\n selection = self.listBox.curselection()\n name, address, birthday = self.listBox.get(selection).split(\",\")\n print(name)\n print(address)\n print(birthday)\n self.createWindows('Modify', name.strip(), address.strip(), birthday.strip())\n\n # This function will create an interface for the user to add a new record or modify an existing record\n def createWindows(self, title, name='', address='', birthday=''):\n newWindow = Toplevel()\n newWindow.geometry(\"360x150+120+120\")\n newWindow.title(title)\n newWindow.resizable(width=False, height=False)\n\n nameLabel = Label(newWindow, text='Name', width=10).place(x=20, y=20, width=80, height=25)\n nameEntry = Entry(newWindow, width=25)\n nameEntry.place(x=120, y=20, width=220, height=25)\n nameEntry.insert(0, name)\n\n addrLabel = Label(newWindow, text='Address', width=10).place(x=20, y=50, width=80, height=25)\n addrEntry = Entry(newWindow, width=25)\n addrEntry.place(x=120, y=50, width=220, height=25)\n addrEntry.insert(0, address)\n\n bdayLabel = Label(newWindow, text='Birthday', width=10).place(x=20, y=80, width=80, height=25)\n bdayEntry = Entry(newWindow, width=25)\n bdayEntry.place(x=120, y=80, width=220, height=25)\n bdayEntry.insert(0, birthday)\n\n # Clears all three entry fields\n def clear_entries():\n nameEntry.delete(0, END)\n addrEntry.delete(0, END)\n bdayEntry.delete(0, END)\n\n button01 = Button(newWindow, text=title)\n button01.place(x=20, y=110, width=100, height=25)\n clearButton = Button(newWindow, text='Clear', command=clear_entries)\n clearButton.place(x=130, y=110, width=100, height=25)\n closeButton = Button(newWindow, text='Close', command=newWindow.destroy)\n closeButton.place(x=240, y=110, width=100, height=25)\n\n newWindow.mainloop()\n\n # Save File Function\n # Uses listBox.get to read each record from the list box\n # Calls asksaveasfilename to input the file name\n # Then opens the file and writes the content into the file.\n def save(self):\n count = self.listBox.size()\n print(count)\n filename = asksaveasfilename(initialdir=os.getcwd(), title=\"Choose a file.\")\n try:\n file = open(filename, \"w\")\n for item in range(0, count):\n name, address, birthday = self.listBox.get(item).split(\",\")\n name = name.strip(' \\t\\n\\r')\n address = address.strip(' \\t\\n\\r')\n birthday = birthday.strip(' \\t\\n\\r')\n file.writelines(name + \", \" + address + \", \" + birthday + '\\n')\n except Exception as exc:\n print(exc)\n else:\n file.flush()\n file.close()\n\n # Exit this program\n def exit(self):\n exit(0)\n\n def init_window(self):\n self.master.title('My Patient Program')\n\n # Create Menu Bar\n self.menu = Menu(self.master)\n\n # Create File Menu\n self.fileMenu = Menu(self.menu, tearoff=0)\n # Add File Menu's content\n self.fileMenu.add_command(label=\"Open\", command=self.open_file, state='normal')\n self.fileMenu.add_command(label='New', command=self.new, state='disabled')\n self.fileMenu.add_command(label='Modify', command=self.modify, state='disabled')\n self.fileMenu.add_command(label='Save', command=self.save, state='disabled')\n self.fileMenu.add_separator()\n self.fileMenu.add_command(label='Exit', command=self.exit)\n self.menu.add_cascade(label='File', menu=self.fileMenu)\n # Create Help Menu\n self.helpMenu = Menu(self.menu, tearoff=0)\n self.helpMenu.add_command(label='Help')\n self.helpMenu.add_command(label='About')\n self.menu.add_cascade(label='Help', menu=self.helpMenu)\n\n # Add Menu to the window\n try:\n 
self.master.config(menu=self.menu)\n except AttributeError:\n self.master.tk.call(self.master, \"config\", \"-menu\", self.menu)\n\n self.listBox = Listbox(self.master, height=400, width=500)\n self.listBox.insert(1, \"Please load patient file.\")\n self.listBox.pack()\n\n\n\nif __name__ == '__main__':\n root = Tk()\n root.geometry('600x500')\n app = GuiMain(root)\n app.mainloop()","sub_path":"tw/idv/sang/csci6651/Assignment07/GuiMain.py","file_name":"GuiMain.py","file_ext":"py","file_size_in_byte":6169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"408499685","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n # def __init__(self):\n # import sys\n # self.result = sys.maxsize\n # self.prev = None\n\n # def getMinimumDifference(self, root):\n # \"\"\"\n # :type root: TreeNode\n # :rtype: int\n # \"\"\"\n # if root.left is not None:\n # self.getMinimumDifference(root.left)\n\n # if self.prev is not None:\n # self.result = min(self.result, root.val - self.prev)\n # self.prev = root.val\n\n # if root.right is not None:\n # self.getMinimumDifference(root.right)\n\n # return self.result\n\n def helper(self, node):\n self.ele.append(node.val)\n if node.left is not None:\n self.helper(node.left)\n if node.right is not None:\n self.helper(node.right)\n\n def getMinimumDifference(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n self.ele = []\n if root is not None:\n self.helper(root)\n self.ele = sorted(self.ele)\n import sys\n result = sys.maxsize\n for x in range(len(self.ele)-1):\n result = min(result, abs(self.ele[x] - self.ele[x+1]))\n return result","sub_path":"530-MinimumAbsoluteDifferenceinBST/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"128867619","text":"from tkinter import *\nfrom quiz_brain import QuizBrain\n\nTHEME_COLOR = \"#375362\"\n\n\nclass QuizInterface:\n def __init__(self, quiz_brain: QuizBrain):\n self.quiz = quiz_brain\n self.window = Tk()\n self.window.title(\"Quizzler App\")\n self.window.config(padx=20, pady=20, bg=THEME_COLOR)\n\n self.score = Label(text=\"Score: 0\", fg=\"white\", bg=THEME_COLOR)\n self.score.grid(column=1, row=0)\n\n self.canvas = Canvas(width=300, height=250, bg=\"white\")\n self.question = self.canvas.create_text((150, 125), text=\"Question here!\", fill=THEME_COLOR,\n width=280, font=(\"Arial\", 20, \"italic\"))\n self.canvas.grid(column=0, row=1, columnspan=2, pady=50)\n\n true_image = PhotoImage(file=\"images/true.png\")\n self.true_button = Button(image=true_image, highlightthickness=0, command=self.check_true)\n self.true_button.grid(column=0, row=2)\n\n false_image = PhotoImage(file=\"images/false.png\")\n self.false_button = Button(image=false_image, highlightthickness=0, command=self.check_false)\n self.false_button.grid(column=1, row=2)\n\n self.get_next_question()\n\n self.window.mainloop()\n\n def get_next_question(self):\n self.canvas.config(bg=\"white\")\n self.score.config(text=f\"Score: {self.quiz.score}\")\n\n if self.quiz.still_has_questions():\n q_text = self.quiz.next_question()\n self.canvas.itemconfig(self.question, text=q_text)\n else:\n self.canvas.itemconfig(self.question, text=\"You've reached the end of the quiz!\")\n self.true_button.config(state=\"disabled\")\n 
self.false_button.config(state=\"disabled\")\n\n def check_true(self):\n is_right = self.quiz.check_answer(\"True\")\n self.give_feedback(is_right)\n\n def check_false(self):\n is_right = self.quiz.check_answer(\"False\")\n self.give_feedback(is_right)\n\n def give_feedback(self, is_right):\n if is_right:\n self.canvas.config(bg=\"green\")\n else:\n self.canvas.config(bg=\"red\")\n\n self.window.after(1000, self.get_next_question)\n","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"635045555","text":"#!/usr/bin/python3\n\"\"\"JSON module\"\"\"\nimport json\n\n\ndef load_from_json_file(filename):\n \"\"\"Creates an Object from a JSON file\"\"\"\n\n with open(filename, encoding='utf-8') as a_file:\n line = a_file.readline()\n new_object = json.loads(line)\n return new_object\n","sub_path":"0x0B-python-input_output/8-load_from_json_file.py","file_name":"8-load_from_json_file.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"516648224","text":"from eigen_lib import jacobi, rotate, max_off_diag,make_matrix_two_electrons\nimport numpy as np\nfrom sklearn.preprocessing import normalize\nimport matplotlib.pyplot as plt\n\ndef plot_function_varying_omega(N, rho_max, rho_min):\n \"\"\"\n Function for plotting the function for varying values of omega.\n \"\"\"\n omega_values = [0.001, 0.5, 1, 5]\n\n for omega in omega_values:\n A = make_matrix_two_electrons(N, rho_max, rho_min, omega)\n A, R = jacobi(A, 0.00001)\n R = R**2\n eig_vals_jacobi = np.diag(A)\n temp = np.sort(eig_vals_jacobi)\n plt.plot(np.linspace(rho_min, rho_max, N+2), R[:,np.where(eig_vals_jacobi == temp[0])[0]])\n\n plt.legend([\"omega = 0.001\", \"omega = 0.5\", \"omega = 1\", \"omega = 5\"])\n plt.xlabel(\"rho\")\n plt.ylabel(\"u(rho)\")\n plt.title(\"function for varying omega\")\n plt.show()\n\ndef plot_function_fixed_omega(N, rho_max, rho_min, omega):\n \"\"\"\n Function for plotting the function for fixed value of omega.\n \"\"\"\n A = make_matrix_two_electrons(N, rho_max, rho_min, omega)\n\n A, R = jacobi(A, 0.00001)\n eig_vals_jacobi = np.diag(A)\n temp = np.sort(eig_vals_jacobi)\n\n plt.plot(np.linspace(rho_min, rho_max, N+2), R[:,np.where(eig_vals_jacobi == temp[0])[0]])\n plt.legend([\"omega = %.3f\" %(omega)])\n plt.xlabel(\"rho\")\n plt.ylabel(\"u(rho)\")\n plt.title(\"Function for fixed omega = %.3f\" %(omega))\n plt.show()\n\n\n\nN = 100\nrho_max = 10**20\nrho_min = 0\nomega_r = 0.5\n\nplot_function_fixed_omega(N, rho_max, rho_min, 0.1)\n# plot_function_varying_omega(N, rho_max, rho_min)\n","sub_path":"2e.py","file_name":"2e.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"395377651","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models,_\nfrom odoo.exceptions import UserError\n\n\nclass AccountInvoice(models.Model):\n _inherit = 'account.invoice'\n \n @api.multi\n def action_download_attachment(self):\n for rec in self:\n if rec.attachment_id:\n config_obj = self.env['ir.config_parameter']\n config_ids = config_obj.search([('key', '=', 'web.base.url')])\n config_id = config_ids[0].value\n url = \"%s/web/content/%s?download=true\" % (config_id, rec.attachment_id.id)\n return {\n 'type': 'ir.actions.act_url',\n 'url': url,\n 'nodestroy': False,\n }\n else:\n raise 
UserError(_(\"We could not found attachment\"))\n","sub_path":"ioud10/ioud_invoice_attachment_download/models/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"243401529","text":"import numpy as np\r\nimport cv2\r\nfrom skimage import color, measure\r\nimport matplotlib.pyplot as plt\r\n\r\nimage_path = './images/pro_4/Orical1.jpg'\r\nimage_path2 = './images/pro_4/Orical2.jpg'\r\n\r\n\r\ndef BGR_to_RGB(image):\r\n # 由BGR转换成RGB\r\n img1 = np.array(image)\r\n\r\n for i in range(img1.shape[0]):\r\n for j in range(img1.shape[1]):\r\n temp = img1[i][j][0]\r\n img1[i][j][0] = img1[i][j][2]\r\n img1[i][j][2] = temp\r\n return img1\r\n\r\ndef findeyes(img_skin, minr, minc, maxr, maxc, img):\r\n '''\r\n\r\n :param img_skin: 进行开操作后的图像\r\n :param minr: 脸部区域的最小行\r\n :param minc: 脸部最小列\r\n :param maxr: 脸部最大行\r\n :param maxc: 脸部最大列\r\n :param img: 原图像\r\n :return: 是否有眼睛和 框定眼睛的图像\r\n '''\r\n\r\n # 如果区域内有两个以上的空框是眼睛\r\n part = np.zeros(((maxr - minr), (maxc - minc)))\r\n\r\n # 二值取反 原本图像中眼睛是黑色的\r\n for i in range(minr, maxr):\r\n for j in range(minc, maxc):\r\n if img_skin[i, j] == 0:\r\n part[i - minr, j - minc] = 255\r\n else:\r\n part[i - minr, j - minc] = 0\r\n\r\n # 标定连通区域\r\n part_labeled, num = measure.label(part, return_num=True, connectivity=2) # 八邻域\r\n\r\n img_copy = img.copy()\r\n count = 0\r\n # measure.regionprops 得到连通区域\r\n for region2 in measure.regionprops(part_labeled):\r\n min_row2, min_col2, max_row2, max_col2 = region2.bbox\r\n w = max_col2-min_col2\r\n h = max_row2-min_row2\r\n total_w = maxc-minc\r\n total_h = maxr-minr\r\n w_ratio = w/total_w\r\n h_ratio = h/total_h\r\n if w_ratio<1/3 and h_ratio<0.2 and w_ratio>0.045 and h_ratio>1/30 and w>=h:\r\n count = count+1\r\n img_copy = cv2.rectangle(img_copy, (min_col2 + minc, min_row2 + minr), (max_col2 + minc, max_row2 + minr), (0, 255, 0), 2)\r\n\r\n if count >= 1:\r\n img = img_copy\r\n return True, img\r\n\r\n return False, img\r\n\r\n\r\ndef find_face(image_path, kernel_size):\r\n #读取图像\r\n image = cv2.imread(image_path)\r\n #转到ycbcr空间更方便人脸的分离\r\n image_ycbcr = cv2.cvtColor(image, cv2.COLOR_RGB2YCR_CB)\r\n height, width, page = image_ycbcr.shape\r\n y, cb, cr = cv2.split(image_ycbcr)\r\n # 高斯去噪\r\n cr_gaussian = cv2.GaussianBlur(cr, (kernel_size, kernel_size), 0)\r\n cb_gaussian = cv2.GaussianBlur(cb, (kernel_size, kernel_size), 0)\r\n\r\n # 对皮肤的颜色区域变成白色\r\n skin = np.zeros_like(cr)\r\n for i in range(height):\r\n for j in range(width):\r\n if y[i][j] < 70:\r\n skin[i][j] = 0\r\n elif cr_gaussian[i][j] > 133 and cr_gaussian[i][j] < 173 and cb_gaussian[i][j] > 77 and cb_gaussian[i][j] < 127:\r\n skin[i][j] = 255\r\n else:\r\n skin[i][j] = 0\r\n\r\n # 对二值图像形态学处理\r\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5)) # 得到5x5的十字架\r\n skin_opening = cv2.morphologyEx(skin, cv2.MORPH_OPEN, kernel) # 对图像进行开操作\r\n\r\n # 标定连通区域\r\n skin_labeled = measure.label(skin_opening, connectivity=2) # 尝试后发现八邻域更好\r\n\r\n count_face = 0\r\n # 找到连通的脸\r\n for region in measure.regionprops(skin_labeled):\r\n min_row, min_col, max_row, max_col = region.bbox\r\n if (max_row - min_row) / width > 1 / 15 and (max_col - min_col) / height > 0.08: # 参数手动设定\r\n height_width_ratio = (max_row - min_row) / (max_col - min_col)\r\n # 比例在(0.6, 2)以内\r\n if height_width_ratio > 0.6 and height_width_ratio < 2.0:\r\n # 对可能的脸区域进行眼睛的找寻\r\n res, image = findeyes(skin_opening, min_row, min_col, max_row, max_col, image)\r\n if 
res:\r\n count_face = count_face + 1\r\n img = cv2.rectangle(image, (min_col, min_row), (max_col, max_row), (0, 0, 255), 2)\r\n\r\n return img\r\n\r\nplt.figure()\r\n# img = plt.imread(image_path2)\r\nimg = plt.imread(image_path)\r\nplt.subplot(1, 2, 1)\r\nplt.xticks([]), plt.yticks([]) # hide the axes\r\nplt.imshow(img)\r\n\r\n# img1 = find_face(image_path2, 5)\r\nimg1 = find_face(image_path, 5)\r\nplt.subplot(1, 2, 2)\r\nplt.xticks([]), plt.yticks([]) # hide the axes\r\n# Convert from BGR to RGB\r\nimg1 = BGR_to_RGB(img1)\r\nplt.imshow(img1)\r\nplt.show()\r\n\r\nplt.figure()\r\nimg = plt.imread(image_path2)\r\nplt.subplot(1, 2, 1)\r\nplt.xticks([]), plt.yticks([]) # hide the axes\r\nplt.imshow(img)\r\n\r\nimg1 = find_face(image_path2, 5)\r\nplt.subplot(1, 2, 2)\r\nplt.xticks([]), plt.yticks([]) # hide the axes\r\n# Convert from BGR to RGB\r\nimg1 = BGR_to_RGB(img1)\r\n\r\nplt.imshow(img1)\r\nplt.show()","sub_path":"experiment_4.py","file_name":"experiment_4.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"207953470","text":"import boto3\nimport pprint\nimport csv\n\nregion = 'us-west-2'\nec2client = boto3.client('ec2', region_name=region)\nwith open('instances.csv', 'r') as f:\n reader = csv.reader(f)\n instances = list(reader)\n\n\n# print(instances)\n\ninstance_list = []\n\nfor instance in instances:\n for i in instance:\n instance_list.append(i)\n\nresponse = ec2client.describe_instances(\n # Filters=[\n # {\n # 'Name': 'instance-id',\n # 'Values': instance_list\n # },\n # ]\n)\n\n\n\n# server_name = []\n\nfor reservations in response['Reservations']:\n for instances in reservations['Instances']:\n for tags in instances['Tags']:\n if tags['Key'] == 'Name':\n print(tags['Value'])\n print(instances['InstanceId'])\n # try:\n # print(instances['KeyName'])\n # except:\n # pass\n # print(\"\\n\")\n\n\n# if (instances['Value']):\n# server_name.append(instances['Value'])\n# server_name.append(instances['Key'])\n# print(reservations)\n\n# ec2 = boto3.client('ec2', region_name=region)\n\n# responses = ec2.describe_instances(\n# Filters=[\n# {\n# 'Name': 'platform',\n# 'Values': [\n# 'windows',\n# ]\n# },\n# ],\n# )\n# instances = []\n# for reservations in responses['Reservations']:\n# for instance in reservations['Instances']:\n# if (instance['InstanceId']):\n# 
instances.append(instance['InstanceId'])\n\n# print(instances)\n\n# print(responses)","sub_path":"Python/ec2/instances/describe_instance_by_tag+instanceid.py","file_name":"describe_instance_by_tag+instanceid.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"73770798","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2016 Red Hat, Inc.\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: ovirt_datacenter\nshort_description: Module to manage data centers in oVirt/RHV\nversion_added: \"2.3\"\nauthor: \"Ondra Machacek (@machacekondra)\"\ndescription:\n - \"Module to manage data centers in oVirt/RHV\"\noptions:\n id:\n description:\n - \"ID of the datacenter to manage.\"\n version_added: \"2.8\"\n name:\n description:\n - \"Name of the data center to manage.\"\n required: true\n state:\n description:\n - \"Should the data center be present or absent.\"\n choices: ['present', 'absent']\n default: present\n description:\n description:\n - \"Description of the data center.\"\n comment:\n description:\n - \"Comment of the data center.\"\n local:\n description:\n - \"I(True) if the data center should be local, I(False) if should be shared.\"\n - \"Default value is set by engine.\"\n type: bool\n compatibility_version:\n description:\n - \"Compatibility version of the data center.\"\n quota_mode:\n description:\n - \"Quota mode of the data center. One of I(disabled), I(audit) or I(enabled)\"\n choices: ['disabled', 'audit', 'enabled']\n mac_pool:\n description:\n - \"MAC pool to be used by this datacenter.\"\n - \"IMPORTANT: This option is deprecated in oVirt/RHV 4.1. You should\n use C(mac_pool) in C(ovirt_clusters) module, as MAC pools are\n set per cluster since 4.1.\"\n force:\n description:\n - \"This parameter can be used only when removing a data center.\n If I(True) data center will be forcibly removed, even though it\n contains some clusters. Default value is I(False), which means\n that only empty data center can be removed.\"\n version_added: \"2.5\"\n default: False\n type: bool\n\nextends_documentation_fragment: ovirt\n'''\n\nEXAMPLES = '''\n# Examples don't contain auth parameter for simplicity,\n# look at ovirt_auth module to see how to reuse authentication:\n\n# Create datacenter\n- ovirt_datacenter:\n name: mydatacenter\n local: True\n compatibility_version: 4.0\n quota_mode: enabled\n\n# Remove datacenter\n- ovirt_datacenter:\n state: absent\n name: mydatacenter\n\n# Change Datacenter Name\n- ovirt_datacenter:\n id: 00000000-0000-0000-0000-000000000000\n name: \"new_datacenter_name\"\n'''\n\nRETURN = '''\nid:\n description: \"ID of the managed datacenter\"\n returned: \"On success if datacenter is found.\"\n type: str\n sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c\ndata_center:\n description: \"Dictionary of all the datacenter attributes. 
Datacenter attributes can be found on your oVirt/RHV instance\n at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/datacenter.\"\n returned: \"On success if datacenter is found.\"\n type: dict\n'''\n\nimport traceback\n\ntry:\n import ovirtsdk4.types as otypes\nexcept ImportError:\n pass\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.ovirt import (\n BaseModule,\n check_sdk,\n check_params,\n create_connection,\n equal,\n ovirt_full_argument_spec,\n search_by_name,\n)\n\n\nclass DatacentersModule(BaseModule):\n\n def __get_major(self, full_version):\n if full_version is None:\n return None\n if isinstance(full_version, otypes.Version):\n return full_version.major\n return int(full_version.split('.')[0])\n\n def __get_minor(self, full_version):\n if full_version is None:\n return None\n if isinstance(full_version, otypes.Version):\n return full_version.minor\n return int(full_version.split('.')[1])\n\n def _get_mac_pool(self):\n mac_pool = None\n if self._module.params.get('mac_pool'):\n mac_pool = search_by_name(\n self._connection.system_service().mac_pools_service(),\n self._module.params.get('mac_pool'),\n )\n\n return mac_pool\n\n def build_entity(self):\n return otypes.DataCenter(\n name=self._module.params['name'],\n id=self._module.params['id'],\n comment=self._module.params['comment'],\n description=self._module.params['description'],\n mac_pool=otypes.MacPool(\n id=getattr(self._get_mac_pool(), 'id', None),\n ) if self._module.params.get('mac_pool') else None,\n quota_mode=otypes.QuotaModeType(\n self._module.params['quota_mode']\n ) if self._module.params['quota_mode'] else None,\n local=self._module.params['local'],\n version=otypes.Version(\n major=self.__get_major(self._module.params['compatibility_version']),\n minor=self.__get_minor(self._module.params['compatibility_version']),\n ) if self._module.params['compatibility_version'] else None,\n )\n\n def update_check(self, entity):\n minor = self.__get_minor(self._module.params.get('compatibility_version'))\n major = self.__get_major(self._module.params.get('compatibility_version'))\n return (\n equal(getattr(self._get_mac_pool(), 'id', None), getattr(entity.mac_pool, 'id', None)) and\n equal(self._module.params.get('comment'), entity.comment) and\n equal(self._module.params.get('description'), entity.description) and\n equal(self._module.params.get('name'), entity.name) and\n equal(self._module.params.get('quota_mode'), str(entity.quota_mode)) and\n equal(self._module.params.get('local'), entity.local) and\n equal(minor, self.__get_minor(entity.version)) and\n equal(major, self.__get_major(entity.version))\n )\n\n\ndef main():\n argument_spec = ovirt_full_argument_spec(\n state=dict(\n choices=['present', 'absent'],\n default='present',\n ),\n name=dict(default=None, required=True),\n description=dict(default=None),\n local=dict(type='bool'),\n id=dict(default=None),\n compatibility_version=dict(default=None),\n quota_mode=dict(choices=['disabled', 'audit', 'enabled']),\n comment=dict(default=None),\n mac_pool=dict(default=None),\n force=dict(default=None, type='bool'),\n )\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n )\n\n check_sdk(module)\n check_params(module)\n\n try:\n auth = module.params.pop('auth')\n connection = create_connection(auth)\n data_centers_service = connection.system_service().data_centers_service()\n data_centers_module = DatacentersModule(\n connection=connection,\n module=module,\n 
service=data_centers_service,\n )\n\n state = module.params['state']\n if state == 'present':\n ret = data_centers_module.create()\n elif state == 'absent':\n ret = data_centers_module.remove(force=module.params['force'])\n\n module.exit_json(**ret)\n except Exception as e:\n module.fail_json(msg=str(e), exception=traceback.format_exc())\n finally:\n connection.close(logout=auth.get('token') is None)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"env/lib/python3.9/site-packages/ansible/modules/cloud/ovirt/ovirt_datacenter.py","file_name":"ovirt_datacenter.py","file_ext":"py","file_size_in_byte":7685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"134298939","text":"from django.urls import path\nfrom . import views\n\nurlpatterns=[\n path('', views.index),\n path('register', views.register),\n path('login', views.login),\n path('validate_login', views.validate_login),\n path('logout', views.logout),\n path('quotes', views.quotes),\n path('add_quote',views.add_quote),\n path('edit_page/',views.edit_page),\n path('edit/',views.edit),\n path('delete_quote/',views.delete_quote),\n path('user_page/', views.user_page),\n]","sub_path":"quote_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"290518806","text":"import random\n\n# for i in range(10):\n# print(i)\n\n# instead while loops:\n# i = 0\n# while i < 10:\n# print(i)\n# i += 1\n\n# availableExits = [\"north\", \"south\", \"south west\"]\n# chosenExit = ''\n#\n# while chosenExit not in availableExits:\n# chosenExit = input(\"Choose a direction \")\n# if chosenExit == \"quit\":\n# print(\"Game Over\")\n# break\n# else:\n# print(\"Aren't you glad you got out of there!?\")\n\nhighest = 1000\nanswer = random.randint(1, highest)\n\nprint(\"Guess a number between 1 and {}, or enter 0 to quit\".format(highest))\nguess = 0\ncount = 0\nwhile guess != answer:\n guess = int(input(\"> \"))\n if guess == 0:\n print(\"Game Over\")\n break\n elif guess < answer:\n print(\"Guess higher\")\n elif guess > answer:\n print(\"Guess lower\")\n else:\n print(\"You have guessed correctly\")\n","sub_path":"udemy/forLoops/whileLoops.py","file_name":"whileLoops.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"589718221","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymongo\n\nclass NovelPipeline(object):\n def __init__(self):\n self.client = pymongo.MongoClient(host='127.0.0.1', port=27017)\n self.db = self.client['novel']\n self.collection = self.db['book']\n\n def process_item(self, item, spider):\n if item.get('book'):\n book ={\n 'book_id': item['book_id'],\n 'book': item['book'],\n 'author': item['author'],\n 'category': item['category'],\n 'status': item['status'],\n 'count': item['count'],\n 'profile': item['profile'],\n 'chapter_list': item['chapter_list']\n }\n self.collection.save(book)\n return item\n else:\n if item.get('chapter_number'):\n self.db[item.get('book_id')].insert_one(dict(item))\n","sub_path":"novel/novel/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
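A brief aside on the NovelPipeline record above: process_item() saves book-level items (those carrying a 'book' key) into the 'book' collection, and routes chapter items (those carrying 'chapter_number') into a per-book collection instead. The sketch below shows how such a pipeline would typically be wired into a Scrapy project; the ITEM_PIPELINES dotted path mirrors the record's own sub_path, and the sample field values are hypothetical, not part of the dataset.

# settings.py -- register the pipeline with the Scrapy project (assumed layout)
ITEM_PIPELINES = {
    'novel.pipelines.NovelPipeline': 300,  # lower numbers run earlier in the chain
}

# Scrapy then calls process_item() once per yielded item. A book-level
# item (hypothetical values) is saved into the 'book' collection:
book_item = {
    'book_id': '1001', 'book': 'Example Title', 'author': 'A. Writer',
    'category': 'fantasy', 'status': 'ongoing', 'count': 123456,
    'profile': 'short synopsis', 'chapter_list': [],
}
# An item carrying 'chapter_number' (and no 'book' key) is instead
# inserted into the MongoDB collection named after its 'book_id'.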
+{"seq_id":"394574788","text":"def find_consecutive_runs(input_list):\n # verify the input\n if type(input_list) is not list:\n raise TypeError('Please input a list')\n indicies = []\n for idx, val in enumerate(input_list):\n list_length = len(input_list)\n if list_length - idx < 3:\n # we can't read the next two. break out of the loop\n break\n next_val = input_list[idx+1]\n if next_val - val in (1, -1):\n third_val = input_list[idx+2]\n # since we are counting get the interval from the previous iteration\n interval = next_val - val\n if third_val - next_val == interval:\n indicies.append(idx)\n if len(indicies) > 0:\n return indicies\n return None\n","sub_path":"samples/fcr45.py","file_name":"fcr45.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"410470828","text":"import ConfigUpdater as CU\nfrom os.path import exists, join\nfrom discord.ext import commands\ndef get_input(input_msg: str, is_list: bool = False, is_number: bool = False):\n if is_list:\n the_list = []\n while True:\n try:\n print('press enter with no text when done')\n list_item = input(input_msg)\n if list_item.strip() == '':\n return the_list\n if is_number:\n int(list_item)\n the_list += [list_item.strip()]\n except:\n print('Must be a number')\n else:\n while True:\n try:\n c = input(input_msg)\n if is_number:\n int(c)\n return c\n except:\n pass\n\n# noinspection PyBroadException\ndef get_perms(msg) -> dict:\n try:\n c = CU.read_values(msg.server.id, 'per_server')\n if c is None:\n return CU.read_values('permissions', 'config')\n except:\n return CU.read_values('permissions', 'config')\n\n\n\n# noinspection PyBroadException\ndef is_owner_check(msg) -> bool:\n try:\n return msg.author.id == get_perms(msg)['owner id']\n except:\n return False\n\n# noinspection PyBroadException\ndef is_owner():\n return commands.check(lambda ctx: is_owner_check(ctx.message))\n\n# noinspection PyBroadException\ndef is_allowed_check(msg) -> bool:\n try:\n for role in msg.author.roles:\n if role.name.lower() in get_perms(msg)['allowed roles']:\n return True\n return discord_server_owner_check(msg)\n except:\n return False\n\n# noinspection PyBroadException\ndef is_allowed():\n return commands.check(lambda ctx: is_allowed_check(ctx.message))\n\n# noinspection PyBroadException\ndef is_home_server_check(msg) -> bool:\n try:\n return msg.server.id in get_perms(msg)['home server']\n except:\n return False\n\n# noinspection PyBroadException\ndef is_home_server():\n return commands.check(lambda ctx: is_home_server_check(ctx.message))\n\n# noinspection PyBroadException\ndef server_access_check(msg) -> bool:\n try:\n if is_owner_check(msg):\n return True\n return msg.author.id in get_perms(msg)['server access']\n except Exception:\n return False\n\n# noinspection PyBroadException\ndef server_access():\n return commands.check(lambda ctx: server_access_check(ctx.message))\n\ndef global_blacklist_check(msg) -> bool:\n try:\n return msg.author.id in get_perms(msg)['global_blacklist']\n except:\n return\n\ndef global_blacklist():\n return commands.check(lambda ctx: global_blacklist_check(ctx.message))\n\ndef discord_server_owner_check(msg) -> bool:\n try:\n return msg.author == msg.server.owner\n except:\n return False\n\ndef discord_server_owner():\n return commands.check(lambda ctx: discord_server_owner_check(ctx.message))\n\n#try:\nc = CU.read_values('permissions', 'config')\nif c is None:\n c = {}\ntry:\n c['owner id']\nexcept:\n c['owner id'] = 
get_input('owner id\\n>>> ', is_list=False, is_number=True)\ntry:\n c['allowed roles']\nexcept:\n c['allowed roles'] = get_input('Leave blank if done or type a name of a role\\n>>> ', is_list=True)\ntry:\n c['home server']\nexcept:\n c['home server'] = get_input('home server id\\n>>> ', is_list=True, is_number=True)\ntry:\n c['server access']\nexcept:\n c['server access'] = get_input('id of user with server access\\n>>> ', is_list=True, is_number=True)\ntry:\n c['global blacklist']\nexcept:\n c['global blacklist'] = []\nCU.write_values(c, 'permissions', 'config')\ndel c\n#except Exception as e:\n# print(e)\n# exit('Could not get permissions config')\n","sub_path":"checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"139497682","text":"import warnings\n\nimport sklearn\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.svm import LinearSVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom config import iot23_attacks_dir, iot23_data_dir, iot23_experiments_dir\nfrom src.helpers.log_helper import add_logger\nfrom src.helpers.process_helper import run_end_to_end_process\nfrom src.iot23 import get_data_sample, iot23_metadata, feature_selections\n\n# Add Logger\nadd_logger(file_name='experiments.log')\n\n# Setup warnings\nwarnings.filterwarnings(\"ignore\", category=sklearn.exceptions.UndefinedMetricWarning)\nwarnings.filterwarnings(\"ignore\", category=sklearn.exceptions.ConvergenceWarning)\n\nfile_header = iot23_metadata[\"file_header\"]\nsource_files_dir = iot23_attacks_dir\ndata_dir = iot23_data_dir\nexperiments_dir = iot23_experiments_dir\ndata_samples = [\n get_data_sample(dataset_name='S04', rows_per_dataset_file=5_000_000),\n get_data_sample(dataset_name='S16', rows_per_dataset_file=5_000_000),\n]\n\n# Selected Features\nfeatures = [\n feature_selections['F14'],\n feature_selections['F17'],\n feature_selections['F18'],\n feature_selections['F19'],\n]\n\n# Selected Algorithms\ntraining_algorithms = dict([\n ('DecisionTree', Pipeline([('normalization', StandardScaler()), ('classifier', DecisionTreeClassifier())])),\n ('GaussianNB', Pipeline([('normalization', StandardScaler()), ('classifier', GaussianNB())])),\n ('LogisticRegression', Pipeline([('normalization', StandardScaler()), ('classifier', LogisticRegression())])),\n ('RandomForest', Pipeline([('normalization', StandardScaler()), ('classifier', RandomForestClassifier())])),\n ('SVC_linear', Pipeline([('normalization', MinMaxScaler()), ('classifier', LinearSVC())])),\n])\n\nrun_end_to_end_process(source_files_dir,\n data_dir,\n experiments_dir,\n data_samples,\n features,\n training_algorithms,\n final_report_name='experiment_scores.xlsx')\n","sub_path":"src/run_experiments.py","file_name":"run_experiments.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"273622858","text":"#! 
/usr/bin/env python3\n\nimport os\n\n# Reads the next line entered by user.\n\ncurr = 0\nibuf = \"\"\nsbuf = \"\"\n\ndef readline(fd = 0, limit = 100):\n global curr\n global ibuf\n global sbuf\n\n if ibuf == \"\":\n ibuf = os.read(fd,limit)\n sbuf = ibuf.decode() \n line = \"\"\n while curr < len(sbuf): \n line += sbuf[curr] # adds each character to line\n if sbuf[curr] == '\\n': # if character is '\\n' return line\n curr += 1\n return line\n curr += 1\n if curr == limit: # if end of buffer is reached, read again.\n ibuf = os.read(fd,limit)\n sbuf = ibuf.decode()\n curr = 0\n\n return \"\"\n\ndef readfile(filename):\n fd = os.open(filename, os.O_RDONLY)\n lines = \"\"\n line = readline(fd)\n while line != \"\":\n lines += line\n line = readline(fd)\n return lines\n","sub_path":"my-tcp/std_in.py","file_name":"std_in.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"384615470","text":"from random import random\n\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .models import *\nfrom django.core.paginator import Paginator, EmptyPage, InvalidPage\n\nlst = []\nanslist = []\nanswers = Pergunta.objects.all()\nfor i in answers:\n anslist.append(i.resposta)\n\ndef index(request):\n return render(request, 'inicio.html')\n\ndef quizz(request):\n return render(request, 'form.html')\n\ndef create(request):\n req = request\n nome = req.GET['name']\n desing = req.GET['desing']\n perguntas = req.GET['perguntas']\n videos = req.GET['videos']\n opiniao = req.GET['opiniao']\n score = req.GET['score']\n try:\n usuario = Usuario(nome=nome, desing=desing, perguntas=perguntas, videos=videos, opiniao=opiniao, score=score)\n usuario.save()\n\n return render(request, 'end.html')\n except ValueError as e:\n raise ValueError(e)\n\ndef game(request):\n obj = Pergunta.objects.all()\n paginator = Paginator(obj, 1)\n try:\n page = int(request.GET.get('page', '1'))\n except:\n page = 1\n try:\n perguntas = paginator.page(page)\n except(EmptyPage, InvalidPage):\n perguntas = paginator.page(paginator.num_pages)\n return render(request, 'questions.html', {\"obj\": obj, \"perguntas\": perguntas})\n\ndef responde(request):\n score = 0\n for i in range(len(lst)):\n if lst[i] == anslist[i]:\n score += 1\n return render(request, 'result.html', {\"score\": score})\n\ndef saveans(request):\n ans = request.GET['ans']\n lst.append(ans)","sub_path":"game/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"134910160","text":"from contextlib import contextmanager\nimport gc\nimport os\nimport time\nimport numpy as np\nimport pandas as pd\nimport joblib\nfrom keras import backend as K\nfrom keras import initializers, regularizers, constraints, optimizers, layers\nfrom keras.callbacks import Callback\nfrom keras.callbacks import EarlyStopping\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.engine.topology import Layer\nfrom keras.layers import Bidirectional\nfrom keras.layers import BatchNormalization\nfrom keras.layers import Concatenate\nfrom keras.layers import CuDNNGRU\nfrom keras.layers import CuDNNLSTM\nfrom keras.layers import dot\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Embedding\nfrom keras.layers import GlobalMaxPool1D\nfrom keras.layers import Flatten\nfrom keras.layers import Input\nfrom keras.layers 
import PReLU\nfrom keras.layers import Reshape\nfrom keras.layers import *\nfrom keras.models import Model\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import class_weight\n\n\nMAX_FEATURES = 50000\nMAX_SEQUENCE_LENGTH = 100\nGLOVE_PATH = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'\nFAST_TEXT_PATH = '../input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec'\n\n@contextmanager\ndef timer(name):\n t0 = time.time()\n yield\n print(f'[{name}] done in {time.time() - t0:.0f} s')\n\n\ndef attention_3d_block(inputs):\n # inputs.shape = (batch_size, time_steps, input_dim)\n TIME_STEPS = inputs.shape[1].value\n SINGLE_ATTENTION_VECTOR = False\n\n input_dim = int(inputs.shape[2])\n a = Permute((2, 1))(inputs)\n # this line is not useful. It's just to know which dimension is what.\n a = Reshape((input_dim, TIME_STEPS))(a)\n a = Dense(TIME_STEPS, activation='softmax')(a)\n if SINGLE_ATTENTION_VECTOR:\n a = Lambda(lambda x: K.mean(x, axis=1))(a)\n a = RepeatVector(input_dim)(a)\n a_probs = Permute((2, 1))(a)\n output_attention_mul = Multiply()([inputs, a_probs])\n return output_attention_mul\n\n\nclass Attention(Layer):\n def __init__(self, step_dim,\n W_regularizer=None, b_regularizer=None,\n W_constraint=None, b_constraint=None,\n bias=True, **kwargs):\n self.supports_masking = True\n self.init = initializers.get('glorot_uniform')\n\n self.W_regularizer = regularizers.get(W_regularizer)\n self.b_regularizer = regularizers.get(b_regularizer)\n\n self.W_constraint = constraints.get(W_constraint)\n self.b_constraint = constraints.get(b_constraint)\n\n self.bias = bias\n self.step_dim = step_dim\n self.features_dim = 0\n super(Attention, self).__init__(**kwargs)\n\n def build(self, input_shape):\n assert len(input_shape) == 3\n\n self.W = self.add_weight((input_shape[-1],),\n initializer=self.init,\n name='{}_W'.format(self.name),\n regularizer=self.W_regularizer,\n constraint=self.W_constraint)\n self.features_dim = input_shape[-1]\n\n if self.bias:\n self.b = self.add_weight((input_shape[1],),\n initializer='zero',\n name='{}_b'.format(self.name),\n regularizer=self.b_regularizer,\n constraint=self.b_constraint)\n else:\n self.b = None\n\n self.built = True\n\n def compute_mask(self, input, input_mask=None):\n return None\n\n def call(self, x, mask=None):\n features_dim = self.features_dim\n step_dim = self.step_dim\n\n eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),\n K.reshape(self.W, (features_dim, 1))), (-1, step_dim))\n\n if self.bias:\n eij += self.b\n\n eij = K.tanh(eij)\n\n a = K.exp(eij)\n\n if mask is not None:\n a *= K.cast(mask, K.floatx())\n\n a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n\n a = K.expand_dims(a)\n weighted_input = x * a\n return K.sum(weighted_input, axis=1)\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], self.features_dim\n\n\ndef bigru_model(hidden_dim,\n dropout_rate,\n input_shape,\n is_embedding_trainable=False,\n embedding_matrix=None):\n\n inp = Input(shape=(input_shape[0],))\n x = Embedding(input_dim=embedding_matrix.shape[0],\n output_dim=embedding_matrix.shape[1],\n input_length=input_shape[0],\n weights=[embedding_matrix],\n trainable=is_embedding_trainable)(inp)\n\n x = SpatialDropout1D(0.2)(x)\n\n x = Bidirectional(CuDNNLSTM(40, return_sequences=True))(x)\n x = Bidirectional(CuDNNGRU(40, return_sequences=True))(x)\n x 
= Attention(MAX_SEQUENCE_LENGTH)(x)\n # x = Dense(64, activation=\"relu\")(x)\n x = Dense(1, activation=\"sigmoid\")(x)\n model = Model(inputs=inp, outputs=x)\n return model\n\n\ndef build_gru(hidden_dim,\n dropout_rate,\n input_shape,\n model_type=0,\n is_embedding_trainable=False,\n meta_embeddings='DME',\n embedding_matrix=None):\n inp = Input(shape=(input_shape[0],))\n embeddings = []\n if meta_embeddings == 'concat':\n for weights in embedding_matrix:\n x = Embedding(input_dim=weights.shape[0],\n output_dim=weights.shape[1],\n input_length=input_shape[0],\n weights=[weights],\n trainable=is_embedding_trainable)(inp)\n embeddings.append(x)\n x = Concatenate(axis=2)(embeddings)\n\n if meta_embeddings == 'DME':\n for weights in embedding_matrix:\n x = Embedding(input_dim=weights.shape[0],\n output_dim=weights.shape[1],\n input_length=input_shape[0],\n weights=[weights],\n trainable=is_embedding_trainable)(inp)\n x = Dense(300)(x)\n embeddings.append(x)\n x = add(embeddings)\n\n if model_type == 0:\n h = Bidirectional(CuDNNGRU(hidden_dim, return_sequences=True))(x)\n a = Dense(hidden_dim, activation='tanh')(h)\n a = Dense(8, activation=\"softmax\")(a)\n m = dot([a, h], axes=(1, 1))\n x = Flatten()(m)\n x = Dense(8 * hidden_dim * 2, activation=\"relu\")(x)\n x = Dropout(dropout_rate)(x)\n x = Dense(8 * hidden_dim * 2, activation=\"relu\")(x)\n x = Dropout(dropout_rate)(x)\n if model_type == 1:\n x = Bidirectional(CuDNNLSTM(40, return_sequences=True))(x)\n x = Bidirectional(CuDNNGRU(40, return_sequences=True))(x)\n x = Attention(MAX_SEQUENCE_LENGTH)(x)\n x = Dense(64, activation=\"relu\")(x)\n x = Dropout(0.1)(x)\n if model_type == 2:\n x = Bidirectional(CuDNNLSTM(40, return_sequences=True))(x)\n x = Bidirectional(CuDNNGRU(40, return_sequences=True))(x)\n avg_pool = GlobalAveragePooling1D()(x)\n max_pool = GlobalMaxPooling1D()(x)\n x = concatenate([avg_pool, max_pool])\n x = Dense(64, activation=\"relu\")(x)\n x = Dropout(0.1)(x)\n\n x = Dense(1, activation=\"sigmoid\")(x)\n model = Model(inputs=inp, outputs=x)\n return model\n\n\ndef get_best_threshold(y_pred_val,\n y_val):\n threshold_dict = {}\n for thresh in np.arange(0.1, 0.501, 0.01):\n thresh = np.round(thresh, 2)\n threshold_dict[thresh] = f1_score(\n y_val, (y_pred_val > thresh).astype(int)\n )\n\n best_threshold = max(threshold_dict, key=threshold_dict.get)\n print(\"best threshold: {}\".format(best_threshold))\n print(\"best f1 score: {}\".format(threshold_dict[best_threshold]))\n return best_threshold\n\n\ndef fit_predict(X_train,\n X_val,\n y_train,\n y_val,\n X_test,\n model,\n epochs=3,\n lr=0.001,\n batch_size=1024):\n with timer('fitting'):\n early_stopping = EarlyStopping(monitor='val_loss', patience=2)\n class_weights = class_weight.compute_class_weight(\n 'balanced',\n np.unique(y_train),\n y_train\n )\n model.compile(\n loss='binary_crossentropy',\n optimizer=optimizers.Adam(lr=lr, clipvalue=0.5)\n )\n\n model.summary()\n\n val_loss = []\n for i in range(epochs):\n model_checkpoint = ModelCheckpoint(\n str(i) + '_weight.h5',\n save_best_only=True,\n save_weights_only=True\n )\n\n hist = model.fit(\n X_train,\n y_train,\n validation_data=(X_val, y_val),\n epochs=1,\n # batch_size=2**(9 + i),\n batch_size=batch_size * (i + 1),\n # batch_size=512,\n class_weight=class_weights,\n callbacks=[model_checkpoint],\n verbose=2\n )\n\n val_loss.extend(hist.history['val_loss'])\n\n best_epoch_index = np.array(val_loss).argmin()\n print(\"best epoch: {}\".format(best_epoch_index + 1))\n model.load_weights(str(best_epoch_index) + 
'_weight.h5')\n y_pred_val = model.predict(X_val, batch_size=2048)[:, 0]\n\n with timer('predicting'):\n y_pred = model.predict(X_test, batch_size=2048)[:, 0]\n\n get_best_threshold(y_pred_val, y_val)\n return y_pred, y_pred_val\n\n\ndef main():\n with timer('load data'):\n test_df = pd.read_csv('../input/test.csv')\n X_train = joblib.load('../input/X_train.joblib')\n y_train = joblib.load('../input/y_train.joblib')\n X_test = joblib.load('../input/X_test.joblib')\n glove_embedding = joblib.load('../input/glove_embedding.joblib')\n fast_text_embedding = joblib.load('../input/fast_text_embedding.joblib')\n paragram_embedding = joblib.load('../input/paragram_embedding.joblib')\n word2vec_embedding = joblib.load('../input/word2vec_embedding.joblib')\n test_df = pd.read_csv('../input/test.csv')\n qid = test_df[\"qid\"]\n\n X_train, X_val, y_train, y_val = train_test_split(\n X_train,\n y_train,\n test_size=0.1,\n random_state=39\n )\n\n embedding_matrix = [\n glove_embedding, fast_text_embedding, paragram_embedding, word2vec_embedding\n ]\n\n y_pred_test = []\n y_pred_val = []\n\n for i in range(6):\n gru = build_gru(\n hidden_dim=40,\n dropout_rate=0.1,\n input_shape=X_train.shape[1:],\n model_type=i % 3,\n is_embedding_trainable=False,\n meta_embeddings='concat',\n embedding_matrix=embedding_matrix\n )\n\n pred_test, pred_val = fit_predict(\n X_train=X_train,\n X_val=X_val,\n y_train=y_train,\n y_val=y_val,\n X_test=X_test,\n epochs=3,\n model=gru,\n lr=0.001,\n batch_size=1024\n )\n\n y_pred_test.append(pred_test)\n y_pred_val.append(pred_val)\n\n y_pred_test = np.array(y_pred_test).mean(axis=0)\n y_pred_val = np.array(y_pred_val).mean(axis=0)\n\n print(\"ALL ensemble\")\n threshold = get_best_threshold(y_pred_val, y_val)\n y_pred = (np.array(y_pred_test) > threshold).astype(np.int)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"script/meta_embedding_pred.py","file_name":"meta_embedding_pred.py","file_ext":"py","file_size_in_byte":11749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"112025852","text":"import numpy as np\nfrom mlreco.utils.gnn.cluster import form_clusters_new\nfrom mlreco.utils.gnn.compton import filter_compton\nfrom mlreco.visualization.voxels import scatter_label\nfrom mlreco.utils.gnn.primary import assign_primaries_unique\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.neighbors import KNeighborsClassifier\n\ndef cluster(positions, em_primaries, params=[14.107334041, 52.94032412, 5.86322059, 1.01], inclusive=True):\n \"\"\"\n positions: Nx3 array of EM shower voxel positions\n em_primaries: Nx3 array of EM primary positions\n \n if inclusive=True: returns a list of length len(em_primaries) containing np arrays, each of which contains the indices corresponding to the voxels in the cone of the corresponding EM primary; note that each voxel might thus have multiple labels\n if inclusive=False: returns a tuple (arr of length len(em_primaries), arr of length len(positions)) corresponding to EM primary labels and the voxel labels; note that each voxel has a unique label\n \"\"\"\n length_factor = params[0]\n slope_percentile = params[1]\n slope_factor = params[2]\n \n dbscan = DBSCAN(eps=params[3], min_samples=3).fit(positions).labels_.reshape(-1, 1)\n dbscan = np.concatenate((positions, np.zeros((len(positions), 1)), dbscan), axis=1)\n \n clusts = form_clusters_new(dbscan)\n selected_voxels = []\n true_voxels = []\n \n if len(clusts) == 0:\n # assignn everything to first primary\n 
selected_voxels.append(np.arange(len(dbscan)))\n print('all clusters identified as Compton')\n return selected_voxels\n assigned_primaries = assign_primaries_unique(np.concatenate((em_primaries, np.zeros((len(em_primaries), 2))), axis=1), clusts, np.concatenate((positions, np.zeros((len(positions), 2))), axis=1)).astype(int)\n for i in range(len(assigned_primaries)):\n if assigned_primaries[i] != -1:\n c = clusts[assigned_primaries[i]]\n \n p = em_primaries[i]\n em_point = p[:3]\n\n # find primary cluster axis\n primary_points = dbscan[c][:, :3]\n primary_center = np.average(primary_points.T, axis=1)\n primary_axis = primary_center - em_point\n\n # find furthest particle from cone axis\n primary_length = np.linalg.norm(primary_axis)\n direction = primary_axis / primary_length\n axis_distances = np.linalg.norm(np.cross(primary_points-primary_center, primary_points-em_point), axis=1)/primary_length\n axis_projections = np.dot(primary_points - em_point, direction)\n primary_slope = np.percentile(axis_distances/axis_projections, slope_percentile)\n \n # define a cone around the primary axis\n cone_length = length_factor * primary_length\n cone_slope = slope_factor * primary_slope\n cone_vertex = em_point\n cone_axis = direction\n\n classified_indices = []\n for j in range(len(dbscan)):\n point = positions[j]\n coord = point[:3]\n axis_dist = np.dot(coord - em_point, cone_axis)\n if 0 <= axis_dist and axis_dist <= cone_length:\n cone_radius = axis_dist * cone_slope\n point_radius = np.linalg.norm(np.cross(coord-(em_point + cone_axis), coord-em_point))\n if point_radius < cone_radius:\n # point inside cone\n classified_indices.append(j)\n classified_indices = np.array(classified_indices)\n selected_voxels.append(classified_indices)\n else:\n selected_voxels.append(np.array([]))\n \n # don't require that each voxel can only be in one group\n if inclusive:\n return selected_voxels\n \n # require each voxel can only be in one group (order groups in descending size to overwrite large groups)\n em_primary_labels = -np.ones(len(selected_voxels))\n node_labels = -np.ones(len(positions))\n lengths = []\n for group in selected_voxels:\n lengths.append(len(group))\n sorter = np.argsort(lengths)[::-1]\n for l in range(len(selected_voxels)):\n if len(selected_voxels[sorter[l]]) > 0:\n node_labels[selected_voxels[sorter[l]]] = l\n em_primary_labels[sorter[l]] = l\n \n labeled = np.where(node_labels != -1)\n unlabeled = np.where(node_labels == -1)\n if len(labeled[0]) > 5 and len(unlabeled[0]) > 0:\n classified_positions = positions[labeled]\n unclassified_positions = positions[unlabeled]\n cl = KNeighborsClassifier(n_neighbors=2)\n cl.fit(classified_positions, node_labels[labeled])\n node_labels[unlabeled] = cl.predict(unclassified_positions)\n \n return em_primary_labels, node_labels","sub_path":"pi0/utils/cone_clusterer.py","file_name":"cone_clusterer.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"430101234","text":"from Constracts.IMqttTypeCmdHandler import IMqttTypeCmdHandler\nfrom Constracts import ITransport\nimport logging\nimport Constants.Constant as Const\nimport json\nfrom Database.Db import Db\nimport uuid\n\n\nclass DelDevHandler(IMqttTypeCmdHandler):\n def __init__(self, log: logging.Logger, mqtt: ITransport):\n super().__init__(log, mqtt)\n\n def handler(self, data):\n db = Db()\n rqi = data.get(\"RQI\")\n mqttReceiveCommandResponse = {\n \"RQI\": rqi\n }\n\n 
self.mqtt.send(Const.MQTT_CLOUD_TO_DEVICE_RESPONSE_TOPIC, json.dumps(mqttReceiveCommandResponse))\n\n devices = data.get(\"Device\", [])\n\n db.Services.DeviceService.RemoveDeviceByCondition(\n db.Table.DeviceTable.c.DeviceAddress.in_(devices)\n )\n db.Services.GroupDeviceMappingService.RemoveGroupDeviceMappingByCondition(\n db.Table.GroupDeviceMappingTable.c.DeviceAddress.in_(devices)\n )\n db.Services.DevicePropertyService.RemoveDevicePropertyMappingByCondition(\n db.Table.DevicePropertyMappingTable.c.DeviceAddress.in_(devices)\n )\n db.Services.EventTriggerOutputDeviceMappingService.RemoveEventTriggerOutputDeviceMappingByCondition(\n db.Table.EventTriggerOutputDeviceMappingTable.c.DeviceAddress.in_(devices)\n )\n db.Services.EventTriggerOutputDeviceSetupValueService.RemoveEventTriggerOutputDeviceSetupValueByCondition(\n db.Table.EventTriggerOutputDeviceSetupValueTable.c.DeviceAddress.in_(devices)\n )\n self.__cmd_res(devices)\n\n def __cmd_res(self, devices: list):\n res = {\n \"RQI\": str(uuid.uuid4()),\n \"TYPCMD\": \"DelDevRsp\",\n \"Devices\": []\n }\n for d in devices:\n res[\"Devices\"].append({\n \"Device\": d,\n \"Success\": True\n })\n self.globalVariable.mqtt_need_response_dict[res[\"RQI\"]] = res\n self.mqtt.send(Const.MQTT_DEVICE_TO_CLOUD_REQUEST_TOPIC, json.dumps(res))\n\n","sub_path":"Handler/MqttTypCmdHandlers/DelDevHandler.py","file_name":"DelDevHandler.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"124536806","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('user_reg', '0024_auto_20160213_1951'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Budget',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=70)),\n ('description', models.TextField(max_length=100)),\n ('total_cost', models.DecimalField(null=True, max_digits=7, decimal_places=2, blank=True)),\n ('event', models.ForeignKey(to='user_reg.Event')),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='BudgetItem',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=70, verbose_name=b'Name of Item')),\n ('description', models.TextField(max_length=100)),\n ('quantity', models.IntegerField(default=0, verbose_name=b'Number of Items')),\n ('unit_cost', models.DecimalField(verbose_name=b'Cost Per Item', max_digits=7, decimal_places=2)),\n ('total_cost', models.DecimalField(max_digits=7, decimal_places=2)),\n ('budget', models.ForeignKey(to='user_reg.Budget')),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","sub_path":"user_reg/migrations/0025_budget_budgetitem.py","file_name":"0025_budget_budgetitem.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"605791065","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport biosppy.signals.ecg as ecg\r\nfrom sklearn.preprocessing import StandardScaler, Normalizer\r\nfrom sklearn.metrics import f1_score, make_scorer\r\nfrom 
sklearn.model_selection import train_test_split, GridSearchCV\r\nfrom sklearn.feature_selection import SelectKBest, f_classif\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nimport auxilary\r\n\r\nFEATURE_SELECTION = False\r\nREMOVE_OUTLIER = True\r\nPLOT_FEATURE_SCORES = False\r\n\r\n\r\nX_train = np.load('preprocessed_X_train_20.npy')\r\nX_test = np.load('preprocessed_X_test_20.npy')\r\n\r\nprint(X_train.shape)\r\nprint(X_test.shape)\r\n\r\ny_train = pd.read_csv(r'y_train.csv')\r\ny_train = y_train.drop(columns= 'id', axis=1)\r\ny_train = y_train.values.ravel()\r\n\r\nprint(\"train shape: \", X_train.shape)\r\nprint(\"test shape: \", X_test.shape)\r\n\r\n# Remove Outliers\r\nif REMOVE_OUTLIER:\r\n X_train, y_train = auxilary.OutlierDetectionIsolationForest(X_train, y_train, percentageOutlier='auto')\r\n# Plot scores of features\r\nif PLOT_FEATURE_SCORES:\r\n auxilary.plotSFeatureScores(X_train, y_train, f_classif)\r\n# Feature Selection !\r\nif FEATURE_SELECTION:\r\n inputDim = 120\r\n featureSelection = SelectKBest(f_classif, k = inputDim)\r\n X_train = featureSelection.fit_transform(X_train, y_train)\r\n scores = featureSelection.scores_\r\n print(\"Shape after feature selection: \", X_train.shape)\r\n\r\nnormalizer = Normalizer()\r\nscaler = StandardScaler()\r\n\r\n#X_train = scaler.fit_transform(X_train)\r\nX_train = scaler.fit_transform(X_train)\r\n\r\n# First train a binary classifier on labels [0,1,2] vs 3\r\n\r\ny_train_BinaryClassifier = np.copy(y_train)\r\n\r\nindicesLabel1 = np.argwhere(y_train_BinaryClassifier == 1)\r\nindicesLabel2 = np.argwhere(y_train_BinaryClassifier == 2)\r\ny_train_BinaryClassifier[indicesLabel1] = 0\r\ny_train_BinaryClassifier[indicesLabel2] = 0\r\n\r\nindicesLabel3 = np.argwhere(y_train_BinaryClassifier == 3)\r\ny_train_BinaryClassifier[indicesLabel3] = 1\r\n\r\nprint('Label 1: ', indicesLabel1.shape)\r\nprint('Label 2: ', indicesLabel2.shape)\r\nprint('Label 3: ', indicesLabel3.shape)\r\n\r\nparameters_BinaryClassifier = { 'n_estimators': [10, 100, 250, 500, 1000],\r\n 'criterion': ['entropy'],\r\n 'class_weight': ['balanced']\r\n }\r\nrfc_bc = RandomForestClassifier(random_state=0)\r\n\r\n\r\nparameters_BinaryClassifier = {\r\n 'kernel': ['rbf', 'linear'],\r\n 'C': [0.001, 0.01,0.1,1,10]\r\n}\r\nsvc = SVC(gamma='scale', class_weight='balanced', random_state=37, decision_function_shape='ovo')\r\n\r\n\r\n\r\n\r\nscoreFunction = make_scorer(f1_score, average='micro', greater_is_better=True)\r\n\r\nclf_bc = GridSearchCV(estimator=svc, param_grid=parameters_BinaryClassifier, cv=5, verbose=2, scoring=scoreFunction, n_jobs=3)\r\nclf_bc.fit(X_train, y_train_BinaryClassifier)\r\n\r\nprint(\"Best score of best on validation set: \", clf_bc.best_score_) #rbf, 1e-5\r\nprint(\"Best Parameters: \", clf_bc.best_params_) # 0.968448\r\n\r\n#X_test = scaler.transform(X_test)\r\nX_test = normalizer.transform(X_test)\r\ny_pred_bc = clf_bc.predict(X_test)\r\n\r\nnumberOfOthers = np.count_nonzero(y_pred_bc == 0)\r\nnumberOfLabel3 = np.count_nonzero(y_pred_bc == 1)\r\n\r\n\r\nprint('prediction others:', numberOfOthers)\r\n\r\nprint('prediction label3:', numberOfLabel3)\r\n\r\nprint('Number of 0:', np.count_nonzero(y_train == 0))\r\nprint('Number of 1:', np.count_nonzero(y_train == 1))\r\nprint('Number of 2:', np.count_nonzero(y_train == 2))\r\nprint('Number of 3:', np.count_nonzero(y_train == 3))\r\n\r\n\r\n# Now remove the class 3 from data and train a second classifier\r\n\r\nX_train = np.delete(X_train, indicesLabel3, 
axis=0)\r\ny_train = np.delete(y_train, indicesLabel3, axis=0)\r\n\r\nprint('Number of 0:', np.count_nonzero(y_train == 0))\r\nprint('Number of 1:', np.count_nonzero(y_train == 1))\r\nprint('Number of 2:', np.count_nonzero(y_train == 2))\r\nprint('Number of 3:', np.count_nonzero(y_train == 3))\r\n\r\n\r\n\r\nparameters_rfc = { 'n_estimators': [10, 100, 250, 500, 1000],\r\n 'criterion': ['entropy'],\r\n 'class_weight': ['balanced']\r\n }\r\nrfc = RandomForestClassifier(random_state=0)\r\n\r\n\r\nclf = GridSearchCV(estimator=rfc, param_grid= parameters_rfc, cv=5, verbose=2, scoring= scoreFunction, n_jobs=3)\r\nclf.fit(X_train, y_train)\r\n\r\n\r\nprint(\"Best score of best on validation set: \", clf.best_score_) #0.6670\r\nprint(\"Best Parameters: \", clf.best_params_) #rbf, 10\r\n\r\n\r\n# First predict the first 3 classes\r\n\r\nX_test = scaler.transform(X_test)\r\n\r\ny_pred_test = clf.predict(X_test)\r\n\r\ny_pred_test_bc = clf_bc.predict(X_test)\r\nindicesOfLabel3Prediction = np.argwhere(y_pred_test_bc == 1)\r\n\r\ny_pred_test[indicesOfLabel3Prediction] = 3\r\nauxilary.createSubmissionFiles(y_pred_test)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n######## Plot some samples #########\r\n\r\n# for i in range(10):\r\n# randomIndex = np.random.randint(0, 5117) \r\n# sample = X_train[randomIndex,:]\r\n# sample = pd.Series(sample)\r\n# sample.plot()\r\n# label = y_train[randomIndex]\r\n# plt.title('y index: %i' % label )\r\n# plt.show(block=False)\r\n# plt.pause(1)\r\n# plt.close()\r\n\r\n# indicesOfLabel3 = np.where(y_train == 3)[0]\r\n# print(indicesOfLabel3.shape)\r\n# for i in range(10):\r\n# randomIndex = np.random.randint(0, indicesOfLabel3.shape[0]) \r\n# sample = X_train[indicesOfLabel3[randomIndex],:]\r\n# sample = pd.Series(sample)\r\n# sample.plot()\r\n# label = y_train[indicesOfLabel3[randomIndex]]\r\n# plt.title('y index: %i' % label )\r\n# plt.show(block=False)\r\n# plt.pause(1)\r\n# plt.close()\r\n\r\n\r\n\r\n\r\n","sub_path":"amlTask3.py","file_name":"amlTask3.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"403443331","text":"import my_app.tool_box as tool\nfrom my_app.settings import app_cfg\nimport datetime\nfrom my_app.models import Bookings, Telemetry, Subscriptions, Customer_Ids, Services\nimport time\nfrom my_app import db\n\n\ndef sub_analysis(cust_name):\n # sql = 'CREATE TABLE ta_adoption_db.archive_services_repo LIKE ta_adoption_db.services;'\n sql = \"SELECT * FROM ta_adoption_db.archive_subscriptions_repo \" + \\\n \"WHERE end_customer = '\" + cust_name + \"'\"\n sub_recs = db.engine.execute(sql)\n print(\"Customer Subscriptions for:\", cust_name, sub_recs.rowcount)\n\n #\n # Gather Subscription record info and sort it\n #\n sub_list = []\n for sub_rec in sub_recs:\n sub_id = sub_rec.subscription_id\n sub_offer_name = sub_rec.offer_name\n sub_start_date = sub_rec.start_date\n sub_term = int(float(sub_rec.initial_term))\n sub_status = sub_rec.status\n sub_so_num = sub_rec.weborderid\n sub_renewal_date = sub_rec.renewal_date\n sub_date_added = sub_rec.date_added\n\n sub_info = [sub_id, sub_offer_name, sub_start_date, sub_term, sub_status,\n sub_so_num, sub_renewal_date, sub_date_added]\n sub_list.append(sub_info)\n\n #\n # Create a reverse 2 level sorted index list of (SubId, Date Added to repo)\n #\n sub_list.sort(key=lambda x: (x[0], x[7]), reverse=True)\n\n print('---------------------')\n for x in sub_list:\n print(x[0], x[5], x[4], x[7])\n\n 
web_order_dict = {}\n\n for x in sub_list:\n sub_id = x[0]\n web_order_id = x[5]\n web_order_dict[web_order_id] = sub_id\n\n print(web_order_dict)\n\n return\n\n\ndef build_rosetta_stone():\n #\n # Build a Team Dict to figure out PSS/TSA\n #\n team_dict = tool.build_coverage_dict()\n\n #\n # Define Header Row\n #\n rosetta_list = []\n rosetta_list.append(['Customer Name', 'Num Of Licenses ', '% of Sensors Installed', '% of Active Sensors',\n 'Adoption Factor' + '\\n' + '(% Active Sensors : % of Subscription Consumed)'\n + '\\n' + ' as of ',\n 'Subscription Term', 'Subscription Status', 'Days to Renew',\n 'PSS', 'TSA', 'AM', 'Sales Lv 1', 'Sales Lv 2',\n 'Telemetry Name', 'Telemetry VRF', 'Sensors Installed', 'Active Agents',\n 'Sub Type', 'Sub Order Num','Sub ID', 'Req Start Date', 'Renewal Date',\n 'CX PID', 'CX Delivery Manager', 'Customer ID'])\n\n #\n # Main Loop over all Customer IDs\n #\n customer_ids = Customer_Ids.query.all()\n print('There are', len(customer_ids), 'unique Customer IDs')\n print()\n\n for my_id_info in customer_ids:\n customer_id = my_id_info.customer_id\n if customer_id == 'INVALID':\n continue\n\n # Loop over each Alias we found\n for alias_num, my_alias in enumerate(my_id_info.customer_aliases):\n customer_name = my_alias.customer_alias\n\n #\n # Perform All Queries for this Customer Alias\n #\n sql = \"SELECT * FROM ta_adoption_db.subscriptions where end_customer = \" + '\"' + \\\n customer_name + '\"'\n\n # sql = \"SELECT * FROM ta_adoption_db.subscriptions where end_customer = \" + '\"' + \\\n # customer_name + '\"' + \" and status = 'ACTIVE'\"\n my_subs = db.engine.execute(sql)\n my_services = Services.query.filter_by(end_customer=customer_name).all()\n my_telemetry = Telemetry.query.filter_by(erp_cust_name=customer_name).all()\n my_bookings = Bookings.query.filter_by(erp_end_customer_name=customer_name).all()\n\n #\n # Get where this was sold and by who\n #\n cust_sales_lev_1 = my_bookings[0].sales_level_1\n cust_sales_lev_2 = my_bookings[0].sales_level_2\n cust_sales_lev_3 = my_bookings[0].sales_level_3\n cust_sales_lev_4 = my_bookings[0].sales_level_4\n cust_sales_lev_5 = my_bookings[0].sales_level_5\n cust_sales_lev_6 = my_bookings[0].sales_level_6\n sales_level = cust_sales_lev_1 + ',' + cust_sales_lev_2 + ',' + cust_sales_lev_3 + ',' + \\\n cust_sales_lev_4 + ',' + cust_sales_lev_5 + ',' + cust_sales_lev_6\n sales_team = tool.find_team(team_dict, sales_level)\n pss = sales_team[0]\n tsa = sales_team[1]\n cust_acct_mgr = my_bookings[0].sales_agent_name\n\n #\n # Subscription Analysis\n # Loop over each Subscription for this Customer_Name / Customer_ID\n #\n rosetta_row = []\n for my_rec in my_subs:\n #\n # Gather Subscription record info\n #\n print(my_subs.rowcount ,my_rec)\n time.sleep(.2)\n sub_id = my_rec.subscription_id\n sub_offer_name = my_rec.offer_name\n sub_start_date = my_rec.start_date\n sub_term = int(float(my_rec.initial_term))\n sub_status = my_rec.status\n sub_so_num = my_rec.weborderid\n sub_renewal_date = my_rec.renewal_date\n\n #\n # Check Telemetry Table and SaaS data\n #\n telemetry_name = ''\n telemetry_vrf_number = ''\n telemetry_num_of_licenses = 0\n telemetry_actual_sensors_installed = 0\n telemetry_inactive_agents = 0\n telemetry_so = ''\n telemetry_start_date = ''\n\n # Calculated Fields for Telemetry\n saas_flag = False\n telemetry_active_agents = 0\n pct_installed = 0\n pct_active = 0\n\n if len(my_telemetry) == 1:\n # print(\"\\tNumber of Telemetry sessions\", len(my_telemetry), my_telemetry[0].name)\n saas_flag = True\n # 
Telemetry Data\n telemetry_name = my_telemetry[0].name\n telemetry_vrf_number = my_telemetry[0].vrf\n telemetry_num_of_licenses = my_telemetry[0].licensed\n telemetry_actual_sensors_installed = my_telemetry[0].installed\n telemetry_inactive_agents = my_telemetry[0].inactive\n telemetry_so = my_telemetry[0].so_number\n telemetry_start_date = my_telemetry[0].start_date\n elif len(my_telemetry) > 1:\n print(\"\\tERROR: More than one Telemetry session found !\", len(my_telemetry))\n exit()\n\n #\n # Make the Calculations for Telemetry\n #\n if int(telemetry_num_of_licenses) != 0:\n telemetry_active_agents = telemetry_actual_sensors_installed - telemetry_inactive_agents\n pct_installed = telemetry_actual_sensors_installed / telemetry_num_of_licenses\n pct_active = telemetry_active_agents / telemetry_num_of_licenses\n\n # Fields to grab from other places\n as_pid = '1234'\n as_dm = 'jim'\n\n # Calc Adoption Factor and Days to Renewal\n days_to_renew = ''\n now = datetime.datetime.now()\n adoption_factor = 0\n if isinstance(telemetry_start_date, datetime.datetime) and \\\n isinstance(sub_renewal_date, datetime.datetime):\n days_to_renew = (sub_renewal_date - now).days\n\n sub_days_total = int(sub_term) * 30\n sub_days_active = sub_days_total - days_to_renew\n\n pct_sub_expired = sub_days_active / sub_days_total\n adoption_factor = (pct_active / pct_sub_expired)\n adoption_factor = str(round(adoption_factor, 2)) + '_non$_'\n\n #\n # All the math is done so now\n # Format these all to push_list_to_xls.py\n #\n telemetry_num_of_licenses = int(telemetry_num_of_licenses)\n telemetry_actual_sensors_installed = int(telemetry_actual_sensors_installed)\n telemetry_active_agents = int(telemetry_active_agents)\n pct_installed = str(round(pct_installed, 1)) + '_%_'\n pct_active = str(round(pct_active, 1)) + '_%_'\n\n # Push out a row\n rosetta_row = [customer_name, telemetry_num_of_licenses, pct_installed, pct_active,\n adoption_factor,\n sub_term, sub_status, days_to_renew,\n pss, tsa, cust_acct_mgr, cust_sales_lev_1, cust_sales_lev_2,\n telemetry_name, telemetry_vrf_number,\n telemetry_actual_sensors_installed, telemetry_active_agents,\n sub_offer_name, sub_so_num, sub_id,telemetry_start_date, sub_renewal_date,\n as_pid, as_dm, customer_id]\n\n rosetta_list.append(rosetta_row)\n\n tool.push_list_to_xls(rosetta_list, 'stan.xlsx')\n\n print('done')\n my_telemetry = Telemetry.query.all()\n a_list = [['cust','vrf','order']]\n for r in my_telemetry:\n telemetry_cust = r.erp_cust_name\n vrf = r.vrf\n order = r.so_number\n found_it = False\n for my_row in rosetta_list:\n if my_row[0] == telemetry_cust:\n found_it = True\n # print('found',telemetry_cust)\n break\n if found_it == False:\n a_list.append([telemetry_cust, vrf, order])\n print('MISSING', telemetry_cust, vrf, order)\n tool.push_list_to_xls(a_list, 'blanche.xlsx')\n\n return\n\n\nif __name__ == \"__main__\" and __package__ is None:\n # build_rosetta_stone()\n # sub_analysis('CHOCTAW CASINO ADMINISTRATION')\n # print()\n sub_analysis('Clarivate Analytics')\n # sub_analysis('WHATABURGER INC')\n # sub_analysis('JACOB K JAVITS CONVENTION CTR')\n # sub_analysis('FRUIT OF THE LOOM')\n\n\n","sub_path":"my_app/old_revs/build_rosetta_stone_r1.py","file_name":"build_rosetta_stone_r1.py","file_ext":"py","file_size_in_byte":10149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"356902788","text":"import os, sys, glob\nimport utilities\n\ninFilePath = sys.argv[1]\noutFilePath = sys.argv[2]\n\noutFile = 
open(outFilePath, 'w')\n\nfor line in open(inFilePath):\n    if len(line.strip()) == 0:\n        continue\n\n    outFile.write(line)\n\noutFile.close()\n","sub_path":"code/RemoveBlankLines.py","file_name":"RemoveBlankLines.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"419027283","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 18 18:21:43 2018\n\n@author: Atman\n\"\"\"\n\nmessage = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']\ntemp={}\nfor i in message:\n    temp.setdefault(i, 0)\n    temp[i] = temp[i] + 1\nprint(temp)\nfor a in temp:\n    print(temp[a])\n    ","sub_path":"PYTHON PROJECTS/Python Scripts/try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"238043713","text":"# coding=utf-8\nfrom setuptools import setup, find_packages\n\nreadme = open('README.md', 'r')\nREADME_TEXT = readme.read()\nreadme.close()\n\nsetup(\n    name='rummy',\n    version='1.2.1',\n    url='https://github.com/sarcoma/Python-Rummy',\n    license='MIT',\n    author='sarcoma',\n    author_email='sean@orderandchaoscreative.com',\n    description='Console Rummy game',\n    long_description=README_TEXT,\n    long_description_content_type='text/markdown',\n    entry_points={\n        'console_scripts': ['rummy=rummy.__main__:main'],\n    },\n    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),\n    package_data={'rummy': [\"templates/*.txt\"]},\n    include_package_data=True,\n    install_requires=['colorama', 'text_template', 'ansi_colours'],\n    project_urls={\n        'Order & Chaos Creative': 'https://orderandchaoscreative.com',\n    },\n    python_requires='>=3.4'\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"394317596","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('ct', '0002_auto_20141110_1820'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='conceptlink',\n            name='relationship',\n            field=models.CharField(default=b'defines', max_length=10, choices=[(b'is', b'Represents (unique ID for)'), (b'defines', b'Defines'), (b'informal', b'Intuitive statement of'), (b'formaldef', b'Formal definition for'), (b'tests', b'Tests understanding of'), (b'derives', b'Derives'), (b'proves', b'Proves'), (b'assumes', b'Assumes'), (b'motiv', b'Motivates'), (b'illust', b'Illustrates'), (b'intro', b'Introduces'), (b'comment', b'Comments on'), (b'warns', b'Warning about')]),\n        ),\n    ]\n","sub_path":"mysite/ct/migrations/0003_auto_20141110_2153.py","file_name":"0003_auto_20141110_2153.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"397762188","text":"import math\n\n\nclass Fraction:\n    \"\"\"A fraction with a numerator and denominator and arithmetic operations.\n\n    Fractions are always stored in proper form, without common factors in \n    numerator and denominator, and denominator >= 0.\n    Since Fractions are stored in proper form, each value has a\n    unique representation, e.g. 
4/5, 24/30, and -20/-25 have the same\n    internal representation.\n\n\n    Note:\n        0/0 form is represented as NaN (short form of Not a Number)\n        1/0 denotes an indeterminate form of positive infinity (math.inf).\n        -1/0 denotes an indeterminate form of negative infinity (-math.inf).\n    \"\"\"\n    \n    def __init__(self, numerator, denominator=1):\n        \"\"\"Initializes a new fraction with the given numerator\n        and denominator (default 1).\n        \"\"\"\n        if type(numerator) is not int or type(denominator) is not int:\n            raise ValueError(\"The numerator or denominator of a fraction must be an integer.\")\n        self.is_infinity = False\n        gcd = math.gcd(numerator, denominator)\n        self.numerator = int(numerator / gcd)\n        self.denominator = int(denominator / gcd)\n        if self.denominator < 0:\n            # keep the denominator non-negative by moving the sign to the numerator\n            self.numerator *= -1\n            self.denominator *= -1\n\n    def __add__(self, other):\n        \"\"\"Returns the sum of two fractions as a new fraction.\n        Use the standard formula a/b + c/d = (ad+bc)/(b*d)\n\n        Args:\n            other (Fraction): Another fraction to add\n        Returns:\n            Fraction: The summation of both operated fractions\n        \"\"\"\n        if type(other) is not Fraction:\n            if math.isinf(other):\n                return other\n            else:\n                numerator_result = self.numerator + (other*self.denominator)\n                denominator_product = self.denominator\n        else:\n            numerator_result = (self.numerator*other.denominator) + (other.numerator*self.denominator)\n            denominator_product = self.denominator*other.denominator\n        return Fraction(numerator_result, denominator_product)\n\n    def __sub__(self, other):\n        \"\"\"Returns the difference of two fractions as a new fraction.\n        Args:\n            other (Fraction): Another fraction to subtract\n\n        Returns:\n            Fraction: The difference result of both operated fractions\n        \"\"\"\n        # negate via __neg__ (or unary minus for ints) so the other operand is not mutated in place\n        return self.__add__(-other)\n\n    def __mul__(self, other):\n        \"\"\"Returns the product of two fractions according to the multiplication rule of fractions.\n        A fraction with a denominator of 0 is not allowed unless its numerator is 1 or -1.\n\n        Args:\n            other (Fraction): Another fraction to multiply\n\n        Returns:\n            Fraction: The product of both operated fractions\n        \"\"\"\n        numerator_result = self.numerator*other.numerator\n        denominator_result = self.denominator*other.denominator\n        return Fraction(numerator_result, denominator_result)\n\n    def to_decimal(self):\n        \"\"\"Converts this fraction to its decimal equivalent\n\n        Returns:\n            float: The decimal representation\n        \"\"\"\n        return float(self.numerator/self.denominator)\n\n    @classmethod\n    def to_comparable(cls, obj):\n        \"\"\"Convert the given number into its comparable form\n\n        Args:\n            obj: The given number\n\n        Returns:\n            any: The given object in its comparable form\n        \"\"\"\n        if type(obj) != cls:\n            return obj\n        else:\n            return obj.to_decimal()\n\n    @classmethod\n    def from_str(cls, frac_str: str):\n        \"\"\"Converts a fraction representation into a Fraction object\n\n        Args:\n            frac_str (str): A fraction representation to convert\n\n        Returns:\n            Fraction: The parsed result from string representation\n        \"\"\"\n        if \"/\" not in frac_str:\n            raise ValueError(\"Invalid fraction representation\")\n        numerator, denominator = frac_str.split(\"/\")\n        return Fraction(int(numerator), int(denominator))\n\n    def __gt__(self, other):\n        \"\"\"Checks whether the first fraction is greater than the other.\n\n        Args:\n            other: Another fraction to compare with\n\n        Returns:\n            bool: Whether the first fraction is greater 
than the other or not\n        \"\"\"\n        return self.to_decimal() > Fraction.to_comparable(other)\n\n    def __lt__(self, other):\n        \"\"\"Checks whether the first fraction is less than the other.\n\n        Args:\n            other: Another fraction to compare with\n\n        Returns:\n            bool: Whether the first fraction is less than the other or not\n        \"\"\"\n        return self.to_decimal() < Fraction.to_comparable(other)\n\n    def __neg__(self):\n        \"\"\"Negates this fraction's sign from positive to negative\n        and negative to positive\n\n        Returns:\n            Fraction: The negated fraction (as a new one)\n        \"\"\"\n        numerator = -self.numerator\n        return Fraction(numerator, self.denominator)\n\n    def __eq__(self, frac):\n        \"\"\"Two fractions are equal if they have the same value.\n        Fractions are stored in proper form so the internal representation\n        is unique (3/6 is the same as 1/2).\n        \"\"\"\n        if type(frac) is int:\n            return self.numerator == frac\n        else:\n            return self.to_decimal() == Fraction.to_comparable(frac)\n\n    def __str__(self):\n        \"\"\"Represents the fraction as the numerator over the denominator.\n        A fraction with a denominator of 1 is represented as a whole number instead.\n\n        Returns:\n            str: A string representation of a Fraction\n        \"\"\"\n        if self.denominator == 1:\n            return f\"{self.numerator}\"\n        else:\n            return f\"{self.numerator}/{self.denominator}\"\n\n    def __new__(cls, numerator, denominator=1):\n        # compare ints with == instead of 'is'; identity checks only work for CPython's cached small ints\n        if denominator == 0:\n            if numerator == 0:\n                return math.nan\n            elif numerator == 1 or numerator == -1:\n                return numerator*math.inf\n            else:\n                raise ValueError(\"A fraction cannot have a denominator of zero\")\n        elif denominator == 1:\n            return numerator\n        else:\n            return object.__new__(cls)\n","sub_path":"fraction.py","file_name":"fraction.py","file_ext":"py","file_size_in_byte":6542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"268854984","text":"####this file is only used for continuous evaluation test!\n\nimport os\nimport sys\nsys.path.insert(0, os.environ['ceroot'])\n#sys.path.append('.')\nfrom kpi import CostKpi, DurationKpi, AccKpi\n\n#### NOTE kpi.py should be shared in models in some way!!!!\n\ntrain_cost_xnli_card1_kpi = CostKpi(\n    'train_cost_xnli_card1', 0.002, 0, actived=True)\ntrain_acc_xnli_card1_kpi = AccKpi(\n    'train_acc_xnli_card1', 0.002, 0, actived=True)\ntrain_duration_xnli_card1_kpi = DurationKpi(\n    'train_duration_xnli_card1', 0.01, 0, actived=True)\ntrain_cost_xnli_card4_kpi = CostKpi(\n    'train_cost_xnli_card4', 0.002, 0, actived=True)\ntrain_acc_xnli_card4_kpi = AccKpi('train_acc_xnli_card4', 0.02, 0, actived=True)\ntrain_duration_xnli_card4_kpi = DurationKpi(\n    'train_duration_xnli_card4', 0.03, 0, actived=True)\n\ntracking_kpis = [\n    train_cost_xnli_card1_kpi,\n    train_acc_xnli_card1_kpi,\n    train_duration_xnli_card1_kpi,\n    train_cost_xnli_card4_kpi,\n    train_acc_xnli_card4_kpi,\n    train_duration_xnli_card4_kpi,\n]\n\n\ndef parse_log(log):\n    '''\n    This method should be implemented by model developers.\n    The suggestion:\n    each line in the log should be key, value, for example:\n    \"\n    train_cost\\t1.0\n    test_cost\\t1.0\n    train_cost\\t1.0\n    train_cost\\t1.0\n    train_acc\\t1.2\n    \"\n    '''\n    for line in log.split('\\n'):\n        fs = line.strip().split('\\t')\n        print(fs)\n        if len(fs) == 3 and fs[0] == 'kpis':\n            print(\"-----%s\" % fs)\n            kpi_name = fs[1]\n            kpi_value = float(fs[2])\n            yield kpi_name, kpi_value\n\n\ndef log_to_ce(log):\n    kpi_tracker = {}\n    for kpi in tracking_kpis:\n        kpi_tracker[kpi.name] = kpi\n\n    for (kpi_name, kpi_value) in 
parse_log(log):\n        print(kpi_name, kpi_value)\n        kpi_tracker[kpi_name].add_record(kpi_value)\n        kpi_tracker[kpi_name].persist()\n\n\nif __name__ == '__main__':\n    log = sys.stdin.read()\n    print(\"*****\")\n    print(log)\n    print(\"****\")\n    log_to_ce(log)\n","sub_path":"DuReader-Robust/src/_ce.py","file_name":"_ce.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"228205554","text":"# class Ercha:\n#     def __init__(self, val):\n#         self.val = val\n#         self.left = None\n#         self.right = None\n# def insert(root, val):\n#     if root is None:\n#         root=Ercha(val)\n#\n#     else:\n#         if val < root.val:\n#             root.left = insert(root.left,val)\n#         elif val > root.val:\n#             root.right = insert(root.right,val)\n#     return root\n\n\nclass Node:\n    def __init__(self, data=None):\n        self.data = data\n        self.left = None\n        self.right = None\n\n\nclass BTree:\n    def __init__(self, dataset):\n        self.root = self.create_node(dataset)\n\n    def create_node(self, dataset, i=0):\n        if i >= len(dataset):\n            return\n        node = Node()\n        node.data = dataset[i]\n\n        left_index = 2 * (i + 1)\n        right_index = left_index + 1\n\n        node.left = self.create_node(dataset, left_index - 1)\n        node.right = self.create_node(dataset, right_index - 1)\n\n        return node\n\n    def _loop(self, node, flag=0):\n        if flag < 0:\n            yield node.data\n\n        if node.left:\n            for d in self._loop(node.left):\n                yield d\n        if flag == 0:\n            yield node.data\n        if node.right:\n            for d in self._loop(node.right):\n                yield d\n        if flag > 0:\n            yield node.data\n\n    def duilei(self,n,node):\n        dataset =[]\n        while True:\n            dataset[n]= node\n            dataset.append(self._loop(node.left))\n            dataset.append(self._loop(node.right))\n\n\n\n    def __iter__(self):\n        return self._loop(self.root)\n\n\ntree = BTree((1,2,3,4,5,6,7,8,9,10))\nfor d in tree:\n    print(d)\n","sub_path":"d盘/keshang/bootstrap-4.3.1/ggchat/src/gglib/erchashu.py","file_name":"erchashu.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"25433252","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup\n\npackages = [\"slack_click\"]\n\npackage_data = {\"\": [\"*\"]}\n\ninstall_requires = [\n    \"click>=8.0.1,<9.0.0\",\n    \"first>=2.0.2,<3.0.0\",\n    \"pyee>=8.1.0,<9.0.0\",\n    \"slack-bolt>=1.6.0,<2.0.0\",\n]\n\nsetup_kwargs = {\n    \"name\": \"slack-click\",\n    \"version\": \"0.1.0\",\n    \"description\": \"\",\n    \"long_description\": None,\n    \"author\": \"Jeremy Schulman\",\n    \"author_email\": \"jeremy.schulman@mlb.com\",\n    \"maintainer\": None,\n    \"maintainer_email\": None,\n    \"url\": None,\n    \"packages\": packages,\n    \"package_data\": package_data,\n    \"install_requires\": install_requires,\n    \"python_requires\": \">=3.8,<4.0\",\n}\n\n\nsetup(**setup_kwargs)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"121607136","text":"\"\"\"\nBase callback that records per-epoch metrics and plots losses and predictions.\n\"\"\"\nimport os\nimport json\nimport matplotlib\nmatplotlib.use('agg')\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom torchsummary import summary\nfrom tqdm import tqdm\n\nclass PlotterBase:\n\n    def __init__(self, train_generator, val_generator, summary_path, loss, sample_size=1, model=None):\n        self.train_generator = train_generator\n        self.val_generator = val_generator\n        self.summary_path = summary_path\n        self.loss = loss\n        self.sample_size = sample_size\n        # keep a reference to the model; _run() uses self.model.predict_on_batch\n        self.model = model\n\n    def get_metrics(self, y_true, y_pred):\n        loss = self.loss(y_true, y_pred)\n        abs_res = np.abs(y_true - 
y_pred)\n        mean_error = abs_res.mean()\n        max_error = abs_res.max()\n        std_error = abs_res.std()\n        return loss, mean_error, max_error, std_error\n\n    def on_epoch_end(self, epoch, model, logs=None):\n        train_labels, train_predictions = self.run(model, self.train_generator)\n        val_labels, val_predictions = self.run(model, self.val_generator)\n        train_loss, train_mean_error, train_max_error, train_std_error = self.get_metrics(train_labels,\n                                                                                          train_predictions)\n        val_loss, val_mean_error, val_max_error, val_std_error = self.get_metrics(val_labels, val_predictions)\n\n        update = {\n            'train_loss': train_loss.astype(float),\n            'train_mean_error': train_mean_error.astype(float),\n            'train_max_error': train_max_error.astype(float),\n            'train_std_error': train_std_error.astype(float),\n            'val_loss': val_loss.astype(float),\n            'val_mean_error': val_mean_error.astype(float),\n            'val_max_error': val_max_error.astype(float),\n            'val_std_error': val_std_error.astype(float)\n        }\n        print('train_loss', update['train_loss'], 'val_loss', update['val_loss'])\n        self.write_summary(epoch, update)\n        self.write_graph(epoch, train_labels, train_predictions, val_labels, val_predictions)\n\n    def on_train_begin(self, model, logs=None):\n        \"\"\"\n        If the summary file doesn't exist yet, create the graphs dir and write an empty summary.\n        \"\"\"\n        self.graph_path = self.summary_path.parents[0].joinpath('graphs').resolve()\n        self.model_summary = self.summary_path.parents[0].joinpath('model.txt').resolve()\n        if not self.graph_path.exists():\n            self.graph_path.mkdir(parents=True)\n        if not self.summary_path.exists():\n            self.save_summary({})\n        if model is not None and False: # fixme this doesn't work for torch models\n            with open(self.model_summary, 'w') as f:\n                model.summary(print_fn=lambda x: f.write(x + '\\n'))\n\n    @classmethod\n    def load_summary(cls, summary_path):\n        with open(summary_path, 'r') as f:\n            s = json.load(f)\n        return s\n\n    def save_summary(self, summary):\n        with open(self.summary_path, 'w') as f:\n            json.dump(summary, f, indent=4, sort_keys=True)\n\n    def write_summary(self, key, update):\n        summary = self.load_summary(self.summary_path)\n        summary.update({f'{key:02d}': update})\n        self.save_summary(summary)\n\n    def get_losses(self):\n        summary = self.load_summary(self.summary_path)\n        num_epochs = len(summary)\n        epochs, train_loss, val_loss = np.zeros(num_epochs), np.zeros(num_epochs), np.zeros(num_epochs)\n        for i, (epoch, v) in enumerate(summary.items()):\n            epochs[i] = int(epoch)\n            train_loss[i] = v['train_loss']\n            val_loss[i] = v['val_loss']\n        sorted_inds = epochs.argsort()\n        epochs = epochs[sorted_inds]\n        train_loss = train_loss[sorted_inds]\n        val_loss = val_loss[sorted_inds]\n        if num_epochs > 10 and False:\n            epochs = epochs[10:]\n            train_loss = train_loss[10:]\n            val_loss = val_loss[10:]\n\n        return epochs, train_loss, val_loss\n\n    def get_labels_predictions(self, train_labels, train_predictions, val_labels, val_predictions):\n        raise NotImplementedError\n\n    def write_graph(self, epoch, train_labels, train_predictions, val_labels, val_predictions):\n        epochs, train_loss, val_loss = self.get_losses()\n\n        loss_ax, train_ax, val_ax = self.get_gridspec()\n        ax_lim = [-0.9, 1.2]\n\n        train_ax.set_ylim(ax_lim)\n        val_ax.set_ylim(ax_lim)\n        loss_ax.set_title('loss')\n        train_ax.set_title('train')\n        val_ax.set_title('val')\n\n        (train_labels, train_predictions), (val_labels, val_predictions) = self.get_labels_predictions(train_labels, train_predictions, val_labels, val_predictions)\n\n        loss_ax.plot(epochs, train_loss, 'g', label='train')\n        loss_ax.plot(epochs, val_loss, 'r', 
label='val', alpha=0.3)\n\n        train_ax.plot(train_labels, 'g', label='label')\n        train_ax.plot(train_predictions, 'r', alpha=0.2, label='prediction')\n        val_ax.plot(val_labels, 'g', label='label')\n        val_ax.plot(val_predictions, 'r', alpha=0.2, label='prediction')\n\n        train_ax.legend()\n        val_ax.legend()\n        loss_ax.legend()\n        plt.savefig(self.graph_path.joinpath(f'epoch_{epoch}.png').resolve())\n        plt.close()\n\n    @classmethod\n    def get_gridspec(cls):\n        fig10 = plt.figure(constrained_layout=True)\n        gs0 = fig10.add_gridspec(1, 2)\n        loss_ax = gs0[0].subgridspec(1, 1).subplots()\n        train_ax, val_ax = gs0[1].subgridspec(2, 1).subplots()\n        return loss_ax, train_ax, val_ax\n\n\n    def run(self, model, gen_obj):\n        raise NotImplementedError\n\n    # fixme this is a divergence point try to abstract this and rename it to run\n    # one solution might be to accept another dimension for y_true and y_pred which\n    # will be 9 for omp and 1 for the direct force approaches and drop the reshape\n    # which will need to be handled later in plotter_callback but is already handled in omp_plotter_callback\n    def _run(self, gen_obj):\n        y_true = np.zeros(gen_obj.steps * gen_obj.batch_size * self.sample_size) # here\n        y_pred = np.zeros(gen_obj.steps * gen_obj.batch_size * self.sample_size) # here\n        for i, (data, labels) in tqdm(enumerate(gen_obj.val_generator())):\n            if i == gen_obj.steps:\n                break\n            start_index = i * gen_obj.batch_size\n            end_index = start_index + len(data)\n            pred = self.model.predict_on_batch(data)\n            y_true[start_index:end_index] = labels\n            y_pred[start_index:end_index] = pred.reshape(-1) # here\n        return y_true, y_pred\n\n    @classmethod\n    def plot_summary(cls, summary_path):\n        graph_path = summary_path.parents[0].joinpath('graphs').resolve()\n        graph_path.mkdir()\n        summary = cls.load_summary(summary_path)\n        # convert the stored json lists to arrays so argsort and fancy indexing work\n        train_labels = np.array(summary['train_labels'])\n        val_labels = np.array(summary['val_labels'])\n        train_inds = train_labels.argsort()\n        val_inds = val_labels.argsort()\n        train_labels = train_labels[train_inds]\n        val_labels = val_labels[val_inds]\n        del summary['train_labels'], summary['val_labels']\n        f, (ax1, ax2) = plt.subplots(1, 2)\n        for k, v in summary.items():\n            train_pred = np.array(v['train_prediction'])[train_inds]\n            val_pred = np.array(v['val_prediction'])[val_inds]\n            ax1.plot(train_labels, 'g')\n            ax1.plot(train_pred, 'r')\n            ax2.plot(val_labels, 'g')\n            ax2.plot(val_pred, 'r')\n            plt.savefig(graph_path.joinpath(f'epoch_{k}.png'))\n            ax1.cla()\n            ax2.cla()\n\n\nif __name__ == '__main__':\n    PlotterBase.get_gridspec()\n    pass","sub_path":"utils/callbacks/plotter_base.py","file_name":"plotter_base.py","file_ext":"py","file_size_in_byte":7717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"453393847","text":"import requests\nfrom django import template\n\nfrom blog.models import Category\n\nregister = template.Library() # required line\n\n\n@register.simple_tag(name='categories_list') # the tag returns data\ndef get_categories(): # helper that fetches the categories (so we don't have to repeat this in every view)\n    return Category.objects.all() # invoked with {% load blog_tags %} and {% get_categories as category %}\n\n\n@register.inclusion_tag('blog/home.html') # __EXAMPLE__, not used in the project\ndef show_categories():\n    categories = Category.objects.all() # return a dict to blog/home.html and do the processing there\n    return {'categories': categories} # called via {% show_categories %} wherever 
needed\n","sub_path":"blog/templatetags/blog_tags.py","file_name":"blog_tags.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"36896482","text":"import cv2\nimport numpy as np\nimport random as rd\n\ndef getTriangles(img, insidePt, edgePt):\n\n    # orig = np.copy (img)\n    width = img.shape[1]\n    height = img.shape[0]\n    rect = (0, 0, width, height)\n    subdiv = cv2.Subdiv2D(rect)\n\n    # points on the inside\n\n    for i in range(0, insidePt):\n        randx = rd.randint(0, width-1)\n        randy = rd.randint(0, height-1)\n        subdiv.insert((randx, randy))\n\n    # edge points\n\n    for i in range(0, edgePt):\n        subdiv.insert((0, rd.randint(0, height-1)))\n        subdiv.insert((rd.randint(0, width-1), 0))\n        subdiv.insert((width-1, rd.randint(0, height-1)))\n        subdiv.insert((rd.randint(0, width-1), height-1))\n\n    # corners\n\n    subdiv.insert((0, 0))\n    subdiv.insert((0, height-1))\n    subdiv.insert((width-1, 0))\n    subdiv.insert((width-1, height-1))\n\n    triangleList = subdiv.getTriangleList()\n    noPieces = triangleList.shape[0]\n\n    return (noPieces, triangleList)\n\n","sub_path":"MosaicPieces/TriangulationTest.py","file_name":"TriangulationTest.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"480203663","text":"from pymel.core import *\nimport maya.cmds as cmds\n\nfrom maya.OpenMaya import MNodeMessage, MEventMessage, MSelectionList, MItSelectionList, MObject, MGlobal, MDagPath\nimport json\n\nfrom milk.shotgun.v1_1.tank import get_context\nfrom alembicHolder.cmds import abcToApi\n\nRED = (1.0, 0.2, 0.2)\nGREEN = (0.4, 1.0, 0.4)\n\nclass LocalizedTemplate(ui.AETemplate):\n    \"\"\"\n    Automatically apply language localizations to template arguments\n    \"\"\"\n    def _applyLocalization(self, name):\n        if name is not None and len(name)>2 and name[0] == 'k' and name[1].isupper():\n            return mel.uiRes('m_' + self.__class__.__name__ + '.' 
+ name)\n return name\n\n def addControl(self, control, label=None, **kwargs):\n \"\"\"\n Add localised control to template\n \"\"\"\n label = self._applyLocalization(label)\n ui.AETemplate.addControl(self, control, label=label, **kwargs)\n\n def beginLayout(self, name, collapse=True):\n \"\"\"\n Start layout\n \"\"\"\n name = self._applyLocalization(name)\n ui.AETemplate.beginLayout(self, name, collapse=collapse)\n\nclass BaseTemplate(LocalizedTemplate):\n def __init__(self, nodeName):\n \"\"\"\n Base template\n \"\"\"\n LocalizedTemplate.__init__(self,nodeName)\n self.beginScrollLayout()\n self.buildBody(nodeName)\n self.endScrollLayout()\n\nclass AEalembicHolderTemplate(BaseTemplate):\n \"\"\"\n Alembic Holder Template\n \"\"\"\n\n def _refresh(self, *args):\n \"\"\"\n \"\"\"\n if os.path.isfile(args[0]):\n bg_color = GREEN\n enable = True\n else:\n bg_color = RED\n enable = False\n\n cmds.button(\"%s\" % (self.btn), backgroundColor=bg_color, edit=True, enable=enable) \n\n def _abcWidget(self, cacheName):\n \"\"\"\n ABC Path widgets\n \"\"\"\n # fix attr name firstly\n self.cache = cacheName + \"[0]\"\n\n cmds.setUITemplate('attributeEditorTemplate', pushTemplate=True)\n cmds.columnLayout(adjustableColumn=True)\n cmds.rowLayout(numberOfColumns=4, adjustableColumn4=2)\n cmds.text(label=\"ABC Path\")\n cmds.textField(\"abcpathNameField\")\n cmds.symbolButton(image=\"navButtonBrowse.png\", width=15, height=15, command=self._abcBrowser)\n cmds.setParent('..')\n cmds.setUITemplate(popTemplate=True)\n cmds.setParent('..')\n self._abcConnect(cacheName)\n cmds.select()\n\n def _abcConnect(self, cacheName):\n \"\"\"\n Connect the new control to existing control\n \"\"\"\n self.cache = cacheName + \"[0]\" \n cmds.connectControl(\"abcpathNameField\", self.cache)\n\n def _abcBrowser(self, args):\n \"\"\"\n Open file dialog and set the cache attribute\n \"\"\"\n ret = cmds.fileDialog2(fileFilter=\"Alembic (*.abc)\", fileMode=1, dialogStyle=2, caption=\"Select Alembic File\")\n if ret:\n selected = abcToApi.getCurrentSelection()\n cmds.setAttr(\"%s.cacheFileNames[0]\" % selected, ret[0], type=\"string\")\n\n # now check if we need to update transforms\n node = cmds.listRelatives(selected, parent=True)[0]\n if cmds.getAttr('%s.updateTransforms' % selected):\n abcToApi.update_xforms(ret[0], node)\n\n def _abcImport(self, args):\n \"\"\"\n Import the alembic file via abcToApi\n \"\"\"\n for i in abcToApi.getSelectedAlembicHolder(cls=True):\n i.importAbc()\n\n def _jsonWidget(self, json):\n \"\"\"\n Json Path widgets\n \"\"\"\n self.json = json\n\n cmds.setUITemplate('attributeEditorTemplate', pushTemplate=True)\n cmds.columnLayout(adjustableColumn=True)\n cmds.rowLayout(numberOfColumns=4, adjustableColumn4=2)\n cmds.text(label=\"Json Path\")\n cmds.textField(\"jsonpathNameField\", editable=False, enable=False, textChangedCommand=self._refresh) \n cmds.symbolButton(image=\"navButtonBrowse.png\", width=15, height=15, command=self._jsonBrowser)\n cmds.setParent('..')\n cmds.setUITemplate(popTemplate=True)\n cmds.setParent('..')\n self._jsonConnect(json)\n cmds.select()\n\n def _jsonConnect(self, json):\n \"\"\"\n Connect the new control to existing control\n \"\"\"\n cmds.connectControl(\"jsonpathNameField\", self.json)\n\n def _jsonBrowser(self, args):\n \"\"\"\n Open file dialog and set the jsonFile attribute\n \"\"\"\n ret = cmds.fileDialog2(fileFilter=\"Json (*.json)\", fileMode=1, dialogStyle=2, caption=\"Select Json File\")\n if ret:\n selected = abcToApi.getCurrentSelection()\n 
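# push the chosen file onto the node's jsonFile attribute, then rebuild the editor so dependent widgets re-validate\n            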
cmds.setAttr(\"%s.jsonFile\" % selected, ret[0], type=\"string\")\n cmds.refreshEditorTemplates()\n\n def _jsonImport(self, args):\n \"\"\"\n Import the json file via abcToApi\n \"\"\"\n for i in abcToApi.getSelectedAlembicHolder(cls=True):\n i.importJson()\n\n def _shadersWidget(self, shaders):\n \"\"\"\n Shaders Path widgets\n \"\"\"\n self.shaders = shaders\n\n cmds.setUITemplate('attributeEditorTemplate', pushTemplate=True)\n cmds.columnLayout(adjustableColumn=True)\n cmds.rowLayout(numberOfColumns=4, adjustableColumn4=2)\n cmds.text(label=\"Shaders Path\")\n cmds.textField(\"shaderspathNameField\", editable=False, enable=False, textChangedCommand=self._refresh)\n cmds.symbolButton(image=\"navButtonBrowse.png\", width=15, height=15, command=self._shadersBrowser)\n cmds.setParent('..')\n cmds.setUITemplate(popTemplate=True)\n cmds.setParent('..')\n self._shadersConnect(shaders)\n cmds.select()\n\n def _shadersConnect(self, json):\n \"\"\"\n Connect the new control to existing control\n \"\"\"\n cmds.connectControl(\"shaderspathNameField\", self.shaders)\n\n def _shadersBrowser(self, args):\n \"\"\"\n Open file dialog and set the shaders attribute\n \"\"\"\n ret = cmds.fileDialog2(fileFilter=\"Alembic (*.abc)\", fileMode=1, dialogStyle=2, caption=\"Select Alembic File\")\n if ret:\n selected = abcToApi.getCurrentSelection()\n cmds.setAttr(\"%s.abcShaders\" % selected, ret[0], type=\"string\")\n cmds.refreshEditorTemplates()\n\n def _shadersImport(self, args):\n \"\"\"\n Import the shaders file via abcToApi\n \"\"\"\n for i in abcToApi.getSelectedAlembicHolder(cls=True):\n i.importShaders()\n\n def _localiseLookdevWidget(self, loader):\n \"\"\"\n \"\"\"\n\n self.loader = loader\n cmds.setUITemplate('attributeEditorTemplate', pushTemplate=True)\n cmds.columnLayout(adjustableColumn=True)\n cmds.rowLayout(numberOfColumns=2, adjustableColumn2=1)\n\n if not cmds.getAttr(\"%sjsonFile\" % self.loader) or not cmds.getAttr(\"%sabcShaders\" % self.loader):\n bg_color = RED\n enable = False\n else:\n bg_color = GREEN\n enable = True\n\n self.btn = cmds.button(label='Localise Lookdev', command=self._localiseLookdevImport, enableBackground=True, backgroundColor=bg_color, enable=enable)\n cmds.setParent('..')\n cmds.setUITemplate(popTemplate=True)\n cmds.setParent('..')\n self._localiseLookdevConnect(loader)\n cmds.select()\n\n def _localiseLookdevConnect(self, json):\n \"\"\"\n Connect the new control to existing control\n \"\"\"\n pass\n\n def _localiseLookdevImport(self, args):\n \"\"\"\n \"\"\"\n ret = cmds.promptDialog(title='Namespace', message='Enter Name:', button=['Ok', 'Cancel'], defaultButton='Ok', cancelButton='Cancel', dismissString='Cancel', text='root')\n if ret == 'Ok':\n namespace = cmds.promptDialog(query=True, text=True)\n if namespace == 'root':\n namespace = ':'\n\n for i in abcToApi.getSelectedAlembicHolder(cls=True):\n i.importLookdev(namespace)\n\n def buildBody(self, nodeName):\n \"\"\"\n Build the body of the attribute editor template according to shotgun context\n \"\"\"\n self.beginLayout(name=\"Cache File\", collapse=False)\n self.callCustom(self._abcWidget, self._abcConnect, \"cacheFileNames\")\n self.addControl(control=\"updateTransforms\", label=\"Auto update transforms\")\n self.addControl(control=\"cacheGeomPath\", label=\"Geometry Path\")\n self.addControl(control=\"cacheSelectionPath\", label=\"Selection Path\")\n self.addControl(control=\"boundingBoxExtendedMode\", label=\"Bounding Box Extended Mode\")\n self.addControl(control=\"timeOffset\", label=\"Time 
Offset\")\n self.addControl(control=\"loadAtInit\", label=\"Load At Init\") \n self.endLayout()\n\n self.beginLayout(name=\"Shaders and Assignments\", collapse=False)\n self.addControl(control=\"shadersAssignation\", label=\"Shaders Assignation\")\n self.addControl(control=\"displacementsAssignation\", label=\"Displacements Assignation\")\n self.addControl(control=\"attributes\", label=\"Attributes\")\n self.addControl(control=\"layersOverride\", label=\"Layers Override\")\n self.addControl(control=\"shadersNamespace\", label=\"Shaders Namespace\")\n self.endLayout() \n\n if get_context().task != None:\n # if we are in a lighting context, create a section for the attrs to live\n if get_context().task['name'].lower() == 'lighting':\n self.beginLayout(name=\"Published Lookdev\", collapse=False)\n self.callCustom(self._jsonWidget, self._jsonConnect, \"jsonFile\")\n self.callCustom(self._shadersWidget, self._shadersConnect, \"abcShaders\")\n self.callCustom(self._localiseLookdevWidget, self._localiseLookdevConnect, \"\")\n self.endLayout()\n\n render_attrs = [\"primaryVisibility\", \"aiSelfShadows\", \"castsShadows\", \"aiReceiveShadows\", \"motionBlur\", \"aiVisibleInDiffuse\", \"aiVisibleInGlossy\", \"visibleInRefractions\", \"visibleInReflections\", \"aiOpaque\", \"aiMatte\", \"overrideGlobalShader\", \"aiTraceSets\", \"aiSssSetname\", \"aiUserOptions\"]\n self.beginLayout(name=\"Render Stats\", collapse=True)\n self.beginNoOptimize()\n for attr in render_attrs:\n self.addControl(attr)\n self.endNoOptimize()\n self.endLayout()\n\n self.suppress(\"blackBox\")\n self.suppress(\"borderConnections\")\n self.suppress(\"isHierarchicalConnection\")\n self.suppress(\"publishedNodeInfo\")\n self.suppress(\"publishedNodeInfo.publishedNode\")\n self.suppress(\"publishedNodeInfo.isHierarchicalNode\")\n self.suppress(\"publishedNodeInfo.publishedNodeType\")\n self.suppress(\"rmbCommand\")\n self.suppress(\"templateName\")\n self.suppress(\"templatePath\")\n self.suppress(\"viewName\")\n self.suppress(\"iconName\")\n self.suppress(\"viewMode\")\n self.suppress(\"templateVersion\")\n self.suppress(\"uiTreatment\")\n self.suppress(\"customTreatment\")\n self.suppress(\"creator\")\n self.suppress(\"creationDate\")\n self.suppress(\"containerType\")\n self.suppress(\"center\")\n self.suppress(\"boundingBoxCenterX\")\n self.suppress(\"boundingBoxCenterY\")\n self.suppress(\"boundingBoxCenterZ\")\n self.suppress(\"matrix\")\n self.suppress(\"inverseMatrix\")\n self.suppress(\"worldMatrix\")\n self.suppress(\"worldInverseMatrix\")\n self.suppress(\"parentMatrix\")\n self.suppress(\"parentInverseMatrix\")\n self.suppress(\"visibility\")\n self.suppress(\"intermediateObject\")\n self.suppress(\"template\")\n self.suppress(\"ghosting\")\n self.suppress(\"instObjGroups\")\n self.suppress(\"instObjGroups.objectGroups\")\n self.suppress(\"instObjGroups.objectGroups.objectGrpCompList\")\n self.suppress(\"instObjGroups.objectGroups.objectGroupId\")\n self.suppress(\"instObjGroups.objectGroups.objectGrpColor\")\n self.suppress(\"renderInfo\")\n self.suppress(\"identification\")\n self.suppress(\"layerRenderable\")\n self.suppress(\"layerOverrideColor\")\n self.suppress(\"renderLayerInfo\")\n self.suppress(\"renderLayerInfo.renderLayerId\")\n self.suppress(\"renderLayerInfo.renderLayerRenderable\")\n self.suppress(\"renderLayerInfo.renderLayerColor\")\n self.suppress(\"ghostingControl\")\n self.suppress(\"ghostCustomSteps\")\n self.suppress(\"ghostPreSteps\")\n self.suppress(\"ghostPostSteps\")\n 
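# the remaining suppress() calls below hide more inherited ghosting, geometry and render attributes that the alembicHolder UI does not expose\n        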
self.suppress(\"ghostStepSize\")\n self.suppress(\"ghostFrames\")\n self.suppress(\"ghostColorPreA\")\n self.suppress(\"ghostColorPre\")\n self.suppress(\"ghostColorPreR\")\n self.suppress(\"ghostColorPreG\")\n self.suppress(\"ghostColorPreB\")\n self.suppress(\"ghostColorPostA\")\n self.suppress(\"ghostColorPost\")\n self.suppress(\"ghostColorPostR\")\n self.suppress(\"ghostColorPostG\")\n self.suppress(\"ghostColorPostB\")\n self.suppress(\"ghostRangeStart\")\n self.suppress(\"ghostRangeEnd\")\n self.suppress(\"ghostDriver\")\n self.suppress(\"renderType\")\n self.suppress(\"renderVolume\")\n self.suppress(\"visibleFraction\")\n self.suppress(\"motionBlur\")\n self.suppress(\"maxVisibilitySamplesOverride\")\n self.suppress(\"maxVisibilitySamples\")\n self.suppress(\"geometryAntialiasingOverride\")\n self.suppress(\"antialiasingLevel\")\n self.suppress(\"shadingSamplesOverride\")\n self.suppress(\"shadingSamples\")\n self.suppress(\"maxShadingSamples\")\n self.suppress(\"volumeSamplesOverride\")\n self.suppress(\"volumeSamples\")\n self.suppress(\"depthJitter\")\n self.suppress(\"ignoreSelfShadowing\")\n self.suppress(\"referenceObject\")\n self.suppress(\"compInstObjGroups\")\n self.suppress(\"compInstObjGroups.compObjectGroups\")\n self.suppress(\"compInstObjGroups.compObjectGroups.compObjectGrpCompList\")\n self.suppress(\"compInstObjGroups.compObjectGroups.compObjectGroupId\")\n self.suppress(\"tweak\")\n self.suppress(\"relativeTweak\")\n self.suppress(\"controlPoints\")\n self.suppress(\"controlPoints.xValue\")\n self.suppress(\"controlPoints.yValue\")\n self.suppress(\"controlPoints.zValue\")\n self.suppress(\"weights\")\n self.suppress(\"tweakLocation\")\n self.suppress(\"blindDataNodes\")\n self.suppress(\"uvPivot\")\n self.suppress(\"uvPivotX\")\n self.suppress(\"uvPivotY\")\n self.suppress(\"uvSet\")\n self.suppress(\"uvSet.uvSetName\")\n self.suppress(\"uvSet.uvSetPoints\")\n self.suppress(\"uvSet.uvSetPoints.uvSetPointsU\")\n self.suppress(\"uvSet.uvSetPoints.uvSetPointsV\")\n self.suppress(\"uvSet.uvSetTweakLocation\")\n self.suppress(\"currentUVSet\")\n self.suppress(\"displayImmediate\")\n self.suppress(\"displayColors\")\n self.suppress(\"displayColorChannel\")\n self.suppress(\"currentColorSet\")\n self.suppress(\"colorSet\")\n self.suppress(\"colorSet.colorName\")\n self.suppress(\"colorSet.clamped\")\n self.suppress(\"colorSet.representation\")\n self.suppress(\"colorSet.colorSetPoints\")\n self.suppress(\"colorSet.colorSetPoints.colorSetPointsR\")\n self.suppress(\"colorSet.colorSetPoints.colorSetPointsG\")\n self.suppress(\"colorSet.colorSetPoints.colorSetPointsB\")\n self.suppress(\"colorSet.colorSetPoints.colorSetPointsA\")\n self.suppress(\"ignoreHwShader\")\n self.suppress(\"doubleSided\")\n self.suppress(\"opposite\")\n self.suppress(\"smoothShading\")\n self.suppress(\"boundingBoxScale\")\n self.suppress(\"boundingBoxScaleX\")\n self.suppress(\"boundingBoxScaleY\")\n self.suppress(\"boundingBoxScaleZ\")\n self.suppress(\"featureDisplacement\")\n self.suppress(\"initialSampleRate\")\n self.suppress(\"extraSampleRate\")\n self.suppress(\"textureThreshold\")\n self.suppress(\"normalThreshold\")\n self.suppress(\"displayHWEnvironment\")\n self.suppress(\"collisionOffsetVelocityIncrement\")\n self.suppress(\"collisionOffsetVelocityIncrement.collisionOffsetVelocityIncrement_Position\")\n self.suppress(\"collisionOffsetVelocityIncrement.collisionOffsetVelocityIncrement_FloatValue\")\n 
self.suppress(\"collisionOffsetVelocityIncrement.collisionOffsetVelocityIncrement_Interp\")\n self.suppress(\"collisionDepthVelocityIncrement\")\n self.suppress(\"collisionDepthVelocityIncrement.collisionDepthVelocityIncrement_Position\")\n self.suppress(\"collisionDepthVelocityIncrement.collisionDepthVelocityIncrement_FloatValue\")\n self.suppress(\"collisionDepthVelocityIncrement.collisionDepthVelocityIncrement_Interp\")\n self.suppress(\"collisionOffsetVelocityMultiplier\")\n self.suppress(\"collisionOffsetVelocityMultiplier.collisionOffsetVelocityMultiplier_Position\")\n self.suppress(\"collisionOffsetVelocityMultiplier.collisionOffsetVelocityMultiplier_FloatValue\")\n self.suppress(\"collisionOffsetVelocityMultiplier.collisionOffsetVelocityMultiplier_Interp\")\n self.suppress(\"collisionDepthVelocityMultiplier\")\n self.suppress(\"collisionDepthVelocityMultiplier.collisionDepthVelocityMultiplier_Position\")\n self.suppress(\"collisionDepthVelocityMultiplier.collisionDepthVelocityMultiplier_FloatValue\")\n self.suppress(\"collisionDepthVelocityMultiplier.collisionDepthVelocityMultiplier_Interp\")\n self.suppress(\"time\")\n self.suppress(\"shaders\")\n\n self.addExtraControls()\n","sub_path":"maya/scripts/AEalembicHolderTemplate.py","file_name":"AEalembicHolderTemplate.py","file_ext":"py","file_size_in_byte":17623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"543378694","text":"import matplotlib.pyplot as plt\nfrom os import getcwd\nfrom os.path import join\nfrom sklearn.decomposition import PCA\n\n\ndef plot_k_means_fit(kmeans, X, filename=\"\"):\n # to get rid of color warnings\n from matplotlib.axes._axes import _log as matplotlib_axes_logger\n matplotlib_axes_logger.setLevel('ERROR')\n\n # reduce dimensionality to 2 to plot multidimensional data\n pca = PCA(n_components=2)\n centroids = pca.fit_transform(kmeans.centroids)\n cluster_assignments = kmeans.predict(X)\n X = pca.fit_transform(X)\n\n colormap = plt.cm.get_cmap(\"hsv\", centroids.shape[0] + 1)\n for i, point in enumerate(X):\n x, y = point[0], point[1]\n color = colormap(cluster_assignments[i])\n plt.scatter(x, y, c=color)\n\n for centroid in centroids:\n plt.scatter(centroid[0], centroid[1],\n c=\"black\", marker=\"*\")\n\n plt.title(\"K-means clustering\")\n\n if filename:\n filepath = join(getcwd(), \"task1\", filename)\n plt.savefig(filepath)\n else:\n plt.show()\n\n plt.clf()\n","sub_path":"lab2_clustering/task1/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"480098252","text":"\"\"\"\nProvide an interface to enable:\n\n1) Download and installation of pretrained models\n2) Check if models are installed\n3) If all systems go, have user input source sound for voice, text to be spoken, and it should create and return a sound\n -Do we need to save it as a wav, or can we just return the raw audio data to be processed?\n -Do we want to provide an auto-download-and-installation of the libri-speech dataset, and enable users to just select from those?\n -We could also provide the input of a folder to just select an audio from\n\nWhen first loaded, this module should perform pre-flight checks, and report whether or not its going to fail out.\n\"\"\"\nfrom pathlib import Path\n\nimport librosa\nimport numpy as np\nimport soundfile as sf\nimport torch\nfrom audioread.exceptions import NoBackendError\n\nfrom rtvc.download_install import 
*\nfrom rtvc.encoder import inference as encoder\nfrom rtvc.encoder.params_model import model_embedding_size as speaker_embedding_size\nfrom rtvc.synthesizer.inference import Synthesizer\nfrom rtvc.utils.argutils import print_args\nfrom rtvc.utils.modelutils import check_model_paths\nfrom rtvc.vocoder import inference as vocoder\n\nUSE_CPU = False\nSUPPORT_MP3 = True\npath = os.path.abspath(__file__)\ndir_path = os.path.dirname(path)\nDEFAULT_ENCODER_PATH = dir_path + \"/encoder/saved_models/pretrained.pt\"\nDEFAULT_VOCODER_PATH = dir_path + \"/vocoder/saved_models/pretrained/pretrained.pt\"\nDEFAULT_SYNTHESIZER_PATH = dir_path + \"/synthesizer/saved_models/pretrained/pretrained.pt\"\n\n\n# we just want to return the audio -- not necessarily play it. No need to check if audio devices exist!\ndef preFlightChecks(download_models=True, using_cpu=USE_CPU, mp3support=SUPPORT_MP3, encoderpath=DEFAULT_ENCODER_PATH,\n                    synthpath=DEFAULT_SYNTHESIZER_PATH,\n                    vocoderpath=DEFAULT_VOCODER_PATH):\n    global DEFAULT_SYNTHESIZER_PATH\n    global DEFAULT_ENCODER_PATH\n    global DEFAULT_VOCODER_PATH\n    global USE_CPU\n    global SUPPORT_MP3\n    global path\n    global dir_path\n\n    # check to see if the default exists. If it doesn't, prompt for it, then update it so that the module can work later on without error.\n    # checkpaths = check_local_model_paths(encoderpath, synthpath, vocoderpath)\n    locationdict = {\"encoder\": encoderpath, \"synthesizer\": synthpath, \"vocoder\": vocoderpath}\n    allfound = False\n    # this will guarantee proper configuration!\n    while not allfound:\n        encoderpath = locationdict[\"encoder\"]\n        synthpath = locationdict['synthesizer']\n        vocoderpath = locationdict['vocoder']\n        DEFAULT_ENCODER_PATH = encoderpath\n        DEFAULT_SYNTHESIZER_PATH = synthpath\n        DEFAULT_VOCODER_PATH = vocoderpath\n        checkpaths = check_local_model_paths(encoderpath, synthpath, vocoderpath)\n\n        allfound = True\n        for item in checkpaths:\n            if not checkpaths[item]:\n                allfound = False\n                if download_models:\n                    # here we will need to have defaultInstall() return a dictionary of where it installed the models,\n                    # and then have that get updated to the appropriate defaults. return it like locationdict.\n                    locationdict = defaultInstall()\n                    break\n                else:\n                    locationdict[item] = input(\"Please enter the full path to the \" + str(item) + \" model >\")\n\n    # first we need to check for if the GPU is available...\n    if mp3support:\n        try:\n            # print(\"Debug: Loading Librosa...\")\n            # so this isn't working hardcoded. It cannot find the file. But I want this part of the module -- lets find out\n            # how to locally reference things\n            librosa.load(dir_path + \"/samples/1320_00000.mp3\")\n        except NoBackendError:\n            print(\"Librosa will be unable to open mp3 files if additional software is not installed.\\n\"\n                  \"Please install ffmpeg and restart the program, or continue with no MP3 support.\")\n            SUPPORT_MP3 = False\n\n    # print(\"debug: checking if cuda is available\")\n    if torch.cuda.is_available():\n        device_id = torch.cuda.current_device()\n        gpu_properties = torch.cuda.get_device_properties(device_id)\n        ## Print some environment information (for debugging purposes)\n        # print(\"Found %d GPUs available. 
Using GPU %d (%s) of compute capability %d.%d with \"\n # \"%.1fGb total memory.\\n\" %\n # (torch.cuda.device_count(),\n # device_id,\n # gpu_properties.name,\n # gpu_properties.major,\n # gpu_properties.minor,\n # gpu_properties.total_memory / 1e9))\n else:\n USE_CPU = True\n try:\n # print(\"debug: checking model paths...\")\n ## Remind the user to download pretrained models if needed\n # check_model_paths(encoder_path=encoderpath,\n # synthesizer_path=synthpath,\n # vocoder_path=vocoderpath)\n modelcheckdict = check_local_model_paths(encoderpath, synthpath, vocoderpath)\n # check if the models exist and are installed. If not, prompt the user if they would like auto installation TODO.\n # No. This should all be imported, which means it should only prompt if the input values are outside of default.\n # Otherwise, raise an error that ends the test saying to install the models and specify the paths before trying again\n if not (modelcheckdict['encoder'] and modelcheckdict['synthesizer'] and modelcheckdict['vocoder']):\n raise Exception(\"Could not locate models specified. Found Models: \" + str(modelcheckdict))\n # that should hold for now until I master the auto installer. I'm putting too many features in the first run!\n ## Load the models one by one.\n encoderpath = Path(encoderpath)\n synthpath = Path(synthpath)\n vocoderpath = Path(vocoderpath)\n encoder.load_model(encoderpath)\n synthesizer = Synthesizer(synthpath)\n vocoder.load_model(vocoderpath)\n # print(\"debug: running a test\")\n ## Run a test\n # Forward an audio waveform of zeroes that lasts 1 second. Notice how we can get the encoder's\n # sampling rate, which may differ.\n # If you're unfamiliar with digital audio, know that it is encoded as an array of floats\n # (or sometimes integers, but mostly floats in this projects) ranging from -1 to 1.\n # The sampling rate is the number of values (samples) recorded per second, it is set to\n # 16000 for the encoder. Creating an array of length will always correspond\n # to an audio of 1 second.\n\n encoder.embed_utterance(np.zeros(encoder.sampling_rate))\n\n # Create a dummy embedding. You would normally use the embedding that encoder.embed_utterance\n # returns, but here we're going to make one ourselves just for the sake of showing that it's\n # possible.\n embed = np.random.rand(speaker_embedding_size)\n # Embeddings are L2-normalized (this isn't important here, but if you want to make your own\n # embeddings it will be).\n embed /= np.linalg.norm(embed)\n # The synthesizer can handle multiple inputs with batching. Let's create another embedding to\n # illustrate that\n embeds = [embed, np.zeros(speaker_embedding_size)]\n texts = [\"test 1\", \"test 2\"]\n # print(\"\\tTesting the synthesizer... (loading the model will output a lot of text)\")\n mels = synthesizer.synthesize_spectrograms(texts, embeds)\n\n # The vocoder synthesizes one waveform at a time, but it's more efficient for long ones. We\n # can concatenate the mel spectrograms to a single one.\n mel = np.concatenate(mels, axis=1)\n # The vocoder can take a callback function to display the generation. More on that later. For\n # now we'll simply hide it like this:\n no_action = lambda *args: None\n # print(\"\\tTesting the vocoder...\")\n # For the sake of making this test short, we'll pass a short target length. The target length\n # is the length of the wav segments that are processed in parallel. E.g. 
for audio sampled\n # at 16000 Hertz, a target length of 8000 means that the target audio will be cut in chunks of\n # 0.5 seconds which will all be generated together. The parameters here are absurdly short, and\n # that has a detrimental effect on the quality of the audio. The default parameters are\n # recommended in general.\n vocoder.infer_waveform(mel, target=200, overlap=50, progress_callback=no_action)\n\n return 1\n except Exception as e:\n raise Exception(str(e))\n\n\ndef check_local_model_paths(encpath, synthpath, vocpath):\n encfound = False\n synthfound = False\n vocfound = False\n if (os.path.exists(encpath)):\n encfound = True\n if os.path.exists(synthpath):\n synthfound = True\n if os.path.exists(vocpath):\n vocfound = True\n\n return {\"encoder\": encfound, \"synthesizer\": synthfound, \"vocoder\": vocfound}\n\n\n# Okay. Now to have the class\n# we need a forced text to vocode, an optional modelpath for encoder/vocoder/synthesizer, and a forced path to voice to model from.\n\nclass voiceclone:\n\n def __init__(self, inputtext=None, encoderpath=None, vocoderpath=None,\n synthesizerpath=None, voiceactor=None, savepath=None):\n # This looks ugly. It feels wrong. But it works! I'm hoping someone else sees this and has inspiration to\n # make it work as intended while not looking awful.\n if encoderpath is None:\n encoderpath = DEFAULT_ENCODER_PATH\n if vocoderpath is None:\n vocoderpath = DEFAULT_VOCODER_PATH\n if synthesizerpath is None:\n synthesizerpath = DEFAULT_SYNTHESIZER_PATH\n\n if inputtext is None:\n raise Exception(\"You must specify text to be vocoded into audio. \")\n if voiceactor is None:\n raise Exception(\"You must specify an input voice sample to synthesize from. \")\n\n modelcheckdict = check_local_model_paths(encoderpath, synthesizerpath, vocoderpath)\n if not (modelcheckdict['encoder'] and modelcheckdict['synthesizer'] and modelcheckdict['vocoder']):\n raise Exception(\"Could not locate models specified. Found Models: \" + str(modelcheckdict))\n encoderpath = Path(encoderpath)\n synthesizerpath = Path(synthesizerpath)\n vocoderpath = Path(vocoderpath)\n encoder.load_model(encoderpath)\n synthesizer = Synthesizer(synthesizerpath)\n vocoder.load_model(vocoderpath)\n # finally we can actually give some meat to this thing...\n in_fpath = Path(voiceactor) # Path(input(voiceactor).replace(\"\\\"\", \"\").replace(\"\\'\", \"\"))\n\n if in_fpath.suffix.lower() == \".mp3\" and not SUPPORT_MP3:\n raise Exception(\n \"Your current installation does not support .mp3 files. 
Please specify another format and try again.\")\n        # preprocessed_wav = encoder.preprocess_wav(in_fpath)\n        original_wav, sampling_rate = librosa.load(str(in_fpath))\n        preprocessed_wav = encoder.preprocess_wav(original_wav, sampling_rate)\n        embed = encoder.embed_utterance(preprocessed_wav)\n        # The synthesizer works in batch, so you need to put your data in a list or numpy array\n        texts = [inputtext]\n        embeds = [embed]\n        # If you know what the attention layer alignments are, you can retrieve them here by\n        # passing return_alignments=True\n        specs = synthesizer.synthesize_spectrograms(texts, embeds)\n        spec = specs[0]\n        generated_wav = vocoder.infer_waveform(spec)\n        generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode=\"constant\")\n\n        # Trim excess silences to compensate for gaps in spectrograms (issue #53)\n        generated_wav = encoder.preprocess_wav(generated_wav)\n        if savepath is not None:\n            sf.write(savepath, generated_wav.astype(np.float32), synthesizer.sample_rate)\n        # Save it on the disk\n        # filename = \"demo_output_%02d.wav\" % num_generated\n        # print(generated_wav.dtype)\n        # sf.write(filename, generated_wav.astype(np.float32), synthesizer.sample_rate)\n        # print(\"\\nSaved output as %s\\n\\n\" % filename)\n\n# preFlightChecks()\n","sub_path":"rtvc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"307588479","text":"import tensorflow as tf \n\n#Defining variables is necessary because they hold the parameters. Without having parameters, training, updating, saving, restoring and any\n#other operations cannot be performed. The defined variables in TensorFlow are just tensors with certain\n#shapes and types. The tensors must be initialized with values to become valid. \n\nfrom tensorflow.python.framework import ops\n\n#create three variables with three default values\nweights = tf.Variable(tf.random_normal([2, 3], stddev=0.1), name=\"weights\")\nbiases = tf.Variable(tf.zeros([3]), name=\"biases\")\ncustom_variable = tf.Variable(tf.zeros([3]), name=\"custom\")\n\n#get all the variables' tensors and store them in a list\nall_variables_list = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)\n# ops.get_collection gets the list of all defined variables from the defined graph. The name \"key\"\n#defines a specific name for each variable on the graph\n\n#2) Initialization \n#Initializers of the variables must be run before all other operations in the model. For an analogy,\n#we can consider the starter of the car. Instead of running an initializer, variables can be restored too from a saved\n#model such as a checkpoint file. Variables can be initialized globally, specifically, or from other variables. 
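\n#As a quick illustrative sketch (an added note, not part of the original tutorial):\n#evaluating a variable before its initializer has run fails, e.g.\n#   with tf.Session() as sess:\n#       sess.run(weights)        # FailedPreconditionError: attempting to use an uninitialized value\n#       sess.run(tf.variables_initializer([weights]))\n#       sess.run(weights)        # ok: returns the initialized 2x3 matrix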
\n\n#Initializing specific Variables\n#variable_list_custom is the list of variables that we want to initialize\nvariable_list_custom = [weights, custom_variable]\n\n#the initializer\ninit_custom_op = tf.variables_initializer(var_list=variable_list_custom)\n\n#global variable initialization -- all variables can be initialized at once using the following command.\n#the op must be run after the model is constructed.\ninit_all_op = tf.global_variables_initializer()\n\n#method 2\ninit_all_op = tf.variables_initializer(var_list=all_variables_list)\n\n#initialization of a variable using other existing variables\nWeightsNew = tf.Variable(weights.initialized_value(), name=\"WeightsNew\")\n\n#now the variable must be initialized\ninit_WeightsNew_op = tf.variables_initializer(var_list=[WeightsNew])\n\nwith tf.Session() as sess:\n\tsess.run(init_all_op)\n\tsess.run(init_custom_op)\n\tsess.run(init_WeightsNew_op)\n\n\n\n","sub_path":"docs/tutorials/1-basics/variables/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"464464718","text":"import openmdao.api as om\nfrom wisdem.floatingse.column import Column, ColumnGeometry\nfrom wisdem.floatingse.substructure import Substructure, SubstructureGeometry\nfrom wisdem.floatingse.loading import Loading\nfrom wisdem.floatingse.map_mooring import MapMooring\nfrom wisdem.towerse.tower import TowerLeanSE\nimport numpy as np\n\nfrom wisdem.commonse.vertical_cylinder import get_nfull\n\n\nclass FloatingSE(om.Group):\n\n    def initialize(self):\n        self.options.declare('modeling_options')\n\n    def setup(self):\n        opt = self.options['modeling_options']['platform']\n        n_mat = self.options['modeling_options']['materials']['n_mat']\n        n_height_main = opt['columns']['main']['n_height']\n        n_height_off = opt['columns']['offset']['n_height']\n        n_height_tow = self.options['modeling_options']['tower']['n_height']\n\n        self.set_input_defaults('mooring_type', 'chain')\n        self.set_input_defaults('anchor_type', 'SUCTIONPILE')\n        self.set_input_defaults('loading', 'hydrostatic')\n        self.set_input_defaults('wave_period_range_low', 2.0, units='s')\n        self.set_input_defaults('wave_period_range_high', 20.0, units='s')\n        self.set_input_defaults('cd_usr', -1.0)\n        self.set_input_defaults('zref', 100.0)\n        self.set_input_defaults('number_of_offset_columns', 0)\n        self.set_input_defaults('material_names', ['steel'])\n        \n        self.add_subsystem('tow', TowerLeanSE(modeling_options=self.options['modeling_options']),\n                           promotes=['tower_s','tower_height','tower_outer_diameter_in','tower_layer_thickness','tower_outfitting_factor',\n                                     'max_taper','min_d_to_t','rna_mass','rna_cg','rna_I',\n                                     'tower_mass','tower_I_base','hub_height','material_names',\n                                     'labor_cost_rate','painting_cost_rate','unit_cost_mat','rho_mat','E_mat','G_mat','sigma_y_mat',\n                                     ('transition_piece_height', 'main_freeboard'), ('foundation_height', 'main_freeboard')])\n        \n        # Next do main and ballast columns\n        # Ballast columns are replicated from same design in the components\n        column_promotes = ['E_mat','G_mat','sigma_y_mat','rho_air','mu_air','rho_water','mu_water','rho_mat',\n                           'shearExp','yaw','Uc','water_depth',\n                           'hsig_wave','Tsig_wave','cd_usr','cm','loading','beta_wind','beta_wave',\n                           'max_draft','max_taper','min_d_to_t','material_names',\n                           'permanent_ballast_density','outfitting_factor','ballast_cost_rate',\n                           'unit_cost_mat','labor_cost_rate','painting_cost_rate','outfitting_cost_rate',\n                           'wind_reference_speed', 
'wind_reference_height', 'wind_z0']\n main_column_promotes = column_promotes.copy()\n main_column_promotes.append(('freeboard', 'main_freeboard'))\n \n self.add_subsystem('main', Column(modeling_options=opt, column_options=opt['columns']['main'], n_mat=n_mat),\n promotes=main_column_promotes)\n \n off_column_promotes = column_promotes.copy()\n off_column_promotes.append(('freeboard', 'off_freeboard'))\n\n self.add_subsystem('off', Column(modeling_options=opt, column_options=opt['columns']['offset'], n_mat=n_mat),\n promotes=off_column_promotes)\n\n # Run Semi Geometry for interfaces\n self.add_subsystem('sg', SubstructureGeometry(n_height_main=n_height_main,\n n_height_off=n_height_off), promotes=['*'])\n\n # Next run MapMooring\n self.add_subsystem('mm', MapMooring(modeling_options=opt), promotes=['*'])\n \n # Add in the connecting truss\n self.add_subsystem('load', Loading(n_height_main=n_height_main,\n n_height_off=n_height_off,\n n_height_tow=n_height_tow,\n modeling_options=opt), promotes=['*'])\n\n # Run main Semi analysis\n self.add_subsystem('subs', Substructure(n_height_main=n_height_main,\n n_height_off=n_height_off,\n n_height_tow=n_height_tow), promotes=['*'])\n \n # Connect all input variables from all models\n self.connect('tow.d_full', ['windLoads.d','tower_d_full'])\n self.connect('tow.d_full', 'tower_d_base', src_indices=[0])\n self.connect('tow.t_full', 'tower_t_full')\n self.connect('tow.z_full', ['loadingWind.z','windLoads.z','tower_z_full']) # includes tower_z_full\n self.connect('tow.E_full', 'tower_E_full')\n self.connect('tow.G_full', 'tower_G_full')\n self.connect('tow.rho_full', 'tower_rho_full')\n self.connect('tow.sigma_y_full', 'tower_sigma_y_full')\n self.connect('tow.cm.mass','tower_mass_section')\n self.connect('tow.turbine_mass','main.stack_mass_in')\n self.connect('tow.tower_center_of_mass','tower_center_of_mass')\n self.connect('tow.tower_raw_cost','tower_shell_cost')\n \n self.connect('main.z_full', ['main_z_nodes', 'main_z_full'])\n self.connect('main.d_full', 'main_d_full')\n self.connect('main.t_full', 'main_t_full')\n self.connect('main.E_full', 'main_E_full')\n self.connect('main.G_full', 'main_G_full')\n self.connect('main.rho_full', 'main_rho_full')\n self.connect('main.sigma_y_full', 'main_sigma_y_full')\n\n self.connect('off.z_full', ['offset_z_nodes', 'offset_z_full'])\n self.connect('off.d_full', 'offset_d_full')\n self.connect('off.t_full', 'offset_t_full')\n self.connect('off.E_full', 'offset_E_full')\n self.connect('off.G_full', 'offset_G_full')\n self.connect('off.rho_full', 'offset_rho_full')\n self.connect('off.sigma_y_full', 'offset_sigma_y_full')\n\n self.connect('max_offset_restoring_force', 'mooring_surge_restoring_force')\n self.connect('operational_heel_restoring_force', 'mooring_pitch_restoring_force')\n \n self.connect('main.z_center_of_mass', 'main_center_of_mass')\n self.connect('main.z_center_of_buoyancy', 'main_center_of_buoyancy')\n self.connect('main.I_column', 'main_moments_of_inertia')\n self.connect('main.Iwater', 'main_Iwaterplane')\n self.connect('main.Awater', 'main_Awaterplane')\n self.connect('main.displaced_volume', 'main_displaced_volume')\n self.connect('main.hydrostatic_force', 'main_hydrostatic_force')\n self.connect('main.column_added_mass', 'main_added_mass')\n self.connect('main.column_total_mass', 'main_mass')\n self.connect('main.column_total_cost', 'main_cost')\n self.connect('main.variable_ballast_interp_zpts', 'water_ballast_zpts_vector')\n self.connect('main.variable_ballast_interp_radius', 
'water_ballast_radius_vector')\n self.connect('main.Px', 'main_Px')\n self.connect('main.Py', 'main_Py')\n self.connect('main.Pz', 'main_Pz')\n self.connect('main.qdyn', 'main_qdyn')\n\n self.connect('off.z_center_of_mass', 'offset_center_of_mass')\n self.connect('off.z_center_of_buoyancy', 'offset_center_of_buoyancy')\n self.connect('off.I_column', 'offset_moments_of_inertia')\n self.connect('off.Iwater', 'offset_Iwaterplane')\n self.connect('off.Awater', 'offset_Awaterplane')\n self.connect('off.displaced_volume', 'offset_displaced_volume')\n self.connect('off.hydrostatic_force', 'offset_hydrostatic_force')\n self.connect('off.column_added_mass', 'offset_added_mass')\n self.connect('off.column_total_mass', 'offset_mass')\n self.connect('off.column_total_cost', 'offset_cost')\n self.connect('off.Px', 'offset_Px')\n self.connect('off.Py', 'offset_Py')\n self.connect('off.Pz', 'offset_Pz')\n self.connect('off.qdyn', 'offset_qdyn')\n self.connect('off.draft', 'offset_draft')\n\n\n\n\ndef commonVars(prob, nsection):\n # Variables common to both examples\n\n # Set environment to that used in OC4 testing campaign\n prob['shearExp'] = 0.11 # Shear exponent in wind power law\n prob['cm'] = 2.0 # Added mass coefficient\n prob['Uc'] = 0.0 # Mean current speed\n prob['wind_z0'] = 0.0 # Water line\n prob['yaw'] = 0.0 # Turbine yaw angle\n prob['beta_wind'] = prob['beta_wave'] = 0.0 # Wind/water beta angle\n prob['cd_usr'] = -1.0 # Compute drag coefficient\n\n # Wind and water properties\n prob['rho_air'] = 1.226 # Density of air [kg/m^3]\n prob['mu_air'] = 1.78e-5 # Viscosity of air [kg/m/s]\n prob['rho_water'] = 1025.0 # Density of water [kg/m^3]\n prob['mu_water'] = 1.08e-3 # Viscosity of water [kg/m/s]\n \n # Material properties\n prob['rho_mat'] = np.array([7850.0]) # Steel [kg/m^3]\n prob['E_mat'] = 200e9*np.ones((1,3)) # Young's modulus [N/m^2]\n prob['G_mat'] = 79.3e9*np.ones((1,3)) # Shear modulus [N/m^2]\n prob['sigma_y_mat'] = np.array([3.45e8]) # Elastic yield stress [N/m^2]\n prob['permanent_ballast_density'] = 4492.0 # [kg/m^3]\n\n # Mass and cost scaling factors\n prob['outfitting_factor'] = 0.06 # Fraction of additional outfitting mass for each column\n prob['ballast_cost_rate'] = 0.1 # Cost factor for ballast mass [$/kg]\n prob['unit_cost_mat'] = np.array([1.1]) # Cost factor for column mass [$/kg]\n prob['labor_cost_rate'] = 1.0 # Cost factor for labor time [$/min]\n prob['painting_cost_rate'] = 14.4 # Cost factor for column surface finishing [$/m^2]\n prob['outfitting_cost_rate'] = 1.5*1.1 # Cost factor for outfitting mass [$/kg]\n prob['mooring_cost_factor'] = 1.1 # Cost factor for mooring mass [$/kg]\n \n # Mooring parameters\n prob['number_of_mooring_connections'] = 3 # Evenly spaced around structure\n prob['mooring_lines_per_connection'] = 1 # Evenly spaced around structure\n prob['mooring_type'] = 'chain' # Options are chain, nylon, polyester, fiber, or iwrc\n prob['anchor_type'] = 'DRAGEMBEDMENT' # Options are SUCTIONPILE or DRAGEMBEDMENT\n \n # Porperties of turbine tower\n nTower = prob.model.options['modeling_options']['tower']['n_height']-1\n prob['tower_height'] = prob['hub_height'] = 77.6 # Length from tower main to top (not including freeboard) [m]\n prob['tower_s'] = np.linspace(0.0, 1.0, nTower+1)\n prob['tower_outer_diameter_in'] = np.linspace(6.5, 3.87, nTower+1) # Diameter at each tower section node (linear lofting between) [m]\n prob['tower_layer_thickness'] = np.linspace(0.027, 0.019, nTower).reshape((1,nTower)) # Diameter at each tower section node (linear 
lofting between) [m]\n prob['tower_outfitting_factor'] = 1.07 # Scaling for unaccounted tower mass in outfitting\n\n # Materials\n prob['material_names'] = ['steel']\n prob['main.layer_materials'] = prob['off.layer_materials'] = prob['tow.tower_layer_materials'] = ['steel']\n \n # Properties of rotor-nacelle-assembly (RNA)\n prob['rna_mass'] = 350e3 # Mass [kg]\n prob['rna_I'] = 1e5*np.array([1149.307, 220.354, 187.597, 0, 5.037, 0]) # Moment of intertia (xx,yy,zz,xy,xz,yz) [kg/m^2]\n prob['rna_cg'] = np.array([-1.132, 0, 0.509]) # Offset of RNA center of mass from tower top (x,y,z) [m]\n # Max thrust\n prob['rna_force'] = np.array([1284744.196, 0, -112400.5527]) # Net force acting on RNA (x,y,z) [N]\n prob['rna_moment'] = np.array([3963732.762, 896380.8464, -346781.682]) # Net moment acting on RNA (x,y,z) [N*m]\n # Max wind speed\n #prob['rna_force'] = np.array([188038.8045, 0, -16451.2637]) # Net force acting on RNA (x,y,z) [N]\n #prob['rna_moment'] = np.array([0.0, 131196.8431, 0.0]) # Net moment acting on RNA (x,y,z) [N*m]\n \n # Mooring constraints\n prob['max_draft'] = 150.0 # Max surge/sway offset [m] \n prob['max_offset'] = 100.0 # Max surge/sway offset [m] \n prob['operational_heel'] = 10.0 # Max heel (pitching) angle [deg]\n\n # Design constraints\n prob['max_taper'] = 0.2 # For manufacturability of rolling steel\n prob['min_d_to_t'] = 120.0 # For weld-ability\n prob['connection_ratio_max'] = 0.25 # For welding pontoons to columns\n\n # API 2U flag\n prob['loading'] = 'hydrostatic'\n \n return prob\n\n","sub_path":"WISDEM/wisdem/floatingse/floating.py","file_name":"floating.py","file_ext":"py","file_size_in_byte":12749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"532849438","text":"import sys\n\nif __name__==\"__main__\":\n progressions=[]\n while(1):\n a1, a2, a3 =map(int, sys.stdin.readline().split())\n if(a1 == 0 and a2 == 0 and a3==0):\n break\n\n progressions.append( (a1, a2 , a3) )\n\n # print(progressions)\n\n for progression in progressions:\n a1, a2,a3=progression\n\n if( 2*a2 == a1+a3 ):\n print('AP',a3+(a2-a1))\n\n if( a2**2 == a1*a3 ):\n print('GP', a3*(a2//a1))","sub_path":"Algorithm/python/algorithmjobs/review/L053_02nextnum.py","file_name":"L053_02nextnum.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"504967158","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nimport pandas as pd\n\nfrom lib.common.stock_common import getQuote,getToday\nfrom lib.common.Services import services\n\nclass DataWriter():\n def __init__(self):\n self.dbhandler = services.get('dbhandler')\n\n\n def updateCodeToDB(self,codes,verbose = False):\n for key,a_item in codes.iterItems():\n if verbose == True:\n print(key,a_item)\n sql = self.generateCodeItemSQL(a_item)\n self.dbhandler.execSql(sql)\n\n def safe_unicode(self,obj, *args):\n \"\"\" return the unicode representation of obj \"\"\"\n try:\n return unicode(obj, *args)\n except UnicodeDecodeError:\n # obj is byte string\n ascii_text = str(obj).encode('string_escape')\n return unicode(ascii_text)\n\n def safe_str(self,obj):\n \"\"\" return the byte string representation of obj \"\"\"\n try:\n return str(obj)\n except UnicodeEncodeError:\n # obj is uncode\n return unicode(obj).encode('unicode_escape')\n\n\n def generateCodeItemSQL(self,code_item,verbose=False):\n \"\"\"\n other db\n sql = \"insert into codes set \"\n sql += \"last_update=\" + 
getQuote(getToday())\n sql += \",code=\" + getQuote(code_item.code)\n sql += \",full_code=\" + getQuote(code_item.full_code)\n sql += \",company=\" + getQuote(code_item.company)\n sql += \",market_type=\" + str(convertMarketType(code_item.market_type))\n sql += \" ON DUPLICATE KEY UPDATE \"\n sql += \"last_update=\" + getQuote(getToday())\n sql += \",code=\" + getQuote(code_item.code)\n sql += \",full_code=\" + getQuote(code_item.full_code)\n sql += \",company=\" + getQuote(code_item.company)\n sql += \",market_type=\" + str(convertMarketType(code_item.market_type))\n \"\"\"\n sql = \"insert ignore into codes(Last_update,Code,Full_code,\"\n sql += \"Market_type,company) values(\"\n sql += getQuote(getToday()) + \",\"\n sql += getQuote(code_item.code) + \",\"\n sql += getQuote(code_item.full_code) + \",\"\n sql += code_item.market_type + \",\"\n sql += '\\'' + code_item.company + '\\'' + \")\"\n sql_utf8 = sql#.encode('utf8')\n if verbose == True:\n \"\"\"\n code_item.dump()\n \"\"\"\n print(sql_utf8)\n return sql_utf8\n\n\n def updatePriceToDB(self,code,df):\n for row_index in range(df.shape[0]):\n sql = self.generatePriceItemSQL(code,df,row_index)\n self.dbhandler.execSql(sql)\n\n\n def generatePriceItemSQL(self,code,df,row_index, verbose=False):\n sql = \"insert into prices set \"\n sql += \"last_update='%s'\" %( getToday() )\n sql += \",code='%s'\" % (code)\n sql += \",price_date='%s'\" % (pd.to_datetime(df.loc[row_index,'Date']).isoformat())\n sql += \",price_open=%s\" % (df.loc[row_index,'Open'])\n sql += \",price_high=%s\" % (df.loc[row_index,'High'])\n sql += \",price_low=%s\" % (df.loc[row_index,'Low'])\n sql += \",price_close=%s\" % (df.loc[row_index,'Close'])\n sql += \",price_adj_close=%s\" % (df.loc[row_index,'Adj Close'])\n sql += \",volume=%s\" % (df.loc[row_index,'Volume'])\n\n sql += \" ON DUPLICATE KEY UPDATE \"\n\n sql += \"last_update='%s'\" %( getToday() )\n sql += \",code='%s'\" % (code)\n sql += \",price_date='%s'\" % (pd.to_datetime(df.loc[row_index,'Date']).isoformat())\n sql += \",price_open=%s\" % (df.loc[row_index,'Open'])\n sql += \",price_high=%s\" % (df.loc[row_index,'High'])\n sql += \",price_low=%s\" % (df.loc[row_index,'Low'])\n sql += \",price_close=%s\" % (df.loc[row_index,'Close'])\n sql += \",price_adj_close=%s\" % (df.loc[row_index,'Adj Close'])\n sql += \",volume=%s\" % (df.loc[row_index,'Volume'])\n\n if verbose == True:\n print(sql.encode('utf8'))\n\n return sql.encode('utf8') \n","sub_path":"lib/db/data_writer.py","file_name":"data_writer.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"74043716","text":"'''\n making GUI for the project.\n #template, from Code Clinic: python\n user choose the range, several entry for day, month, years,\n type of calculation\n a submit botton\n a display frame\n'''\nfrom tkinter import *\nfrom tkinter import ttk,messagebox\nfrom statistics import mean, median\nfrom datetime import date\nimport DB\n\n\nclass APP:\n def __init__(self, master):\n self.master = master\n self._createGUI()\n self.database = DB.DB()\n self.master.protocol(\"WM_DELETE_WINDOW\", self.safe_close)\n\n def _createGUI(self):\n bgcolor = '#CCCCFF'\n self.master.configure(background=bgcolor)\n self.master.title('Lake Pend Oreille')\n self.master.resizable(False,False)\n self.style = ttk.Style()\n self.style.configure('TFrame', background = bgcolor)\n self.style.configure('TButton', background = bgcolor, font = ('Arial Black', 10))\n 
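# the label styles below reuse the shared background colour and only vary the font\n        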
self.style.configure('TLabel', background = bgcolor, font = ('Arial Black', 10))\n        self.style.configure('Status.TLabel', background = bgcolor, font = ('Arial', 10))\n        self.style.configure('Result.TLabel', background = bgcolor, font = ('Courier', 10))\n\n        # header GUI\n        self.topframe = ttk.Frame(self.master)\n        self.topframe.pack()\n        self.headerLabel = ttk.Label(self.topframe)\n        self.headerLabel.pack()\n        self.logo = PhotoImage(file='/Users/xingsonglin/Desktop/GitHub/python-Project/code Clinic/Project 1: Statical Analysis/lpo_logo.gif')\n        ttk.Label(self.topframe, image = self.logo).pack()\n\n        #input GUI\n        self.inputframe = ttk.Frame(self.master)\n        self.inputframe.pack(side = TOP)\n\n        ttk.Label(self.inputframe, text = \"Start Date:\").grid(row=0, column =1,\n                                                              columnspan=3, sticky = 'sw')\n        ttk.Label(self.inputframe, text = \"End Date:\").grid(row=0, column =5,\n                                                            columnspan=3, sticky = 'sw')\n\n        self.start_day = StringVar()\n        self.start_month = StringVar()\n        self.start_year = StringVar()\n        self.end_day = StringVar()\n        self.end_month = StringVar()\n        self.end_year = StringVar()\n\n        #create the spinboxes (day 1-31, month 1-12, year 2001 through the current year)\n        Spinbox(self.inputframe,from_=1, to =31, textvariable = self.start_day,\n                width =2, font ='Courier 12').grid(row=1, column=1)\n        Spinbox(self.inputframe, from_=1, to=12, textvariable=self.start_month,\n                width=3, font='Courier 12').grid(row=1, column=2)\n        Spinbox(self.inputframe, from_=2001, to=date.today().year, textvariable=self.start_year,\n                width=4, font='Courier 12').grid(row=1, column=3)\n        Spinbox(self.inputframe, from_=1, to=31, textvariable=self.end_day,\n                width=2, font='Courier 12').grid(row=1, column=5)\n        Spinbox(self.inputframe, from_=1, to=12, textvariable=self.end_month,\n                width=3, font='Courier 12').grid(row=1, column=6)\n        Spinbox(self.inputframe, from_=2001, to=date.today().year, textvariable=self.end_year,\n                width=4, font='Courier 12').grid(row=1, column=7)\n\n        #set default day,month,year to today\n        self.start_day.set(date.today().day)\n        self.start_month.set(date.today().month)\n        self.start_year.set(date.today().year)\n        self.end_day.set(date.today().day)\n        self.end_month.set(date.today().month)\n        self.end_year.set(date.today().year)\n\n        # these labels are for padding purposes\n        ttk.Label(self.inputframe).grid(row=1, column=0, padx=6)\n        ttk.Label(self.inputframe).grid(row=1, column=4, padx=6)\n        ttk.Label(self.inputframe).grid(row=1, column=8, padx=6)\n\n        ttk.Button(self.inputframe, text='Submit',\n                   command=self._submit_callback).grid(row=2, column=0, columnspan=9, pady=5)\n\n\n        #result display GUI\n        self.resultframe = ttk.Frame(self.master)\n        ttk.Label(self.resultframe, text='Mean:').grid(row=1, column=0, padx=5)\n        ttk.Label(self.resultframe, text='Median:').grid(row=2, column=0, padx=5)\n\n        ttk.Label(self.resultframe, text='Air\\nTemp:',\n                  justify=CENTER).grid(row=0, column=2, sticky='e', padx=5)\n        ttk.Label(self.resultframe, text='Barometric\\nPressure:',\n                  justify=CENTER).grid(row=0, column=3, sticky='e', padx=5)\n        ttk.Label(self.resultframe, text='Wind\\nSpeed:',\n                  justify=CENTER).grid(row=0, column=1, sticky='e', padx=5)\n\n        self.air_temp_mean = StringVar()\n        self.air_temp_median = StringVar()\n        self.barometric_press_mean = StringVar()\n        self.barometric_press_median = StringVar()\n        self.wind_speed_mean = StringVar()\n        self.wind_speed_median = StringVar()\n\n        ttk.Label(self.resultframe, textvariable=self.air_temp_mean,\n                  style='Result.TLabel').grid(row=1, column=2)\n        ttk.Label(self.resultframe, textvariable=self.air_temp_median,\n                  style='Result.TLabel').grid(row=2, column=2)\n        ttk.Label(self.resultframe, 
textvariable=self.barometric_press_mean,\n                  style='Result.TLabel').grid(row=1, column=3)\n        ttk.Label(self.resultframe, textvariable=self.barometric_press_median,\n                  style='Result.TLabel').grid(row=2, column=3)\n        ttk.Label(self.resultframe, textvariable=self.wind_speed_mean,\n                  style='Result.TLabel').grid(row=1, column=1)\n        ttk.Label(self.resultframe, textvariable=self.wind_speed_median,\n                  style='Result.TLabel').grid(row=2, column=1)\n\n    def _submit_callback(self):\n        try:\n            start = date(int(self.start_year.get()),\n                         int(self.start_month.get()),\n                         int(self.start_day.get()))\n            end = date(int(self.end_year.get()),\n                       int(self.end_month.get()),\n                       int(self.end_day.get()))\n        except ValueError as e:\n            messagebox.showerror(title='ValueError',\n                                 message = ('Invalid Date\\n'\n                                            'Correct format is ''DD Mon YYYY'))\n            self.start_day.set(date.today().day)\n            self.start_month.set(date.today().month)\n            self.start_year.set(date.today().year)\n            self.end_day.set(date.today().day)\n            self.end_month.set(date.today().month)\n            self.end_year.set(date.today().year)\n            return\n\n        if (start < date(2001,1,12)) or (end > date.today()) or (start>end):\n            messagebox.showerror(title ='ValueError',\n                                 message = ('INVALID DATE RANGE\\nStart Date: {}\\nEnd Date: {}\\n'\n                                            'Dates must be between 2001-1-12 and {}.\\n'\n                                            'Start Date must be <= End Date.').format(start, end, date.today()))\n            return\n\n        data = list(self.database._get_data_for_range(start,end))\n\n        if data != []:\n            # the lists will hold all values from date range for each weather parameter\n            dict_of_lists = dict(Air_Temp=[], Barometric_Press=[], Wind_Speed=[])\n\n            for entry in data:\n                for key in dict_of_lists.keys():\n                    dict_of_lists[key].append(entry[key])\n\n            # calculate the mean & median for each type of data; store result in dictionaries\n            result = {}\n            for key in dict_of_lists.keys():\n                result[key] = dict(mean=mean(dict_of_lists[key]),\n                                   median=median(dict_of_lists[key]))\n\n            # set StringVars associated with results labels\n            self.air_temp_mean.set('{0:.2f}'.format(result['Air_Temp']['mean']))\n            self.air_temp_median.set('{0:.2f}'.format(result['Air_Temp']['median']))\n            self.barometric_press_mean.set('{0:.2f}'.format(result['Barometric_Press']['mean']))\n            self.barometric_press_median.set('{0:.2f}'.format(result['Barometric_Press']['median']))\n            self.wind_speed_mean.set('{0:.2f}'.format(result['Wind_Speed']['mean']))\n            self.wind_speed_median.set('{0:.2f}'.format(result['Wind_Speed']['median']))\n\n            # display the results frame\n            self.resultframe.pack(side=TOP)\n\n        else:\n            # if this request did not produce results, hide the results frame\n            self.resultframe.forget()\n\n    def safe_close(self):\n        self.database.close()\n        self.master.destroy()\n\ndef main():\n    root = Tk()\n    display = APP(root)\n    root.mainloop()\n\nif __name__ == '__main__':\n    main()","sub_path":"code Clinic/Project 1: Statical Analysis/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":8573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"117544944","text":"napisi=\"\"\nwhile True:\n    besedilo=\"\"\n    vnesi=input()\n    if vnesi==\"0 0\":\n        break\n    velik=vnesi.split()\n    vx=int(velik[0])\n    vy=int(velik[1])\n    mx=0\n    my=0\n    rx=0\n    ry=0\n    n=int(input())\n    while n>0:\n        n-=1\n        hodi = input()\n        hodi = hodi.split()\n        smer = hodi[0]\n        kol = int(hodi[1])\n        if smer==\"u\":\n            my+=kol\n            ry+=kol\n        elif smer==\"d\":\n            my-=kol\n            ry-=kol\n        elif smer==\"l\":\n            mx-=kol\n            rx-=kol\n        elif smer==\"r\":\n            mx+=kol\n            rx+=kol\n        if rx>vx-1:\n            rx=vx-1\n        elif rx<0:\n            rx=0\n        if 
ry>vy-1:\n ry=vy-1\n elif ry<0:\n ry=0\n besedilo+=\"Robot thinks \"+ str(mx)+ \" \"+ str(my)+\"\\n\"\n besedilo+= \"Actually at \" +str(rx)+\" \"+ str(ry)+\"\\n\"\n napisi+=besedilo\nprint(napisi)\n\n\n","sub_path":"robots.py","file_name":"robots.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"222942859","text":"import os\nimport abc\nimport json\nimport socket\n\nimport time\nimport asyncio\nimport os.path\nimport functools\nimport subprocess\nimport collections\nfrom pathlib import Path\nfrom dataclasses import dataclass, field\nfrom typing import (Iterator, List, Dict, Tuple, Any, Callable, Set, Iterable, Optional, AsyncIterator, Awaitable,\n BinaryIO, cast)\n\nimport msgpack\n\nfrom koder_utils import LocalHost, b2ssize\n\nfrom cephlib import (RecId, CephCLI, CephOp, ParseResult, RecordFile, CephHealth, iter_log_messages, iter_ceph_logs_fd,\n CephRelease, OpRec, get_ceph_version, pack_historic, pack_record, HistoricFields, unpack_record)\n\nfrom .. import (logger, expose_func, expose_type, IReadableAsync, ChunkedFile, register_startup, register_shutdown,\n ConfigVar)\n\n\nexpose = functools.partial(expose_func, \"ceph\")\n\n\nhistoric_ops_file = ConfigVar[Path]('historic_ops', Path)\nhistoric_ops_cfg_file = ConfigVar[Path]('historic_ops_cfg', Path)\n\n\nclass NoPoolFound(Exception):\n pass\n\n\nFileRec = Tuple[RecId, Any]\nBinaryFileRec = Tuple[RecId, bytes]\nBinInfoFunc = Callable[[], Awaitable[Iterable[FileRec]]]\n\nGiB = 1 << 30\nMiB = 1 << 20\nDEFAULT_MAX_REC_FILE_SIZE = GiB\nDEFAULT_MIN_DEVICE_FREE = 50 * GiB\nDEFAULT_SIZE = 20\nDEFAULT_DURATION = 600\n\n\n@expose_type\n@dataclass\nclass HistoricCollectionConfig:\n osd_ids: List[int]\n size: int\n duration: int\n ceph_extra_args: List[str]\n collection_end_time: float\n min_duration: Optional[int] = 50\n dump_unparsed_headers: bool = False\n pg_dump_timeout: Optional[int] = None\n extra_cmd: List[str] = field(default_factory=list)\n extra_dump_timeout: Optional[int] = None\n max_record_file: int = DEFAULT_MAX_REC_FILE_SIZE\n min_device_free: int = DEFAULT_MIN_DEVICE_FREE\n packer_name: str = 'compact'\n cmd_timeout: float = 50\n stats_keep_cycles: int = 10\n\n def __str__(self) -> str:\n attrs = \"\\n \".join(f\"{name}: {getattr(self, name)!r}\" for name in self.__dataclass_fields__) # type: ignore\n return f\"{self.__class__.__name__}:\\n {attrs}\"\n\n\n@expose_type\n@dataclass\nclass HistoricCollectionStatus:\n cfg: Optional[HistoricCollectionConfig]\n running: bool\n path: str\n file_size: int\n disk_free_space: int\n\n\ndef almost_sorted_ceph_log_messages(sort_buffer_size: int) -> Iterator[List[Tuple[float, CephHealth]]]:\n all_messages: List[Tuple[float, CephHealth]] = []\n for fd in iter_ceph_logs_fd():\n for message in iter_log_messages(fd):\n all_messages.append(message)\n if len(all_messages) > sort_buffer_size:\n all_messages.sort()\n yield all_messages[:sort_buffer_size // 2]\n del all_messages[:sort_buffer_size // 2]\n yield all_messages\n\n\n@expose\ndef find_issues_in_ceph_log(max_lines: int = 100000, max_issues: int = 100) -> str:\n errs_warns = []\n for idx, ln in enumerate(open(\"/var/log/ceph/ceph.log\")):\n if idx == max_lines:\n break\n if 'cluster [ERR]' in ln or \"cluster [WRN]\" in ln:\n errs_warns.append(ln)\n if len(errs_warns) == max_issues:\n break\n return \"\".join(errs_warns[-max_lines:])\n\n\n# Don't using namedtuples/classes to simplify serialization\n@expose\ndef 
analyze_ceph_logs_for_issues(sort_buffer_size: int = 10000) \\\n -> Tuple[Dict[str, int], List[Tuple[bool, float, float]]]:\n\n error_per_type: Dict[CephHealth, int] = collections.Counter()\n status_ranges: List[Tuple[bool, float, float]] = []\n currently_healthy = False\n region_started_at: float = 0.0\n\n utc = None\n for all_messages in almost_sorted_ceph_log_messages(sort_buffer_size):\n for utc, mess_id in all_messages:\n if region_started_at < 1.0:\n region_started_at = utc\n currently_healthy = mess_id == CephHealth.HEALTH_OK\n continue\n\n if mess_id != CephHealth.HEALTH_OK:\n error_per_type[mess_id] += 1\n if currently_healthy:\n status_ranges.append((True, region_started_at, utc))\n region_started_at = utc\n currently_healthy = False\n elif not currently_healthy:\n status_ranges.append((False, region_started_at, utc))\n region_started_at = utc\n currently_healthy = True\n\n if utc and utc != region_started_at:\n status_ranges.append((currently_healthy, region_started_at, utc))\n\n return {key.name: val for key, val in error_per_type.items()}, status_ranges\n\n\nclass Recorder(metaclass=abc.ABCMeta):\n def __init__(self, cli: CephCLI, cfg: HistoricCollectionConfig, record_file: Optional[RecordFile]) -> None:\n assert record_file\n self.cli = cli\n self.cfg = cfg\n self.record_file = record_file\n\n async def start(self) -> None:\n pass\n\n @abc.abstractmethod\n async def cycle(self) -> None:\n pass\n\n async def close(self) -> None:\n await self.cycle()\n\n\nclass DumpHistoric(Recorder):\n def __init__(self, cli: CephCLI, cfg: HistoricCollectionConfig, record_file: Optional[RecordFile]) -> None:\n Recorder.__init__(self, cli, cfg, record_file)\n self.osd_ids = self.cfg.osd_ids.copy()\n self.not_inited_osd: Set[int] = set(self.cfg.osd_ids)\n self.pools_map: Dict[int, Tuple[str, int]] = {}\n self.pools_map_no_name: Dict[int, int] = {}\n self.last_time_ops: Set[str] = set()\n\n async def reload_pools(self) -> Optional[FileRec]:\n pools = await self.cli.get_pools()\n\n new_pools_map: Dict[int, Tuple[str, int]] = {}\n for idx, (pool_id, pool_name) in enumerate(sorted(pools.items())):\n new_pools_map[pool_id] = pool_name, idx\n\n if new_pools_map != self.pools_map:\n self.pools_map = new_pools_map\n self.pools_map_no_name = {num: idx for num, (_, idx) in new_pools_map.items()}\n return RecId.pools, self.pools_map\n return None\n\n async def dump_historic(self) -> AsyncIterator[FileRec]:\n ctime = int(time.time())\n curr_not_inited = self.not_inited_osd\n self.not_inited_osd = set()\n for osd_id in curr_not_inited:\n if not await self.cli.set_history_size_duration(osd_id, self.cfg.size, self.cfg.duration):\n self.not_inited_osd.add(osd_id)\n\n new_rec = await self.reload_pools()\n if new_rec:\n # pools updated - skip this cycle, as different ops may come from pools before and after update\n yield new_rec\n else:\n prev_ops = self.last_time_ops\n self.last_time_ops = set()\n for osd_id in set(self.osd_ids).difference(self.not_inited_osd):\n try:\n parsed = await self.cli.get_historic(osd_id)\n except (subprocess.CalledProcessError, OSError):\n self.not_inited_osd.add(osd_id)\n continue\n\n if self.cfg.size != parsed['size'] or self.cfg.duration != parsed['duration']:\n self.not_inited_osd.add(osd_id)\n continue\n\n ops: List[CephOp] = []\n\n for op in self.parse_historic_records(parsed['ops']):\n if op.tp is not None and op.description not in prev_ops:\n assert op.pack_pool_id is None\n op.pack_pool_id = self.pools_map_no_name[op.pool_id]\n ops.append(op)\n\n 
self.last_time_ops.update(op.description for op in ops)\n yield (RecId.ops, (osd_id, ctime, ops))\n\n def parse_historic_records(self, ops: List[OpRec]) -> Iterator[CephOp]:\n for raw_op in ops:\n if self.cfg.min_duration and int(raw_op.get('duration') * 1000) < self.cfg.min_duration:\n continue\n try:\n parse_res, ceph_op = CephOp.parse_op(raw_op)\n if ceph_op:\n yield ceph_op\n elif parse_res == ParseResult.unknown:\n pass\n except Exception:\n pass\n\n async def cycle(self) -> None:\n total_size = 0\n async for rec_id, data in self.dump_historic():\n if self.record_file:\n if rec_id is RecId.ops:\n data = *data, HistoricFields.compact\n rec = pack_record(rec_id, data)\n if rec:\n total_size += len(rec[1])\n self.record_file.write_record(*rec, flush=False)\n\n if self.record_file:\n self.record_file.flush()\n\n logger.debug(f\"Dump osd provides {b2ssize(total_size)}B\")\n\n async def close(self) -> None:\n await self.cycle()\n for osd_id in self.cfg.osd_ids:\n await self.cli.set_history_size_duration(osd_id, DEFAULT_SIZE, DEFAULT_DURATION)\n\n\nclass DumpPGDump(Recorder):\n async def cycle(self) -> None:\n data = (await self.cli.run_json_raw(\"pg dump\")).strip()\n if data.startswith(\"dumped all\"):\n data = data.replace(\"dumped all\", \"\", 1).lstrip()\n rec = pack_record(RecId.pgdump, data)\n if rec:\n self.record_file.write_record(*rec)\n logger.debug(f\"Pg dump provides {b2ssize(len(rec[1]))}B\")\n\n\nclass InfoDumper(Recorder):\n async def cycle(self) -> None:\n logger.debug(f\"Run cluster info: {self.cfg.extra_cmd}\")\n output = {'time': int(time.time())}\n\n for cmd in self.cfg.extra_cmd:\n try:\n output[cmd] = await self.cli.run_no_ceph(cmd)\n except subprocess.SubprocessError as exc:\n logger.error(\"Cmd failed: %s\", exc)\n\n if len(output) > 1:\n rec = pack_record(RecId.cluster_info, output)\n if rec:\n self.record_file.write_record(*rec)\n logger.debug(f\"Cluster info provides {b2ssize(len(rec[1]))}B\")\n\n\nclass CephHistoricDumper:\n def __init__(self, release: CephRelease,\n record_file_path: Path,\n collection_config: HistoricCollectionConfig) -> None:\n self.release = release\n self.record_file_path = record_file_path\n self.cfg = collection_config\n self.historic: Optional[DumpHistoric] = None\n\n self.cli = CephCLI(node=None, extra_params=self.cfg.ceph_extra_args, timeout=collection_config.cmd_timeout,\n release=self.release)\n\n if not self.record_file_path.exists():\n self.record_file_path.parent.mkdir(parents=True, exist_ok=True)\n with self.record_file_path.open(\"wb\"):\n pass\n self.record_fd = self.record_file_path.open(\"r+b\")\n self.record_file = RecordFile(self.record_fd)\n if self.record_file.prepare_for_append(truncate_invalid=True):\n logger.error(f\"Records file broken at offset {self.record_file.tell()}, truncated to last valid record\")\n\n self.exit_evt = asyncio.Event()\n self.active_loops_tasks: Set[Awaitable] = set()\n self.any_failed = False\n\n async def is_running(self) -> bool:\n _, running = await asyncio.wait(self.active_loops_tasks, timeout=0)\n return bool(running)\n\n def start(self) -> None:\n params = {\n 'hostname': socket.gethostname(),\n 'config': self.cfg.__dict__}\n self.record_file.write_record(*pack_record(RecId.params, params))\n\n assert not self.active_loops_tasks\n self.historic = DumpHistoric(self.cli, self.cfg, self.record_file)\n\n recorders = [(self.cfg.duration, self.historic)]\n\n info_dumper = InfoDumper(self.cli, self.cfg, self.record_file)\n pg_dumper = DumpPGDump(self.cli, self.cfg, self.record_file)\n 
recorders.extend([(self.cfg.extra_dump_timeout, info_dumper), (self.cfg.pg_dump_timeout, pg_dumper)])\n self.any_failed = False\n self.active_loops_tasks = {asyncio.create_task(self.loop(timeout, recorder)) for timeout, recorder in recorders}\n\n def get_free_space(self) -> int:\n vstat = os.statvfs(str(self.record_file_path))\n return vstat.f_bfree * vstat.f_bsize\n\n def check_recording_allowed(self) -> bool:\n assert self.cfg\n\n disk_free = self.get_free_space()\n if disk_free <= self.cfg.min_device_free:\n logger.warning(\"Stop recording due to disk free space %s less than minimal %s\",\n b2ssize(disk_free), b2ssize(self.cfg.min_device_free))\n return False\n\n if self.record_file.tell() >= self.cfg.max_record_file:\n logger.warning(\"Stop recording due to record file too large - %s, while %s is a limit\",\n b2ssize(self.record_file.tell()), b2ssize(self.cfg.max_record_file))\n return False\n\n if time.time() >= self.cfg.collection_end_time:\n logger.warning(\"Stop recording due to record time expired\")\n return False\n\n return True\n\n async def stop(self, timeout=60) -> bool:\n self.exit_evt.set()\n _, self.active_loops_tasks = await asyncio.wait(self.active_loops_tasks, timeout=timeout) # type: ignore\n\n if not self.active_loops_tasks:\n self.record_file.close()\n self.record_fd.close()\n\n return not self.active_loops_tasks\n\n async def loop(self, timeout: Optional[float], recorder: Recorder) -> None:\n\n if timeout is None:\n return\n\n exit_requested = False\n\n try:\n next_run: float = time.time()\n\n await recorder.start()\n\n while True:\n sleep_for = next_run - time.time()\n\n if sleep_for > 0:\n try:\n await asyncio.wait_for(self.exit_evt.wait(), timeout=sleep_for)\n exit_requested = True\n except asyncio.TimeoutError:\n pass\n\n if exit_requested:\n logger.debug(f\"Stopping loop for {recorder.__class__.__name__}\")\n await recorder.close()\n break\n\n if self.any_failed:\n logger.debug(f\"Stopping loop for {recorder.__class__.__name__} due to another loop dying\")\n await recorder.close()\n break\n\n if not self.check_recording_allowed():\n break\n\n await recorder.cycle()\n next_run = time.time() + timeout\n except asyncio.CancelledError:\n logger.warning(f\"Loop for {recorder.__class__.__name__} canceled\")\n raise\n except Exception:\n self.any_failed = True\n logger.exception(f\"In loop {recorder.__class__.__name__}\")\n raise\n finally:\n logger.info(f\"Exit loop {recorder.__class__.__name__}\")\n\n\ndumper: Optional[CephHistoricDumper] = None\n\n\n@expose\nasync def start_historic_collection(historic_config: HistoricCollectionConfig, save: bool = True) -> None:\n global dumper\n assert dumper is None, \"Collection already running\"\n\n version = await get_ceph_version(LocalHost(), extra_args=historic_config.ceph_extra_args)\n historic_ops = historic_ops_file()\n if not historic_ops.parent.exists():\n historic_ops.parent.mkdir(parents=True)\n\n hc = str(historic_config).replace('\\n', '\\n ')\n logger.info(f\"Start historic collection with config:\\n {hc}\")\n dumper = CephHistoricDumper(version.release, historic_ops, historic_config)\n dumper.start()\n cfg_path = historic_ops_cfg_file()\n\n if save:\n with cfg_path.open(\"w\") as fd:\n logger.info(f\"Storing historic config to {cfg_path}\")\n fd.write(json.dumps(historic_config.__dict__))\n\n\n@expose\nasync def stop_historic_collection(not_err: bool = False) -> None:\n global dumper\n if not dumper:\n if not_err:\n return\n assert False, \"Not running\"\n\n cfg_path = historic_ops_cfg_file()\n if cfg_path.exists():\n 
cfg_path.unlink()\n\n assert await dumper.stop(), \"Not all loops finished successfully\"\n dumper = None\n\n\n@expose\nasync def remove_historic_data() -> None:\n assert not dumper, \"Collection running. Stop first\"\n historic_ops_file().unlink()\n\n\n@expose\nasync def get_historic_collection_status() -> HistoricCollectionStatus:\n historic_ops = historic_ops_file()\n record_cfg = None if not dumper else dumper.cfg\n\n try:\n vstat = os.statvfs(str(historic_ops))\n free = vstat.f_bfree * vstat.f_bsize\n except OSError:\n free = 0\n\n size = 0\n try:\n if historic_ops.exists():\n size = historic_ops.stat().st_size\n except OSError:\n pass\n\n return HistoricCollectionStatus(record_cfg,\n False if not dumper else (await dumper.is_running()),\n str(historic_ops),\n disk_free_space=free,\n file_size=size)\n\n\n@expose\nasync def get_historic_settings(osds: List[int],\n ceph_extra_args: List[str],\n cmd_timeout: int,\n release_i: int) -> Dict[int, Tuple[int, int]]:\n cli = CephCLI(node=None, extra_params=ceph_extra_args, timeout=cmd_timeout, release=CephRelease(release_i))\n res = {}\n for osd_id in osds:\n vl = await cli.get_history_size_duration(osd_id)\n if vl:\n res[osd_id] = vl\n return res\n\n\n@expose\ndef get_collected_historic_data(offset: int, size: Optional[int] = None) -> IReadableAsync:\n historic_ops = historic_ops_file()\n assert historic_ops.exists(), f\"File {historic_ops} with ops not found\"\n rfd = cast(BinaryIO, historic_ops.open(\"rb\"))\n\n if offset:\n rfd.seek(offset)\n\n return ChunkedFile(rfd,\n close_at_the_end=True,\n till_offset=offset + size if size is not None else None)\n\n\n@register_startup\nasync def restore_collection(_: Any):\n cfg_path = historic_ops_cfg_file()\n\n if cfg_path.exists():\n try:\n historic_config_dct = json.load(cfg_path.open())\n historic_config = HistoricCollectionConfig(**historic_config_dct)\n except Exception:\n logger.exception(f\"Can't load historic config from {cfg_path}\")\n return\n\n await start_historic_collection(historic_config, save=False)\n\n\n@register_shutdown\nasync def stop_collection(_: Any):\n await stop_historic_collection(not_err=True)\n\n\n@expose\nasync def configure_historic(osd_ids: List[int],\n size: int,\n duration: float,\n ceph_extra_args: List[str],\n cmd_timeout: float,\n release_i: int) -> Tuple[List[int], Dict[int, Tuple[int, int]]]:\n cli = CephCLI(node=None, extra_params=ceph_extra_args, timeout=cmd_timeout, release=CephRelease(release_i))\n prev_settings: Dict[int, Tuple[int, int]] = {}\n failed: List[int] = []\n for osd_id in osd_ids:\n sd = await cli.get_history_size_duration(osd_id)\n if sd:\n prev_settings[osd_id] = sd\n if not await cli.set_history_size_duration(osd_id, size, duration):\n failed.append(osd_id)\n\n return failed, prev_settings\n\n\nprevious_ops: Set[str] = set()\n\n\n@expose\nasync def get_historic(osd_ids: List[int],\n size: int,\n duration: float,\n ceph_extra_args: List[str],\n cmd_timeout: float,\n release_i: int,\n min_duration: int = 0) -> bytes:\n cli = CephCLI(node=None, extra_params=ceph_extra_args, timeout=cmd_timeout, release=CephRelease(release_i))\n all_ops: Dict[int, List[CephOp]] = {}\n curr_ops: Set[str] = set()\n\n for osd_id in osd_ids:\n try:\n raw_ops = await cli.get_historic(osd_id)\n except (subprocess.CalledProcessError, OSError):\n continue\n\n if raw_ops['size'] != size or raw_ops['duration'] != duration:\n raise RuntimeError(\n f\"Historic ops setting changed for osd {osd_id}. Expect: duration={duration}, size={size}\" +\n f\". 
Get: duration={raw_ops['duration']}, size={raw_ops['size']}\")\n\n for raw_op in raw_ops['ops']:\n if min_duration > int(raw_op.get('duration') * 1000):\n continue\n try:\n _, op = CephOp.parse_op(raw_op)\n if not op:\n continue\n except Exception:\n continue\n\n if op.tp is not None and op.description not in previous_ops:\n op.pack_pool_id = op.pool_id\n all_ops.setdefault(osd_id, []).append(op)\n curr_ops.add(op.description)\n\n previous_ops.clear()\n previous_ops.update(curr_ops)\n chunks = []\n for osd_id, ops in all_ops.items():\n tpl = osd_id, int(time.time()), ops, HistoricFields.compact | HistoricFields.with_names\n chunks.append(pack_record(RecId.ops, tpl)[1])\n return msgpack.packb(chunks, use_bin_type=True)\n\n\ndef unpack_historic_data(data: bytes) -> Iterator[Dict]:\n for chunk in msgpack.unpackb(data, raw=False):\n yield from unpack_record(RecId.ops, chunk)\n","sub_path":"aiorpc/plugins/ceph.py","file_name":"ceph.py","file_ext":"py","file_size_in_byte":21399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"2993887","text":"import requests, json, re\nfrom geonames_en import Geonames\nimport time\nimport pymongo\nfrom scrapy.selector import Selector\nfrom w3lib.html import remove_tags\nimport datetime\nfrom bson import ObjectId\n\n\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\nmydb = myclient[\"sales_db\"]\njobdb = mydb[\"jobs\"]\nuserdb = mydb[\"user\"]\ncompanydb = mydb[\"companies\"]\n\n\nt = time.localtime()\nyear = time.strftime(\"%Y\", t)\nyear = int(year)\n\ntoday = datetime.date.today()\nyesterday = today - datetime.timedelta(days=1)\nyesterday_day = int(yesterday.strftime(\"%d\"))\n\nmonths = {\n \"Jan\": \"01\",\n \"Feb\": \"02\",\n \"Mar\": \"03\",\n \"Apr\": \"04\",\n \"May\": \"05\",\n \"Jun\": \"06\",\n \"Jul\": \"07\",\n \"Aug\": \"08\",\n \"Sep\": \"09\",\n \"Oct\": \"10\",\n \"Nov\": \"11\",\n \"Dec\": \"12\"\n }\n\n\nurl = 'http://repatarmenia.org/en/engage/careers/LatestPostsFilter?jsonData={%22Count%22:9,%22LoadMoreCount%22:%229%22,%22SortBy%22:0}'\n\ninfo = requests.get(url).json()\n\n# data = info.content.decode(\"utf-8-sig\").encode(\"utf-8\")\n# data = json.loads(data)\n\n# print(info[0][\"CompanyName\"])\n\n\nfor n in range(0, 9):\n\n # Company\n try:\n company = info[n][\"CompanyName\"]\n except:\n company = \"\"\n\n\n # Position\n try:\n position = info[n][\"Title\"]\n except:\n position = \"\"\n\n\n # Location\n try:\n location = info[n][\"Location\"]\n location_id = [{\"city\" : f\"{location}\", \"id\" : f\"{Geonames(location)}\"}]\n except:\n location_id = [{'city': 'Yerevan', 'id': '616052'}]\n\n # Logo\n try:\n logo = \"http://repatarmenia.org\" + info[n][\"CareersImage\"]\n except:\n logo = \"\"\n\n # Description\n try:\n description = info[n][\"Description\"]\n except:\n description = \"\"\n\n\n # Vacancy Link\n try:\n v_link = \"http://repatarmenia.org\" + info[n][\"ItemDefaultUrl\"]\n except:\n v_link = \"\"\n\n\n # Deadline\n try:\n ends = info[n][\"DeadLine\"]\n ends = ends.split(\" \")\n deadline_day = ends[1].replace(\",\", \"\")\n deadline_day = int(deadline_day)\n deadline_month = int(months[f\"{ends[0]}\"])\n deadline_year = int(ends[2])\n except Exception:\n deadline_day = 0\n deadline_month = 0\n deadline_year = 0\n\n \n # Email\n try:\n email = re.findall(r'[\\w\\.-]+@[\\w\\.-]+', description)[0]\n except Exception:\n email = []\n\n # Publication stuff\n v_page = requests.get(v_link)\n\n try:\n published = 
Selector(response=v_page).xpath('//*[@id=\"ContentplaceholderMain_T7553F19B005_Col00\"]/div[2]/div[2]/div[1]/div[1]/text()').get()\n published = published.strip()\n published = published.split(\" \")\n publish_day = published[1].replace(\",\", \"\")\n publish_day = int(publish_day)\n publish_month = int(months[f\"{published[0]}\"])\n publish_year = int(published[2])\n except:\n published = 0\n publish_day = 0\n publish_month = 0\n publish_year = 0\n if publish_day != yesterday_day:\n print(\"Not published Yesterday\")\n continue\n\n\n data = {\n \"company\" : company,\n \"position\" : position,\n \"location\" : location,\n \"logo\" : logo,\n # \"description\" : description,\n \"v_link\" : v_link,\n \"deadline_day\" : deadline_day,\n \"deadline_month\" : deadline_month,\n \"deadline_year\" : deadline_year,\n \"publish_day\" : publish_day,\n \"publish_month\" : publish_month,\n \"publish_year\" : publish_year,\n \"email\" : email,\n }\n print(\"Data is scraped successfully\")\n\n\n # Check if company already exists in a collection\n check = companydb.find_one({\"name\" : company})\n if check is None:\n new_company_info = {\n \"name\" : company,\n \"industry\" : \"1\",\n \"logo\" : logo,\n \"created_at\" : datetime.datetime.utcnow(),\n \"emails\" : email,\n \"career_center\" : {\n \"description\" : \"\",\n \"custom_button_enabled\" : True,\n \"custom_button_title\" : \"Visit\",\n \"custom_button_url\" : \"\"\n },\n \"country\" : \"AM\"\n }\n print(new_company_info)\n companydb.insert_one(new_company_info)\n company_object_id = companydb.find_one({\"name\" : company})\n company_object_id = company_object_id[\"_id\"]\n print(company_object_id)\n else:\n company_object_id = companydb.find_one({\"name\" : company})\n company_object_id = company_object_id[\"_id\"]\n print(\"Company already exists: \", company_object_id)\n\n\n\n\n # Users\n # Vacancy User\n if email == []:\n user_object_id = 100000000000000000000000\n else:\n check = userdb.find_one({\"email\" : email})\n if check is None:\n new_user_info = {\n \"email\" : email,\n \"company_id\" : ObjectId(f\"{company_object_id}\"),\n \"created_at\" : datetime.datetime.utcnow()\n }\n userdb.insert_one(new_user_info)\n user_object_id = userdb.find_one({\"email\" : email})\n user_object_id = user_object_id[\"_id\"]\n print(user_object_id)\n else:\n user_object_id = userdb.find_one({\"email\" : email})\n user_object_id = user_object_id[\"_id\"]\n print(\"User already exists\")\n \n\n # Job Itself\n new_job_info = {\n \"user_id\" : ObjectId(f\"{user_object_id}\"),\n 'company_id' : ObjectId(f\"{company_object_id}\"),\n \"job_details\" : {\n \"url\" : v_link,\n \"title\" : position,\n \"country_id\" : \"AM\",\n \"city\" : location_id,\n \"description\" : [\n {\n \"language\" : \"am\",\n \"description\" : \"\",\n },\n {\n \"language\" : \"en\",\n \"description\" : description,\n }\n ],\n \"required\" : {\n \"experience\" : \"\",\n },\n \"salarycurrency\" : \"AMD\",\n \"salarymin\" : 0,\n \"salarymax\" : 0,\n \"salaryinterval\" : \"month\",\n \"additional_info\" : {\n \"suitable_for\" : \"\"\n },\n \"numberofpositions\" : 1,\n \"publishday\" : publish_day,\n \"publishmonth\" : publish_month,\n \"publishyear\" : publish_year,\n \"deadlineday\" : deadline_day,\n \"deadlinemonth\" : deadline_month,\n \"deadlineyear\" : deadline_year\n },\n \"created_at\" : datetime.datetime.utcnow(),\n \"source\" : \"repatarmenia.org\",\n \"status\" : \"active\"\n }\n jobdb.insert_one(new_job_info)\n\n\n\n 
print(data)","sub_path":"armenia/repatarmenia/daily/repatarmenia.py","file_name":"repatarmenia.py","file_ext":"py","file_size_in_byte":6678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"264769641","text":"import matplotlib.pyplot as plt\nfrom collections import OrderedDict\nimport json\nfrom sortedcontainers import SortedList, SortedDict, SortedSet\nfrom utility import binarySearch, barGraphPlot, margin\nfrom helper import idOfMatch, dictFileReader\n\n\n# Reading Matches.csv\nfile = '/home/dev/workspace/raw-data-transformation/ipl/matches.csv'\nmatchReader = dictFileReader(file)\n\n# reading deliveries.cvv\nfile = '/home/dev/workspace/raw-data-transformation/ipl/deliveries.csv'\ndeliveriesReader = dictFileReader(file)\n\n\n#______main logic\n\n# id of matches which played in 2016\nidOfMatchIn2016 = idOfMatch(2016,matchReader)\n\n# insertin of extera runs given by bowling team\nextraRunByTeamsDict = SortedDict()\nfor delivery in deliveriesReader:\n if (binarySearch(int(delivery['match_id']), idOfMatchIn2016)):\n if(binarySearch(delivery['bowling_team'], extraRunByTeamsDict.keys())):\n extraRunByTeamsDict[delivery['bowling_team']\n ] = extraRunByTeamsDict[delivery['bowling_team']]+int(delivery['extra_runs'])\n else:\n extraRunByTeamsDict[delivery['bowling_team']] = int(\n delivery['extra_runs'])\n\n# display extraRunByTeamdict\nprint(json.dumps(extraRunByTeamsDict, indent=4))\n\n# graph ploting\ntitle = 'Extra run scored by Teams'\nyLabel = 'Runs'\nxLabel = \"Teams\"\n#x lable rotation\nxRotation = \"vertical\"\n#y lable rotation\nyRotation = \"horizontal\"\nyTicksFontSize = 7\nxTicksFontSize = 7\nyAxisElements = extraRunByTeamsDict.values()\nxAxisElements = extraRunByTeamsDict.keys()\nySpace=5\nyTopSpace=20\n#for initializing the margins dictionary for proper margin in frame\nl = .1\nb = .25\nr = .9\nt = .95\nmargin(l, b, r, t)\nbarGraphPlot(title, yLabel, xLabel, yRotation, xRotation, yTicksFontSize,\n xTicksFontSize, yAxisElements, xAxisElements,ySpace,yTopSpace)\n\n","sub_path":"extra_runs(Q3).py","file_name":"extra_runs(Q3).py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"119759900","text":"import smtplib\nimport time\nimport imaplib\nimport email\nimport json\nimport datetime\nimport socketio\nimport time\nimport os\nfrom dotenv import load_dotenv\nload_dotenv()\n\nsio = socketio.Client()\nsio.connect('http://localhost:3000')\nFROM_EMAIL=os.getenv(\"FROM_EMAIL\")\nFROM_PWD=os.getenv(\"FROM_PWD\")\nSMTP_SERVER=os.getenv(\"SMTP_SERVER\")\nSMTP_PORT=os.getenv(\"SMPT_PORT\")\n\n\nwhile True:\n mail = imaplib.IMAP4_SSL(SMTP_SERVER)\n mail.login(FROM_EMAIL, FROM_PWD)\n mail.list()\n mail.select('inbox')\n result, data = mail.uid('search', None, \"UNSEEN\") # (ALL/UNSEEN)\n i = len(data[0].split())\n\n emails = []\n for x in range(i):\n latest_email_uid = data[0].split()[x]\n result, email_data = mail.uid('fetch', latest_email_uid, '(RFC822)')\n raw_email = email_data[0][1]\n raw_email_string = raw_email.decode('utf-8')\n email_message = email.message_from_string(raw_email_string)\n email_from = str(email.header.make_header(email.header.decode_header(email_message['From'])))\n for part in email_message.walk():\n if part.get_content_type() == \"text/html\":\n date_time_obj = None\n body = part.get_payload(decode=True).decode('raw-unicode-escape')\n body = body.replace('\\r', '').replace('\\t', '').replace('\\n', '')\n # Following code 
is used to format the email body. This gets rid of the useless parts that Woocommerce tacks onto the emails, which are not necessary\n # for how we want them to be displayed.\n if email_from == \"Brewed Awakening Vermilion \" and (\"Local pickup\" in body):\n body = body[0:body.find('')] + body[body.find(''): len(body)]\n body = body[0:body.find('') + 10: len(body)]\n body = body[0:body.find('')] + body[body.find(''): len(body)]\n body = body[0:body.find('') + 38: len(body)]\n body = body[0:body.find('')] + body[body.find('') + 8: len(body)]\n body = body[0:body.find(\"You\")] + body[body.find(\"You\") + 41: len(body)]\n body = body[0:body.find(':') + 5] + body[body.find('', '')\n body = body.replace('', '')\n body = body.replace('','')\n body = body.replace('', '')\n date_time_obj = datetime.datetime.strptime(email_time, '%m/%d/%Y %I:%M%p')\n message = {\"email\": body, \"time\":date_time_obj.strftime(\"%m-%d-%Y %H:%M:%S\")}\n emails.append(message)\n else:\n continue\n else:\n continue\n mail.close()\n mail.logout()\n sio.emit('python-message', emails)\n emails.clear()\n time.sleep(120)","sub_path":"email_reader.py","file_name":"email_reader.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"97683757","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('spooapp', '0015_auto_20160710_0145'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='kitap',\n name='yazar',\n ),\n migrations.AlterField(\n model_name='tesisler',\n name='isletme_kayit_tarih',\n field=models.CharField(default=datetime.datetime(2016, 7, 10, 1, 53, 52, 630725), verbose_name='Kayıt Tarihi', max_length=100),\n ),\n migrations.DeleteModel(\n name='Kitap',\n ),\n migrations.DeleteModel(\n name='Yazar',\n ),\n ]\n","sub_path":"spooapp/migrations/0016_auto_20160710_0153.py","file_name":"0016_auto_20160710_0153.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"631113910","text":"#from infoManager import getAllBotsPosition\nfrom RULEngine.Util import Position\nfrom RULEngine.Util.Pose import Pose\nfrom RULEngine.Util.constant import FIELD_GOAL_BLUE_X_RIGHT, FIELD_GOAL_BLUE_X_LEFT\nfrom RULEngine.Util.geometry import get_angle, cvt_angle_360\n\n\nclass PeripheralVision:\n\n def __init__(self, info_manager, bot_id):\n # assert self.bot_id <= 6, \"Wrong team!\"\n\n self.info_manager = info_manager\n self.bot_id = bot_id\n self.obstruction_threshold = 5\n id = self.bot_id.split(' ')\n if id[0] == \"friend\":\n self.position = self.info_manager.getPlayerPosition(id[1])\n else:\n self.position = self.info_manager.getEnemyPosition(id[1])\n\n self.vision = []\n self.objects_to_see = {}\n\n for i in range(0, 360): # one flag per degree of vision\n self.vision.append(False)\n\n self.updateVision()\n\n def updateVision(self):\n\n self.objects_to_see[\"enemy_goal\"] = self.get_vision_angle(FIELD_GOAL_BLUE_X_LEFT, FIELD_GOAL_BLUE_X_RIGHT)\n\n # loop over every robot except the one whose vision is being computed\n for bot_id, obstacle_position in self.info_manager.getAllBotsPosition().items():\n if bot_id != self.bot_id:\n angle_bot = int(cvt_angle_360(get_angle(self.position, obstacle_position)))\n # TODO: optimize according to the obstacle's distance and diameter!!\n self.vision[angle_bot] = True\n\n\n def get_vision_angle(self, p1, p2):\n \"\"\"\n Args:\n origin: origin position of the bot\n p1: angle on the right\n p2: angle on the left\n\n the angle is always computed from p1 to p2 (from right to left)\n\n Returns: a tuple (angle, offset)\n \"\"\"\n assert isinstance(p1, Position), \"TypeError p1\"\n assert isinstance(p2, Position), \"TypeError p2\"\n assert p1 != p2, \"Null angle\"\n\n a1 = int(cvt_angle_360(get_angle(self.position, p1)))\n a2 = int(cvt_angle_360(get_angle(self.position, p2)))\n\n if a2 > a1:\n angle = a2 - a1\n else:\n angle = 360 - a1 + a2\n\n return angle, a1\n\n def if_object_free(self, object_to_see):\n object_angle = 
self.objects_to_see[object_to_see][0]\n object_offset = self.objects_to_see[object_to_see][1]\n\n return self.is_free(object_offset, object_angle)\n\n def is_free(self, offset, angle):\n assert isinstance(angle, int), \"TypeError angle\"\n # degrees of freedom\n freedom = 0\n\n for i in range(offset, angle + offset):\n if self.vision[i % 360] is False: # wrap around past 359 degrees\n freedom += 1\n else:\n freedom = 0\n\n if freedom >= self.obstruction_threshold:\n return True\n\n return False\n\n def get_obstructed_factor(self, object_to_see):\n object_angle = self.objects_to_see[object_to_see][0]\n object_offset = self.objects_to_see[object_to_see][1]\n\n holes_number = 0\n max_hole_width = 0\n\n temp_width = 0\n\n for i in range(object_offset, object_angle + object_offset):\n if self.vision[i % 360] is False:\n temp_width += 1\n max_hole_width = max(max_hole_width, temp_width)\n holes_number += 1\n else:\n temp_width = 0\n\n return holes_number * max_hole_width\n","sub_path":"UltimateStrat/GameLogic/PeripheralVision.py","file_name":"PeripheralVision.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"587491598","text":"import os\nimport sys\nimport argparse\n\nimport cv2\nimport torch\nimport numpy as np\n\nsys.path.insert(0, '../lib')\nfrom utils import visual_utils, nms_utils\n\ndef inference(config, network):\n # model_path\n saveDir = os.path.join('../model', 'rcnn_emd_simple')\n model_file = os.path.join(saveDir, 'outputs', 'rcnn_emd_simple_mge.pth')\n assert os.path.exists(model_file)\n # build network\n net = network()\n net.eval()\n check_point = torch.load(model_file, map_location=torch.device('cpu'))\n net.load_state_dict(check_point['state_dict'])\n # get data\n\n imglist = os.listdir('../test_imgs')\n for index, imgname in enumerate(imglist):\n image, resized_img, im_info = get_data(\n os.path.join('../test_imgs', imgname), config.eval_image_short_size, config.eval_image_max_size)\n pred_boxes = net(resized_img, im_info).numpy()\n pred_boxes = post_process(pred_boxes, config, im_info[0, 2])\n\n persons = visual_utils.draw_boxes(\n image,\n pred_boxes[:, :4],)\n # save each pedestrian detected in this image as its own file\n for person_id, person in enumerate(persons):\n fpath = 'outputs/{}.png'.format('person' + '_' + str(person_id + 1))\n print(fpath)\n cv2.imwrite(fpath, person)\n\ndef post_process(pred_boxes, config, scale):\n if config.test_nms_method == 'set_nms':\n assert pred_boxes.shape[-1] > 6, \"Not EMD Network! 
Using normal_nms instead.\"\n assert pred_boxes.shape[-1] % 6 == 0, \"Prediction dim Error!\"\n top_k = pred_boxes.shape[-1] // 6\n n = pred_boxes.shape[0]\n pred_boxes = pred_boxes.reshape(-1, 6)\n idents = np.tile(np.arange(n)[:,None], (1, top_k)).reshape(-1, 1)\n pred_boxes = np.hstack((pred_boxes, idents))\n keep = pred_boxes[:, 4] > config.pred_cls_threshold\n pred_boxes = pred_boxes[keep]\n keep = nms_utils.set_cpu_nms(pred_boxes, 0.5)\n pred_boxes = pred_boxes[keep]\n elif config.test_nms_method == 'normal_nms':\n assert pred_boxes.shape[-1] % 6 == 0, \"Prediction dim Error!\"\n pred_boxes = pred_boxes.reshape(-1, 6)\n keep = pred_boxes[:, 4] > config.pred_cls_threshold\n pred_boxes = pred_boxes[keep]\n keep = nms_utils.cpu_nms(pred_boxes, config.test_nms)\n pred_boxes = pred_boxes[keep]\n elif config.test_nms_method == 'none':\n assert pred_boxes.shape[-1] % 6 == 0, \"Prediction dim Error!\"\n pred_boxes = pred_boxes.reshape(-1, 6)\n keep = pred_boxes[:, 4] > config.pred_cls_threshold\n pred_boxes = pred_boxes[keep]\n\n pred_boxes[:, :4] /= scale\n keep = pred_boxes[:, 4] > config.visulize_threshold\n pred_boxes = pred_boxes[keep]\n return pred_boxes\n\ndef get_data(img_path, short_size, max_size):\n image = cv2.imread(img_path, cv2.IMREAD_COLOR)\n resized_img, scale = resize_img(\n image, short_size, max_size)\n\n original_height, original_width = image.shape[0:2]\n height, width = resized_img.shape[0:2]\n resized_img = resized_img.transpose(2, 0, 1)\n im_info = np.array([height, width, scale, original_height, original_width, 0])\n return image, torch.tensor([resized_img]).float(), torch.tensor([im_info])\n\ndef resize_img(image, short_size, max_size):\n height = image.shape[0]\n width = image.shape[1]\n im_size_min = np.min([height, width])\n im_size_max = np.max([height, width])\n scale = (short_size + 0.0) / im_size_min\n if scale * im_size_max > max_size:\n scale = (max_size + 0.0) / im_size_max\n t_height, t_width = int(round(height * scale)), int(\n round(width * scale))\n resized_image = cv2.resize(\n image, (t_width, t_height), interpolation=cv2.INTER_LINEAR)\n return resized_image, scale\n\ndef run_inference():\n # import libs\n model_root_dir = os.path.join('../model/', 'rcnn_emd_simple')\n sys.path.insert(0, model_root_dir)\n from config import config\n from network import Network\n inference(config, Network)\n\nif __name__ == '__main__':\n run_inference()\n","sub_path":"tools/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"315555824","text":"from django.shortcuts import render, redirect\nfrom django.core.files.storage import FileSystemStorage\n\nfrom chan.models import Post, ListPosts, Category\n\ndef new(request, category_id):\n category = Category.objects.get(id=category_id)\n list_ = ListPosts.objects.create(category = category)\n upload_file_url = save_pic(request)\n Post.objects.create(\n title=request.POST['item_text'],\n pic=upload_file_url,\n post=request.POST['item_post'],\n list = list_)\n return redirect(view_thread, list_.id)\n\ndef view_thread(request, list_id):\n list_ = ListPosts.objects.get(id=list_id)\n return render(request, 'chan/thread.html', {\n 'lists': list_})\n\ndef add(request, list_id):\n list_ = ListPosts.objects.get(id=list_id)\n upload_file_url = save_pic(request)\n Post.objects.create(\n title=request.POST['item_text'],\n pic=upload_file_url,\n post=request.POST['item_post'],\n list = list_)\n return 
redirect(view_thread, list_.id)\n\ndef view_categories(request, category_name=None):\n if request.method == \"POST\":\n Category.objects.create(\n title=request.POST['category'])\n return redirect(view_categories)\n else:\n categories = Category.objects.all()\n return render(request, 'chan/categories.html', {\n 'list': categories})\n\ndef view_posts(request, list_id):\n if request.method == \"POST\":\n return new(request, list_id)\n else:\n list_ = Category.objects.get(title=list_id)\n return render(request, 'chan/lists_category.html', {\n 'lists': list_})\n\ndef save_pic(request):\n try:\n picfile = request.FILES['pic']\n fs = FileSystemStorage()\n filename = fs.save(picfile.name, picfile)\n return fs.url(filename)\n except:\n return\n","sub_path":"chan/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"465553636","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nother property\n\"\"\"\nimport copy\n\nfrom rebulk import Rebulk, Rule, RemoveMatch, POST_PROCESS, AppendMatch\nfrom rebulk.remodule import re\n\nfrom ..common import dash\nfrom ..common import seps\nfrom ..common.validators import seps_surround, compose\nfrom ...reutils import build_or_pattern\nfrom ...rules.common.formatters import raw_cleanup\n\n\ndef other():\n \"\"\"\n Builder for rebulk object.\n :return: Created Rebulk object\n :rtype: Rebulk\n \"\"\"\n rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]).string_defaults(ignore_case=True)\n rebulk.defaults(name=\"other\", validator=seps_surround)\n\n rebulk.regex('Audio-?Fix', 'Audio-?Fixed', value='AudioFix')\n rebulk.regex('Sync-?Fix', 'Sync-?Fixed', value='SyncFix')\n rebulk.regex('Dual-?Audio', value='DualAudio')\n rebulk.regex('ws', 'wide-?screen', value='WideScreen')\n\n rebulk.string('Real', 'Fix', 'Fixed', value='Proper', tags=['has-neighbor-before', 'has-neighbor-after'])\n rebulk.string('Proper', 'Repack', 'Rerip', value='Proper')\n rebulk.string('Fansub', value='Fansub', tags='has-neighbor')\n rebulk.string('Fastsub', value='Fastsub', tags='has-neighbor')\n\n season_words = build_or_pattern([\"seasons?\", \"series?\"])\n complete_articles = build_or_pattern([\"The\"])\n\n def validate_complete(match):\n \"\"\"\n Make sure season words are defined.\n :param match:\n :type match:\n :return:\n :rtype:\n \"\"\"\n children = match.children\n if not children.named('completeWordsBefore') and not children.named('completeWordsAfter'):\n return False\n return True\n\n rebulk.regex('(?P<completeArticle>' + complete_articles + '-)?' +\n '(?P<completeWordsBefore>' + season_words + '-)?' 
+\n 'Complete' + '(?P<completeWordsAfter>-' + season_words + ')?',\n private_names=['completeArticle', 'completeWordsBefore', 'completeWordsAfter'],\n value={'other': 'Complete'},\n tags=['release-group-prefix'],\n validator={'__parent__': compose(seps_surround, validate_complete)})\n rebulk.string('R5', 'RC', value='R5')\n rebulk.regex('Pre-?Air', value='Preair')\n rebulk.regex('(?:PS-?)?Vita', value='PS Vita')\n\n for value in (\n 'Screener', 'Remux', 'Remastered', '3D', 'HD', 'mHD', 'HDLight', 'HQ', 'DDC', 'HR', 'PAL', 'SECAM', 'NTSC',\n 'CC', 'LD', 'MD', 'XXX'):\n rebulk.string(value, value=value)\n rebulk.string('LDTV', value='LD')\n\n for value in ('Limited', 'Complete', 'Classic', 'Unrated', 'LiNE', 'Bonus', 'Trailer', 'FINAL', 'Retail', 'Uncut',\n 'Extended', 'Extended Cut'):\n rebulk.string(value, value=value, tags=['has-neighbor', 'release-group-prefix'])\n\n rebulk.string('VO', 'OV', value='OV', tags='has-neighbor')\n\n rebulk.regex('Scr(?:eener)?', value='Screener', validator=None, tags='other.validate.screener')\n\n rebulk.rules(ValidateHasNeighbor, ValidateHasNeighborAfter, ValidateHasNeighborBefore, ValidateScreenerRule,\n ProperCountRule)\n\n return rebulk\n\n\nclass ProperCountRule(Rule):\n \"\"\"\n Add proper_count property\n \"\"\"\n priority = POST_PROCESS\n\n consequence = AppendMatch\n\n properties = {'proper_count': [None]}\n\n def when(self, matches, context):\n propers = matches.named('other', lambda match: match.value == 'Proper')\n if propers:\n raws = {} # Count distinct raw values\n for proper in propers:\n raws[raw_cleanup(proper.raw)] = proper\n proper_count_match = copy.copy(propers[-1])\n proper_count_match.name = 'proper_count'\n proper_count_match.value = len(raws)\n return proper_count_match\n\n\nclass ValidateHasNeighbor(Rule):\n \"\"\"\n Validate tag has-neighbor\n \"\"\"\n consequence = RemoveMatch\n\n def when(self, matches, context):\n ret = []\n for to_check in matches.range(predicate=lambda match: 'has-neighbor' in match.tags):\n previous_match = matches.previous(to_check, index=0)\n previous_group = matches.markers.previous(to_check, lambda marker: marker.name == 'group', 0)\n if previous_group and (not previous_match or previous_group.end > previous_match.end):\n previous_match = previous_group\n if previous_match and not matches.input_string[previous_match.end:to_check.start].strip(seps):\n break\n next_match = matches.next(to_check, index=0)\n next_group = matches.markers.next(to_check, lambda marker: marker.name == 'group', 0)\n if next_group and (not next_match or next_group.start < next_match.start):\n next_match = next_group\n if next_match and not matches.input_string[to_check.end:next_match.start].strip(seps):\n break\n ret.append(to_check)\n return ret\n\n\nclass ValidateHasNeighborBefore(Rule):\n \"\"\"\n Validate tag has-neighbor-before that previous match exists.\n \"\"\"\n consequence = RemoveMatch\n\n def when(self, matches, context):\n ret = []\n for to_check in matches.range(predicate=lambda match: 'has-neighbor-before' in match.tags):\n next_match = matches.next(to_check, index=0)\n next_group = matches.markers.next(to_check, lambda marker: marker.name == 'group', 0)\n if next_group and (not next_match or next_group.start < next_match.start):\n next_match = next_group\n if next_match and not matches.input_string[to_check.end:next_match.start].strip(seps):\n break\n ret.append(to_check)\n return ret\n\n\nclass ValidateHasNeighborAfter(Rule):\n \"\"\"\n Validate tag has-neighbor-after that next match exists.\n \"\"\"\n consequence = RemoveMatch\n\n 
def when(self, matches, context):\n ret = []\n for to_check in matches.range(predicate=lambda match: 'has-neighbor-after' in match.tags):\n previous_match = matches.previous(to_check, index=0)\n previous_group = matches.markers.previous(to_check, lambda marker: marker.name == 'group', 0)\n if previous_group and (not previous_match or previous_group.end > previous_match.end):\n previous_match = previous_group\n if previous_match and not matches.input_string[previous_match.end:to_check.start].strip(seps):\n break\n ret.append(to_check)\n return ret\n\n\nclass ValidateScreenerRule(Rule):\n \"\"\"\n Validate tag other.validate.screener\n \"\"\"\n consequence = RemoveMatch\n priority = 64\n\n def when(self, matches, context):\n ret = []\n for screener in matches.named('other', lambda match: 'other.validate.screener' in match.tags):\n format_match = matches.previous(screener, lambda match: match.name == 'format', 0)\n if not format_match or matches.input_string[format_match.end:screener.start].strip(seps):\n ret.append(screener)\n return ret\n","sub_path":"venc/lib/python3.4/site-packages/guessit/rules/properties/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":7102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"272305510","text":"# -*- coding: utf-8 -*-\r\n\r\nimport threading\r\nimport datetime\r\n\r\n\r\nclass Delta(threading.Thread):\r\n\r\n def __init__(self, sock, cfg):\r\n self.sock = sock\r\n self.cfg = cfg\r\n\r\n def introduce(self):\r\n from core.message import Message\r\n msg = Message(self.sock, self.cfg)\r\n msg.to_raw('NICK %s 1 %s %s %s %s %s +ioqS * * * :%s'\r\n % (\r\n self.cfg.get('Client', 'NICKNAME'),\r\n datetime.datetime.now().time(),\r\n self.cfg.get('Client', 'USERNAME'),\r\n self.cfg.get('Server', \"HOSTNAME\"),\r\n self.cfg.get('Server', \"NAME\"),\r\n self.cfg.get(\"Server\", \"SID\"),\r\n self.cfg.get('Client', 'REALNAME')\r\n )\r\n )\r\n msg.to_raw(':%s JOIN %s' % (self.cfg.get('Client', \"NICKNAME\"),\r\n self.cfg.get(\"Network\", \"LOGCHAN\")))\r\n msg.to_raw(':%s MODE %s +o %s' % (self.cfg.get('Client', \"NICKNAME\"),\r\n self.cfg.get(\"Network\", \"LOGCHAN\"),\r\n self.cfg.get('Client', \"NICKNAME\")))\r\n\r\n @staticmethod\r\n def handler(sock, cfg, data):\r\n from core.message import Message\r\n\r\n if data[1] == 'PRIVMSG' and data[2].lower() == 'delta@'+cfg.get(\"Server\", \"HOSTNAME\"):\r\n command = data[3].split(\":\")\r\n command = command[1].lower()\r\n nick = data[0].split(':')\r\n nick = nick[1]\r\n\r\n if command == 'help':\r\n msg = Message(sock, cfg)\r\n msg.to_one(cfg.get('Client', \"NICKNAME\"), nick, \"\\x02*** Read the documentation at \\\r\n https://github.com/deltairc/deltairc/wiki.\\x02\")\r\n","sub_path":"modules/delta.py","file_name":"delta.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"281647727","text":"import requests\nfrom bs4 import BeautifulSoup as bs\n\n# top 100 names\nnations = ['USA', 'RUS', 'CHN']\nurl = lambda n: 'http://www.studentsoftheworld.info/penpals/stats.php3?Pays={}'.format(n)\n\nheaders = {\n 'User-Agent': 'My User Agent 1.0',\n 'From': 'jfishersolutions@gmail.com'\n }\n\nwith open('data.csv', 'w') as f:\n\n for nation in nations:\n r = requests.get(url(nation), headers=headers)\n soup = bs(r.content, 'html.parser')\n \n for tag in soup.find_all('nobr'):\n f.write('{0},{1}\\n'.format(str(tag.findNext('font').text).lower(), 
nation))\n\nf.close()\n","sub_path":"name_scrape.py","file_name":"name_scrape.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"412830960","text":"import chainer\nimport chainer.functions as F\n\nfrom chainercv.links import Conv2DBNActiv\nfrom chainercv.links import PickableSequentialChain\n\n\nclass ResBlock(PickableSequentialChain):\n\n \"\"\"A building block for ResNets.\n\n in --> Bottleneck with residual_conv --> Bottleneck * (n_layer - 1) --> out\n\n Args:\n n_layer (int): The number of layers used in the building block.\n in_channels (int): The number of channels of the input array.\n mid_channels (int): The number of channels of intermediate arrays.\n out_channels (int): The number of channels of the output array.\n stride (int or tuple of ints): Stride of filter application.\n initialW (4-D array): Initial weight value used in\n the convolutional layers.\n stride_first (bool): This determines the behavior of the\n bottleneck with a shortcut. If :obj:`True`, apply strided\n convolution with the first convolution layer.\n Otherwise, apply strided convolution with the\n second convolution layer.\n\n \"\"\"\n\n def __init__(self, n_layer, in_channels, mid_channels,\n out_channels, stride, initialW=None, stride_first=False):\n super(ResBlock, self).__init__()\n with self.init_scope():\n self.a = Bottleneck(\n in_channels, mid_channels, out_channels, stride,\n initialW, residual_conv=True, stride_first=stride_first)\n for i in range(n_layer - 1):\n name = 'b{}'.format(i + 1)\n bottleneck = Bottleneck(\n out_channels, mid_channels, out_channels, stride=1,\n initialW=initialW, residual_conv=False)\n self.add_link(name, bottleneck)\n\n\nclass Bottleneck(chainer.Chain):\n\n \"\"\"A bottleneck layer.\n\n Args:\n in_channels (int): The number of channels of the input array.\n mid_channels (int): The number of channels of intermediate arrays.\n out_channels (int): The number of channels of the output array.\n stride (int or tuple of ints): Stride of filter application.\n initialW (4-D array): Initial weight value used in\n the convolutional layers.\n residual_conv (bool): If :obj:`True`, apply a 1x1 convolution\n to the residual.\n stride_first (bool): If :obj:`True`, apply strided convolution\n with the first convolution layer. 
Otherwise, apply\n strided convolution with the second convolution layer.\n\n \"\"\"\n\n def __init__(self, in_channels, mid_channels, out_channels,\n stride=1, initialW=None, residual_conv=False,\n stride_first=False):\n if stride_first:\n first_stride = stride\n second_stride = 1\n else:\n first_stride = 1\n second_stride = stride\n super(Bottleneck, self).__init__()\n with self.init_scope():\n self.conv1 = Conv2DBNActiv(in_channels, mid_channels, 1,\n first_stride, 0, initialW=initialW,\n nobias=True)\n self.conv2 = Conv2DBNActiv(mid_channels, mid_channels, 3,\n second_stride, 1, initialW=initialW,\n nobias=True)\n self.conv3 = Conv2DBNActiv(mid_channels, out_channels, 1, 1, 0,\n initialW=initialW, nobias=True,\n activ=None)\n if residual_conv:\n self.residual_conv = Conv2DBNActiv(\n in_channels, out_channels, 1, stride, 0,\n nobias=True, initialW=initialW, activ=None)\n\n def __call__(self, x):\n h = self.conv1(x)\n h = self.conv2(h)\n h = self.conv3(h)\n\n if hasattr(self, 'residual_conv'):\n residual = self.residual_conv(x)\n else:\n residual = x\n h += residual\n h = F.relu(h)\n return h\n","sub_path":"chainercv/links/model/resnet/resblock.py","file_name":"resblock.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"272343080","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"TEST\")\n\n## configure message logger\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.threshold = cms.untracked.string('INFO')\nprocess.MessageLogger.categories = cms.untracked.vstring('TEST')\n\n## define input\nfrom TopQuarkAnalysis.TopEventProducers.tqafInputFiles_cff import relValTTbar\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(relValTTbar)\n)\n\n## define maximal number of events to loop over\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(10)\n)\n\n## configure process options\nprocess.options = cms.untracked.PSet(\n wantSummary = cms.untracked.bool(True)\n)\n\n## configure geometry & conditions\nprocess.load(\"Configuration.Geometry.GeometryRecoDB_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc')\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\n\nprocess.task = cms.Task()\n\n## std sequence for PAT\nprocess.load(\"PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff\")\nprocess.task.add(process.patCandidatesTask)\n#Temporary customize to the unit tests that fail due to old input samples\nprocess.patTaus.skipMissingTauID = True\nprocess.load(\"PhysicsTools.PatAlgos.selectionLayer1.selectedPatCandidates_cff\")\nprocess.task.add(process.selectedPatCandidatesTask)\n\nprocess.load(\"TopQuarkAnalysis.Examples.TopJetAnalyzer_cfi\")\n\n# register TFileService\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string('analyzeTopJet.root')\n)\n\n## end path\nprocess.p1 = cms.Path(process.analyzeJet, process.task)\n","sub_path":"TopQuarkAnalysis/Examples/test/analyzeTopJet_cfg.py","file_name":"analyzeTopJet_cfg.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"63688419","text":"import numpy as np\nimport dask\nfrom dask.array import learn\n\n\nclass _BigPartialFitMixin(object):\n\n _init_kwargs = 
[]\n _fit_kwargs = []\n\n def __init__(self, **kwargs):\n missing = set(self._init_kwargs) - set(kwargs)\n\n if missing:\n raise TypeError(\"{} requires the keyword arguments {}\".format(\n type(self), missing)\n )\n for kwarg in self._init_kwargs:\n setattr(self, kwarg, kwargs.pop(kwarg))\n super(_BigPartialFitMixin, self).__init__(**kwargs)\n\n @classmethod\n def _get_param_names(cls):\n # Evil hack to make sure repr, get_params work\n # We could also try rewriting __init__ once the class is created\n bases = cls.mro()\n # walk bases until you hit an sklearn class.\n for base in bases:\n if base.__module__.startswith(\"sklearn\"):\n break\n\n # merge the inits\n my_init = cls._init_kwargs\n their_init = base._get_param_names()\n return my_init + their_init\n\n def fit(self, X, y=None, get=None):\n if get is None:\n get = dask.threaded.get\n\n fit_kwargs = {k: getattr(self, k) for k in self._fit_kwargs}\n result = learn.fit(self, X, y, get=get, **fit_kwargs)\n\n # Copy the learned attributes over to self\n # It should go without saying that this is *not* threadsafe\n attrs = {k: v for k, v in vars(result).items() if k.endswith('_')}\n for k, v in attrs.items():\n setattr(self, k, v)\n return self\n\n def predict(self, X, dtype=None):\n predict = super(_BigPartialFitMixin, self).predict\n if dtype is None:\n dtype = self._get_predict_dtype(X)\n return X.map_blocks(predict, dtype=dtype, drop_axis=1)\n\n def _get_predict_dtype(self, X):\n xx = np.zeros((1, X.shape[1]), dtype=X.dtype)\n return super(_BigPartialFitMixin, self).predict(xx).dtype\n\n\n__all__ = [\n '_BigPartialFitMixin'\n]\n","sub_path":"daskml/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"447291706","text":"from flask import render_template\nfrom app import app\n\n@app.route('/')\n@app.route('/index')\ndef index():\n user = { 'nickname': 'Mario' }\n posts = [\n { \n 'author': { 'nickname': 'Arrow' }, \n 'body': 'Mario Rodolfo is Oliver Queen!' \n },\n \n ]\n return render_template(\"index.html\",\n title = 'Home',\n user = user,\n posts = posts)\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"30294008","text":"# -*- encoding:utf-8 -*-\nimport numpy as np\nimport tensorflow as tf\n\n\ndef padding(data, max_len):\n return tf.keras.preprocessing.sequence.pad_sequences(data, max_len, padding='post', truncating='post')\n\ndef eval_map_mrr(qids, aids, preds, labels):\n\t# compute the MAP and MRR metrics\n dic = dict()\n pre_dic = dict()\n for qid, aid, pred, label in zip(qids, aids, preds, labels):\n pre_dic.setdefault(qid, [])\n pre_dic[qid].append([aid, pred, label])\n for qid in pre_dic:\n dic[qid] = sorted(pre_dic[qid], key=lambda k: k[1], reverse=True)\n aid2rank = {aid:[label, rank] for (rank, (aid, pred, label)) in enumerate(dic[qid])}\n dic[qid] = aid2rank\n # correct = 0\n # total = 0\n # for qid in dic:\n # cur_correct = 0\n # for aid in dic[qid]:\n # if dic[qid][aid][0] == 1:\n # cur_correct += 1\n # if cur_correct > 0:\n # correct += 1\n # total += 1\n # print(correct * 1. 
/ total)\n\n MAP = 0.0\n MRR = 0.0\n useful_q_len = 0\n for q_id in dic:\n sort_rank = sorted(dic[q_id].items(), key=lambda k: k[1][1], reverse=False)\n correct = 0\n total = 0\n AP = 0.0\n mrr_mark = False\n for i in range(len(sort_rank)):\n if sort_rank[i][1][0] == 1:\n correct += 1\n if correct == 0:\n continue\n useful_q_len += 1\n correct = 0\n for i in range(len(sort_rank)):\n # compute MRR\n if sort_rank[i][1][0] == 1 and mrr_mark == False:\n MRR += 1.0 / float(i + 1)\n mrr_mark = True\n # compute MAP\n total += 1\n if sort_rank[i][1][0] == 1:\n correct += 1\n AP += float(correct) / float(total)\n \n AP /= float(correct)\n MAP += AP\n\n MAP /= useful_q_len\n MRR /= useful_q_len\n return MAP, MRR\n\ndef build_embedding(in_file, word_dict):\n\t# build the pre-trained embedding matrix\n num_words = max(word_dict.values()) + 1\n dim = int(in_file.split('.')[-2][:-1])\n embeddings = np.zeros((num_words, dim))\n\n if in_file is not None:\n pre_trained = 0\n initialized = {}\n avg_sigma = 0\n avg_mu = 0\n for line in open(in_file).readlines():\n sp = line.split()\n assert len(sp) == dim + 1\n if sp[0] in word_dict:\n initialized[sp[0]] = True\n pre_trained += 1\n embeddings[word_dict[sp[0]]] = [float(x) for x in sp[1:]]\n mu = embeddings[word_dict[sp[0]]].mean()\n #print embeddings[word_dict[sp[0]]]\n sigma = np.std(embeddings[word_dict[sp[0]]])\n avg_mu += mu\n avg_sigma += sigma\n avg_sigma /= 1. * pre_trained\n avg_mu /= 1. * pre_trained\n for w in word_dict:\n if w not in initialized:\n embeddings[word_dict[w]] = np.random.normal(avg_mu, avg_sigma, (dim,))\n print('Pre-trained: %d (%.2f%%)' %\n (pre_trained, pre_trained * 100.0 / num_words))\n return embeddings.astype(np.float32)\n\n\nclass Iterator(object):\n \"\"\"\n Data iterator\n \"\"\"\n def __init__(self, x):\n self.x = x\n self.sample_num = len(self.x)\n\n def next_batch(self, batch_size, shuffle=True):\n # produce X, Y_out, Y_in, X_len, Y_in_len, Y_out_len\n if shuffle:\n np.random.shuffle(self.x)\n l = np.random.randint(0, self.sample_num - batch_size + 1)\n r = l + batch_size\n x_part = self.x[l:r]\n return x_part\n\n def next(self, batch_size, shuffle=False):\n if shuffle:\n np.random.shuffle(self.x)\n l = 0\n while l < self.sample_num:\n r = min(l + batch_size, self.sample_num)\n batch_size = r - l\n x_part = self.x[l:r]\n l += batch_size\n yield x_part\n","sub_path":"ch4/siamese_cnn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"440577913","text":"\"\"\"\nRead M and N and compute C(M,N)\n\"\"\"\n\n# m = int(input('m = '))\n# n = int(input('n = '))\n# fm = 1\n\n# for num in range(1, m + 1):\n# fm *= num\n\n# fn = 1\n# for num in range(1, n + 1):\n# fn *= num\n\n# fmn = 1\n# for num in range(1, m - n + 1):\n# fmn *= num\n\n# print(fm // fn // fmn)\n\n\n# function version\ndef factorial(num):\n \"\"\"\n Compute the factorial\n :param num: a non-negative integer\n :return: the factorial of num\n \"\"\"\n\n result = 1\n for n in range(1, num + 1):\n result *= n\n return result\n\n\nm = int(input('m = '))\nn = int(input('n = '))\n\nprint(factorial(m) // factorial(n) // factorial(m - n))\n","sub_path":"01-foundation/day06/01-func.py","file_name":"01-func.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"561627847","text":"import pandas as pd\r\n\r\ndef df_combine_first(file1, file2, f_type): \r\n if f_type == 'csv':\r\n old_data = pd.read_csv(file1, encoding = \"ISO-8859-1\")\r\n new_data = pd.read_csv(file2, 
encoding = \"ISO-8859-1\")\r\n elif f_type == 'excel':\r\n old_data = pd.read_excel(file1, encoding = \"ISO-8859-1\")\r\n new_data = pd.read_excel(file2, encoding = \"ISO-8859-1\")\r\n else:\r\n return 'Invalid file type'\r\n \r\n old_df = pd.DataFrame(old_data)\r\n new_df = pd.DataFrame(new_data)\r\n\r\n combined_df = new_df.combine_first(old_df) \r\n print(combined_df)\r\n\r\ndf_combine_first('projects/test_data_files/combine_first_old.xlsx', 'projects/test_data_files/combine_first_new.xlsx', 'excel')\r\n\r\n\"\"\"\r\nINPUT:\r\nold df:\r\n Employee_ID first_name salary skills\r\n0 1 John 2000 C++\r\n1 2 Mary 1500 Java\r\n2 3 Sam 3000 Python\r\n3 5 Anne 3500 Scripting\r\n4 6 Maria 1500 Testing\r\n\r\nnew df:\r\n Employee_ID age last_name salary skills\r\n0 1 34 Doe 2000 Perl\r\n1 2 23 Smith 5500 Java\r\n2 3 56 Jacob 3500 Python\r\n3 4 25 Clark 2000 Excel\r\n\r\nOUTPUT:\r\n Employee_ID age first_name last_name salary skills\r\n0 1.0 34.0 John Doe 2000.0 Perl\r\n1 2.0 23.0 Mary Smith 5500.0 Java\r\n2 3.0 56.0 Sam Jacob 3500.0 Python\r\n3 4.0 25.0 Anne Clark 2000.0 Excel\r\n4 6.0 NaN Maria NaN 1500.0 Testing\r\n\r\n\"\"\"\r\n","sub_path":"US_Crime_Analytics/tests/combine_first_ex.py","file_name":"combine_first_ex.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"642451282","text":"from rest_framework.relations import HyperlinkedRelatedField\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom api import models\nfrom rest_framework.routers import DefaultRouter, DynamicRoute, Route\n\n\ndef check_course_prerequisites(course, student):\n ok = True\n for prerequisite in course.prerequisites.all():\n try:\n sicbt = models.StudentInCourseByTeacher.objects.get(course_by_teacher__course=prerequisite, student=student)\n ok = sicbt.passed\n except Exception:\n ok = False\n\n return ok\n\n\ndef check_free_seats(course):\n return course.seats_taken < course.max_seats\n\n\nclass CustomModelViewSet(ModelViewSet):\n def get_queryset(self):\n qs = self.queryset\n qp = self.request.query_params.dict() # query-parameter\n\n if 'limit' in qp:\n qp.pop('limit')\n\n if 'offset' in qp:\n qp.pop('offset')\n\n if 'format' in qp:\n qp.pop('format')\n\n qs = qs.filter(**qp)\n return qs\n\n\nclass CustomDefaultRouter(DefaultRouter):\n \"\"\"\n Replace patch with post method in detail-view-route\n \"\"\"\n routes = [\n # List route.\n Route(\n url=r'^{prefix}{trailing_slash}$',\n mapping={\n 'get': 'list',\n 'post': 'create'\n },\n name='{basename}-list',\n detail=False,\n initkwargs={'suffix': 'List'}\n ),\n # Dynamically generated list routes. Generated using\n # @action(detail=False) decorator on methods of the viewset.\n DynamicRoute(\n url=r'^{prefix}/{url_path}{trailing_slash}$',\n name='{basename}-{url_name}',\n detail=False,\n initkwargs={}\n ),\n # Detail route.\n Route(\n url=r'^{prefix}/{lookup}{trailing_slash}$',\n mapping={\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'post': 'partial_update',\n 'delete': 'destroy'\n },\n name='{basename}-detail',\n detail=True,\n initkwargs={'suffix': 'Instance'}\n ),\n # Dynamically generated detail routes. 
Generated using\n # @action(detail=True) decorator on methods of the viewset.\n DynamicRoute(\n url=r'^{prefix}/{lookup}/{url_path}{trailing_slash}$',\n name='{basename}-{url_name}',\n detail=True,\n initkwargs={}\n ),\n ]\n","sub_path":"rest_poc/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"458029621","text":"from flask import Flask, render_template, request, url_for,redirect,session\nimport sqlite3 as sql\n\nconn = sql.connect(\"healthbuddy.db\")\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\n@app.route(\"/doctor_signup\")\ndef doctor():\n return render_template(\"doc-signup.html\")\n\n\n@app.route(\"/doc_put\", methods=[\"POST\", \"GET\"])\ndef doc_put():\n if request.method == \"POST\":\n try:\n adoc_id = request.form[\"Doc-id\"]\n adoc_name = request.form[\"name\"]\n adoc_qual = request.form[\"degree\"]\n adoc_p = request.form[\"pass\"]\n adoc_con=request.form[\"contact\"]\n adoc_city=request.form[\"city\"]\n adoc_email=request.form[\"mailid\"]\n with sql.connect(\"healthbuddy.db\") as con:\n cur = con.cursor()\n\n cur.execute(\"INSERT INTO doc_profile (doc_id, doc_name, doc_qual, doc_p, doc_con,doc_email,doc_city)VALUES(?, ?, ?, ?, ?, ?,?)\",(adoc_id, adoc_name, adoc_qual, adoc_p, adoc_con,adoc_email, adoc_city) )\n\n con.commit()\n msgd = \"You have successfully signed up.\"\n except:\n con.rollback()\n msgd = \"error: Please try again\"\n\n finally:\n return render_template(\"result.html\", msg=msgd)\n con.close()\n\n@app.route(\"/patient_signup\")\ndef patient():\n return render_template(\"patient-signup.html\")\n\n@app.route(\"/patient_put\", methods=[\"POST\", \"GET\"])\ndef patient_put():\n if request.method == \"POST\":\n try:\n bp_id = request.form[\"pid\"]\n bp_name = request.form[\"pname\"]\n bp_email = request.form[\"pmailid\"]\n bp_p = request.form[\"ppass\"]\n bp_con=request.form[\"cno\"]\n bp_add1=request.form[\"phouse\"]\n bp_add2=request.form[\"pstreet\"]\n bp_city=request.form[\"pcityname\"]\n bp_pincode=request.form[\"p-Pin-code\"]\n bp_history=request.form[\"phis\"]\n\n with sql.connect(\"healthbuddy.db\") as con:\n cur = con.cursor()\n\n cur.execute(\"INSERT INTO p_profile (p_id, p_name, p_email, p_p, p_con, p_add1, p_add2,p_city, p_pincode, p_history) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\",(bp_id, bp_name, bp_email, bp_p, bp_con, bp_add1, bp_add2,bp_city, bp_pincode,bp_history) )\n\n con.commit()\n msgp = \"You have successfully signed up.\"\n except:\n con.rollback()\n msgp = \"error: Please try again\"\n\n finally:\n return render_template(\"result.html\", msg=msgp)\n con.close()\n\n# @app.route(\"/listd\")\n# def list():\n# con = sql.connect(\"healthbuddy.db\")\n# con.row_factory = sql.Row\n#\n# cur = con.cursor()\n# cur.execute(\"select * from doc_profile\")\n#\n# rows = cur.fetchall();\n# return render_template(\"listdoctor.html\", rows=rows)\n\n@app.route(\"/doctor_login\")\ndef doctor_log():\n return render_template(\"Doc-log.html\")\n\n@app.route(\"/doctor_after_login\")\ndef doctor_after_log():\n return render_template(\"doc-after-login.html\")\n\n@app.route(\"/patient_login\")\ndef patient_log():\n return render_template(\"Pat-log.html\")\n\n@app.route(\"/check_id\")\ndef check_id():\n if request.method == \"POST\":\n try:\n #session[\"u_id\"] = request.form(\"U-id\")\n uid= session[\"u_id\"]\n with sql.connect(\"healthbuddy.db\") as con:\n con.row_factory = 
sql.Row\n                cur = con.cursor()\n                cur.execute(\"select * from p_info where p_id = ?\", (uid,))\n                con.commit()\n\n                rows = cur.fetchall()\n                return render_template(\"patient-histroy.html\", rows=rows)\n        except:\n            con.rollback()\n\n# @app.route(\"/patient_details\")\n# def pat_deatils():\n#     id=session[\"u_id\"]\n#     con = sql.connect(\"healthbuddy.db\")\n#     con.row_factory = sql.Row\n#\n#     cur = con.cursor()\n#     cur.execute(\"select * from p_info where p_id = 'id'\")\n#\n#     rows = cur.fetchall();\n#     return render_template(\"pat-details.html\", rows=rows)\n\nif __name__ == \"__main__\":\n    app.run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"392668680","text":"import pandas as pd\nimport numpy as np\n\n\ndata = pd.read_csv('../data_origin/student_op.csv', header=0)\ndata = data.set_index('STUDENTCODE')\ndata = data.drop(['tz_students', 'FIN_JOB_NUM', 'DT'], axis=1)\n\nw_mean_data = []\nindex_name = []\ni = 0\nwhile i < len(data):\n    w_mean_data.append(np.std(data.iloc[i:i+6].values * np.array([[1], [1], [1], [2], [2], [6]]), axis=0))\n    index_name.append(data.index[i])\n    i += 6\ndata = pd.DataFrame(w_mean_data, index_name, columns=['w_std'+name for name in data.columns])\ndata['STUDENTCODE'] = data.index\n\n\ndata2 = pd.read_csv('../feature_selection/feature.csv', header=0)\ndata = pd.merge(data2, data, on='STUDENTCODE', how='left')\nprint(len(data))\ndata.to_csv('../feature_selection/feature.csv', index=False, encoding='utf-8')\n","sub_path":"data_process/op_w_std.py","file_name":"op_w_std.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"271303774","text":"# Your function definition goes here\ndef count_case(strengur):\n    storir = 0\n    litlir = 0\n    for stafur in strengur:\n        if stafur.isupper():\n            storir += 1\n        elif stafur.islower():\n            litlir += 1\n    return storir, litlir\n\nuser_input = input(\"Enter a string: \")\n\n# Call the function here\nupper, lower = count_case(user_input)\n\nprint(\"Upper case count: \", upper)\nprint(\"Lower case count: \", lower)","sub_path":"Tímaverkefni/Timaverkefni8/8.2.py","file_name":"8.2.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"36595922","text":"# Comma Code \n# Say you have a list value like this:\n# spam = ['apples', 'bananas', 'tofu', 'cats']\n# Write a function that takes a list value as an argument and returns a string with all the items separated by a comma and a space, \n# with and inserted before the last item. For example, passing the previous spam list to the function would return \n# 'apples, bananas, tofu, and cats'. 
But your function should be able to work with any list value passed to it.\n\nspam = ['apples', 'bananas', 'tofu', 'cats', 'dogs', 'snakes']\na_string = ''\n\n# Returns a string with all the items separated by a comman and a space with *and*\n# inserted before the last item\ndef spam_func(my_list):\n new_string = ''\n for i in range(len(my_list)):\n if i < len(my_list) - 1:\n new_string = new_string + my_list[i] + ', '\n else:\n new_string = new_string + 'and ' + my_list[i]\n return new_string\n\na_string = spam_func(spam)\nprint(a_string)","sub_path":"MyPythonScripts/commaCode.py","file_name":"commaCode.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"452694666","text":"# Future\nfrom __future__ import division, print_function, unicode_literals\n\n# Standard Library\nfrom builtins import str\nfrom copy import copy\n\n# Third Party\nfrom dateutil.parser import parse as dateparser\nfrom future.utils import python_2_unicode_compatible\n\n# Local\nfrom .exceptions import DuplicateObjectError\nfrom .toolbox import get_id, merge_dicts\n\n\n@python_2_unicode_compatible\nclass APIResults(object):\n \"\"\"Class for encapsulating paginated list results from the API\"\"\"\n\n def __init__(\n self, resource, client, response, extra=None, next_=None, previous=None\n ):\n if extra is None:\n extra = {}\n self.extra = extra\n\n self.resource = resource\n self.client = client\n json = response.json()\n\n self.count = json.get(\"count\")\n self.next_url = json[\"next\"]\n self.previous_url = json[\"previous\"]\n self._next = next_\n self._previous = previous\n self.results = [\n resource(client, merge_dicts(r, self.extra)) for r in json[\"results\"]\n ]\n\n def __repr__(self):\n return \"\".format(\n self.__class__.__name__, self.id, self\n ) # pragma: no cover\n\n def __eq__(self, obj):\n return isinstance(obj, type(self)) and self.id == obj.id\n\n def put(self):\n \"\"\"Alias for save\"\"\"\n return self.save()\n\n def save(self):\n data = {f: getattr(self, f) for f in self.writable_fields if hasattr(self, f)}\n self._client.put(\"{}/{}/\".format(self.api_path, self.id), json=data)\n\n def delete(self):\n self._client.delete(\"{}/{}/\".format(self.api_path, self.id))\n\n\n@python_2_unicode_compatible\nclass APISet(list):\n def __init__(self, iterable, resource):\n super(APISet, self).__init__(iterable)\n self.resource = resource\n if not all(isinstance(obj, self.resource) for obj in self):\n raise TypeError(\n \"Only {} can be added to this list\".format(\n self.resource.__class__.__name__\n )\n )\n ids = [obj.id for obj in self]\n for id_ in ids:\n if ids.count(id_) > 1:\n raise DuplicateObjectError(\n \"Object with ID {} appears in the list more than once\".format(id_)\n )\n\n def append(self, obj):\n if not isinstance(obj, self.resource):\n raise TypeError(\n \"Only {} can be added to this list\".format(\n self.resource.__class__.__name__\n )\n )\n if obj.id in [i.id for i in self]:\n raise DuplicateObjectError(\n \"Object with ID {} appears in the list more than once\".format(obj.id)\n )\n super(APISet, self).append(copy(obj))\n\n def add(self, obj):\n if not isinstance(obj, self.resource):\n raise TypeError(\n \"Only {} can be added to this list\".format(\n self.resource.__class__.__name__\n )\n )\n # skip duplicates silently\n if obj.id not in [i.id for i in self]:\n super(APISet, self).append(copy(obj))\n\n def extend(self, list_):\n if not all(isinstance(obj, self.resource) for obj in list_):\n 
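# reject the whole batch if any element is not the wrapped resource type\n            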
raise TypeError(\n                \"Only {} can be added to this list\".format(\n                    self.resource.__class__.__name__\n                )\n            )\n        ids = [obj.id for obj in self + list_]\n        for id_ in ids:\n            if ids.count(id_) > 1:\n                raise DuplicateObjectError(\n                    \"Object with ID {} appears in the list more than once\".format(id_)\n                )\n        super(APISet, self).extend(copy(obj) for obj in list_)\n","sub_path":"documentcloud/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"645096656","text":"from lufei import models\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ViewSetMixin\nfrom rest_framework.viewsets import ModelViewSet # bundles the full set of CRUD actions\nfrom lufei.serializers.course import CourseSerializer,CourseDetailSerializer\nfrom lufei.auth.auth import LuffAuth\n\n\n\n# APIView exposes the API endpoints\nclass CourseView(ViewSetMixin, APIView):\n\n    def list(self,request, *args, **kwargs):\n        \"\"\"\n        Course list endpoint\n        \"\"\"\n        ret = {'code':200,'data':None}\n        try:\n            queryset = models.Course.objects.all()\n            print(queryset)\n            ser = CourseSerializer(instance=queryset, many=True)\n            ret['data'] = ser.data\n\n        except Exception as e:\n            ret['code'] = 500\n            ret['data'] = 'failed to fetch the course list'\n        return Response(ret)\n\n    def retrieve(self,request, *args, **kwargs):\n        \"\"\"\n        Course detail endpoint\n        \"\"\"\n        ret = {'code': 200, 'data': None}\n\n        try:\n            pk = kwargs.get('pk')\n            print(pk)\n            # course detail object\n            obj = models.CourseDetail.objects.filter(course_id=int(pk)).first()\n            print(obj)\n            ser = CourseDetailSerializer(instance=obj, many=False)\n            ret['data'] = ser.data\n\n        except Exception as e:\n            ret['code'] = 204\n            ret['data'] = 'the server processed the request but returned no data; the record may not exist'\n\n        return Response(ret)\n\n\nclass MicroView(APIView):\n    authentication_classes = [LuffAuth,]\n\n    def get(self,request, *args, **kwargs):\n\n        ret= {'code': 1000, 'title':'薇职位'}\n        return Response(ret)\n\n\n","sub_path":"lufei/views/course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"196439495","text":"from __future__ import annotations\n\nfrom math import floor, sqrt\nfrom pathlib import Path\nfrom typing import Union\n\nfrom numpy import float64, full, nan, uint8, zeros\n\nfrom ._ffi import ffi, lib\nfrom ._typing import CData, DtypeLike, Genotype\n\n__all__ = [\"bgen_file\"]\n\n\nclass bgen_file:\n    def __init__(self, filepath: Union[str, Path]):\n        self._filepath = Path(filepath)\n        self._bgen_file: CData = ffi.NULL\n        self._bgen_file = lib.bgen_file_open(bytes(self._filepath))\n        if self._bgen_file == ffi.NULL:\n            raise RuntimeError(f\"Failed to open {filepath}.\")\n\n    @property\n    def filepath(self) -> Path:\n        return self._filepath\n\n    @property\n    def nvariants(self) -> int:\n        return lib.bgen_file_nvariants(self._bgen_file)\n\n    @property\n    def nsamples(self) -> int:\n        return lib.bgen_file_nsamples(self._bgen_file)\n\n    @property\n    def contain_samples(self) -> bool:\n        return lib.bgen_file_contain_samples(self._bgen_file)\n\n    def read_samples(self) -> DtypeLike:\n        nsamples = self.nsamples\n        bgen_samples: CData = lib.bgen_file_read_samples(self._bgen_file)\n        if bgen_samples == ffi.NULL:\n            raise RuntimeError(\"Could not fetch samples from the bgen file.\")\n\n        try:\n            samples_max_len = ffi.new(\"uint32_t[]\", 1)\n            lib.read_samples_part1(bgen_samples, nsamples, samples_max_len)\n            samples = zeros(nsamples, dtype=f\"S{samples_max_len[0]}\")\n            
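# second pass fills the pre-allocated fixed-width byte buffer with the sample ids\n            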
lib.read_samples_part2(\n bgen_samples,\n nsamples,\n ffi.from_buffer(\"char[]\", samples),\n samples_max_len[0],\n )\n finally:\n lib.bgen_samples_destroy(bgen_samples)\n\n return samples\n\n def create_metafile(self, filepath: Union[str, Path], verbose=True):\n n = estimate_best_npartitions(self.nvariants)\n filepath = Path(filepath)\n\n mf = lib.bgen_metafile_create(self._bgen_file, bytes(filepath), n, verbose)\n if mf == ffi.NULL:\n raise RuntimeError(f\"Error while creating metafile {filepath}.\")\n\n lib.bgen_metafile_close(mf)\n\n def read_genotype(self, offset: int) -> Genotype:\n gt: CData = lib.bgen_file_open_genotype(self._bgen_file, offset)\n if gt == ffi.NULL:\n raise RuntimeError(f\"Could not open genotype (offset {offset}).\")\n\n nsamples = self.nsamples\n ncombs = lib.bgen_genotype_ncombs(gt)\n probs = full((nsamples, ncombs), nan, dtype=float64)\n err: int = lib.bgen_genotype_read(gt, ffi.cast(\"double *\", probs.ctypes.data))\n if err != 0:\n msg = f\"Could not read genotype probabilities (offset {offset}).\"\n raise RuntimeError(msg)\n\n phased = lib.bgen_genotype_phased(gt)\n\n ploidy = full(nsamples, 0, dtype=uint8)\n lib.read_ploidy(gt, ffi.cast(\"uint8_t *\", ploidy.ctypes.data), nsamples)\n\n missing = full(nsamples, 0, dtype=bool)\n lib.read_missing(gt, ffi.cast(\"bool *\", missing.ctypes.data), nsamples)\n\n lib.bgen_genotype_close(gt)\n\n return Genotype(probs, phased, ploidy, missing)\n\n def close(self):\n if self._bgen_file != ffi.NULL:\n lib.bgen_file_close(self._bgen_file)\n\n def __enter__(self) -> bgen_file:\n return self\n\n def __exit__(self, *_):\n self.close()\n\n\ndef estimate_best_npartitions(nvariants: int) -> int:\n min_variants = 128\n m = max(min(min_variants, nvariants), floor(sqrt(nvariants)))\n return nvariants // m\n","sub_path":"cbgen/_bgen_file.py","file_name":"_bgen_file.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"422141700","text":"'''create qmap using finaldb and qid'''\n\n\nimport pickle\nf=open(\"finaldb.pk\",\"rb\")\ndb=pickle.load(f)\nf.close()\n\nf=open(\"qid.pk\",\"rb\")\nqid=pickle.load(f)\nf.close()\n\nqmap=dict()\nfor i in qid:\n qmap[i]=0\n\n\nfor i in db:\n for j in i:\n qmap[j]+=1\n\nf=open(\"qmap.pk\",\"wb\")\npickle.dump(qmap,f)\nf.close()\n","sub_path":"database/createqmap.py","file_name":"createqmap.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"617712356","text":"# Ciclo for\n# ¿Como imprimir el primer y ultimo valor de un ciclo For antes de un cambio?\n\ninicio = int(input(\"Inicio: \"))\nfin = int(input(\"Fin: \"))\n\n# imprimir del 1 al 21\n# inicio = 1\n# fin = 21\nprint(inicio)\nfor i in range(inicio-1, fin, 7):\n i = 7+i\n print(i)","sub_path":"09-Ciclo-for_de-7-en-7.py","file_name":"09-Ciclo-for_de-7-en-7.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"77077909","text":"# -*- coding:utf-8 -*-\n__author__ = 'Jackie'\n\nimport os\nimport math\nfrom socket import *\nimport logging\nimport datetime\nimport pickle\nimport threading\nimport sys\nimport shutil\nfrom utils import constant as CONS\n\nclass Client:\n #initialize the server\n def __init__(self, port):\n self.uploadFileThreadNum = 10\n self.downloadFileThreadNum = 10\n self.threads = []\n self.fileName = \"\"\n logging.info('client start...')\n\n # 
上传文件多线程方法\n def uploadFileMutiThreading(self, serverInforLi, start, end):\n if not serverInforLi:\n logging.warning(\"can't get server information\")\n return -1\n if len(serverInforLi) < 3 or len(serverInforLi) > 4:\n logging.warning(\"active server is less than Three or more than Four!\")\n return -1\n\n server1FileEndNum = serverInforLi[0][\"blockEnd\"]\n currentPosition = start\n with open(self.fileName, 'rb') as f:\n while currentPosition <= end:\n if len(serverInforLi) == 3:\n backServerIP = serverInforLi[2][\"ip\"]\n backServerPort = serverInforLi[2][\"port\"]\n if currentPosition <= server1FileEndNum:\n serverIP = serverInforLi[0][\"ip\"]\n serverPort = serverInforLi[0][\"port\"]\n else:\n serverIP = serverInforLi[1][\"ip\"]\n serverPort = serverInforLi[1][\"port\"]\n else:\n if currentPosition <= server1FileEndNum:\n serverIP = serverInforLi[0][\"ip\"]\n serverPort = serverInforLi[0][\"port\"]\n backServerIP = serverInforLi[2][\"ip\"]\n backServerPort = serverInforLi[2][\"port\"]\n else:\n serverIP = serverInforLi[1][\"ip\"]\n serverPort = serverInforLi[1][\"port\"]\n backServerIP = serverInforLi[3][\"ip\"]\n backServerPort = serverInforLi[3][\"port\"]\n print(\"Upload plan: currentPosition:{0}, server:{1} {2},backServer:{3} {4}\".format(currentPosition, serverIP,serverPort, backServerIP, backServerPort))\n f.seek(int((currentPosition - 1)*CONS.BLOCK_SIZE))\n # print(\"filePosition:\", f.tell())\n\n msgType = CONS.from_client_01\n fileNameLi = self.fileName.split('.')\n if len(fileNameLi) >= 2:\n fileNumber = '0' * (CONS.FILE_NAME_NUMBER_LENGTH - len(str(currentPosition))) + str(currentPosition)\n fileName = fileNameLi[0] + fileNumber + '.' + fileNameLi[1]\n fileName = fileName + (CONS.FILE_NAME_LENGTH-len(fileName)) * \" \"\n # print(\"filename\", fileName)\n else:\n logging.info('File type is wrong!')\n\n packetDataDic = dict()#包括msgType, filename, back file host ip and port\n packetDataDic['msgType'] = msgType\n packetDataDic['fileName'] = fileName\n packetDataDic['backServerIP'] = backServerIP + \" \" * (CONS.SERVER_IP_LENGTH - len(str(backServerIP)))\n packetDataDic['backServerPort'] = str(backServerPort) + \" \" * (CONS.SERVER_PORT_LENGTH - len(str(backServerPort)))\n packetData = pickle.dumps(packetDataDic)\n\n #test currentPosition\n try:\n client = socket(AF_INET, SOCK_STREAM)\n client.connect((serverIP, serverPort))\n\n #传递上传文件基本信息\n # print(\"packetData length:\", len(packetData))\n client.send(packetData)\n\n # fileSizeCount = 0\n logging.info(\"File:{0} chunk {1} upload to server:{2}:{3} start...\".format(self.fileName, currentPosition, serverIP, serverPort))\n for i in range(int(CONS.BLOCK_SIZE/CONS.ONCE_READ_FILE_SIZE)):\n content = f.read(CONS.ONCE_READ_FILE_SIZE)\n # fileSizeCount = fileSizeCount + len(content)\n if not content:\n # logging.info(\"File{0} chunk {1} upload finished!\".format(self.fileName, currentPosition))\n break\n client.send(content)\n logging.info(\"File:{0} chunk {1} upload to server:{2}:{3} finished!\".format(self.fileName, currentPosition, serverIP, serverPort))\n # print(\"length of file send\", fileSizeCount)\n client.close()\n except Exception as e: #不考虑上传失败\n client.close()\n logging.error(\"send file:{0} to server:{1} fail! 
{2}\".format(fileName, serverIP, e))\n\n currentPosition = currentPosition + 1\n\n def uploadFile(self, fileName):\n self.fileName = fileName\n blockSize = 0\n if os.path.isfile(self.fileName): #block 大小为64M\n fileSize = os.path.getsize(fileName)\n # print(fileSize)\n blockSize = math.ceil(fileSize/CONS.BLOCK_SIZE)\n\n # self.uploadFileThreadNum = blockSize ?? test the best one\n if blockSize > 0:\n serverInforLi = self.getInfoFromMonitor(CONS.from_client_03, fileName, blockSize)\n eachThreadBlockCeilSize = math.ceil(blockSize/self.uploadFileThreadNum)\n\n if eachThreadBlockCeilSize <= 1:\n self.uploadFileThreadNum = blockSize\n\n eachThreadBlockFloorSize = math.floor(blockSize/self.uploadFileThreadNum)\n\n leftBlockSizeNum = blockSize - eachThreadBlockFloorSize * self.uploadFileThreadNum\n\n tempLi = list(range(self.uploadFileThreadNum))\n\n currentPos = 0 #控制各线程文件上传起始位置\n label = -1\n for i, each in enumerate(tempLi):\n if i < leftBlockSizeNum:\n start = eachThreadBlockCeilSize * i + 1\n end = eachThreadBlockCeilSize * (i + 1)\n if i == (leftBlockSizeNum - 1):\n currentPos = end\n label = i\n else:\n start = currentPos + eachThreadBlockFloorSize * (i - label - 1) + 1\n end = currentPos + eachThreadBlockFloorSize * (i - label)\n\n if i == len(tempLi)-1:\n end = blockSize\n print('start:{0}, end:{1}'.format(start, end))\n\n thread = threading.Thread(target=self.uploadFileMutiThreading,\n args=(serverInforLi, start, end))\n self.threads.append(thread)\n thread.start()\n logging.info(\"start threading{0} to upload file {3} blocks {1}-{2}\".format(i+1, start, end, self.fileName))\n else:\n logging.info(\"file size is not right\")\n\n # 下载文件多线程方法\n def downloadFileMutiThreading(self, serverInforLi, start, end):\n firstBlockSize = serverInforLi[0][\"blockEnd\"]\n\n currentPosition = start\n while currentPosition <= end:\n msgType = CONS.from_client_02\n fileNameLi = self.fileName.split('.')\n if len(fileNameLi) >= 2:\n fileNumber = '0' * (CONS.FILE_NAME_NUMBER_LENGTH - len(str(currentPosition))) + str(currentPosition)\n fileName = fileNameLi[0] + fileNumber + '.' 
+ fileNameLi[1]\n # fileName = fileName + (CONS.FILE_NAME_LENGTH - len(fileName)) * \" \"\n # print(\"filename\", fileName)\n else:\n logging.info('File type is wrong!')\n\n packetDataDic = dict()#包括msgType, filename, back file host ip and port\n packetDataDic['msgType'] = msgType\n packetDataDic['fileName'] = fileName\n packetData = pickle.dumps(packetDataDic)\n\n try:\n if currentPosition <= firstBlockSize:\n serverIP = serverInforLi[0][\"ip\"]\n serverPort = serverInforLi[0][\"port\"]\n else:\n serverIP = serverInforLi[1][\"ip\"]\n serverPort = serverInforLi[1][\"port\"]\n\n client = socket(AF_INET, SOCK_STREAM)\n client.connect((serverIP, serverPort))\n\n client.send(packetData)\n\n if not os.path.isdir('temp'):\n os.mkdir('temp')\n\n with open('temp/{0}'.format(fileName), 'wb') as f:\n # print(\"position\", currentPosition)\n # f.seek(CONS.BLOCK_SIZE * (currentPosition - 1))\n logging.info(\"File:{0} chunk {1} download to server:{2}:{3} finished!\".format(self.fileName, currentPosition, serverIP, serverPort))\n while True:\n revContent = client.recv(CONS.ONCE_READ_FILE_SIZE)\n # fileSizeCount = fileSizeCount + len(revContent)\n if not revContent:\n print(\"file position:\", f.tell())\n break\n f.write(revContent)\n logging.info(\"File:{0} chunk {1} download to server:{2}:{3} finished!\".format(self.fileName, currentPosition, serverIP, serverPort))\n client.close()\n logging.info(\"download file:{0} from server:{1}\".format(fileName, serverIP))\n except Exception as e: #不考虑上传失败\n client.close()\n logging.error(\"send file:{0} to server:{1} fail! {2}\".format(fileName, serverIP, e))\n\n currentPosition = currentPosition + 1\n\n def downloadFile(self, fileName):\n self.fileName = fileName\n serverInforLi = self.getInfoFromMonitor(CONS.from_client_04, fileName)\n\n if len(serverInforLi) < 2:\n logging.warning(\"服务器不存在此文件或数量不足以支持下载此文件!\")\n return -1\n\n\n secondBlockSize = serverInforLi[1][\"blockEnd\"]\n\n blockSize = secondBlockSize\n # self.downloadFileThreadNum = blockSize ?? 
test the best one\n if serverInforLi:\n eachThreadBlockCeilSize = math.ceil(blockSize/self.downloadFileThreadNum)\n\n if eachThreadBlockCeilSize <= 1:\n self.downloadFileThreadNum = blockSize\n\n eachThreadBlockFloorSize = math.floor(blockSize/self.downloadFileThreadNum)\n\n leftBlockSizeNum = blockSize - eachThreadBlockFloorSize * self.downloadFileThreadNum\n\n tempLi = list(range(self.downloadFileThreadNum))\n\n currentPos = 0 #控制各线程文件上传起始位置\n label = -1\n for i, each in enumerate(tempLi):\n if i < leftBlockSizeNum:\n start = eachThreadBlockCeilSize * i + 1\n end = eachThreadBlockCeilSize * (i + 1)\n if i == (leftBlockSizeNum - 1):\n currentPos = end\n label = i\n else:\n start = currentPos + eachThreadBlockFloorSize * (i - label - 1) + 1\n end = currentPos + eachThreadBlockFloorSize * (i - label)\n\n if i == len(tempLi)-1:\n end = blockSize\n print('start:{0}, end:{1}'.format(start, end))\n\n thread = threading.Thread(target=self.downloadFileMutiThreading,\n args=(serverInforLi, start, end))\n self.threads.append(thread)\n thread.start()\n logging.info(\"start threading{0} to download file{3} blocks {1}-{2}\".format(i+1, start, end, self.fileName))\n else:\n logging.info(\"file size is not right\")\n\n def getInfoFromMonitor(self, msgType, fileName, blockSize = 0):\n recData = None\n try:\n client = socket(AF_INET, SOCK_STREAM)\n client.connect((CONS.MONITOR__SERVER_IP, CONS.MONITOR__SERVER_PORT))\n\n packetDataDic = dict()\n packetDataDic['msgType'] = msgType\n packetDataDic['fileName'] = fileName\n packetDataDic['blockSize'] = blockSize\n packetData = pickle.dumps(packetDataDic)\n\n client.send(packetData)#发送上传文件信息\n logging.info(\"Send request to monitor\".format(packetDataDic))\n\n recData = client.recv(1024)\n recData = pickle.loads(recData)\n logging.info(\"get server information from monitor\".format(recData))\n client.close()\n except Exception as e:\n client.close()\n logging.error('Connect to monitor fail.{0}'.format(e))\n return recData\n\n# initialize the log\ndef initLog(logName):\n # createa log folder\n if not os.path.isdir('log'):\n os.mkdir('log')\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M', filename='log/' + logName)\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(levelname)-6s:%(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)\n\n\nif __name__ == \"__main__\":\n initLog('customer.log')\n print(\"********Welcome to client**********\")\n client = Client(CONS.PORT_START + 5)\n\n commandLis = [\"download\", \"upload\", \"abort\"]\n while True:\n print(\"Please select the operation you want::\")\n inputCmd = input(\"download, upload, abort?\\n\")\n fileName = \"\"\n if inputCmd == commandLis[0]:\n fileName = input(\"Input the file name:\")\n starttime = datetime.datetime.now()\n logging.info('Start to download File {0}...{1}'.format(fileName, starttime))\n client.downloadFile(fileName)\n for t in client.threads:\n t.join()\n\n if os.path.isdir(\"temp\"):\n fileList = os.listdir(\"temp\")\n\n if not os.path.isdir(\"download\"):\n os.mkdir(\"download\")\n\n with open('download/{0}'.format(client.fileName), 'wb') as f:\n for tempFile in fileList:\n 
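# append each downloaded chunk to the final output file\n                        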
shutil.copyfileobj(open('temp/{0}'.format(tempFile), 'rb'), f)\n\n shutil.rmtree(\"temp\")\n\n endtime = datetime.datetime.now()\n\n logging.info('Client download file{0} end time:{1}'.format(fileName, endtime))\n logging.info('Total time to download the file{1}:{0}'.format(endtime - starttime,fileName))\n elif inputCmd == commandLis[1]:\n starttime = datetime.datetime.now()\n fileName = input(\"Input the file name:\")\n logging.info('start to upload file{0}...{1}'.format(fileName, starttime))\n client.uploadFile(fileName)\n for t in client.threads:\n t.join()\n endtime = datetime.datetime.now()\n\n logging.info('Client upload file{0} end time:{1}'.format(fileName, endtime))\n logging.info('Total time to upload the file{1}:{0}'.format(endtime - starttime, fileName))\n elif inputCmd == commandLis[2]:\n print(\"Thank you for your use^=^\")\n sys.exit()\n else:\n print(\"Operation is no right, try again\")\n\n\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":15721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"159989110","text":"#!Measurement\n'''\nbaseline:\n after: true\n before: false\n counts: 3\n detector: H1\n mass: 39.59\ndefault_fits: nominal_fasad\nequilibration:\n eqtime: 5\n inlet: R\n inlet_delay: 3\n outlet: S\n use_extraction_eqtime: false\nmulticollect:\n counts: 100\n detector: H1\n isotope: Ar40\npeakcenter:\n after: false\n before: false\n detector: H1\n isotope: Ar40\npeakhop:\n hops_name: hop\n use_peak_hop: false\n'''\n\n#equilibration\n#EQ_TIME= 5.0\n\n#PEAK HOP\nUSE_PEAK_HOP= False\n#PEAK_HOPS=[((('Ar40','H1'), 'CDD'), 10),\n# ((('Ar39','CDD')), 10),\n# ((('Ar38', 'CDD')), 10),\n# ((('Ar37', 'CDD')), 10),\n# ((('Ar36', 'CDD')), 10),\n # ]\n\n\nACTIVE_DETECTORS=('H2','H1','AX','L1','L2', 'CDD')\n#FITS=('Ar41:linear','Ar40:linear', 'Ar39:parabolic','Ar38:parabolic','Ar37:parabolic','Ar36:parabolic')\n\ndef main():\n #this is a comment\n '''\n this is a multiline\n comment aka docstring\n '''\n #display information with info(msg)\n info('unknown measurement script')\n\n #set the spectrometer parameters\n #provide a value\n set_source_parameters(YSymmetry=10)\n\n #or leave blank and values are loaded from a config file (setupfiles/spectrometer/config.cfg)\n set_source_optics()\n\n #set the cdd operating voltage\n set_cdd_operating_voltage(100)\n\n if mx.peakcenter.before:\n peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)\n\n #open a plot panel for this detectors\n activate_detectors(*ACTIVE_DETECTORS)\n\n if mx.baseline.before:\n baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector)\n\n\n #position mass spectrometer\n position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)\n\n #gas is staged behind inlet\n\n #post equilibration script triggered after eqtime elapsed\n #equilibrate is non blocking\n #so use either a sniff of sleep as a placeholder until eq finished\n if mx.equilibration.use_extraction_eqtime:\n e = ex.eqtime\n else:\n e = mx.equilibration.eqtime\n\n equilibrate(eqtime=e, inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet)\n\n #equilibrate returns immediately after the inlet opens\n set_time_zero()\n\n sniff(e)\n #set default regression\n set_fits()\n set_baseline_fits()\n if USE_PEAK_HOP:\n '''\n\n hop = (Isotope, DetA)[,DetB,DetC...], counts\n\n ex.\n hops=[((('Ar40','H1'),'CDD'), 10),\n ((('Ar39','CDD')), 30),\n\n '''\n peak_hop(hops=PEAK_HOPS)\n else:\n #multicollect on 
active detectors\n multicollect(ncounts=mx.multicollect.counts, integration_time=1)\n\n if mx.baseline.after:\n baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector)\n\n if mx.peakcenter.after:\n peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)\n info('finished measure script')\n\n#========================EOF==============================================================\n #peak_hop(detector='CDD', isotopes=['Ar40','Ar39','Ar36'], cycles=2, integrations=3)\n #baselines(counts=50,mass=0.5, detector='CDD')s\n\n#isolate sniffer volume\n # close('S')\n# sleep(1)\n#\n# #open to mass spec\n# open('R')\n#\n# set_time_zero()\n# #display pressure wave\n# sniff(5)\n#\n# #define sniff/split threshold\n# sniff_threshold=100\n#\n# #test condition\n# #if get_intensity('H1')>sniff_threshold:\n# if True:\n# gosub('splits:jan_split', klass='ExtractionLinePyScript')\n#\n","sub_path":"scripts/measurement/jan_detector_ic.py","file_name":"jan_detector_ic.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"105527284","text":"import mazeGenerator\n\nclass Queue:\n def __init__(self):\n self.list = []\n self.front = 0\n self.last = -1\n\n def add(self, node):\n self.list.append(node)\n self.last = self.last + 1\n\n def remove(self):\n if self.front > self.last:\n raise Exception(\"Underflow!\")\n else:\n self.front = self.front + 1\n return self.list[self.front-1]\n\n def isEmpty(self):\n if self.front > self.last:\n return True\n else:\n return False\n\n\nclass Coordinate:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\nclass Node:\n def __init__(self, coordinates, previous):\n self.state = coordinates\n self.previous = previous\n\nclass Maze:\n matrix = []\n visited = []\n row = 0\n column = 0\n start = Coordinate(-1, -1)\n goal = Coordinate(-1, -1)\n\n def __init__(self, matrix, row, column):\n self.matrix = matrix\n self.visited = [['0'] * column for _ in range(row)]\n self.row = row\n self.column = column\n\n def load(self):\n for i in range(self.row):\n for j in range(self.column):\n if self.matrix[i][j] == 'S':\n self.start.x = j\n self.start.y = i\n if self.matrix[i][j] == 'D':\n self.goal.x = j\n self.goal.y = i\n if self.matrix[i][j] == '1':\n self.visited[i][j] = '2'\n else:\n self.visited[i][j] = '0'\n\n def checkNeighbour(self, state, dir):\n var = Coordinate(-1, -1)\n if dir == 0:\n if state.x-1 >= 0:\n if self.visited[state.y][state.x-1] == '0':\n var.x = state.x-1\n var.y = state.y\n\n if dir == 1:\n if state.x+1 < self.column:\n if self.visited[state.y][state.x+1] == '0':\n var.x = state.x+1\n var.y = state.y\n\n if dir == 2:\n if state.y-1 >= 0:\n if self.visited[state.y-1][state.x] == '0':\n var.x = state.x\n var.y = state.y-1\n\n if dir == 3:\n if state.y+1 < self.row:\n if self.visited[state.y+1][state.x] == '0':\n var.x = state.x\n var.y = state.y+1\n\n return var\n\n\ndef bfs(matrix, row, column):\n\n queue = Queue()\n path = []\n maze = Maze(matrix, row, column)\n maze.load()\n start = Node(maze.start, None)\n goal = Node(Coordinate(-1, -1), None)\n queue.add(start)\n maze.visited[maze.start.y][maze.start.x] = '1'\n\n while not queue.isEmpty():\n current = queue.remove()\n for i in range(4):\n neighbour = maze.checkNeighbour(current.state, i)\n if not (neighbour.x == -1) and not (neighbour.y == -1):\n temp = Node(neighbour, queue.front-1)\n queue.add(temp)\n maze.visited[neighbour.y][neighbour.x] = '1'\n if maze.goal.x == 
neighbour.x and maze.goal.y == neighbour.y:\n goal = temp\n break\n\n if goal.state.x == -1 or goal.state.y == -1:\n return None, None\n else:\n current = goal\n while current.previous is not None:\n path.append((current.state.x, current.state.y))\n maze.matrix[current.state.y][current.state.x] = 'P'\n current = queue.list[current.previous]\n path.append((int(maze.start.x), int(maze.start.y)))\n maze.matrix[maze.start.y][maze.start.x] = 'S'\n maze.matrix[maze.goal.y][maze.goal.x] = 'D'\n\n path = list(reversed(path))\n del path[-1]\n return maze.matrix, path\n\n\nif __name__ == \"__main__\":\n row = 15\n column = 25\n maze = mazeGenerator.Maze(column, row)\n\n matrix = maze.matrix()\n solutionMatrix, path = bfs(matrix, row, column)\n\n if solutionMatrix is None:\n print(\"No Solution Found!\")\n else:\n for w in solutionMatrix:\n for x in w:\n print(x, end=\" \")\n print(\"\")\n\n print(\"\\n\")\n path = list(path)\n for elem in path:\n print(elem)\n","sub_path":"bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"210705494","text":"import plotly.graph_objects as go\nimport repository as rep\n\ntoken = \"pk.eyJ1IjoidWdoaXRzc2lkIiwiYSI6ImNrNm54Z3I5cTE1aDIzbW55MjcwdWp4MnEifQ.xylSSPUA0Yt9ly6jOBMg4w\"\n\nlats = [rep.codeDay.lat, rep.sb1.lat, rep.chpt1.lat, rep.pnbrd1.lat]\nlons = [rep.codeDay.lon, rep.sb1.lon, rep.chpt1.lon, rep.pnbrd1.lon]\ntexts = [rep.codeDay.text, rep.sb1.text, rep.chpt1.text, rep.pnbrd1.text]\n\nfig = go.Figure(go.Scattermapbox(\n lat=lats,\n lon=lons,\n mode='markers',\n marker=go.scattermapbox.Marker(size=14),\n text=texts,\n ))\n\nfig.update_layout(\n autosize=True,\n hovermode='closest',\n mapbox=dict(\n accesstoken=token,\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=rep.avgLat,\n lon=rep.avgLon\n ),\n pitch=0,\n zoom=14\n ),\n)\n\nfig.show()","sub_path":"plotlyBackup.py","file_name":"plotlyBackup.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"236081885","text":"#!/usr/bin/env python\nimport sqlite3\n\nDataBaseName = 'storageBase.db'\n\n#Получить список (ID делати, название детали, количество деталей) для всех деталей, в названии которых содержится строка searchString\ndef searchItems(searchString):\n\n items = []\n foundItems = __searchItemsByName(searchString)\n for foundItem in foundItems:\n if foundItem is not None:\n numOfItems = __getNumOfItems(foundItem[0])\n items.append( (foundItem[0], foundItem[1], numOfItems,) )\n\n return items\n\n#Получить список хранилищ с искомым элементом itemId (ID хранилища, название хранилища, количество деталей)\ndef getStoragesOfItem(itemId):\n\n items = []\n\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute('SELECT ItemsAndStorages.storageId, storages.StorageName, ItemsAndStorages.Quantity FROM itemsAndStorages, storages WHERE ItemId = ? 
AND storages.StorageId = ItemsAndStorages.StorageId', [str(itemId)])\n\n while True:\n item = c.fetchone()\n items.append(item)\n if item is None:\n break\n conn.close()\n return items\n\n#Получить всех предков хранилища (Список ID текущего хранилища, название текущего хранилища, ID родителя)\ndef getStoragePath(storageId):\n storageList = []\n currentStorageId = storageId\n\n while currentStorageId is not 0:\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute('SELECT ParrentStorageId, StorageName FROM storages WHERE StorageId = ?', [str(currentStorageId)])\n text = c.fetchone()\n conn.close()\n if text is None:\n break\n\n parrentStorageId = text[0]\n storageName = text[1]\n currentStorage = (currentStorageId, storageName, parrentStorageId, )\n storageList.append(currentStorage)\n\n currentStorageId = parrentStorageId\n\n return storageList\n\n#Получить все детали в хранилище (список ID детали, название детали, количество деталей)\ndef getItemsInStorage(storageId):\n items = []\n\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute(\"SELECT ItemsAndStorages.Itemid, items.ItemName, ItemsAndStorages.Quantity FROM ItemsAndStorages, items WHERE ItemsAndStorages.StorageId = ? AND items.ItemId = ItemsAndStorages.Itemid\", [str(storageId)])\n\n while True:\n item = c.fetchone()\n items.append(item)\n if item is None:\n break\n conn.close()\n return items\n\n#Проверить существует ли деталь с заданным названием\ndef isItemExist(itemName):\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute('SELECT COUNT(ItemId) FROM items WHERE upper(ItemName) = upper(?)', [itemName])\n text = c.fetchone()\n conn.close()\n\n if text[0] is 0:\n return False\n else:\n return True\n\n#Попытаться вставить деталь с заданным названием, если она еще не существует\ndef createNewItem(newItemName):\n if isItemExist(newItemName) == True:\n return False\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute(\"INSERT INTO items (ItemName) VALUES (?)\", [newItemName])\n conn.commit()\n conn.close()\n return True\n\ndef addItems(itemId, storageId, quantityToAdd):\n if quantityToAdd is 0:\n return False\n\n quantityInStorage = __getNumOfItemsInStorage(itemId, storageId)\n\n if quantityInStorage is 0:\n __insertItemInStorage(itemId, storageId, quantityToAdd)\n else:\n __updateItemInStorage(itemId, storageId, quantityToAdd)\n return True\n\ndef removeItems(itemId, storageId, quantityToRemove):\n if quantityToRemove is 0:\n return False\n\n quantityInStorage = __getNumOfItemsInStorage(itemId, storageId)\n\n if quantityInStorage > quantityToRemove:\n __updateItemInStorage(itemId, storageId, quantityToRemove * -1)\n elif quantityInStorage == quantityToRemove:\n __removeItemFromStorage(itemId, storageId)\n else:\n return False\n\n return True\n\ndef moveItems(itemId, srcStorageId, dstStorageId, quantity):\n if quantity is 0:\n return False\n\n result = removeItems(itemId, srcStorageId, quantity);\n if result is False:\n return False\n\n result = addItems(itemId, dstStorageId, quantity)\n return result\n\ndef getItemIdByName(itemName):\n items = []\n\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute(\"SELECT ItemId FROM items WHERE upper(ItemName) LIKE upper(?)\", (itemName,))\n\n while True:\n item = c.fetchone()\n if item is None:\n break\n items.append(item)\n conn.close()\n\n if len(items) == 1:\n return items[0][0], True\n return -1, False\n\ndef getItemNameById(itemId):\n\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n 
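# look up the item name by its primary key\n    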
c.execute(\"SELECT ItemName FROM items WHERE ItemId = ?\", (itemId,))\n\n text = c.fetchone()\n conn.close()\n\n if text is not None:\n return text[0], True\n return -1, False\n\ndef getStorageNameById(storageId):\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute(\"SELECT StorageName FROM storages WHERE StorageId = ?\", (storageId,))\n\n text = c.fetchone()\n conn.close()\n\n if text is not None:\n return text[0], True\n return -1, False\n\ndef getStorageParentId(storageId):\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute(\"SELECT ParrentStorageId FROM storages WHERE StorageId = ?\", (storageId,))\n\n text = c.fetchone()\n conn.close()\n\n if text is not None:\n return text[0], True\n return -1, False\n\ndef addStorage(storageName, parentId):\n name, result = getStorageNameById(parentId)\n if result is False:\n return False #Не существует хранилища-родителя\n\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute(\"INSERT INTO storages (StorageName, ParrentStorageId) VALUES (?, ?)\", [storageName, parentId])\n conn.commit()\n conn.close()\n return True\n\ndef moveStorage(storageId, newParentId):\n name, result = getStorageNameById(storageId)\n if result is False:\n return False #Не существует хранилища\n\n name, result = getStorageNameById(newParentId)\n if result is False:\n return False #Не существует хранилища\n\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute(\"UPDATE storages SET ParrentStorageId = ? WHERE StorageId = ?\", [str(newParentId), str(storageId)])\n conn.commit()\n conn.close()\n return True\n\n#Получить список (ID детали и название детали) для всех деталей, в названии которых содержится строка itemName\ndef __searchItemsByName(itemName):\n items = []\n\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute(\"SELECT ItemId, ItemName FROM items WHERE lower(ItemName) LIKE lower(?)\", (\"%\" + itemName + \"%\",))\n\n while True:\n item = c.fetchone()\n items.append(item)\n if item is None:\n break\n conn.close()\n return items\n\n#Получить общее количество деталей во всех хранилищах\ndef __getNumOfItems(itemId):\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute('SELECT SUM(Quantity) FROM itemsAndStorages WHERE ItemId=?', [str(itemId)])\n text = c.fetchone()\n conn.close()\n\n try:\n numOfItems = int(text[0])\n except TypeError:\n numOfItems = 0\n\n return numOfItems\n\n#Получить количество деталей в заданном хранилище\ndef __getNumOfItemsInStorage(itemId, storageId):\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute('SELECT Quantity FROM ItemsAndStorages WHERE StorageId=? AND ItemId=?', [str(storageId), str(itemId)])\n text = c.fetchone()\n conn.close()\n\n try:\n numOfItems = int(text[0])\n except TypeError:\n numOfItems = 0\n\n return numOfItems\n\n#Добавить новую запись о детали в хранилище, где его еще нет\ndef __insertItemInStorage(itemId, storageId, quantity):\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute('INSERT INTO ItemsAndStorages (ItemId, StorageId, Quantity) VALUES (?, ?, ?)', [str(itemId), str(storageId), str(quantity)])\n conn.commit()\n conn.close()\n\n#Изменить количество деталей в хранилище, где деталь уже присутствует\ndef __updateItemInStorage(itemId, storageId, quantityDiff):\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute('UPDATE ItemsAndStorages SET Quantity = Quantity+? WHERE ItemId=? 
AND StorageId=?', [str(quantityDiff), str(itemId), str(storageId)])\n conn.commit()\n conn.close()\n\n#Удалить запись о детали в хранилище\ndef __removeItemFromStorage(itemId, storageId):\n conn = sqlite3.connect(DataBaseName)\n c = conn.cursor()\n c.execute('DELETE FROM ItemsAndStorages WHERE Itemid = ? AND StorageId = ?;', [str(itemId), str(storageId)])\n conn.commit()\n conn.close()\n\nif __name__ == '__main__':\n main()","sub_path":"DataBase.py","file_name":"DataBase.py","file_ext":"py","file_size_in_byte":9647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"348119702","text":"f=open('input.txt')\nN=int(f.readline())\nfree=0\nfives=0\nmassiv=list(map(int, f.readline().split()))\nfor i in massiv:\n if i==5:\n free+=1\n else:\n d=(i-5)//5\n if free>0:\n if free>=d: d,free = 0, free-d\n else: d,free = d-free, 0\n fives+=d\n \nf=open('output.txt', 'w') \n \n\nprint(fives, file = f) \nf.close()\n","sub_path":"B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"482247386","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages\nfrom .models import User, Quote, Favorite\nimport bcrypt\n\n# Create your views here.\ndef main(request):\n return render(request,'quotes/main.html')\n\ndef register(request):\n if request.method == 'POST':\n errors = User.objects.basic_validation(request.POST)\n if len(errors) == 0:\n user_alias = User.objects.filter(alias=request.POST['alias'])\n user_email = User.objects.filter(email=request.POST['email'])\n if len(user_alias)>0:\n messages.add_message(request,messages.INFO,'user alias already exists!')\n return redirect(main)\n if len(user_email)>0:\n messages.add_message(request, messages.INFO, 'user email already exists!')\n return redirect(main)\n User.objects.create(first_name = request.POST['first_name'],\n last_name=request.POST['last_name'],\n alias=request.POST['alias'],\n email=request.POST['email'],\n password=bcrypt.hashpw(request.POST[\"password\"].encode(),bcrypt.gensalt()),\n dob =request.POST['dob'])\n messages.add_message(request, messages.INFO, 'Registration successful! 
Try logging in.')\n return redirect(main)\n else:\n for error_key,error_value in errors.items():\n messages.add_message(request,messages.INFO,{error_key:error_value})\n return redirect(main)\n else:\n return redirect(main)\n\n\ndef login(request):\n if request.method == 'POST':\n user = User.objects.filter(email=request.POST[\"email\"])\n if len(user)>0:\n pw = request.POST['password']\n pw2 = user[0].password\n if bcrypt.checkpw(pw.encode(),pw2.encode()):\n request.session[\"id\"] = user[0].id\n return redirect(quotes)\n else:\n messages.add_message(request, messages.INFO, 'Password is incorrect!')\n\n else:\n messages.add_message(request,messages.INFO,'Email is incorrect!')\n return redirect(main)\n else:\n redirect(main)\n\ndef quotes(request):\n user = User.objects.get(id=request.session['id'])\n quotes = Quote.objects.all()\n context = {\n 'user': user,\n 'quotes': quotes,\n 'favorites': Favorite.objects.filter(user=User.objects.get(id=request.session['id'])),\n 'favorites_id':Favorite.objects.filter(user=User.objects.get(id=request.session['id'])).values_list('id',flat=True),\n }\n return render(request,'quotes/quotes.html',context)\n\ndef create_quote(request):\n if 'id' in request.session:\n if len(request.POST['message']) < 10:\n messages.add_message(request,messages.INFO,'message must be more than 10 characters!')\n return redirect(quotes)\n if len(request.POST['author']) < 3:\n messages.add_message(request,messages.INFO,'quoted by must be more than 3 characters!')\n return redirect(quotes)\n Quote.objects.create(message=request.POST['message'],\n author=request.POST['author'],\n user=User.objects.get(id=request.session['id']))\n return redirect(quotes)\n else:\n return redirect(main)\n\ndef users(request, user_id):\n context = {\n \"user\": User.objects.get(id=user_id),\n \"number_of_quotes\": len(User.objects.get(id=user_id).quotes.all()),\n \"quotes\": User.objects.get(id=user_id).quotes.all()\n }\n return render(request,'quotes/users.html',context)\n\ndef favorites(request, quote_id):\n if 'id' in request.session:\n Favorite.objects.create(quote=Quote.objects.get(id=quote_id),user=User.objects.get(id=request.session['id']))\n return redirect(quotes)\n else:\n return redirect(main)\n\ndef remove_favorite(request, quote_id):\n if 'id' in request.session:\n Favorite.objects.filter(quote=Quote.objects.filter(id=quote_id)).delete()\n return redirect(quotes)\n else:\n return redirect(main)\n\n\n\ndef logout(request):\n request.session.clear()\n return redirect(main)\n#have to build last html page (quotes/user_id)\n#have to add functionaltiy such that when click favorites it saves to favorites table and goes from quotable qutoes table to favorites table\n#add logout button to html pages 1 and 2 that clears the session\n#\n\n\n\n\n\n\n","sub_path":"apps/quotes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"456540251","text":"# -----------------------------------------------------------------------------\n# Name: aggregator.py\n# Purpose: implement general purpose aggregator\n#\n# Author: Talal Khalil\n# -----------------------------------------------------------------------------\n\"\"\"\nImplement a simple general purpose aggregator\n\nUsage: aggregator.py filename topic\nfilename: input file that contains a list of the online sources (urls).\ntopic: topic to be researched and reported on\n\"\"\"\n\nimport urllib.request\nimport urllib.error\nimport 
re\nimport sys\n\n\ndef get_page(url):\n \"\"\"\n get the HTML content of a given web page\n Parameter:\n url (string) - the address of the web page to be read\n Returns (string) - a text string containing the HTML content\n or an empty string if there is an error opening\n the url or decoding it\n \"\"\"\n page = ''\n try:\n with urllib.request.urlopen(url) as url_file:\n page = url_file.read().decode('utf-8')\n except urllib.error.URLError as url_err:\n print('Error opening url: ', url, url_err)\n except UnicodeDecodeError as decode_err:\n print('Error decoding url: ', url, decode_err)\n finally:\n return page\n\n\ndef extract_topic(url, content, topic):\n \"\"\"\n extract the text referencing the given topic from the web page.\n Parameters:\n url (string) - the address of a web page\n content (string - HTML) - the content of the web page\n topic (string) - the topic of interest\n Returns:\n string - header identifying the url followed by all references\n to the topic of interest\n an empty string if there are no such references\n \"\"\"\n summary = ''\n # extract text outside angle brackets containing the topic of interest\n pattern = r'>([^<]*\\b' + topic + r'\\b.*?)<'\n matches = re.findall(pattern, content, re.IGNORECASE | re.DOTALL)\n if matches:\n summary = 'Source url:' + url + '\\n' + '\\n'.join(matches)\n return summary\n\n\ndef get_news(source, topic):\n \"\"\"\n aggregate content from all the urls found in the source file that reference\n the given topic and save it in a file topicsummary.txt.\n Parameters:\n source (string) - the name of the file containing the list of urls to be\n read\n topic (string) - a topic to be researched and reported on\n Returns: None\n \"\"\"\n output_filename = topic + 'summary.txt'\n with open(source, 'r', encoding='utf-8') as input_file:\n with open(output_filename, 'w', encoding='utf-8') as output_file:\n for url in input_file:\n content = get_page(url)\n result = extract_topic(url, content, topic)\n if result:\n output_file.write(result)\n output_file.write('\\n--------------------------------\\n\\n')\n\n\ndef main():\n if len(sys.argv) != 3:\n print('Error: invalid number of arguments')\n print('Usage: aggregator.py filename topic')\n else:\n source_file = sys.argv[1]\n topic = sys.argv[2]\n get_news(source_file, topic)\n\n\nif __name__ == '__main__':\n main()","sub_path":"aggregator.py","file_name":"aggregator.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"77458581","text":"from PyPDF2 import PdfFileReader\nfrom PyPDF2.pdf import PageObject\nfrom PyPDF2.generic import FloatObject\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.units import mm\nimport io\nimport math\n\n_paper = {\n 'A0': {'size': (841, 1189), 'margin': (2, 2, 2, 2, 2, 2)}, # margin: 上右下左,上下间距,左右间距\n 'A1': {'size': (594, 841), 'margin': (2, 2, 2, 2, 2, 2)},\n 'A2': {'size': (420, 594), 'margin': (2, 2, 2, 2, 2, 2)},\n 'A3': {'size': (297, 420), 'margin': (2, 2, 2, 2, 2, 2)},\n 'A4': {'size': (210, 297), 'margin': (2, 2, 2, 2, 2, 2)}\n}\n\n\ndef get_paper_size(paper_spec):\n if paper_spec in _paper:\n return _paper[paper_spec]['size']\n else:\n return 100 * mm, 100 * mm\n\n\ndef get_margin(paper_spec, page):\n if paper_spec == '':\n return None\n\n paper_size = get_paper_size(paper_spec)\n paper_width = paper_size[0] * mm\n paper_height = paper_size[1] * mm\n\n page_width = float(page.cropBox[2]) - float(page.cropBox[0])\n page_height = float(page.cropBox[3]) - 
float(page.cropBox[1])\n\n if (paper_width + 2) % page_width < 20 or (paper_height + 2) % page_height < 20:\n return 0, 0, 0, 0, 0, 0\n # no margin could be derived for this paper/page combination\n return None\n\n\ndef create_empty_paper(paper_spec='A4'):\n paper_size = get_paper_size(paper_spec)\n return PageObject.createBlankPage(None, math.ceil(paper_size[0] * mm), math.ceil(paper_size[1] * mm))\n\n\ndef merge_text(page, text, position, font_name='STSong-Light'):\n can_bio = io.BytesIO()\n can = canvas.Canvas(can_bio, pagesize=(math.ceil(page.cropBox.getWidth()), math.ceil(page.cropBox.getHeight())),\n initialFontName=font_name)\n _draw_strings(can, page, text.split('\\n'), position)\n can.save()\n\n can_page = PdfFileReader(can_bio).getPage(0)\n page.mergePage(can_page)\n\n\ndef merge_page(page, page2, merge_point=None, paper_spec='A4', margin=None):\n paper = _paper[paper_spec]\n if margin is None:\n margin = paper['margin']\n\n if merge_point is None:\n merge_point = _get_first_merge_point(page, margin)\n\n next_merge_point = _get_merge_point(page, page2, merge_point, margin)\n if next_merge_point is not None:\n txy = _get_merge_txy(next_merge_point[0], page2)\n\n if \"/Annots\" in page2:\n for annot in page2['/Annots']:\n annot_obj = annot.getObject()\n if annot_obj['/Subtype'] == '/Square':\n rect = annot_obj['/Rect']\n rect[0] = FloatObject(rect[0] + txy[0])\n rect[1] = FloatObject(rect[1] + txy[1])\n rect[2] = FloatObject(rect[2] + txy[0])\n rect[3] = FloatObject(rect[3] + txy[1])\n elif annot_obj['/Subtype'] == '/Line':\n rect = annot_obj['/L']\n rect[0] = FloatObject(rect[0] + txy[0])\n rect[1] = FloatObject(rect[1] + txy[1])\n rect[2] = FloatObject(rect[2] + txy[0])\n rect[3] = FloatObject(rect[3] + txy[1])\n rect_r = annot_obj['/Rect']\n rect_r[0] = FloatObject(rect_r[0] + txy[0])\n rect_r[1] = FloatObject(rect_r[1] + txy[1])\n rect_r[2] = FloatObject(rect_r[2] + txy[0])\n rect_r[3] = FloatObject(rect_r[3] + txy[1])\n\n page.mergeTranslatedPage(page2, txy[0], txy[1], False)\n\n return next_merge_point\n\n\ndef _get_merge_txy(merge_point_lt, page2):\n merge_point_tx = merge_point_lt[0]\n merge_point_ty = merge_point_lt[1]\n page2_tx = math.ceil(page2.cropBox[0])\n page2_ty = math.ceil(page2.cropBox[3])\n # both branches of the original conditionals were identical, so the\n # translation is simply the offset from page2's top-left corner\n tx = merge_point_tx - page2_tx\n ty = merge_point_ty - page2_ty\n return tx, ty\n\n\ndef _get_space_x(x, margin):\n space_x = math.ceil(margin[5] * mm)\n if x <= math.ceil((margin[3] + margin[5]) * mm):\n space_x = 0\n return space_x\n\n\ndef _get_first_merge_point(page, margin):\n page_height = math.ceil(page.cropBox.getHeight())\n point = (math.ceil(margin[3] * mm), math.ceil(page_height - margin[0] * mm))\n return point, point\n\n\ndef _get_merge_point(page, page2, merge_point, margin):\n point_lt = merge_point[0]\n point_rb = merge_point[1]\n\n r_point_lt = [-1, -1]\n r_point_rb = [-1, -1]\n\n page_width = math.ceil(page.cropBox.getWidth())\n page_height = math.ceil(page.cropBox.getHeight())\n page2_width = math.ceil(page2.cropBox.getWidth())\n page2_height = math.ceil(page2.cropBox.getHeight())\n\n if point_lt[1] - math.ceil(margin[2] * mm) < page2_height: # not enough vertical space left, start a new page\n return None\n\n space_x = _get_space_x(point_rb[0], margin)\n if (page_width - point_rb[0] - space_x - math.ceil(margin[1] * mm)) >= page2_width: # enough width left in the current row\n r_point_lt[0] = point_rb[0] + space_x\n r_point_lt[1] = point_lt[1]\n else: # not enough width in the current row, wrap to a new row\n if point_rb[1] - math.ceil(margin[4] * mm) - math.ceil(margin[2] * mm) < page2_height: # not enough height after wrapping\n return None\n else: # enough height after wrapping\n r_point_lt[0] = math.ceil(margin[3] * mm)\n r_point_lt[1] = point_rb[1] - math.ceil(margin[4] * mm)\n\n r_point_rb[0] = r_point_lt[0] + page2_width\n r_point_rb[1] = r_point_lt[1] - page2_height\n\n if r_point_rb[1] > point_rb[1]: # keep the lowest bottom edge\n r_point_rb[1] = point_rb[1]\n\n return r_point_lt, r_point_rb\n\n\ndef _draw_strings(can, page, texts, position):\n tl = len(texts)\n for i in range(0, tl):\n if position == 'lt': # top-left\n x = math.ceil(page.cropBox.getUpperLeft_x())\n y = math.ceil(page.cropBox.getUpperLeft_y()) - (i + 1) * can._fontsize\n can.drawString(x, y, texts[i])\n\n elif position == 'rt': # top-right\n x = math.ceil(page.cropBox.getUpperRight_x())\n y = math.ceil(page.cropBox.getUpperRight_y()) - (i + 1) * can._fontsize\n can.drawRightString(x, y, texts[i])\n elif position == 'rb': # bottom-right\n x = math.ceil(page.cropBox.getLowerRight_x())\n y = math.ceil(page.cropBox.getLowerRight_y()) + (tl - 1 - i) * can._fontsize\n can.drawRightString(x, y, texts[i])\n else: # default 'lb', bottom-left\n x = math.ceil(page.cropBox.getLowerLeft_x())\n y = math.ceil(page.cropBox.getLowerLeft_y()) + (tl - 1 - i) * can._fontsize\n can.drawString(x, y, texts[i])\n","sub_path":"printpdf/pdf_util.py","file_name":"pdf_util.py","file_ext":"py","file_size_in_byte":6531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"154278340","text":"\"\"\"\nThis module contains global settings for all modules.\n\"\"\"\n\n\n#: the bind address of the :class:`~queue_manager.QueueServer`\nSERVER_ADDRESS = 'localhost'\n\n#: the bind port of the :class:`~queue_manager.QueueServer`\nSERVER_PORT = 50001\n\n#: the authentication string of the :class:`~queue_manager.QueueServer`\nSERVER_AUTH = b'abc'\n\n\n#: the number of sslyze worker processes\nNUMBER_PROCESSES = 1\n\n#: a list of commands that sslyze will use for scanning.\n#: available commands are:\n#: [\"tlsv1_2\", \"tlsv1_1\", \"tlsv1\", \"sslv3\", \"sslv2\", \"reneg\", \"hsts\", \"resum\", \"resum_rate\",\n#: \"heartbleed\", \"chrome_sha1\", \"compression\", \"certinfo\"]\n#: For details see `SSLyze `_.\nCOMMAND_LIST = [\n \"tlsv1_2\",\n \"tlsv1_1\",\n \"tlsv1\",\n \"sslv3\",\n \"sslv2\",\n # \"reneg\",\n # \"hsts\",\n # \"resum\",\n # \"resum_rate\",\n # \"heartbleed\",\n # \"chrome_sha1\",\n # \"compression\",\n # \"certinfo\",\n]\n\n\n\n#: these are shared settings used by sslyze.\n#: For details see `SSLyze `_.\nSHARED_SETTINGS = {\n 'ca_file': None,\n 'certinfo': 'basic',\n 'starttls': None,\n 'resum': True,\n 'resum_rate': None,\n 'http_get': True,\n 'xml_file': None,\n 'compression': True,\n 'tlsv1': True,\n 'targets_in': None,\n 'keyform': 1,\n 'hsts': None,\n 'chrome_sha1': None,\n 'sslv3': True,\n 'sslv2': True,\n 'https_tunnel': None,\n 'nb_retries': 4,\n 'heartbleed': True,\n 'sni': None,\n 'https_tunnel_host': None,\n 'regular': False,\n 'key': None,\n 'reneg': True,\n 'tlsv1_2': True,\n 'tlsv1_1': True,\n 'hide_rejected_ciphers': True,\n 'quiet': None, 'keypass': '',\n 'cert': None, 'timeout': 5,\n 'xmpp_to': None\n}\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"403107512","text":"# definition for a singly linked list\r\nclass ListNode:\r\n def __init__(self, data):\r\n self.data = data\r\n self.link = None\r\n\r\ndef hasCycle(head:ListNode) -> bool:\r\n \"\"\"initialize slow & fast ptr to head node\"\"\"\r\n slow = head\r\n fast = 
head\r\n \"\"\"traverse the linked list\"\"\"\r\n while fast is not None and fast.link is not None:\r\n \"\"\"increment slow by one node\"\"\"\r\n slow = slow.link\r\n \"\"\"increment fast by two nodes\"\"\"\r\n fast = fast.link.link\r\n \"\"\"if slow & fast meet then a loop was found, return true\"\"\"\r\n if slow == fast:\r\n return True\r\n \"\"\"if slow & fast never meet there is no loop, return false\"\"\"\r\n return False\r\n\r\nif __name__ == \"__main__\":\r\n head = ListNode(1)\r\n head.link = l1 = ListNode(2)\r\n l1.link = l2 = ListNode(4)\r\n l2.link = l3 = ListNode(16)\r\n l3.link = l4 = ListNode(10)\r\n l4.link = l2\r\n \"\"\"1->2->4->16->10--|\r\n |__________|\"\"\"\r\n print(hasCycle(head))","sub_path":"Algorithm/cycleDetection.py","file_name":"cycleDetection.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"516383292","text":"class Solution(object):\n # sliding window + hash table\n def findAnagrams(self, s, p):\n \"\"\"\n :type s: str\n :type p: str\n :rtype: List[int]\n \"\"\"\n if len(s) == 0 or len(p) == 0 or len(s) < len(p):\n return []\n hashset = {i:0 for i in s}\n for i in p:\n if i not in hashset: # a character of p missing from s means no anagram can exist\n return []\n hashset[i] -= 1\n n = len(p)\n res = []\n print('hashset1', hashset)\n for i in range(n):\n if s[i] not in hashset:\n continue\n hashset[s[i]] += 1\n print('hashset2', hashset)\n if self.find(hashset,p):\n res.append(0)\n for i in range(len(s)-n):\n left = s[i]\n right = s[i+len(p)]\n hashset[left] -= 1\n hashset[right] += 1\n if self.find(hashset,p):\n res.append(i+1)\n return res\n\n\n\n def find(self, hashset, p):\n for i in p:\n if hashset[i] != 0:\n return False\n return True\n\na = Solution()\nprint(a.findAnagrams(\"abab\",\"ab\"))","sub_path":"oracle/leetcode438.py","file_name":"leetcode438.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"648211709","text":"from math import log\n\ndef createDataSet():\n \"\"\"\n Create the sample dataset\n \"\"\"\n dataSet = [[u'青年', u'否', u'否', u'一般', u'拒绝'],\n [u'青年', u'否', u'否', u'好', u'拒绝'],\n [u'青年', u'是', u'否', u'好', u'同意'],\n [u'青年', u'是', u'是', u'一般', u'同意'],\n [u'青年', u'否', u'否', u'一般', u'拒绝'],\n [u'中年', u'否', u'否', u'一般', u'拒绝'],\n [u'中年', u'否', u'否', u'好', u'拒绝'],\n [u'中年', u'是', u'是', u'好', u'同意'],\n [u'中年', u'否', u'是', u'非常好', u'同意'],\n [u'中年', u'否', u'是', u'非常好', u'同意'],\n [u'老年', u'否', u'是', u'非常好', u'同意'],\n [u'老年', u'否', u'是', u'好', u'同意'],\n [u'老年', u'是', u'否', u'好', u'同意'],\n [u'老年', u'是', u'否', u'非常好', u'同意'],\n [u'老年', u'否', u'否', u'一般', u'拒绝'],\n ]\n labels = [u'年龄', u'有工作', u'有房子', u'信贷情况']\n # return the dataset and the name of each feature dimension\n return dataSet, labels\n\n\ndef splitDataSet(dataSet,axis,value):\n \"\"\"\n Split the dataset on a given feature\n :param axis: the dimension (index) of the feature used for the split\n :param value: the feature value to match\n :return: all instances matching that value (with the feature dimension removed)\n \"\"\"\n\n # iterate over every row in dataSet\n retDataSet = []\n for featVec in dataSet:\n if featVec[axis] == value:\n reduceFeatVec = featVec[:axis] # drop this feature dimension\n reduceFeatVec.extend(featVec[axis+1:])\n retDataSet.append(reduceFeatVec)\n return retDataSet\n\n# always measures the uncertainty of the class labels\ndef calcShannonEnt(dataSet):\n \"\"\"\n Compute the Shannon entropy of the class variable Y in the training dataset\n :param dataSet:\n :return:\n \"\"\"\n numEntries = len(dataSet) # number of instances\n labelCounts = {}\n for featVec in dataSet: # iterate over instances and count label frequencies\n currentLabel = featVec[-1] # the last column holds the label\n # add the label to labelCounts if it is not there yet\n if currentLabel not in labelCounts.keys(): labelCounts[currentLabel] = 0\n labelCounts[currentLabel] += 1\n\n shannonEnt = 0.0\n for key in labelCounts:\n prob = float(labelCounts[key]) / numEntries\n shannonEnt -= prob * log(prob,2) # log base 2\n return shannonEnt\n\ndef calcConditionalEntropy(dataSet,i,featList,uniqueVals):\n \"\"\"\n Compute the conditional entropy of Y given feature x_i\n :param dataSet: the dataset\n :param i: feature dimension i\n :param featList: list of values of feature i\n :param uniqueVals: set of distinct values of feature i\n :return: the conditional entropy\n \"\"\"\n ce = 0.0\n for value in uniqueVals:\n subDataSet = splitDataSet(dataSet,i,value)\n prob = len(subDataSet) / float(len(dataSet)) # probability estimated by maximum likelihood\n ce += prob * calcShannonEnt(subDataSet) # conditional entropy: sum over p(x_i) * H(Y|X=x_i)\n return ce\n\ndef calcInformationGain(dataSet,baseEntropy,i):\n \"\"\"\n Compute the information gain\n :param dataSet: the dataset\n :param baseEntropy: the entropy of Y in the dataset\n :param i: feature dimension i\n :return: the information gain of feature i on the dataset, g(dataSet | X_i)\n \"\"\"\n featList = [example[i] for example in dataSet] # values of the i-th feature\n uniqueVals = set(featList) # convert to a set so each value appears only once\n newEntropy = calcConditionalEntropy(dataSet,i,featList,uniqueVals)\n infoGain = baseEntropy - newEntropy # information gain\n return infoGain\n\n\ndef chooseBestFeatureToSplitByID3(dataSet):\n \"\"\"\n Choose the best feature to split the dataset on\n :param dataSet:\n :return:\n \"\"\"\n numFeatures = len(dataSet[0]) -1 # the last column is the class label\n baseEntropy = calcShannonEnt(dataSet)\n bestInfoGain = 0.0\n bestFeature = -1\n for i in range(numFeatures): # iterate over all feature dimensions\n infoGain = calcInformationGain(dataSet,baseEntropy,i)\n if(infoGain > bestInfoGain):\n bestInfoGain = infoGain\n bestFeature = i\n return bestFeature # return the dimension index of the best feature\n\ndef majorityCnt(classList):\n \"\"\"\n Return the class label that occurs most often in classList\n (this helper was referenced below but missing from the original file)\n \"\"\"\n classCount = {}\n for vote in classList:\n classCount[vote] = classCount.get(vote, 0) + 1\n return max(classCount, key=classCount.get)\n\ndef createTree(dataSet,labels,chooseBestFeatureToSplitFunc = chooseBestFeatureToSplitByID3):\n \"\"\"\n Build the decision tree\n :param dataSet: the dataset\n :param labels: the name of each feature dimension\n :return: the decision tree\n \"\"\"\n classList = [example[-1] for example in dataSet] # list of class labels\n if classList.count(classList[0]) == len(classList): # count instances belonging to class classList[0]\n return classList[0] # stop splitting when all classes are identical\n if len(dataSet[0]) ==1: # only one feature left: return the most frequent class over all instances\n return majorityCnt(classList) # return the class label\n bestFeat = chooseBestFeatureToSplitFunc(dataSet)\n bestFeatLabel = labels[bestFeat]\n myTree ={bestFeatLabel:{}} # nested dict keyed by the feature label\n del (labels[bestFeat])\n # find the feature-value subsets that still need classifying\n featValues = [example[bestFeat] for example in dataSet]\n uniqueVals = set(featValues)\n for value in uniqueVals:\n subLabels = labels[:] # copy so recursion does not mutate labels\n myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet,bestFeat,value),subLabels)\n return myTree\n\n# test decision-tree construction\ndataSet,labels = createDataSet()\nmyTree = createTree(dataSet,labels)\n\n","sub_path":"machinelearning/16_decisionTree/ID3.py","file_name":"ID3.py","file_ext":"py","file_size_in_byte":5497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"156501926","text":"import re\nfrom os import mkdir, remove, listdir, walk\nfrom os.path import abspath, join, isdir, isfile, getsize\nfrom pickle import UnpicklingError\nfrom shutil import rmtree\n\nimport dill\nimport numpy as np\n\ntry:\n from matplotlib.figure import Figure\nexcept ImportError:\n class Figure(object):\n pass\nfrom umsgpack import packb, unpackb\n\n\nclass MeMapDictError(Exception):\n pass\n\n\nclass dotdict(dict):\n __setattr__ = dict.__setitem__\n __getattr__ = dict.__getitem__\n __delattr__ = dict.__delitem__\n\n\nclass readonly_memapdict(object):\n supported_numpy_dtypes = \"int,float,complex\".split(\",\")\n\n def __init__(self, folder: str):\n self._folder = abspath(folder) # type:str\n if not isdir(folder):\n raise NotADirectoryError(f\"{folder} does not exist\")\n\n def _abspath_(self, item):\n return join(str(self._folder), str(item))\n\n def __read_header(self, fp, length_size=64 // 8):\n try:\n length = 
np.frombuffer(fp.read(length_size), dtype=np.uint64)[0]\n header = dotdict(unpackb(fp.read(length)))\n return int(length + length_size), header\n except MemoryError:\n return 0, dotdict(type=None)\n\n def __getitem__(self, item):\n path = self._abspath_(item)\n if isfile(path):\n with open(path, \"rb\") as fp:\n length, header = self.__read_header(fp)\n if header.type == \"numpy\":\n dtype = header.dtype\n if all(i not in dtype for i in self.supported_numpy_dtypes):\n raise MeMapDictError(\"Unsupported numpy dtype: %s\" % dtype)\n dtype = getattr(np, dtype)\n shape = tuple(header.shape)\n size = np.dtype(header.dtype).itemsize\n item_size = getsize(path)\n if (item_size - length) % size:\n raise MeMapDictError(\"Numpy array file does not have a correct size\")\n res = np.memmap(path, dtype=dtype, offset=length, shape=shape)\n elif header.type == \"dill\":\n res = dill.load(fp)\n elif header.type == \"str\":\n res = fp.read().decode(header.encoding)\n elif header.type == \"bytes\":\n res = fp.read()\n elif header.type is None:\n try:\n fp.seek(0)\n res = dill.load(fp)\n except UnpicklingError:\n raise MeMapDictError(f\"Failed to open {path}\")\n else:\n raise NotImplementedError(f\"Type {header.type} is not supported\")\n return res\n return self.__class__(path)\n\n def __getattr__(self, item):\n if item[:2] == \"__\":\n return super(readonly_memapdict, self).__getattr__(item)\n return self[item]\n\n def __contains__(self, item):\n return self.keys().__contains__(item)\n\n def keys(self):\n return listdir(object.__getattribute__(self, \"_folder\"))\n\n def items(self):\n for i in self.keys():\n yield i, self[i]\n\n def values(self):\n for k, v in self.items():\n yield v\n\n def walk(self, regex, types=None):\n r = re.compile(\".+\" + str(regex))\n for root, folders, files in walk(self._folder):\n for names in [files, folders]:\n for i in names:\n i = join(root, i)\n match = r.fullmatch(i)\n if match is not None:\n groups = match.groups()\n groups = list(groups)\n for j, (t, v) in enumerate(zip(types, groups)):\n if type(t) == str:\n groups[j] = t.lower() in v.lower()\n else:\n groups[j] = t(v)\n yield groups, self[i]\n\n def __len__(self):\n return len(self.keys())\n\n def __str__(self):\n k = self.keys()\n res = [\"%s(%s)\" % (k, \"fd\"[int(i)]) for k, i in zip(k, map(isdir, (self._abspath_(i) for i in k)))]\n res = '{%s}' % \", \".join(res)\n return res\n\n def __dict__(self):\n return {k: v.__dict__() if issubclass(type(v), readonly_memapdict) else v for k, v in self.items()}\n\n def parent(self, n_levels=1):\n new_folder = abspath(join(self._folder, \"/\".join([\"..\"] * n_levels)))\n return self.__class__(new_folder)\n\n def first(self):\n return self[self.keys()[0]]\n\n\nclass memapdict(readonly_memapdict):\n def __init__(self, folder):\n try:\n super(memapdict, self).__init__(folder)\n except NotADirectoryError:\n mkdir(folder)\n\n def __setattr__(self, key, value):\n if key[0] == \"_\":\n super(memapdict, self).__setattr__(key, value)\n else:\n self[key] = value\n\n def __get_header(self, **kwargs):\n res = packb(kwargs)\n length = np.uint64(len(res)).tobytes()\n return length + res\n\n def __setitem__(self, key, value):\n abskey = self._abspath_(key)\n vtype = type(value)\n if vtype == Figure:\n value.savefig(f\"{abskey}.svg\")\n elif issubclass(vtype, dict):\n self[key].update(value)\n elif issubclass(vtype, np.ndarray) and any([i in value.dtype.__class__.__name__\n for i in self.supported_numpy_dtypes]):\n dtype = value.dtype.name\n shape = value.shape\n header = 
self.__get_header(type=\"numpy\", shape=shape, dtype=dtype)\n data = header + np.ravel(value).tobytes()\n self.write(abskey, data)\n elif issubclass(vtype, bytes):\n header = self.__get_header(type=\"bytes\")\n data = header + value\n self.write(abskey, data)\n elif issubclass(vtype, str):\n encoding = \"utf-8\"\n header = self.__get_header(type=\"str\", encoding=encoding)\n data = header + bytes(value, encoding)\n self.write(abskey, data)\n else:\n header = self.__get_header(type=\"dill\")\n self.write(abskey, header + dill.dumps(value))\n\n def __delattr__(self, item):\n del self[item]\n\n def __delitem__(self, item):\n path = self._abspath_(item)\n if isdir(path):\n rmtree(path)\n elif isfile(path):\n remove(path)\n else:\n raise FileNotFoundError(f\"{path} does not exist\")\n\n def update(self, dic):\n for k, v in dic.items():\n self[k] = v\n\n def write(self, filename, data, mode=None):\n if mode is None:\n mode = \"w\"\n if type(data) == bytes:\n mode = \"wb\"\n with open(self._abspath_(filename), mode) as fp:\n fp.write(data)\n","sub_path":"memapdict/memapdict.py","file_name":"memapdict.py","file_ext":"py","file_size_in_byte":6937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"90816267","text":"import pandas as pd\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom scipy import stats\nfrom sklearn.metrics import mean_squared_error\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom copy import deepcopy\nimport torch.nn.functional as F\nfrom numpy import inf\nfrom math import exp, gamma\nfrom datetime import timedelta\nfrom sklearn.metrics import r2_score\nimport matplotlib.patheffects as PathEffects\nfrom scipy.special import softmax\nimport warnings\nimport os\nimport math\nfrom scipy.stats import pearsonr, spearmanr\n\nwarnings.simplefilter(\"ignore\")\n\nplt.style.use(['science'])\nplt.rcParams[\"text.usetex\"] = True\n\ndf = pd.read_csv('owid-covid-data.csv')\ndf['date'] = pd.to_datetime(df.date)\n\ndfHealth = pd.read_excel('datasets/world-health.xls')\nindicators = list(pd.unique(dfHealth['Indicator Name']))[7:]\nindicators.append('Meat Consumption (kg/person)')\nindicators.append('Average Yearly Temperature (C)')\nindicators.remove('Incidence of malaria (per 1,000 population at risk)')\n\ndfMeat = pd.read_excel('datasets/meat.xlsx')\ndfTemp = pd.read_excel('datasets/temp.xlsx')\ndfStrains = pd.read_excel('datasets/strains.xlsx')\ndfStrains2 = pd.read_excel('datasets/strains2.xlsx')\ndfMalaria = pd.read_excel('datasets/malaria.xlsx')\nmalariadata = ['Malaria Cases/1000', 'Malaria Deaths/1000']\nothers = ['Consumption of iodized salt (% of households)', 'Prevalence of overweight, weight for height (% of children under 5)',\\\n'Prevalence of underweight, weight for age (% of children under 5)', 'Vitamin A supplementation coverage rate (% of children ages 6-59 months) newdata',\\\n'Immunization, DPT (% of children ages 12-23 months)', 'Immunization, measles (% of children ages 12-23 months)', \\\n'Immunization, HepB3 (% of one-year-old children)', 'People using at least basic sanitation services (% of population)', \\\n'People using safely managed drinking water services (% of population)', 'Tuberculosis treatment success rate (% of new cases)', \\\n'Current health expenditure per capita (current US$)']\nothers = [i+' newdata' for i in others]\ndfothers = [pd.read_excel('datasets/world-health2.xlsx', sheet_name='Sheet'+str(i)) for i in 
range(len(others))]\nstrainTypes = ['O', 'B', 'B1', 'B2', 'B4', 'A3', 'A6', 'A7', 'A1a', 'A2', 'A2a']\nstrainTypes2 = ['Cluster '+str(i) for i in range(9)]\nindicators.extend(malariadata + others + strainTypes + strainTypes2)\n\ncountries = list(pd.unique(df['location']))\n\ndef gauss(x, mu, sigma, scale):\n return scale * np.exp(-1 * ((x - mu) ** 2) / (2 * (sigma ** 2) )) \n\ndef weib(x, k, a, b, g):\n\treturn k * g * b * (a ** b) * np.exp(-1 * g * ((a / x) ** b)) / (x ** (b + 1))\n\ndef beta(x, k, a, b, p, q):\n\treturn k * gamma(p + q) * ((x - a)** (p-1)) * (b-x)**(q-1) / (gamma(p) * gamma(q) * (b-a)**(p+q-1))\n\ndef ft(x, k, e, d, o):\n\treturn k * np.exp(-1 * (1 + e * (x-o)) ** (-1 / (e + d)))\n\ndef getMetric(countryname, metricname):\n\tif metricname in strainTypes+strainTypes2:\n\t\tif metricname in strainTypes:\n\t\t\tdf2 = dfStrains[dfStrains['Country'] == countryname]\n\t\telse:\n\t\t\tdf2 = dfStrains2[dfStrains['Country'] == countryname]\n\t\tif len(df2[metricname].values) == 0: val = 1\n\t\telse: val = float(df2[metricname].values[0])+1 if not math.isnan(df2[metricname]) else 1\n\t\tif len(df2['Total'].values) == 0: tot = len(strainTypes)\n\t\telse: tot = float(df2['Total'].values[0])+1\n\t\treturn float(val/tot)\n\tif metricname == 'Meat Consumption (kg/person)':\n\t\tdf2 = dfMeat[dfMeat['Country'] == countryname]\n\t\treturn float(df2[2009].values[0]) if len(df2[2009].values) != 0 else 0\n\tif metricname == 'Average Yearly Temperature (C)':\n\t\tdf2 = dfTemp[dfTemp['Country'] == countryname]\n\t\ttemp = str(df2['temp'].values[0]) if len(df2['temp'].values) > 0 else 0\n\t\treturn float(temp)\n\tif metricname in malariadata:\n\t\tdf2 = dfMalaria[dfMalaria['Country'] == countryname]\n\t\ttemp = str(df2[metricname].values[0]) if len(df2[metricname].values) > 0 else 0\n\t\treturn float(temp)\n\tif metricname in others:\n\t\tddd = dfothers[others.index(metricname)]\n\t\tdf2 = ddd[ddd['Country'] == countryname]\n\t\ttemp = str(df2['Value'].values[0]) if len(df2['Value'].values) > 0 else 0\n\t\treturn float(temp)\n\tdf2 = dfHealth[dfHealth['Country Name'] == countryname]\n\tdf3 = df2[df2['Indicator Name'] == metricname]\n\treturn float(df3['2017'].values[0]) if len(df3['2017'].values) != 0 else 0\n\ndef getInfoCountry(df2, isdead):\n\tdf2['Delta'] = (df2.date - min(df2.date)).dt.days\n\tstartDate = min(df2.date)\n\ttotalLength = max(df2.Delta)\n\tconfirmed = []; new = []\n\tfor day in range(totalLength):\n\t\tnewc = max(0, int(sum(df2.new_cases[df2.Delta == day] if not isdead else df2.new_deaths[df2.Delta == day])))\n\t\tnew.append(newc)\n\t\tconfirmed.append(new[-1] + (confirmed[-1] if len(confirmed) > 1 else 0))\n\treturn startDate, totalLength, confirmed, new\n\ndef totalExpected(func, popt, data):\n\ttotal = 0; day = 1\n\twhile True:\n\t\ttoday = func(day, *popt) if day >= len(data) else data[day]\n\t\ttotal += today\n\t\tday += 1\n\t\tif day > len(data) and today <= 1: break\n\treturn day, total\n\ndef calcWhen(func, popt, match, data):\n\ttotal = 0; day = 1\n\twhile True:\n\t\ttoday = func(day, *popt) if day >= len(data) else data[day]\n\t\ttotal += today\n\t\tday += 1\n\t\tif total >= match or (today == 0 and day > data.index(max(data))): break\n\treturn day\n\ndef iterativeCurveFit(func, x, y, start):\n\toutliersweight = None\n\tfor i in range(10):\n\t\tpopt, pcov = curve_fit(func, x, y, start, sigma=outliersweight, maxfev=100000)\n\t\tpred = np.array([func(px, *popt) for px in x])\n\t\told = outliersweight\n\t\toutliersweight = np.abs(pred - y)\n\t\toutliersweight = 1 - 
np.tanh(outliersweight)\n\t\toutliersweight = outliersweight / np.max(outliersweight)\n\t\toutliersweight = softmax(1 - outliersweight)\n\t\tif i > 1 and sum(abs(old - outliersweight)) < 0.001: break\n\treturn popt, pcov\n\ndef seriesIterativeCurveFit(func, xIn, yIn, start):\n\tres = []\n\tfor ignore in range(15, 0, -1):\n\t\tx = xIn[:-1*ignore]; y = yIn[:-1*ignore]\n\t\toutliersweight = None\n\t\tfor i in range(10):\n\t\t\tpopt, pcov = curve_fit(func, x, y, start, sigma=outliersweight, absolute_sigma=True, maxfev=100000)\n\t\t\tpred = np.array([func(px, *popt) for px in x])\n\t\t\told = outliersweight\n\t\t\toutliersweight = np.abs(pred - y)\n\t\t\toutliersweight = 1 - np.tanh(outliersweight)\n\t\t\toutliersweight = outliersweight / np.max(outliersweight)\n\t\t\toutliersweight = softmax(1 - outliersweight)\n\t\t\tif i > 1 and sum(abs(old - outliersweight)) < 0.001: break\n\t\tpred = [func(px, *popt) for px in xIn]\n\t\tres.append((mean_absolute_percentage_error(yIn, pred), popt, pcov, ignore))\n\t# for i in res: print(i)\n\terrors = [i[0] for i in res]\n\tval = res[errors.index(min(errors))]\n\treturn val[1], val[2]\n\ndef getMaxCases(y, data):\n\tm = 0; dday = 0\n\tfor day,cases in enumerate(y):\n\t\t# if day < len(data):\n\t\t# \tif data[day] > m:\n\t\t# \t\tm = data[day]; dday = day\n\t\t# else:\n\t\t\tif cases > m:\n\t\t\t\tm = cases; dday = day\n\treturn m, dday\n\ndef mean_absolute_percentage_error(y_true, y_pred): \n return np.mean(np.abs((np.array(y_true) - np.array(y_pred)) / (np.array(y_true)+1))) * 100\n\ninsufficient = ['Central African Republic', 'Cambodia', 'Sudan', 'Ecuador', 'Chile', 'Colombia', 'Peru'] \nfinaldata = []; gooddataNew = []; gooddataDead = []\nignore = -1\ntraining_data = -5\nfor country in countries:\n\tif country in insufficient:\n\t\tcontinue\n\ttry:\n\t\tdead = False\n\t\tprint(\"--\", country)\n\t\tdf2 = df[df['location'] == country]\n\t\tres = getInfoCountry(df2, False)\n\t\tdata = res[-1]\n\t\tif sum(data) < (2000 if not dead else 100) and not data in ['Brazil', 'Iran', 'Israel', 'Oman']:\n\t\t\tprint('skip', country,)\n\t\t\tcontinue\n\t\tdays = res[1]\n\t\tstart = res[0]\n\n\t\tfunc = [(gauss, [0, 20, 100]), (weib, [60000, 14, 4, 500]), (ft, [7000, 0.5, 0.001, 100])]\n\n\t\twhichFunc = 0\n\t\ttimes = 2; skip = 30\n\t\tplt.figure(figsize=(6,3))\n\t\tx = list(range(len(data)))\n\t\tdatacopy = np.absolute(np.array(deepcopy(data[1:training_data])))\n\t\tif country == 'China': datacopy[datacopy == 15141] = 4000\n\t\tpoptg, pcovg = curve_fit(func[whichFunc][0], x[1:training_data], datacopy, func[whichFunc][1], maxfev=100000)\n\t\twhichFunc = 1\n\t\tpopt, pcov = seriesIterativeCurveFit(func[whichFunc][0], x[1:training_data], datacopy, func[whichFunc][1])\n\t\tfinalday, finalexp = totalExpected(func[whichFunc][0], popt, data)\n\t\twhen97 = calcWhen(func[whichFunc][0], popt, 0.97 * finalexp, data)\n\n\t\twhen97 = 1000 if when97 > 1000 else when97\n\t\txlim = max(len(data)*times, when97+10)\n\t\tpred = [func[whichFunc][0](px, *popt) for px in list(range(xlim))[1:]]\n\n\t\tplt.plot(list(range(xlim))[1:], pred, color='red', label='Robust Weibull Prediction (new)')\n\t\t_ = plt.bar(x, data, width=1, edgecolor='black', linewidth=0.01, alpha=0.2, label='Actual Data (new)')\n\t\tplt.ylabel(\"Number of cases\"); plt.xlabel(\"Date\"); plt.tight_layout(); \n\t\tplt.legend(loc='best');\tplt.title(country)\n\n\t\ty = [func[1][0](px, *popt) for px in x[1:]]\n\t\tr2 = r2_score(data[1:], y)\n\t\tmape = mean_absolute_percentage_error(data[1:], y)\n\n\t\tprint(\"MSE \", 
\"{:e}\".format(mean_squared_error(data[1:], y)))\n\t\tprint(\"R2 \", r2)\n\t\tprint(\"97 day\", (start + timedelta(days=when97)).strftime(\"%d %b %y\"))\n\t\tprint(\"MAPE\", mape)\n\t\tmape_error_new = mean_absolute_percentage_error(data[1:training_data], y[:training_data])\n\n\t\t# Metrics\n\t\ty = [func[whichFunc][0](px, *popt) for px in list(range(xlim))[1:]]\n\t\tmaxcases, maxday = getMaxCases(y, data)\n\n\t\tdead = True\n\t\tres = getInfoCountry(df2, True)\n\t\tdata = res[-1]\n\n\t\txlim2 = max(len(data)*times, when97+10)\n\n\t\txlim = max(xlim, xlim2)\n\t\tplt.xticks(list(range(0,xlim,30)), [(start+timedelta(days=i)).strftime(\"%d %b %y\") for i in range(0,xlim,skip)], rotation=45, ha='right')\n\t\tplt.twinx()\n\n\t\tdatacopy = np.absolute(np.array(deepcopy(data[1:training_data])))\n\t\tpoptold = popt\n\t\tfinalexpold = finalexp\n\t\tpopt, pcov = seriesIterativeCurveFit(func[whichFunc][0], x[1:training_data], datacopy, [2000, 54, 4, 500])\n\t\ty = [func[1][0](px, *popt) for px in x[1:]]\n\t\tr2Dead = r2_score(data[1:], y)\n\t\tmapeDead = mean_absolute_percentage_error(data[1:], y)\n\t\tmape_error_dead = mean_absolute_percentage_error(data[1:training_data], y[:training_data])\n\t\tfinalday, finalexp = totalExpected(func[whichFunc][0], popt, data)\n\t\tpred = [func[whichFunc][0](px, *popt) for px in list(range(xlim2))[1:]]\n\t\tmaxcases2, maxday2 = getMaxCases(pred, data)\n\t\tplt.plot(list(range(xlim2))[1:], pred, color='purple', label='Robust Weibull Prediction (dead)')\n\t\t_ = plt.bar(x, data, width=1, color='green', edgecolor='black', linewidth=0.01, alpha=0.2, label='Actual Data (dead)')\n\t\tplt.legend(loc=7)\n\t\tplt.ylabel(\"Number of deaths\")\n\n\t\tplt.savefig('graphs/'+'both'+'/'+country.replace(\" \", \"_\")+'.pdf')\n\n\t\tpopulation = getMetric(country, 'Population, total')\n\t\tvalues = [country, mape_error_new, mape_error_dead, r2, mape, r2Dead, mapeDead, maxday2-maxday, finalexpold, finalexp, finalexpold/population, finalexp/population, 100*finalexp/finalexpold]\n\t\tfinaldata.append(values)\n\t\tif maxday2 - maxday >= -10 and mape <= 46: \n\t\t\tgooddataNew.append(finaldata[-1])\n\t\t\tplt.savefig('graphs/'+'good'+'/'+country.replace(\" \", \"_\")+'.pdf')\n\t\tif maxday2 - maxday >= -10 and mapeDead <= 47: \n\t\t\tgooddataDead.append(finaldata[-1])\n\t\tprint(\"----\", country)\n\texcept Exception as e:\n\t\tprint(str(e))\n\t\t# raise(e)\n\t\tpass\n\nparams = ['peaks diff', 'total cases', 'total deaths', 'cases/pop', 'deaths/pop', 'mortality']\ndf = pd.DataFrame(finaldata,columns=['Country', 'Prediction MAPE (new)', 'Prediction MAPE (dead)', 'R2', 'MAPE', 'R2 Deaths', 'MAPE Deaths']+params)\ndfgood = pd.DataFrame(gooddataNew,columns=['Country', 'Prediction MAPE (new)', 'Prediction MAPE (dead)', 'R2', 'MAPE', 'R2 Deaths', 'MAPE Deaths']+params)\ndfgoodm = pd.DataFrame(gooddataDead,columns=['Country', 'Prediction MAPE (new)', 'Prediction MAPE (dead)', 'R2', 'MAPE', 'R2 Deaths', 'MAPE Deaths']+params)\n\nwith pd.ExcelWriter('error.xlsx') as writer: \n df.to_excel(writer, sheet_name='All errors')\n dfgood.to_excel(writer, sheet_name='Errors of good models (new)')\n dfgoodm.to_excel(writer, sheet_name='Errors of good models (deaths)')\n","sub_path":"error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":11929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"214006746","text":"# Python standard Libraries\nimport time\nimport operator\n\n# ESA Snappy\nfrom snappy import ProductIO\n\n# Scientific 
Libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport joblib\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import metrics\nfrom pykml import parser\n\n# Self Defined Modules\nfrom ground_truth import RosebelPixelClass3, GroundTruthBoundaries\n\n# PRODUCT_PATH = \"..\\\\data\\\\processed\\\\Rosebel_GRD\\\\Subset_S1A_IW_GRDH_1SDV_20170903T092838_20170903T092903_018209_01E9A9_D2A2_Orb_NR_Cal_Spk_TC_GLCM.dim\"\n# PRODUCT_PATH = \"..\\\\data\\\\processed\\\\Rosebel_GRD\\\\Subset_S1A_IW_GRDH_1SDV_20170903T092838_20170903T092903_018209_01E9A9_D2A2_Orb_NR_Cal_Spk_TF_TC_Gamma.dim\"\n# PRODUCT_PATH = \"..\\\\data\\\\processed\\\\Rosebel_GRD\\\\Subset_S1B_IW_GRDH_1SDV_20170902T215854_20170902T215945_007219_00CB9B_FFA6_Orb_NR_Cal_Asm_Spk_TF_TC.dim\"\n# PRODUCT_PATH = \"..\\\\data\\\\processed\\\\Rosebel_SLC\\\\Subset_S1A_IW_SLC__1SDV_20170903T092838_20170903T092905_018209_01E9A9_63BD_Orb_NR_Cal_Deb_Spk_TC_5m.dim\"\n# PRODUCT_PATH = \"..\\\\data\\\\processed\\\\Rosebel_SLC\\\\Subset_S1A_IW_SLC__1SDV_20170903T092838_20170903T092905_018209_01E9A9_63BD_Orb_TNR_Cal_deb_Spk_TF_TC_5m_Gamma.dim\"\nPRODUCT_PATH = \"..\\\\data\\\\processed\\\\Rosebel_GRD\\\\Temp\\\\Subset_S1A_IW_GRDH_1SDV_20190917T092852_20190917T092917_029059_034C31_1942_Orb_NR_Cal_TF_Spk_TC.dim\"\n\n# PRODUCT_PATH = \"..\\\\data\\\\processed\\\\Obuasi\\\\Subset_S1A_IW_GRDH_1SDV_20190208T182602_20190208T182631_025842_02E01C_899E_Orb_NR_Cal_Spk_TF_TC_Gamma.dim\"\n\nMODEL_PATH = \"..\\\\data\\\\models\\\\\"\n\ndef print_duration_string(start_time):\n t = time.time() - start_time\n print(\" Completed in - \" + str(int(t // 3600)) + \" hours \" + str(int((t // 60) % 60)) + \" minutes \" + str(\n int(t % 60)) + \" seconds \")\n\nif __name__ == \"__main__\":\n start_time = time.time()\n\n print(\"Reading product:\" + PRODUCT_PATH)\n p = ProductIO.readProduct(PRODUCT_PATH)\n\n print('Extracting Bands')\n band_names = p.getBandNames()\n bands = []\n number_of_bands = 0\n for band_name in band_names:\n print(str(number_of_bands + 1) + \": \" + str(band_name))\n number_of_bands += 1\n bands.append(p.getBand(band_name))\n print(\"Number of bands in product: \" + str(number_of_bands))\n\n print('Extracting Feature Data from Bands')\n features = []\n for band in bands:\n w = band.getRasterWidth()\n h = band.getRasterHeight()\n x = np.zeros(w * h, np.float32)\n band.readPixels(0, 0, w, h, x)\n features.append(x)\n image_width = bands[0].getRasterWidth()\n image_height = bands[0].getRasterHeight()\n number_of_pixels = image_height * image_width\n print(\"Number of pixels: \" + str(number_of_pixels))\n\n features = np.array(features)\n print(\"Preparing Features for Logistic Regression, Random Forest, KMeans\")\n\n ################## Remove elevation band only ##################\n rf_features = np.delete(features, 2, axis=0).transpose()\n\n ######## Remove all features except for base VV and VH bands #########\n # print(\"Removing all features except for base VV and VH bands\")\n # feature_indexes_to_drop = [2, 3, 4, 5, 6, 7, 8]\n # rf_features = np.delete(features, feature_indexes_to_drop, axis=0).transpose()\n # print(rf_features.shape)\n #####################################################################\n\n print(\"Importing Random Forest model\")\n rf_model = joblib.load(\n MODEL_PATH + \"rf_R_O_M_g_model_balanced.joblib\") # rf_R_O_M_g_model_balanced / rf_rosebel_water_model_VV_VH / rf_rosebel_slc_g_model / rf_rosebel_slc_g_water_model\n print(\"Predicting rf assignments\")\n rf_predictions = 
rf_model.predict(rf_features).astype(int)\n # rf_predictions_proba = rf_model.predict_proba(rf_features)\n print(rf_predictions)\n # unique, counts = np.unique(rf_predictions, return_counts=True)\n # print(dict(zip(unique, counts)))\n # print(rf_predictions_proba)\n # exit()\n\n print(\"Exporting image based on rf assignments\")\n rf_predictions = np.reshape(rf_predictions, (image_height, image_width))\n print(rf_predictions.shape)\n imgplot = plt.imshow(rf_predictions, cmap='brg')\n imgplot.write_png(\"C:\\\\Users\\\\royce\\\\Desktop\\\\rosebel_2019.png\")\n\n print_duration_string(start_time)","sub_path":"src/generate_prediction_image.py","file_name":"generate_prediction_image.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"538045046","text":"from __future__ import unicode_literals\n\nimport os\nfrom decimal import Decimal\nfrom uuid import uuid4 as uuid\n\nimport magic # pylint: disable=E0401\n\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\nfrom django.db.models import FileField as OriginFileField\nfrom django.db.models.fields import DecimalField as OriginDecimalField\nfrom django.forms import forms\nfrom django.template.defaultfilters import filesizeformat\nfrom django.utils.encoding import force_text\nfrom django.utils.translation import ugettext\n\nfrom chamber import config\nfrom chamber.forms import fields as chamber_fields\nfrom chamber.models.humanized_helpers import price_humanized\nfrom chamber.utils.datastructures import SequenceChoicesEnumMixin, SubstatesChoicesNumEnum\n\n\ntry:\n from sorl.thumbnail import ImageField as OriginImageField\nexcept ImportError:\n from django.db.models import ImageField as OriginImageField\n\n\nclass DecimalField(OriginDecimalField):\n\n def __init__(self, *args, **kwargs):\n self.step = kwargs.pop('step', 'any')\n self.min = kwargs.pop('min', None)\n self.max = kwargs.pop('max', None)\n kwargs['validators'] = kwargs.get('validators', [])\n if self.min is not None:\n kwargs['validators'].append(MinValueValidator(self.min))\n if self.max is not None:\n kwargs['validators'].append(MaxValueValidator(self.max))\n super(DecimalField, self).__init__(*args, **kwargs)\n\n def formfield(self, **kwargs):\n defaults = {\n 'form_class': chamber_fields.DecimalField,\n 'step': self.step,\n 'min': self.min,\n 'max': self.max,\n }\n defaults.update(kwargs)\n return super(DecimalField, self).formfield(**defaults)\n\n\nclass RestrictedFileValidator(object):\n\n def __init__(self, max_upload_size):\n self.max_upload_size = max_upload_size\n\n def __call__(self, data):\n if data.size > self.max_upload_size:\n raise forms.ValidationError(\n ugettext('Please keep filesize under {max}. 
Current filesize {current}').format(\n max=filesizeformat(self.max_upload_size),\n current=filesizeformat(data.size)\n )\n )\n else:\n return data\n\n\nclass AllowedContentTypesFileValidator(object):\n\n def __init__(self, content_types):\n self.content_types = content_types\n\n def __call__(self, data):\n with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as m:\n mime_type = m.id_buffer(data.file.read(1024))\n data.file.seek(0)\n if mime_type not in self.content_types:\n raise ValidationError(\n ugettext('Unsupported file type')\n )\n return data\n\n\nclass RestrictedFileFieldMixin(object):\n \"\"\"\n Same as FileField, but you can specify:\n * allowed_content_types - list of allowed content types. Example: ['application/json', 'image/jpeg']\n * max_upload_size - a number indicating the maximum file size allowed for upload in MB.\n \"\"\"\n def __init__(self, *args, **kwargs):\n max_upload_size = kwargs.pop('max_upload_size', config.CHAMBER_MAX_FILE_UPLOAD_SIZE) * 1024 * 1024\n allowed_content_types = kwargs.pop('allowed_content_types', None)\n super(RestrictedFileFieldMixin, self).__init__(*args, **kwargs)\n self.validators.append(RestrictedFileValidator(max_upload_size))\n if allowed_content_types:\n self.validators.append(AllowedContentTypesFileValidator(allowed_content_types))\n\n def get_filename(self, filename):\n \"\"\"\n removes UTF chars from filename\n \"\"\"\n from unidecode import unidecode\n return super(RestrictedFileFieldMixin, self).get_filename(unidecode(force_text(filename)))\n\n\nclass FileField(RestrictedFileFieldMixin, OriginFileField):\n pass\n\n\nclass ImageField(RestrictedFileFieldMixin, OriginImageField):\n pass\n\n\ndef generate_random_upload_path(instance, filename):\n \"\"\"\n Pass this function to upload_to argument of FileField to store the file on an unguessable path.\n The format of the path is class_name/hash/original_filename.\n \"\"\"\n return os.path.join(instance.__class__.__name__.lower(), uuid().hex, filename)\n\n\nclass PrevValuePositiveIntegerField(models.PositiveIntegerField):\n\n def __init__(self, *args, **kwargs):\n self.copy_field_name = kwargs.pop('copy_field_name', None)\n super(PrevValuePositiveIntegerField, self).__init__(*args, **kwargs)\n\n def pre_save(self, model_instance, add):\n if add or hasattr(model_instance, 'changed_fields') and self.copy_field_name in model_instance.changed_fields:\n setattr(\n model_instance, self.attname,\n getattr(model_instance, self.copy_field_name)\n if add else model_instance.initial_values[self.copy_field_name]\n )\n return super(PrevValuePositiveIntegerField, self).pre_save(model_instance, add)\n\n\nclass SubchoicesPositiveIntegerField(models.PositiveIntegerField):\n\n empty_values = ()\n\n def __init__(self, *args, **kwargs):\n self.enum = kwargs.pop('enum', None)\n self.subchoices_field_name = kwargs.pop('subchoices_field_name', None)\n assert self.enum is None or isinstance(self.enum, SubstatesChoicesNumEnum)\n if self.enum:\n kwargs['choices'] = self.enum.choices\n super(SubchoicesPositiveIntegerField, self).__init__(*args, **kwargs)\n\n def _get_subvalue(self, model_instance):\n return getattr(model_instance, self.subchoices_field_name)\n\n def clean(self, value, model_instance):\n if self.enum and self._get_subvalue(model_instance) not in self.enum.categories:\n return None\n else:\n return super(SubchoicesPositiveIntegerField, self).clean(value, model_instance)\n\n def _raise_error_if_value_should_be_empty(self, value, subvalue):\n if self.enum and subvalue not in self.enum.categories and value is 
not None:\n raise ValidationError(ugettext('Value must be empty'))\n\n def _raise_error_if_value_not_allowed(self, value, subvalue, model_instance):\n allowed_values = self.enum.get_allowed_states(getattr(model_instance, self.subchoices_field_name))\n if subvalue in self.enum.categories and value not in allowed_values:\n raise ValidationError(ugettext('Allowed choices are {}.').format(\n ', '.join(('{} ({})'.format(*(self.enum.get_label(val), val)) for val in allowed_values))\n ))\n\n def validate(self, value, model_instance):\n if not self.enum:\n return\n\n self._raise_error_if_value_should_be_empty(value, self._get_subvalue(model_instance))\n self._raise_error_if_value_not_allowed(value, self._get_subvalue(model_instance), model_instance)\n\n\nclass EnumSequenceFieldMixin(object):\n\n # TODO Once SmartWidget mixin is not in is-core, add formfield method with the appropriate widget\n def __init__(self, *args, **kwargs):\n self.enum = kwargs.pop('enum', None)\n self.prev_field_name = kwargs.pop('prev_field', None)\n assert self.enum is None or isinstance(self.enum, SequenceChoicesEnumMixin)\n if self.enum:\n kwargs['choices'] = self.enum.choices\n super(EnumSequenceFieldMixin, self).__init__(*args, **kwargs)\n\n def validate(self, value, model_instance):\n super(EnumSequenceFieldMixin, self).validate(value, model_instance)\n if self.enum:\n prev_value = model_instance.pk and model_instance.initial_values[self.attname] or None\n allowed_next_values = self.enum.get_allowed_next_states(prev_value, model_instance)\n if self.name in model_instance.changed_fields and value not in allowed_next_values:\n raise ValidationError(\n ugettext('Allowed choices are {}.').format(\n ', '.join(('{} ({})'.format(*(self.enum.get_label(val), val)) for val in allowed_next_values))))\n\n\nclass EnumSequencePositiveIntegerField(EnumSequenceFieldMixin, models.PositiveIntegerField):\n pass\n\n\nclass EnumSequenceCharField(EnumSequenceFieldMixin, models.CharField):\n pass\n\n\nclass PriceField(DecimalField):\n\n def __init__(self, *args, **kwargs):\n self.currency = kwargs.pop('currency', ugettext('CZK'))\n default_kwargs = {\n 'decimal_places': 2,\n 'max_digits': 10,\n 'humanized': lambda val, inst, field: price_humanized(val, inst, currency=field.currency)\n }\n default_kwargs.update(kwargs)\n super(PriceField, self).__init__(*args, **default_kwargs)\n\n def formfield(self, **kwargs):\n default_kwargs = {\n 'form_class': chamber_fields.PriceField,\n 'currency': self.currency,\n }\n default_kwargs.update(kwargs)\n\n return super(PriceField, self).formfield(**default_kwargs)\n\n\nclass PositivePriceField(PriceField):\n\n def __init__(self, *args, **kwargs):\n kwargs['validators'] = kwargs.get('validators', [])\n kwargs['validators'].append(MinValueValidator(Decimal('0.00')))\n super(PositivePriceField, self).__init__(*args, **kwargs)\n\n def deconstruct(self):\n name, path, args, kwargs = super(PositivePriceField, self).deconstruct()\n del kwargs['validators']\n return name, path, args, kwargs\n","sub_path":"chamber/models/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":9495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"247489920","text":"import re\ndef a_1(stiing):\n charRE = re.compile(r'[^a-zA-Z0-9.]')\n stiing = charRE.search(stiing)\n return not bool(stiing)\nprint(a_1(\"5./%^%$**^;\"))\nprint(a_1(\"skjdsfhskfhfkhkzbjdgejfakhai33543443545\"))\n\n\n#2\nimport re\ndef w_1(aqwe):\n patterns = '\\w\\d'\n if re.search (patterns, 
aqwe):\n return 'found a match!'\n else:\n return ('Not matched!')\n\nprint(w_1(\"the quick brown fox jumps over the lazy\"))\nprint(w_1('rishab'))\n\n\n#3\nimport re\ndef w_7(text):\n pattern = re.compile(r'.*[0-9]$')\n if pattern.match(text):\n return 'found match!'\n else:\n return ('Not matched!')\n\nprint(w_7(\"vdhgfhfgdhj9\"))\nprint(w_7('hhghgkjgjfkhkr'))\n\n#4\nimport re\nresults = re.finditer(r'([0-9]{1,3})', 'Exercises number 1,12,13, and 345 are important')\nprint('number of length 1 to 3')\nfor n in results:\n print(n.group(0))\n\n\n#5\nimport re\ndef w_9(sddd):\n pat = '[^A-B.]'\n if re.search (pat, sddd):\n return 'found a match!'\n else:\n return ('Not matched!')\n\nprint(w_9(\"SJHFKSFHKSFSHF\"))\nprint(w_9(\"GHJGJGJ\"))\n","sub_path":"Day13.py","file_name":"Day13.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"76398790","text":"import dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nimport requests\r\nimport json\r\nimport pandas as pd\r\nimport datetime as dt\r\nimport plotly.express as px\r\nimport plotly.graph_objects as go\r\n\r\n\r\n#######################################\r\n########DATAFRAME######################\r\n#######################################\r\ndef earthquake(path, starttime, endtime, alert):\r\n paramss = {\"format\": \"geojson\", \"starttime\": starttime, \"endtime\": endtime, \"alertlevel\": alert}\r\n data = requests.get(path, params = paramss)\r\n data = json.loads(data.text)\r\n return data\r\n\r\npath = r\"https://earthquake.usgs.gov/fdsnws/event/1/query?\"\r\ngreen = earthquake(path, '2010-01-01', '2021-10-1', 'green')\r\nyellow = earthquake(path, '2010-01-01', '2021-10-1', 'yellow')\r\norange = earthquake(path, '2010-01-01', '2021-10-1', 'orange')\r\nred = earthquake(path, '2010-01-01', '2021-10-1', 'red')\r\n\r\n# Convert the JSON to Pandas dataframe\r\ndf_green = pd.json_normalize(green['features'])\r\ndf_yellow = pd.json_normalize(yellow['features'])\r\ndf_orange = pd.json_normalize(orange['features'])\r\ndf_red = pd.json_normalize(red['features'])\r\n\r\ndf = pd.concat([df_green, df_yellow, df_orange, df_red])\r\n\r\ndef refactor_date(row):\r\n actual_value = row['properties.time']\r\n actual_time = dt.datetime.fromtimestamp(actual_value // 1000.0)\r\n row['properties.time'] = actual_time\r\n return row\r\n\r\ndf = df.apply(refactor_date, axis='columns')\r\n\r\ndf2 = df[['id', 'properties.mag', 'properties.place', 'properties.time', 'properties.detail', \\\r\n'properties.felt', 'properties.cdi', 'properties.mmi', 'properties.alert', 'properties.tsunami', \\\r\n'properties.sig', 'properties.net', 'properties.dmin', 'properties.type', 'geometry.coordinates']]\r\ndf2.set_axis(['id', 'magnitude', 'location', 'time', 'detail', 'felt', 'cdi', 'mmi', 'alert', 'tsunami', 'sig', 'net', 'dmin', 'type', 'coordinates'], axis=1, inplace=True)\r\ndf2.head()\r\n\r\ndf2[['latitud', 'longitud', 'depth']] = pd.DataFrame(df2.coordinates.tolist(), index= df2.index) \r\n\r\n# Filling Missing Values\r\ndf2.felt = df2.felt.fillna(0)\r\ndf2.cdi = df2.cdi.fillna(0)\r\ndf2.dmin = df2.dmin.fillna(df2.dmin.median())\r\ndf2.location = df2.location.fillna('Unknown')\r\ngreen_mag = df2.magnitude[df2.alert == 'green']\r\ndf2.magnitude = df2.magnitude.fillna(green_mag.mean())\r\n\r\n#######################################\r\n############ Dash App 
#################\r\n#######################################\r\n\r\napp = dash.Dash(__name__, suppress_callback_exceptions=True)\r\n\r\napp.layout = html.Div([\r\n dcc.Location(id='url', refresh=False),\r\n html.Div([\r\n html.H1('Earthquake Dash App'),\r\n html.Div([\r\n dcc.Link('Page 1', href='/page-1'),\r\n dcc.Link('Page 2', href='/page-2'),\r\n dcc.Link('Page 3', href='/page-3')\r\n ], className='nav_div')\r\n ], className='nav'),\r\n html.Div(id='page-content'),\r\n html.Footer([\r\n html.P('© 2021 Fernando Sirias / David Mairena'),\r\n html.Div([\r\n html.P('Data Source: '),\r\n html.A('USGS', href='https://earthquake.usgs.gov/fdsnws/event/1/?ref=springboard', target='_blank')\r\n ], className='source')\r\n ])\r\n], className='content_container')\r\n\r\ndefault_layout= html.Div([\r\n html.Div([\r\n html.H2('PLEASE SELECT A PAGE'),\r\n html.Img(src='https://media0.giphy.com/media/xT1R9JFTKhIpOYBvos/giphy.gif?cid=790b761186ce2b415be65a3c705bea87481b3dad7a3b6985&rid=giphy.gif&ct=g')\r\n ], className='default_container')\r\n], className='default_layout')\r\n\r\n##################################### First Page ######################################### \r\nfirst_page_layout = html.Div([\r\n html.H1('Earthquakes from 2010 to 2021'),\r\n html.Div([\r\n html.Div(html.Label([\r\n 'Select an alert to filter:',\r\n dcc.Dropdown(\r\n id='page-1-dropdown', value='all', clearable=False,\r\n options=[\r\n {'label': 'Green', 'value': 'green'}, \r\n {'label': 'Yellow', 'value' : 'yellow'},\r\n {'label': 'Orange', 'value': 'orange'},\r\n {'label': 'Red', 'value': 'red'},\r\n {'label': 'All', 'value': 'all'}]\r\n )], className='filter_text'), className='inline'),\r\n html.Div([\r\n html.Label([\r\n 'Tsunami alert:',\r\n dcc.RadioItems(\r\n id='tsunami_alert',\r\n value='default',\r\n options=[\r\n {'label': 'Default', 'value': 'default'},\r\n {'label': 'Yes', 'value': 0},\r\n {'label': 'No', 'value': 1}\r\n ]\r\n )\r\n ])\r\n ], className='inline'),\r\n html.Div([\r\n html.Label([\r\n 'Select a year to filter:',\r\n dcc.Slider(\r\n id='slider',\r\n min=2010,\r\n max=2022,\r\n value=2022,\r\n marks={\r\n 2010: {'label': '2010', 'style': {'color': 'white'}}, 2011: {'label': '2011', 'style': {'color': 'white'}}, 2012: {'label': '2012', 'style': {'color': 'white'}},\r\n 2013: {'label': '2013', 'style': {'color': 'white'}}, 2014: {'label': '2014', 'style': {'color': 'white'}}, 2015: {'label': '2015', 'style': {'color': 'white'}},\r\n 2016: {'label': '2016', 'style': {'color': 'white'}}, 2017: {'label': '2017', 'style': {'color': 'white'}},\r\n 2018: {'label': '2018', 'style': {'color': 'white'}}, 2019: {'label': '2019', 'style': {'color': 'white'}}, 2020: {'label': '2020', 'style': {'color': 'white'}},\r\n 2021: {'label': '2021', 'style': {'color': 'white'}}, 2022: {'label': '2022', 'style': {'color': 'white'}}\r\n }\r\n )\r\n ])\r\n ], className='inline'),\r\n ], className='filter_container'),\r\n dcc.Graph(id='scatterplot', figure={}),\r\n html.Div([\r\n dcc.Graph(id='barplot', figure={})\r\n ])\r\n], className='content')\r\n\r\n@app.callback(Output('scatterplot', 'figure'),\r\n [Input('page-1-dropdown', 'value')],\r\n Input('tsunami_alert', 'value'),\r\n Input('slider', 'value')\r\n )\r\ndef scatter_out(value1, value2, value3):\r\n if value1 == 'all':\r\n if value2 == 'default':\r\n fig = px.scatter(df2, x = 'time', y = 'magnitude', color='alert', size='cdi', color_discrete_sequence=['green', '#d6c800', '#fa9602', 'red'], hover_name='location')\r\n fig.update_layout(yaxis_range=[0,9], 
xaxis_range=[dt.datetime(2009,10,1), dt.datetime(value3, 3 if value3 == 2022 else 12, 30)])\r\n return fig\r\n else:\r\n temp = df2[df2.tsunami == value2]\r\n fig = px.scatter(temp, x = 'time', y = 'magnitude', color='alert', size='cdi', color_discrete_sequence=['green', '#d6c800', '#fa9602', 'red'], hover_name='location')\r\n fig.update_layout(yaxis_range=[0,9], xaxis_range=[dt.datetime(2009,10,1), dt.datetime(2022, 4, 1)])\r\n return fig\r\n else:\r\n if value2 == 'default':\r\n temp = df2[df2.alert == value1]\r\n fig = px.scatter(temp, x = 'time', y = 'magnitude', color='alert', size='cdi', color_discrete_sequence=[value1], hover_name='location')\r\n fig.update_layout(yaxis_range=[0,9], xaxis_range=[dt.datetime(2009,10,1), dt.datetime(2022, 4, 1)])\r\n return fig\r\n else:\r\n temp = df2[(df2.alert == value1) & (df2.tsunami == value2)]\r\n fig = px.scatter(temp, x = 'time', y = 'magnitude', color='alert', size='cdi', color_discrete_sequence=[value1], hover_name='location')\r\n fig.update_layout(yaxis_range=[0,9], xaxis_range=[dt.datetime(2009,10,1), dt.datetime(2022, 4, 1)])\r\n return fig\r\n\r\n\r\n@app.callback(Output('barplot', 'figure'),\r\n [Input('page-1-dropdown', 'value')],\r\n Input('tsunami_alert', 'value'),\r\n Input('slider', 'value')\r\n )\r\ndef barplot_out(value1, value2, value3):\r\n if value1 == 'all':\r\n if value2 == 'default':\r\n df2_temp = df2.time.dt.year.value_counts().reset_index()\r\n fig = px.bar(df2_temp, x = 'index', y = 'time', labels={'x': 'Year', 'y': 'number of earthquakes'}, title='Amount of earthquakes registered by year')\r\n fig.update_layout(xaxis_range=[2009, value3])\r\n return fig\r\n else:\r\n temp = df2[df2.tsunami == value2]\r\n df2_temp = temp.time.dt.year.value_counts().reset_index()\r\n fig = px.bar(df2_temp, x = 'index', y = 'time', labels={'x': 'Year', 'y': 'number of earthquakes'}, title='Amount of earthquakes registered by year')\r\n fig.update_layout(xaxis_range=[2009, value3])\r\n return fig\r\n else:\r\n if value2 == 'default':\r\n temp = df2[df2.alert == value1]\r\n df2_temp = temp.time.dt.year.value_counts().reset_index()\r\n fig = px.bar(df2_temp, x = 'index', y = 'time', labels={'x': 'Year', 'y': 'number of earthquakes'}, title='Amount of earthquakes registered by year')\r\n fig.update_layout(xaxis_range=[2009, value3])\r\n return fig\r\n else:\r\n temp = df2[(df2.alert == value1) & (df2.tsunami == value2)]\r\n df2_temp = temp.time.dt.year.value_counts().reset_index()\r\n fig = px.bar(df2_temp, x = 'index', y = 'time', labels={'x': 'Year', 'y': 'number of earthquakes'}, title='Amount of earthquakes registered by year')\r\n fig.update_layout(xaxis_range=[2009, value3])\r\n return fig\r\n\r\n\r\n\r\n#####################################Second Page######################################### \r\nsecond_page_layout = html.Div([\r\n html.H1('Earthquakes from 2010 to 2021'),\r\n html.Div([\r\n html.Div(html.Label([\r\n 'Select an alert to filter the first plot:',\r\n dcc.Dropdown(\r\n id='page-2-dropdown', value='all', clearable=False,\r\n options=[\r\n {'label': 'Green', 'value': 'green'}, \r\n {'label': 'Yellow', 'value' : 'yellow'},\r\n {'label': 'Orange', 'value': 'orange'},\r\n {'label': 'Red', 'value': 'red'},\r\n {'label': 'All', 'value': 'all'}\r\n ]\r\n )],\r\n className='filter_text'), className='inline'),\r\n html.Div([\r\n html.Label([\r\n 'Select an alert to filter the second plot:',\r\n dcc.RadioItems(\r\n id='page-2-dropdown_strip', value='all',\r\n options=[\r\n {'label': 'Green', 'value': 'green'}, \r\n {'label': 
'Yellow', 'value' : 'yellow'},\r\n {'label': 'Orange', 'value': 'orange'},\r\n {'label': 'Red', 'value': 'red'},\r\n {'label': 'All', 'value': 'all'}\r\n ]\r\n )\r\n ])\r\n ], className='inline'),\r\n html.Div([\r\n html.Label([\r\n 'Sig factor (how significant the event is):',\r\n dcc.Slider(\r\n id='strip_slider',\r\n value=0,\r\n min = 0,\r\n max = 2910,\r\n updatemode = 'drag',\r\n marks={0: {'label': '0', 'style': {'color': 'white'}},\r\n 100: {'label': '100', 'style': {'color': 'white'}}, 300: {'label': '300', 'style': {'color': 'white'}}, 500: {'label': '500', 'style': {'color': 'white'}},\r\n 700: {'label': '700', 'style': {'color': 'white'}}, 900: {'label': '900', 'style': {'color': 'white'}}, 1100: {'label': '1100', 'style': {'color': 'white'}},\r\n 1300: {'label': '1300', 'style': {'color': 'white'}}, 1500: {'label': '1500', 'style': {'color': 'white'}}, 1700: {'label': '1700', 'style': {'color': 'white'}},\r\n 1900: {'label': '1900', 'style': {'color': 'white'}}, 2100: {'label': '2100', 'style': {'color': 'white'}}, 2300: {'label': '2300', 'style': {'color': 'white'}},\r\n 2500: {'label': '2500', 'style': {'color': 'white'}}, 2700: {'label': '2700', 'style': {'color': 'white'}}, 2910: {'label': '2910', 'style': {'color': 'white'}},\r\n }\r\n )\r\n ])\r\n ])\r\n ], className='filter_container'),\r\n dcc.Graph(id='boxplot', figure={}),\r\n html.Div([\r\n dcc.Graph(id='stripplot', figure={})\r\n ]),\r\n html.Div([\r\n html.A('Nuclear Explosion?', id='notice', href='https://www.bbc.com/mundo/noticias-internacional-42309219', target='_blank')\r\n ], className='notice_container')\r\n], className='content')\r\n\r\n\r\n@app.callback(\r\n Output(component_id='boxplot', component_property='figure'),\r\n [Input(component_id='page-2-dropdown', component_property='value')]\r\n)\r\ndef boxplot_out(value):\r\n if value == 'all':\r\n box =px.box(df2, x = 'alert', y = 'magnitude', color='alert', color_discrete_sequence=['green', '#d6c800', '#fa9602', 'red'], height=600)\r\n return box\r\n else:\r\n temp = df2[df2.alert == value]\r\n box = px.box(temp, x = 'alert', y = 'magnitude', color='alert', color_discrete_sequence=[str(value)], height=600)\r\n return box\r\n\r\n@app.callback(\r\n Output(component_id='stripplot', component_property='figure'),\r\n [Input(component_id='page-2-dropdown_strip', component_property='value')],\r\n Input('strip_slider','value')\r\n)\r\ndef strip_out(drop,slider):\r\n if drop == 'all':\r\n if slider == 0:\r\n fig = px.strip(df2, x = 'type', y = 'magnitude', color='type', height=530)\r\n fig.update_traces(marker=dict(size=12, line= dict(width=2, color='DarkSlateGrey')))\r\n fig.update_layout(yaxis_range=[0,9])\r\n return fig\r\n else:\r\n temp = df2[df2.sig <= slider]\r\n fig = px.strip(temp, x = 'type', y = 'magnitude', color='type', height=530)\r\n fig.update_traces(marker=dict(size=12, line= dict(width=2, color='DarkSlateGrey')))\r\n fig.update_layout(yaxis_range=[0,9])\r\n return fig\r\n else:\r\n if slider == 0: \r\n temp = df2[df2.alert == drop]\r\n fig = px.strip(temp, x = 'type', y = 'magnitude', color='type', height=530)\r\n fig.update_traces(marker=dict(size=12, line= dict(width=2, color='DarkSlateGrey')))\r\n fig.update_layout(yaxis_range=[0,9])\r\n return fig\r\n else:\r\n temp = df2[(df2.alert == drop) & (df2.sig <= slider)]\r\n fig = px.strip(temp, x = 'type', y = 'magnitude', color='type', height=530)\r\n fig.update_traces(marker=dict(size=12, line= dict(width=2, color='DarkSlateGrey')))\r\n fig.update_layout(yaxis_range=[0,9])\r\n return 
fig\r\n\r\n\r\n#####################################Third Page######################################### \r\nthird_page_layout = html.Div([\r\n    html.H1('Earthquake Map'),\r\n    html.Div([\r\n        html.Label([\r\n            'Select an alert to filter:',\r\n            dcc.RadioItems(\r\n                id='page-3-radios',\r\n                options=[\r\n                    {'label': 'Green', 'value': 'green'}, \r\n                    {'label': 'Yellow', 'value' : 'yellow'},\r\n                    {'label': 'Orange', 'value': 'orange'},\r\n                    {'label': 'Red', 'value': 'red'},\r\n                    {'label': 'All', 'value': 'all'}],\r\n                value='all'\r\n            )\r\n        ]),\r\n        html.Label([\r\n            'Filter by Tsunami:',\r\n            dcc.RadioItems(\r\n                id='tsunami',\r\n                options=[\r\n                    {'label': 'Default', 'value': 'default'},\r\n                    {'label': 'Yes', 'value': 0},\r\n                    {'label': 'No', 'value': 1}\r\n                ],\r\n                value='default'\r\n            )\r\n        ])\r\n    ], className='filter_container'),\r\n    dcc.Graph(id='page-3-content', figure={}),\r\n    html.Div([\r\n        dcc.Graph(id='page-3-content-2', figure={})\r\n    ])\r\n], className='content')\r\n\r\n@app.callback(Output('page-3-content', 'figure'),\r\n              [Input('page-3-radios', 'value')],\r\n              Input('tsunami', 'value')\r\n              )\r\ndef page_3_radios(value1, value2):\r\n    if value1 == 'all':\r\n        if value2 == 'default':\r\n            return px.density_mapbox(df2, lat='longitud', lon='latitud', z='magnitude', radius=5,\r\n                                     center=dict(lat=7, lon=37), zoom=0.5,\r\n                                     mapbox_style=\"stamen-terrain\", hover_name='location')\r\n        else:\r\n            temp = df2[df2.tsunami == value2]\r\n            return px.density_mapbox(temp, lat='longitud', lon='latitud', z='magnitude', radius=7,\r\n                                     center=dict(lat=7, lon=37), zoom=0.5,\r\n                                     mapbox_style=\"stamen-terrain\", hover_name='location')\r\n    else:\r\n        if value2 == 'default':\r\n            temp = df2[df2.alert == value1]\r\n            return px.density_mapbox(temp, lat='longitud', lon='latitud', z='magnitude', radius=7,\r\n                                     center=dict(lat=7, lon=37), zoom=0.5,\r\n                                     mapbox_style=\"stamen-terrain\", hover_name='location')\r\n        else:\r\n            temp = df2[(df2.alert == value1) & (df2.tsunami == value2)]\r\n            return px.density_mapbox(temp, lat='longitud', lon='latitud', z='magnitude', radius=7,\r\n                                     center=dict(lat=7, lon=37), zoom=0.5,\r\n                                     mapbox_style=\"stamen-terrain\", hover_name='location')\r\n\r\n
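# NOTE (editorial): both map callbacks on this page pass the 'longitud' column
# to `lat` and the 'latitud' column to `lon` (the Scattergeo traces below make
# the same swap). This is kept as-is on the assumption that the source
# dataframe labels these two columns the other way round; if the columns are
# actually named correctly, the arguments should be exchanged.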
@app.callback(Output('page-3-content-2', 'figure'),\r\n              [Input('page-3-radios', 'value')],\r\n              Input('tsunami', 'value')\r\n              )\r\ndef page_3_second(value1, value2):\r\n    if value1 == 'all':\r\n        if value2 == 'default':\r\n            fig = go.Figure(data=go.Scattergeo(\r\n                lon = df2.latitud,\r\n                lat = df2.longitud,\r\n                text = df2.location,\r\n                mode = 'markers',\r\n                marker_color = df2.alert,\r\n            ))\r\n            fig.update_layout(\r\n                autosize=False,\r\n                margin=dict(\r\n                    l=0,\r\n                    r=0,\r\n                    b=0,\r\n                    t=0,\r\n                    pad=90\r\n                )\r\n            )\r\n            return fig\r\n        else:\r\n            temp = df2[df2.tsunami == value2]\r\n            fig = go.Figure(data=go.Scattergeo(\r\n                lon = temp.latitud,\r\n                lat = temp.longitud,\r\n                text = temp.location,\r\n                mode = 'markers',\r\n                marker_color = temp.alert\r\n            ))\r\n            return fig\r\n    else:\r\n        if value2 == 'default':\r\n            temp = df2[df2.alert == value1]\r\n            fig = go.Figure(data=go.Scattergeo(\r\n                lon = temp.latitud,\r\n                lat = temp.longitud,\r\n                text = temp.location,\r\n                mode = 'markers',\r\n                marker_color = temp.alert\r\n            ))\r\n            return fig\r\n        else:\r\n            temp = df2[(df2.alert == value1) & (df2.tsunami == value2)]\r\n            fig = go.Figure(data=go.Scattergeo(\r\n                lon = temp.latitud,\r\n                lat = temp.longitud,\r\n                text = temp.location,\r\n                mode = 'markers',\r\n                marker_color = temp.alert\r\n            ))\r\n            return fig\r\n\r\n\r\n# Update the index\r\n@app.callback(dash.dependencies.Output('page-content', 'children'),\r\n              [dash.dependencies.Input('url', 'pathname')])\r\ndef display_page(pathname):\r\n    if pathname == '/page-1':\r\n        return first_page_layout\r\n    elif pathname == '/page-2':\r\n        return second_page_layout\r\n    elif pathname == '/page-3':\r\n        return third_page_layout\r\n    else:\r\n        return default_layout\r\n    # You could also return a 404 \"URL not found\" page here\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.run_server(debug=True)","sub_path":"sismic_event_dash_app.py","file_name":"sismic_event_dash_app.py","file_ext":"py","file_size_in_byte":20611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"464457136","text":"import tempfile\nfrom pathlib import Path\nimport tarfile\nimport logging\nimport shutil\nimport sys\nimport stat\nimport os\nimport platform\n\nimport coloredlogs\n\nimport conda.cli.python_api as Conda\nfrom conda_build import api as CondaBuild\nfrom conda_build.config import Config\nfrom conda_pack import core as CondaPack\n\nroot = logging.getLogger()\nroot.setLevel(logging.INFO)\n\nlogger = logging.getLogger('hexrdgui')\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setLevel(logging.INFO)\nformatter = coloredlogs.ColoredFormatter('%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\npackage_env_name = 'hexrd_package_env'\n\ndef patch_qt_config(base_path):\n    logger.info('Patching qt.conf.')\n    with (base_path / 'bin' / 'qt.conf').open('w') as fp:\n        fp.write('[Paths]\\n')\n        fp.write('Plugins=../plugins')\n\ndef install_macos_script(base_path, package_path):\n    # Add hexrd bash start script\n    executable_path = package_path / 'hexrd'\n    shutil.copyfile(base_path / 'darwin' / 'hexrd', executable_path)\n    st = os.stat(executable_path)\n    os.chmod(executable_path, st.st_mode | stat.S_IXUSR)\n\ndef build_mac_app_bundle(base_path, tar_path):\n    package_path = base_path / 'package'\n    package_path.mkdir()\n    hexrd_app_path = package_path / 'HEXRD.app'\n    hexrd_app_path.mkdir()\n    hexrd_app_contents = hexrd_app_path / 'Contents'\n    hexrd_app_contents.mkdir()\n    hexrd_app_contents_macos = hexrd_app_contents / 'MacOS'\n    hexrd_app_contents_macos.mkdir()\n    hexrd_app_contents_resources = hexrd_app_contents / 'Resources'\n    hexrd_app_contents_resources.mkdir()\n\n    # Add Info.plist\n    shutil.copyfile(base_path / 'darwin' / 'Info.plist', hexrd_app_contents / 'Info.plist')\n\n    # Extract conda-pack tar into Resources/\n    logger.info('Extracting tar into Resources/ directory.')\n    tar = tarfile.open(tar_path)\n    tar.extractall(path=hexrd_app_contents_resources)\n    tar.close()\n\n    patch_qt_config(hexrd_app_contents_resources)\n    install_macos_script(base_path, hexrd_app_contents_macos)\n\ndef install_linux_script(base_path, package_path):\n    logger.info('Generating hexrd script.')\n\n    # First we rename the setuptools script\n    hexrd_path = package_path / 'bin' / 'hexrd'\n    hexrdgui_path = package_path / 'bin' / 'hexrdgui.py'\n    hexrd_path.rename(hexrdgui_path)\n\n    # Now install a shell script to call the setuptools script\n    hexrd_executable = str(package_path / 'bin' / 'hexrd')\n    shutil.copyfile(base_path / 'linux' / 'hexrd', hexrd_executable)\n    st = os.stat(hexrd_executable)\n    os.chmod(hexrd_executable, st.st_mode | stat.S_IXUSR)\n\n
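# --- Editorial sketch (not part of the original package.py) -----------------
# build_mac_app_bundle above and the two build_*_package_dir helpers below all
# repeat the same "mkdir, tarfile.open, extractall, close" sequence. A shared
# helper could factor that out; `extract_env_tar` is a hypothetical name.
def extract_env_tar(tar_path, dest_path):
    # Create the destination (tolerating an existing directory) and unpack
    # the conda-pack tarball into it.
    dest_path.mkdir(parents=True, exist_ok=True)
    with tarfile.open(tar_path) as tar:
        tar.extractall(path=dest_path)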
def build_linux_package_dir(base_path, tar_path):\n    logger.info('Extracting tar into package/ directory.')\n    # Now extract the tar into the package directory so it is ready for cpack.\n    package_path = base_path / 'package'\n    package_path.mkdir(parents=True, exist_ok=True)\n    tar = tarfile.open(tar_path)\n    tar.extractall(path=package_path)\n    tar.close()\n\n    patch_qt_config(package_path)\n    install_linux_script(base_path, package_path)\n\ndef build_conda_pack(base_path, tmp):\n    # First build the hexrdgui package\n    recipe_path = str(base_path / '..' / 'conda.recipe')\n    config = Config()\n    config.channel = ['cjh1', 'conda-forge']\n    config.channel_urls = ['cjh1', 'conda-forge']\n    logger.info('Building hexrd conda package.')\n    CondaBuild.build(recipe_path, config=config)\n\n    logger.info('Creating new conda environment.')\n    # Now create a new environment to install the package into\n    env_prefix = str(tmp / package_env_name)\n    Conda.run_command(\n        Conda.Commands.CREATE,\n        '--prefix', env_prefix,\n        'python=3.7'\n    )\n\n    logger.info('Installing hexrdgui into new environment.')\n    # Install hexrdgui into new environment\n    params = [\n        Conda.Commands.INSTALL,\n        '--prefix', env_prefix,\n        '--channel', 'cjh1',\n        '--channel', 'conda-forge',\n        '--use-local', 'hexrdgui'\n    ]\n    if platform.system() == 'Darwin':\n        params.append('python.app=2')\n    Conda.run_command(*params)\n\n    logger.info('Generating tar from environment using conda-pack.')\n    # Now use conda-pack to create a relocatable tar\n    tar_path = str(tmp / 'hexrdgui.tar')\n    CondaPack.pack(\n        prefix=env_prefix,\n        output=tar_path,\n        format='tar'\n    )\n\n    return tar_path\n\n# We install a script that ensures the current working directory is\n# the bin directory.\ndef install_windows_script(base_path, package_path):\n    logger.info('Patch hexrd script.')\n\n    # Now install a shell script to call the setuptools script\n    hexrd_script = str(package_path / 'Scripts' / 'hexrd-script.py')\n    shutil.copyfile(base_path / 'windows' / 'hexrd-script.py', hexrd_script)\n\ndef patch_qt_config_windows(base_path):\n    logger.info('Patching qt.conf.')\n    with (base_path / 'qt.conf').open('w') as fp:\n        fp.write('[Paths]\\n')\n        fp.write('Prefix = Library\\n')\n        fp.write('Binaries = Library/bin\\n')\n        fp.write('Libraries = Library/lib\\n')\n        fp.write('Headers = Library/include/qt\\n')\n        fp.write('TargetSpec = win32-msvc\\n')\n        fp.write('HostSpec = win32-msvc\\n')\n\ndef build_windows_package_dir(base_path, tar_path):\n    logger.info('Extracting tar into package/ directory.')\n    # Now extract the tar into the package directory so it is ready for cpack.\n    package_path = base_path / 'package'\n    package_path.mkdir(parents=True, exist_ok=True)\n    tar = tarfile.open(tar_path)\n    tar.extractall(path=package_path)\n    tar.close()\n\n    patch_qt_config_windows(package_path)\n    install_windows_script(base_path, package_path)\n\ndef build_package():\n    with tempfile.TemporaryDirectory() as tmp:\n        tmp = Path(tmp)\n        base_path = Path(__file__).parent\n        tar_path = build_conda_pack(base_path, tmp)\n\n        package_path = base_path / 'package'\n        # Remove first so we start fresh\n        shutil.rmtree(str(package_path), ignore_errors=True)\n\n        if platform.system() == 'Darwin':\n            build_mac_app_bundle(base_path, tar_path)\n        elif platform.system() == 'Linux':\n            build_linux_package_dir(base_path, tar_path)\n        elif platform.system() == 'Windows':\n            build_windows_package_dir(base_path, tar_path)\n        else:\n            raise Exception('Unsupported platform: %s' % platform.system())\n\nif __name__ == '__main__':\n    build_package()","sub_path":"packaging/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":6468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"385713569","text":"from tkinter import *\nfrom 
tkinter.ttk import Combobox, Radiobutton\nfrom tkinter import messagebox, scrolledtext\nfrom dict_logic import Dictionary, Quiz\n\nclass GUI(Tk):\n    \"\"\" The class responsible for displaying the dictionary.\"\"\"\n    def __init__(self, dictionary=Dictionary):\n        super().__init__()\n        self.dictionary = dictionary\n        self.title(\"My dictionary\")\n        self.configure(bg=\"#33cccc\")\n        self.__widgets()\n\n    def __widgets(self):\n        \"\"\"Creates the required widgets.\"\"\"\n        Label(self, text = \"DICTIONARY \", font=\"Times 18 bold\", bg=\"#33cccc\").grid(row = 1, column = 0, sticky = W, padx = 5)\n\n        self.entryvar = StringVar()\n        self.entryvar.set(\"Please enter a word\")\n        text_entry = Combobox(self, textvariable=self.entryvar,width = 25, font=\"Times 15 bold\")\n        text_entry.grid(row = 2, column = 0, sticky = W, pady=10, padx = 10)\n        text_entry.bind(\"<Button-1>\", self.reset)\n        text_entry['values'] = [word for word in self.dictionary.data.keys()]\n        text_entry.bind('<<ComboboxSelected>>', self.search)\n\n        Button(self, text=\"RANDOM WORD\",width = 20, command = self.get_random).grid(row=3,column=0,sticky=W, padx = 10, pady=10)\n\n        Button(self, text= \"SEARCH\", width = 20, command = self.search).grid(row=4, column = 0, sticky = W, padx = 10)\n        self.bind('<Return>', self.search)\n\n        Label(self, text =\"\\nDefinition: \", font=\"Times 18 bold\", bg=\"#33cccc\").grid(row= 5, column=0, sticky = W)\n\n        self.output = scrolledtext.ScrolledText(self, width=25, height=6, wrap=WORD, font=\"Times 15 bold\")\n        self.output.grid(row=6, column=0, sticky = W, pady=20, padx = 10)\n        self.output.config(state=DISABLED) # so the user cannot type into it\n\n        self.add_update_var = StringVar()\n        self.add_update_var.set('')\n        self.add_update_button = Button(self, textvariable=self.add_update_var,width = 35, command = self.NewWords)\n        self.add_update_button.grid(row = 12, column =0, sticky = W, padx = 10)\n        self.add_update_button.config(state=DISABLED)\n        Button(self, text=\"LET'S TAKE A QUIZ!\",width = 15,fg=\"#1a1aff\",command=self.play).grid(row=3, column=1, sticky = W, pady=5, padx = 10)\n\n        self.statbutton = Button(self, text=\"PROBABILITY OF OCCURRENCE\",width = 25,command=self.stat)\n        self.statbutton.grid(row=4, column=1, sticky = W, pady=5, padx = 10)\n        self.statbutton.config(state=DISABLED)\n\n        Button(self, text=\"EXIT\", width = 15, command = self.exit).grid(row=12,column=1,sticky=W, padx = 10, pady = 5)\n\n    def search(self,*args):\n        \"\"\"Looks up the definition belonging to the entered word and, when the SEARCH button or the Enter\n        key is pressed, prints it as well. If there is no such word, it prints a \"Sorry\" message.\"\"\"\n
        self.output.config(state=NORMAL)\n        entered_text = self.entryvar.get()\n        self.output.delete(0.0, END)\n        try:\n            definition = self.dictionary.data[entered_text]\n            self.dictionary.word_exist= True\n        except:\n            if entered_text not in self.dictionary.not_existing_words:\n                self.dictionary.not_existing_words.append(entered_text)\n            definition = \"Sorry, there is no word like that.\"\n            self.dictionary.word_exist= False\n\n        self.output.insert(END, definition)\n        self.output.config(state=DISABLED)\n\n        self.add_update_button.config(state=NORMAL)\n        if self.dictionary.word_exist == False:\n            self.add_update_var.set(\"ADD\")\n        else: self.add_update_var.set(\"UPDATE\")\n\n\n    def exit(self):\n        \"\"\"Closes the program; assigned to the EXIT button.\"\"\"\n        self.dictionary.save_words()\n        self.destroy()\n    \n    def get_random(self):\n        \"\"\"Picks a random word from the existing words of the dictionary and inserts it into the Entry.\"\"\"\n        self.entryvar.set('')\n        Random = self.dictionary.get_random_word()\n        self.entryvar.set(Random)\n        self.search()\n\n    def reset(self,*args):\n        \"\"\"Clears the contents of the Entry; bound to the left mouse click.\"\"\"\n        self.entryvar.set('')\n\n    def play(self):\n        \"\"\"Assigned to the LET'S TAKE A QUIZ! button; displays the QUIZ in a pop-up window.\"\"\"\n        self.quiz1 = Quiz(self.dictionary)\n        playQuiz = MakeQuiz(self,self.quiz1)\n        playQuiz.make_game()\n        self.withdraw() # hides the main window while the QUIZ is running\n\n    def stat(self):\n        \"\"\"Builds the current statistics with matplotlib after the stats button is pressed.\n        If the QUIZ has not been played yet, nothing is shown. \"\"\"\n        try:\n            self.quiz1.make_plot(\"plot.jpg\")\n        except:pass\n\n    def NewWords(self):\n        AddWords(self,self.dictionary)\n\n\nclass MakeQuiz(Toplevel):\n    \"\"\" The display class of the QUIZ; it appears in a pop-up window. \"\"\"\n    def __init__(self, master, quiz=Quiz):\n        super().__init__(master)\n        self.master = master\n        self.title(\"QUIZ\")\n        self.geometry(\"800x300\")\n        self.quiz = quiz\n    \n    def check_answer(self):\n        \"\"\"Checks whether the selected Radiobutton is the right answer; if so, increases the score by 1.\"\"\"\n        if self.radiobutton_var.get() == self.quiz.the_good_answer:\n            self.quiz.good += 1\n\n    def __make_the_widgets(self):\n        \"Creates the Labels and Radiobuttons for the QUIZ.\"\n        Label(self, text=self.quiz.random_word, fg=\"blue\", font=\"Times 15 bold\").pack()\n        self.radiobutton_var = StringVar()\n        for answer in self.quiz.answers:\n            Radiobutton(self, text=answer,variable=self.radiobutton_var, value=answer).pack()\n\n        self.points_var = IntVar()\n        self.points_var.set(self.quiz.good)\n        Label(self,text= \"YOUR POINTS:\", fg=\"green\", font=\"Times 12 bold\").pack()\n        points_label = Label(self,textvariable=self.points_var, fg=\"green\", font=\"Times 12 bold\")\n        points_label.pack()\n        Button(self,text=\"NEXT\", command=self.make_and_update).pack()\n        Button(self,text=\"RESET\", command=self.Reset).pack()\n        Button(self, text= \"EXIT THE QUIZ\", command=self.exit).pack()\n    \n    def make_game(self):\n        \"Initially this function creates the Quiz; afterwards the NEXT button calls make_and_update.\"\n        self.quiz.answers = []\n        self.quiz.make_quiz()\n        self.__make_the_widgets()\n\n    def make_and_update(self):\n        \"Destroys the previous widgets, checks whether the previous answer was right, then provides a new QUIZ question and answer options.\"\n
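        # (Editorial note) Tkinter has no whole-dialog "refresh": the method
        # body below destroys every child widget of this Toplevel and lets
        # make_game() rebuild them for the next question.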
\"\n for widget in self.winfo_children():\n widget.destroy()\n self.quiz.questions += 1\n self.check_answer()\n self.make_game()\n \n def exit(self):\n \"\"\"Az EXIT gombhoz van hozzárendelve, ezzel léphetünk ki a QUIZ-ből. Előtte megmutatja egy messageboxban,\n hány százalékot értél el. Valamint újra megnyitja a főablakot.\"\"\"\n percentage = self.quiz.result()\n messagebox.showinfo(title=\"RESULT\",message=\"Az eredményed: \"+ str(percentage) +\"%\")\n self.master.statbutton.config(state=NORMAL)\n self.destroy()\n self.master.deiconify()\n\n def Reset(self):\n self.quiz.reset()\n self.points_var.set(self.quiz.good)\n\n \nclass AddWords(Toplevel):\n \"\"\" Új szavak - definíciok hozzáadása, új ablakban jelenik meg. \"\"\"\n def __init__(self, master, dictionary:Dictionary):\n super().__init__(master)\n self.master = master\n self.dictionary = dictionary\n self.configure(bg=\"#33cccc\")\n self.__add_word_widgets()\n\n def __add_word_widgets(self):\n Label(self, text =\"Word: \", bg=\"#33cccc\", font=\"Times 15 bold\").grid(row = 2, column =0, sticky = W, padx = 10)\n\n self.text1 = Combobox(self, width = 30, height=1,textvariable=self.master.entryvar)\n self.text1.grid(row = 3, column =0, sticky = W, padx = 10, pady=10)\n if self.master.dictionary.word_exist == False:\n self.text1['values'] = [word for word in self.dictionary.not_existing_words]\n else: \n self.text1['values'] = [word for word in self.dictionary.data.keys()]\n self.text1.bind('<>', self.search_def)\n\n Label(self, text =\"Definition: \", bg=\"#33cccc\", font=\"Times 15 bold\").grid(row = 4, column =0, sticky = W, padx = 10)\n self.text2 = Text(self, width = 30, height =6, wrap=WORD)\n self.text2.grid(row = 5, column= 0, sticky = W, padx = 10, pady=10)\n\n if self.master.dictionary.word_exist == False:\n Button(self, text=\"ADD WORD\", width = 33,command = self.add_new).grid(row = 6, column =0, sticky = W, padx = 10, pady=10)\n else:\n Button(self, text=\"UPDATE DEFINITION\", width = 33,command = self.update).grid(row = 7, column =0, sticky = W, padx = 10, pady=10)\n self.text2.insert(END, self.dictionary.data[self.master.entryvar.get()])\n\n self.add_var=StringVar()\n Label(self, textvariable=self.add_var, bg=\"#33cccc\").grid(row=8,column=0) #hogy sikeres volt-e a hozzadas vagy sem\n\n self.bind('', self.search_def)\n\n def add_new(self):\n \"\"\"Kiolvassa a két szövegmezőbe beírt szót és definíciót, majd az ADD gomb megnyomása után\n hozzáadja a már létező json fájlhoz, így ezután ez a kérdés is megjelenhet a QUIZ-ben.\"\"\"\n\n get_text1 = self.text1.get()\n get_text2 = self.text2.get(\"1.0\",\"end-1c\")\n try:\n self.dictionary.add_word(get_text1,get_text2)\n self.add_var.set(\"Sikeres hozzaadas\")\n except Exception as e:\n self.add_var.set(e)\n \n self.text1.set('')\n self.text2.delete('1.0', 'end')\n\n for word in self.dictionary.not_existing_words:\n if word in self.dictionary.data.keys():\n self.dictionary.not_existing_words.remove(word)\n self.text1['values'] = [word for word in self.dictionary.not_existing_words]\n\n\n def update(self):\n get_text1 = self.text1.get()\n get_text2 = self.text2.get(\"1.0\",\"end-1c\")\n\n try:\n self.master.dictionary.update_word(get_text1,get_text2)\n self.add_var.set(\"Sikeres Update\")\n except Exception as e:\n self.add_var.set(e)\n\n self.text1.set('')\n self.text2.delete('1.0', 'end')\n\n def search_def(self,*args):\n self.text2.delete(0.0, END)\n self.text2.insert(END, 
self.dictionary.data[self.master.entryvar.get()])\n","sub_path":"dict_gui.py","file_name":"dict_gui.py","file_ext":"py","file_size_in_byte":10639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"640391879","text":"#import sys\nfrom Classes.polygon import Polygon\n# Considering origin to be top left\nclass Tile(Polygon):\n def __init__(self, size, x, y, label=\"\"):\n \"\"\"\n Constructor for tile class. This class inherits from polygon\n Args:\n size: tiles side length\n x, y: top left corner of the tile\n label is optional\n \"\"\"\n self.size = size\n # Top left corner\n self.x1 = x\n self.y1 = y\n # Bottom right corner\n self.x2 = self.x1 + self.size\n self.y2 = self.y1 + self.size\n self.labelStatus = {}\n vertices = [(self.x1, self.y1), (self.x2, self.y1), (self.x2, self.y2), (self.x1, self.y2), (self.x1, self.y1)]\n Polygon.__init__(self, vertices, label)\n def setSize(self, size):\n self.size = size\n def getSize(self):\n return self.size\n def setVertices(self, x, y):\n # Top left corner\n self.x1 = x\n self.y1 = y\n # Bottom right corner\n self.x2 = self.x1 + self.size\n self.y2 = self.y1 + self.size\n vertices = [(self.x1, self.y1), (self.x2, self.y1), (self.x2, self.y2), (self.x1, self.y2), (self.x1, self.y1)]\n Polygon.setVertices(self, vertices)\n def updateLabelStatus(self, poly):\n \"\"\"\n Updates the status of a tile wrt an annotation (represented by poly)\n Different types of status:\n Tile is completely inside poly - 0\n Tile intersects the boundary of poly - 1\n Tile is completely outside poly - 2\n Poly lies completely inside the tile - 3\n Args:\n poly: the polygon that denotes an annotation\n Returns:\n void\n This function doesn't return anything but it updates the status of the tile with respect to\n various annotations with the help of a dictionary. 
It also stores the percentage of overlap\n        of the tile with poly\n        \"\"\"\n        \n        label = poly.label\n        p1 = poly.polygon.buffer(0)\n        p2 = self.polygon.buffer(0)\n        #print(poly.polygon)\n        #print(self.polygon)\n        #print(label)\n        isInsideTile = (self).contained(poly)\n        isIntersection = poly.isIntersecting(self)\n        isContained = poly.contained(self)\n        if (isInsideTile):\n            #intersection_area = ((poly.polygon).intersection(self.polygon)).area\n            intersection_area = ((p1).intersection(p2)).area\n            percent_overlap = (float(intersection_area)/self.getArea())*100\n            self.labelStatus[label] = [3,percent_overlap] # the polygon lies entirely inside the tile\n        elif (isContained):\n            self.labelStatus[label] = [0,100] # the tile lies completely inside the boundary of the polygon\n        elif (isIntersection):\n            #intersection_area = ((poly.polygon).intersection(self.polygon)).area\n            intersection_area = ((p1).intersection(p2)).area\n            percent_overlap = (float(intersection_area)/self.getArea())*100\n            self.labelStatus[label] = [1,percent_overlap] # the tile intersects the boundary of the polygon\n    \n    def getLabelStatus(self):\n        return self.labelStatus","sub_path":"Classes/tile.py","file_name":"tile.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"447549096","text":"import sys \nsys.path.append(\"/Users/will/Dropbox/circle-fit\")\nimport circle_fit\n\nfits_file = \"../data/LP-Ori-HST/lp-ori-f435w.fits\"\n\nfor delta_theta in 110, 130, 150:\n    plotfile = f\"lp-ori-{delta_theta:03d}.pdf\"\n    print('#### '*10)\n    print(\"Creating\", plotfile)\n    circle_fit.plot_solution(\n        f\"lp-ori-acs-forma.reg\",\n        fits_file,\n        plotfile,\n        delta_theta=delta_theta,\n        vmin=0.0, vmax=8.0,\n    )\n","sub_path":"arc-fits/lp-ori-circle-fit.py","file_name":"lp-ori-circle-fit.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"183418007","text":"#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 07:03, 18/03/2020                                                        %\n#                                                                                                       %\n#       Email:      nguyenthieu2102@gmail.com                                                           %\n#       Homepage:   https://www.researchgate.net/profile/Thieu_Nguyen6                                  %\n#       Github:     https://github.com/thieu1995                                                        %\n#-------------------------------------------------------------------------------------------------------%\n\nimport concurrent.futures as parallel\nfrom functools import partial\nimport numpy as np\nimport time\nfrom mealpy.optimizer import Optimizer\n\n\nclass BaseHGSO(Optimizer):\n    \"\"\"\n    The original version of: Henry Gas Solubility Optimization (HGSO)\n    Henry gas solubility optimization: A novel physics-based algorithm\n    Link:\n        https://www.sciencedirect.com/science/article/abs/pii/S0167739X19306557\n    \"\"\"\n\n    def __init__(self, problem, epoch=10000, pop_size=100, n_clusters=2, **kwargs):\n        \"\"\"\n        Args:\n            problem ():\n            epoch (int): maximum number of iterations, default = 10000\n            pop_size (int): population size, default = 100\n            n_clusters (int): number of clusters, default = 2\n            **kwargs ():\n        \"\"\"\n        super().__init__(problem, kwargs)\n        self.nfe_per_epoch = 1.5 * pop_size\n\n        self.epoch = epoch\n        self.pop_size = pop_size\n        self.n_clusters = n_clusters\n        self.n_elements = int(self.pop_size / self.n_clusters)\n\n        self.T0 = 298.15\n        self.K = 1.0\n        self.beta = 1.0\n        
self.alpha = 1\n        self.epsilon = 0.05\n\n        self.l1 = 5E-2\n        self.l2 = 100.0\n        self.l3 = 1E-2\n        self.H_j = self.l1 * np.random.uniform()\n        self.P_ij = self.l2 * np.random.uniform()\n        self.C_j = self.l3 * np.random.uniform()\n\n    def _create_population__(self, n_clusters=2):\n        pop = []\n        group = []\n        for i in range(n_clusters):\n            team = []\n            for j in range(self.n_elements):\n                pos_new = np.random.uniform(self.problem.lb, self.problem.ub)\n                fit_new = self.get_fitness_position(pos_new)\n                team.append([pos_new.copy(), fit_new, i])\n                pop.append([pos_new.copy(), fit_new, i])\n            group.append(team)\n        return pop, group\n\n    def _get_best_solution_in_team(self, group=None):\n        list_best = []\n        for i in range(len(group)):\n            _, best_agent = self.get_global_best_solution(group[i])\n            list_best.append(best_agent)\n        return list_best\n\n    def solve(self, mode='sequential'):\n        \"\"\"\n        Args:\n            mode (str): 'sequential', 'thread', 'process'\n                + 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)\n                + 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)\n                + 'process': recommended for hard and big task (> 2 minutes for calculating objective)\n\n        Returns:\n            [position, fitness value]\n        \"\"\"\n        if mode != \"sequential\":\n            print(\"HGSO supports sequential mode only!\")\n            exit(0)\n        self.termination_start()\n        pop, group = self._create_population__(self.n_clusters)\n        _, g_best = self.get_global_best_solution(pop)\n        self.history.save_initial_best(g_best)\n        p_best = self._get_best_solution_in_team(group)  # multiple element\n\n        for epoch in range(0, self.epoch):\n            time_epoch = time.time()\n\n            ## Loop based on the number of clusters in the swarm (number of gas types)\n            for i in range(self.n_clusters):\n\n                ### Loop based on the number of individuals in each gas type\n                for j in range(self.n_elements):\n                    F = -1.0 if np.random.uniform() < 0.5 else 1.0\n\n                    ##### Based on Eq. 8, 9, 10\n                    self.H_j = self.H_j * np.exp(-self.C_j * (1.0 / np.exp(-epoch / self.epoch) - 1.0 / self.T0))\n                    S_ij = self.K * self.H_j * self.P_ij\n                    gama = self.beta * np.exp(- ((p_best[i][self.ID_FIT][self.ID_TAR] + self.epsilon) /\n                                                 (group[i][j][self.ID_FIT][self.ID_TAR] + self.epsilon)))\n\n                    X_ij = group[i][j][self.ID_POS] + F * np.random.uniform() * gama * (p_best[i][self.ID_POS] - group[i][j][self.ID_POS]) + \\\n                           F * np.random.uniform() * self.alpha * (S_ij * g_best[self.ID_POS] - group[i][j][self.ID_POS])\n                    pos_new = self.amend_position_faster(X_ij)\n                    fit_new = self.get_fitness_position(pos_new)\n                    group[i][j] = [pos_new, fit_new, i]\n                    pop[i * self.n_elements + j] = [pos_new, fit_new, i]\n\n
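            ## (Editorial note) The block below applies the Eq. 8 Henry's
            ## coefficient update once more at cluster level, on top of the
            ## per-individual updates inside the loop above; the resulting
            ## solubility S_ij (Eq. 9) then drives the worst-agent reset.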
            ## Update Henry's coefficient using Eq.8\n            self.H_j = self.H_j * np.exp(-self.C_j * (1.0 / np.exp(-epoch / self.epoch) - 1.0 / self.T0))\n            ## Update the solubility of each gas using Eq.9\n            S_ij = self.K * self.H_j * self.P_ij\n            ## Rank and select the number of worst agents using Eq. 11\n            N_w = int(self.pop_size * (np.random.uniform(0, 0.1) + 0.1))\n            ## Update the position of the worst agents using Eq. 12\n            sorted_id_pos = np.argsort([x[self.ID_FIT][self.ID_TAR] for x in pop])\n\n            for item in range(N_w):\n                id = sorted_id_pos[item]\n                j = id % self.n_elements\n                i = int((id - j) / self.n_elements)\n                X_new = np.random.uniform(self.problem.lb, self.problem.ub)\n                pos_new = self.amend_position_faster(X_new)\n                fit_new = self.get_fitness_position(pos_new)\n                pop[id] = [pos_new, fit_new, i]\n                group[i][j] = [pos_new, fit_new, i]\n\n            p_best = self._get_best_solution_in_team(group)\n            # update global best position\n            _, g_best = self.update_global_best_solution(pop)  # We sort the population\n\n            ## Additional information for the framework\n            time_epoch = time.time() - time_epoch\n            self.history.list_epoch_time.append(time_epoch)\n            self.history.list_population.append(pop.copy())\n            self.print_epoch(epoch + 1, time_epoch)\n            if self.termination_flag:\n                if self.termination.mode == 'TB':\n                    if time.time() - self.count_terminate >= self.termination.quantity:\n                        self.termination.logging(self.verbose)\n                        break\n                elif self.termination.mode == 'FE':\n                    self.count_terminate += self.nfe_per_epoch\n                    if self.count_terminate >= self.termination.quantity:\n                        self.termination.logging(self.verbose)\n                        break\n                elif self.termination.mode == 'MG':\n                    if epoch >= self.termination.quantity:\n                        self.termination.logging(self.verbose)\n                        break\n                else:  # Early Stopping\n                    temp = self.count_terminate + self.history.get_global_repeated_times(self.ID_FIT, self.ID_TAR, self.EPSILON)\n                    if temp >= self.termination.quantity:\n                        self.termination.logging(self.verbose)\n                        break\n\n        ## Additional information for the framework\n        self.save_optimization_process()\n        return self.solution[self.ID_POS], self.solution[self.ID_FIT][self.ID_TAR]\n","sub_path":"mealpy/physics_based/HGSO.py","file_name":"HGSO.py","file_ext":"py","file_size_in_byte":7939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"7322016","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\nfrom copy import copy\nfrom back.FilterClass import FilterType, FilterData, ApproxType\nfrom back.Approx.butterworth import Butterworth\nfrom back.Approx.bessel import Bessel\nfrom back.Approx.legendre import Legendre\nfrom back.Approx.cheby1 import ChebyI\nfrom back.Approx.cheby2 import ChebyII\nfrom back.Approx.cauer import Cauer\nfrom back.Approx.gauss import Gauss\n\nclass FilterSpace:\n    def __init__(self):\n        self.filters = []  # Array of filters\n        self.w_unit = \"Hz\"  # Frequency unit\n        self.mod_unit = \"dB\"  # Magnitude unit\n        self.ph_unit = \"°\"  # Phase unit\n\n    # addFilter: Takes the parameters for the filter and, if they make sense, creates it.\n    # Returns an empty string on success, otherwise an error message.\n    # NOTE: THE FREQUENCIES ARE ENTERED IN RAD/S (check this from the front end)\n    def addFilter(self, filter_type, approx, wp, wa, Ap, Aa, des, G=1, n=None, Q=None, nmin=None, nmax=None, Qmax=None, rp=None, GD=None, tol=None):\n        m = self.check_filter(filter_type, approx, wp, wa, Ap, Aa)\n        if m != \"\":\n            print(\"Could not create the filter\")\n            return m\n        wp, wa = self.check_symmetry(filter_type, wp, wa)\n        f = switch_atypes.get(approx)(filter_type, wp, wa, Ap, Aa, des/100, G, n, Q, nmin, nmax, Qmax, rp, GD, tol)\n        f.add_name_index(self.get_name_index())\n        if f.type != FilterType.ERR:\n            self.filters.append(f)\n        else:\n            print(\"Error creating the filter\")\n            del f\n        return \"\"\n\n    # delFilter: Removes the filter from the FilterSpace and destroys it.\n    # Takes the filter itself (this could be changed to the index or the name, whichever is more convenient).\n
    def delFilter(self, f):\n        self.filters.remove(f)\n        del f\n        return\n\n    def get_name_index(self):\n        index = None\n        ixs = []\n        for i in range(len(self.filters)):\n            ixs.append(int(self.filters[i].name[1]))\n        ixs.sort()\n        for i in range(len(ixs)):\n            if ixs[i] != i:\n                index = i\n                break\n        if index is None:\n            index = len(self.filters)\n        return index\n\n    def get_wminmax(self):\n        wmin = []\n        wmax = []\n        for i in range(len(self.filters)):\n            if self.filters[i].visibility:\n                w = self.filters[i].get_wminmax()\n                wmin.append(w[0])\n                wmax.append(w[1])\n        wmin = min(wmin)\n        wmax = max(wmax)\n        return wmin, wmax\n\n    def plot_mod(self, ax, A=False):\n        wmin, wmax = self.get_wminmax()\n        wmin = wmin / (2 * np.pi)\n        wmax = wmax / (2 * np.pi)\n        w = np.linspace(wmin, wmax, int(100*wmax/wmin))\n        cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']\n        ax.grid()\n        for i in range(len(self.filters)):\n            if self.filters[i].visibility:\n                self.filters[i].plot_mod(ax, cycle[i % len(cycle)], w, A)\n        ax.legend(loc=\"best\")\n        if not A: ax.set_title(\"Frequency response - Magnitude\")\n        else: ax.set_title(\"Attenuation\")\n        ax.set_xlabel(\"$f$ [Hz]\")\n        if not A: ax.set_ylabel(\"$|H(s)|$ [dB]\")\n        else: ax.set_ylabel(\"A [dB]\")\n        ax.set_xlim([wmin, wmax])\n        return\n\n    def plot_ph(self, ax):\n        wmin, wmax = self.get_wminmax()\n        wmin = wmin / (2 * np.pi)\n        wmax = wmax / (2 * np.pi)\n        w = np.linspace(wmin, wmax, int(100*wmax/wmin))\n        cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']\n        ax.grid()\n        for i in range(len(self.filters)):\n            if self.filters[i].visibility:\n                self.filters[i].plot_ph(ax, cycle[i % len(cycle)], w)\n        ax.legend(loc=\"best\")\n        ax.set_title(\"Frequency response - Phase\")\n        ax.set_xlabel(\"$f$ [Hz]\")\n        ax.set_ylabel(\"$\\\\angle{H(s)}$ [dB]\")\n        ax.set_xlim([wmin, wmax])\n        return\n\n    def plot_zp(self, ax):\n        cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']\n        ax.grid()\n        ax.scatter(0, 0, marker='.', edgecolors=\"None\", facecolors=\"None\")\n        for i in range(len(self.filters)):\n            if self.filters[i].visibility:\n                self.filters[i].plot_zp(ax, cycle[i % len(cycle)])\n        ax.legend(loc=\"best\")\n        ax.set_title(\"Poles and Zeros\")\n        ax.set_xlabel(\"$\\\\alpha$ $[\\\\frac{rad}{s}]$\")\n        ax.set_ylabel(\"$j \\\\omega$ $[\\\\frac{rad}{s}]$\")\n        return\n\n    def plot_gd(self, ax):\n        wmin, wmax = self.get_wminmax()\n        wmin = wmin / (2 * np.pi)\n        wmax = wmax / (2 * np.pi) / 3\n        w = np.linspace(wmin, wmax, int(100 * wmax / wmin))\n        cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']\n        ax.grid()\n        for i in range(len(self.filters)):\n            if self.filters[i].visibility:\n                self.filters[i].plot_gd(ax, cycle[i % len(cycle)], w)\n        ax.legend(loc=\"best\")\n        ax.set_title(\"Group Delay\")\n        ax.set_xlabel(\"$f$ [Hz]\")\n        ax.set_ylabel(\"$\\\\frac{d(\\\\angle{H(s)})}{d (f)}$ [dB]\")\n        ax.set_xlim([wmin, wmax])\n        return\n\n    # check_filter: Checks that the filter specification is valid. Returns an empty string if it is, otherwise an error message.\n
    def check_filter(self, filter_type, approx, wp, wa, Ap, Aa):\n        m = \"\"\n        m = self.check_freq(filter_type, wp)\n        if m == \"\":\n            m = self.check_freq(filter_type, wa)\n        if m == \"\" and filter_type == FilterType.LP and not wp < wa:\n            m = \"The order of the attenuation and pass frequencies does not correspond to a low-pass filter\"\n        if m == \"\" and filter_type == FilterType.HP and not wa < wp:\n            m = \"The order of the attenuation and pass frequencies does not correspond to a high-pass filter\"\n        if m == \"\" and filter_type == FilterType.BP and not (wa[0] < wp[0] < wp[1] < wa[1]):\n            m = \"The order of the attenuation and pass frequencies does not correspond to a band-pass filter\"\n        elif m == \"\" and filter_type == FilterType.BR and not (wp[0] < wa[0] < wa[1] < wp[1]):\n            m = \"The order of the attenuation and pass frequencies is wrong\"\n        elif m == \"\" and filter_type == FilterType.GD and not (approx != ApproxType.B or approx != ApproxType.G):\n            m = \"Group-delay filters are only allowed with Bessel or Gauss approximations\"\n        #elif m == \"\" and (approx == ApproxType.B or approx == ApproxType.G) and filter_type != FilterType.GD:\n        #    m = \"Bessel or Gauss approximations must be group-delay filters\"\n        if filter_type != FilterType.GD and Ap > Aa:\n            m = \"Ap cannot be greater than Aa\"\n        return m\n\n    # check_freq: Checks that the frequency format is consistent with the filter type.\n    def check_freq(self, filter_type, w):\n        m = \"\"\n        try:\n            len_w = len(w)\n        except TypeError:\n            len_w = 1\n        if (filter_type == FilterType.LP or filter_type == FilterType.HP or filter_type == FilterType.GD) and len_w != 1:\n            m = \"ERROR: The pass or attenuation frequency entered is not a single number\"\n        elif (filter_type == FilterType.BP or filter_type == FilterType.BR) and len_w != 2:\n            m = \"ERROR: The pass or attenuation frequencies entered are not an array\"\n        return m\n\n    def check_symmetry(self, filter_type, wp, wa):\n        if filter_type == FilterType.BP:\n            if wp[0] * wp[1] <= wa[0] * wa[1]:\n                wa[1] = (wp[0] * wp[1]) / wa[0]\n            else:\n                wa[0] = (wp[0] * wp[1]) / wa[1]\n        elif filter_type == FilterType.BR:\n            if wa[0] * wa[1] <= wp[0] * wp[1]:\n                wp[1] = (wa[0] * wa[1]) / wp[0]\n            else:\n                wp[0] = (wa[0] * wa[1]) / wp[1]\n        return wp, wa\n\ndef butterworth(filter_type, wp, wa, Ap, Aa, des, G, n, Q, nmin, nmax, Qmax, rp, GD, tol):\n    data = FilterData(wp, wa, Ap, Aa, des, G)\n    f = Butterworth(filter_type, data, n, Q, nmin, nmax, Qmax, GD)\n    return f\n\ndef cheby1(filter_type, wp, wa, Ap, Aa, des, G, n, Q, nmin, nmax, Qmax, rp, GD, tol):\n    data = FilterData(wp, wa, Ap, Aa, des, G)\n    f = ChebyI(filter_type, data, n, Q, nmin, nmax, Qmax, rp, GD)\n    return f\n\ndef cheby2(filter_type, wp, wa, Ap, Aa, des, G, n, Q, nmin, nmax, Qmax, rp, GD, tol):\n    data = FilterData(wp, wa, Ap, Aa, des, G)\n    f = ChebyII(filter_type, data, n, Q, nmin, nmax, Qmax, GD)\n    return f\n\ndef legendre(filter_type, wp, wa, Ap, Aa, des, G, n, Q, nmin, nmax, Qmax, rp, GD, tol):\n    data = FilterData(wp, wa, Ap, Aa, des, G)\n    f = Legendre(filter_type, data, n, Q, nmin, nmax, Qmax, GD)\n    return f\n\ndef cauer(filter_type, wp, wa, Ap, Aa, des, G, n, Q, nmin, nmax, Qmax, rp, GD, tol):\n    data = FilterData(wp, wa, Ap, Aa, des, G)\n    f = Cauer(filter_type, data, n, Q, nmin, nmax, Qmax, GD)\n    return f\n\n
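# (Editorial worked example) check_symmetry above enforces geometric symmetry
# for band filters: for a band-pass it adjusts wa so that wa[0]*wa[1] equals
# wp[0]*wp[1]. E.g. with wp = [1e3, 4e3] (product 4e6) and wa = [5e2, 1e4]
# (product 5e6), the branch wp[0]*wp[1] <= wa[0]*wa[1] is taken and wa[1] is
# pulled down to 4e6 / 5e2 = 8e3, restoring the symmetric template.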
def bessel(filter_type, wp, wa, Ap, Aa, des, G, n, Q, nmin, nmax, Qmax, rp, GD, tol):\n    data = FilterData(wp, wa, Ap, Aa, des, G)\n    f = Bessel(filter_type, data, n, Q, nmin, nmax, Qmax, GD, tol/100 if tol is not None else None)\n    return f\n\ndef gauss(filter_type, wp, wa, Ap, Aa, des, G, n, Q, nmin, nmax, Qmax, rp, GD, tol):\n    data = FilterData(wp, wa, Ap, Aa, des, G)\n    f = Gauss(filter_type, data, n, Q, nmin, nmax, Qmax, GD, tol/100 if tol is not None else None)\n    return f\n\n# SWITCH\nswitch_atypes = {\n    0: butterworth,\n    1: cheby1,\n    2: cheby2,\n    3: legendre,\n    4: cauer,\n    5: bessel,\n    6: gauss\n}\n\n# plot_template: Draws the design template.\n# Takes: - ax (axis)\n#        - ftype (filter type)\n#        - fdata (FilterData: wp, wa, Aa, Ap, des)\n#        - A: attenuation (True) or gain (False)\ndef plot_template(ax, ftype, fdata, A=True, N=False):\n    if not A:\n        Ap = - copy(fdata.Ap)\n        Aa = - copy(fdata.Aa)\n        G = fdata.G\n        #ax.set_ylim([Aa*4 - Ap, Aa/Ap])\n    else:\n        Ap = copy(fdata.Ap)\n        Aa = copy(fdata.Aa)\n        G = 1\n        #ax.set_ylim([-Aa/Ap, Aa * 4 - Ap])\n\n    rp = None\n    ra = None\n    r2 = None\n\n    wp = np.array(fdata.wp) / (2 * np.pi)\n    wa = np.array(fdata.wa) / (2 * np.pi)\n\n    if ftype == FilterType.LP:\n        if N:\n            wp = np.array(1)\n            wa = np.array(fdata.wan)\n        rp = Rectangle((wp/10, Ap + 20 * np.log10(G)), wp - wp/10, Aa*4, color=\"orange\", alpha=0.4)\n        ra = Rectangle((wa, 20 * np.log10(G)), wa*10 - wa, Aa, color=\"orange\", alpha=0.4)\n        ax.set_xlim([wp/10, wa * 10 - wa])\n\n    elif ftype == FilterType.HP:\n        if N:\n            wp = np.array(fdata.wan)\n            wa = np.array(1)\n        ra = Rectangle((wa / 10, 20 * np.log10(G)), wa - wa / 10, Aa, color=\"orange\", alpha=0.4)\n        rp = Rectangle((wp, Ap + 20 * np.log10(G)), wp * 10 - wp, Aa*4 - Ap, color=\"orange\", alpha=0.4)\n        ax.set_xlim([wa/10, wp * 10 - wp])\n\n    elif ftype == FilterType.BP:\n        ra = Rectangle((wa[0] / 10, 20 * np.log10(G)), wa[0] - wa[0] / 10, Aa, color=\"orange\", alpha=0.4)\n        rp = Rectangle((wp[0], Ap + 20 * np.log10(G)), wp[1] - wp[0], Aa * 4 - Ap, color=\"orange\", alpha=0.4)\n        r2 = Rectangle((wa[1], 20 * np.log10(G)), wa[1]*10 - wa[1], Aa, color=\"orange\", alpha=0.4)\n        ax.set_xlim([wa[0]/10, wa[1]*10 - wa[1]])\n\n    elif ftype == FilterType.BR:\n        rp = Rectangle((wp[0]/10, Ap + 20 * np.log10(G)), wp[0] - wp[0]/10, Aa*4 - Ap, color=\"orange\", alpha=0.4)\n        ra = Rectangle((wa[0], 20 * np.log10(G)), wa[1] - wa[0], Aa, color=\"orange\", alpha=0.4)\n        r2 = Rectangle((wp[1], Ap + 20 * np.log10(G)), wp[1] * 10 - wp[1], Aa*4 - Ap, color=\"orange\", alpha=0.4)\n        ax.set_xlim([wp[0]/10, wp[1]*10 - wp[1]])\n\n    elif ftype == FilterType.GD:\n        ax.set_xlim([wp/10, wp * 10])\n\n    if ftype != FilterType.GD:\n        ax.add_patch(rp)\n        ax.add_patch(ra)\n        if r2 is not None: ax.add_patch(r2)\n\n    return\n\n","sub_path":"back/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":11896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
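# --- Editorial usage sketch for the backend.py record above -----------------
# Not part of the original file; it only illustrates how FilterSpace and
# plot_template appear to be meant to be driven. The numeric specs are
# illustrative, approx key 0 selects the Butterworth factory via
# switch_atypes, frequencies are in rad/s as addFilter expects, and the
# FilterData fields used by plot_template are assumed from the factory calls.
import matplotlib.pyplot as plt
from back.FilterClass import FilterType, FilterData

fs = FilterSpace()
err = fs.addFilter(FilterType.LP, 0, wp=1e3, wa=1e4, Ap=3, Aa=40, des=50)
if err == "":
    fig, ax = plt.subplots()
    fs.plot_mod(ax, A=True)  # attenuation curves of every visible filter
    plot_template(ax, FilterType.LP, FilterData(1e3, 1e4, 3, 40, 0.5, 1), A=True)
    plt.show()
else:
    print(err)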
+{"seq_id":"218023875","text":"import argparse\nimport logging\nimport threading\n\nfrom tasks.fetch_papers import fetch_papers_main\nfrom tasks.twitter_daemon import main_twitter_fetcher, recalculate\nfrom tasks.paperswithcode import fetch_code_data\n\nfrom logger import logger_config\n\nfrom dotenv import load_dotenv\nload_dotenv()\n\ndef run_threaded(job_func):\n    job_thread = threading.Thread(target=job_func)\n    job_thread.start()\n\n\nif __name__ == '__main__':\n    logger_config(info_filename='background_tasks.log')\n    logger = logging.getLogger(__name__)\n\n    parser = argparse.ArgumentParser(description='Run tasks')\n    parser.add_argument('-ct', '--calc_twitter', action=\"store_true\", help='Recalculate twitter')\n    parser.add_argument('-t', '--twitter', action=\"store_true\", help='Fetch twitter')\n    parser.add_argument('-p', '--papers', action=\"store_true\", help='Fetch papers')\n    parser.add_argument('-c', '--code', action=\"store_true\", help='Fetch code data')\n    args = parser.parse_args()\n\n    if args.calc_twitter:\n        logger.info('Recalculating Twitter')\n        recalculate()\n\n    if args.twitter:\n        logger.info('Fetching tweets')\n        main_twitter_fetcher()\n\n    if args.papers:\n        logger.info('Fetching papers')\n        fetch_papers_main()\n\n    if args.code:\n        logger.info('Fetching code data')\n        fetch_code_data()\n\n","sub_path":"single_background_tasks_run.py","file_name":"single_background_tasks_run.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"125103120","text":"class Solution:\r\n    # @param A : root node of tree\r\n    # @return an integer\r\n    def util(self, A, B):\r\n        if A is None and B is None:\r\n            return True\r\n        if A is None or B is None:\r\n            return False\r\n        return (A.val == B.val) and self.util(A.left, B.right) and self.util(A.right, B.left)\r\n\r\n    def isSymmetric(self, A):\r\n        if self.util(A.left, A.right):\r\n            return 1\r\n        else:\r\n            return 0","sub_path":"Trees/Symmetric_or_not.py","file_name":"Symmetric_or_not.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"412884397","text":"import asyncio\nfrom logging import getLogger\n\nfrom discord.ext import commands\n\nfrom SubService.Basic.basic_engine import BasicEngine\n\nlog = getLogger(__name__)\n\n\nclass BasicBot(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command()\n    async def state(self, ctx):\n        \"\"\"\n        Check the Discord bot's status\n        \"\"\"\n        embed = BasicEngine.get_state()\n        msg = await ctx.send(embed=embed)\n        await asyncio.sleep(60)\n\n        await ctx.message.delete()  # remove the entered command\n        await msg.delete()  # delete the message\n\n    @commands.command()\n    async def github(self, ctx):\n        \"\"\"\n        Discord bot GitHub link\n        \"\"\"\n        text = 'https://github.com/greenrain78/discordBot4'\n        msg = await ctx.send(text)\n        await asyncio.sleep(60)\n\n        await ctx.message.delete()  # remove the entered command\n        await msg.delete()  # delete the message\n\n    @commands.command()\n    async def report(self, ctx):\n        \"\"\"\n        Formally submit a Discord bot bug report\n        \"\"\"\n        embed = BasicEngine.report_issue()\n        msg = await ctx.send(embed=embed)\n        await asyncio.sleep(60)\n\n        await ctx.message.delete()  # remove the entered command\n        await msg.delete()  # delete the message\n","sub_path":"SubService/Basic/basic_bot.py","file_name":"basic_bot.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"359719408","text":"import turtle\ncolors = [ 'red' , 'blue' , 'brown' , 'yellow' , 'grey']\n\nfor i in range(5):\n    turtle.pencolor(colors[i])\n\n    for j in range(i+3):\n\n        turtle.forward(100)\n        radial = ((i+1)*180)/(i+3)\n        turtle.left(180 - radial)\n\nturtle.mainloop()\n","sub_path":"Session03/homework/turtle1.py","file_name":"turtle1.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"6860018","text":"from PIL import Image\nimport numpy as np\nimport os\npath=r'D:\\百知人工智能训练营\\人工智能\\数据集\\水质检测\\cut_images'\nos.mkdir(path)\ndir_path=r'D:\\百知人工智能训练营\\人工智能\\数据集\\水质检测\\images'\nfor file in os.listdir(dir_path):\n    img=Image.open(os.path.join(dir_path,file))\n    x,y=img.size\n#    print(x,y)\n    img=img.crop((0.5*x,0.5*y,0.5*x+100,0.5*y+100))  # (left, upper, right, lower) pixel values\n    
img.save(os.path.join(path,file))\n# img.show()\n# nd=np.array(img)\n# print(type(img))\n# print(nd)\n# print(nd.shape)","sub_path":"ai/day15/图片切割.py","file_name":"图片切割.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"610780336","text":"from lib import get_spherical_distance\nimport pandas as pd\nimport os\nimport read_config\n'''def read_file(file_name):\n\n file_obj = open(file_name).readlines()[1:]\n if file_obj==None:\n \tprint(\"n\")\n \treturn\n for i in xrange(len(file_obj)):\n if \"\\r\" in file_obj[i]:\n file_obj[i] = file_obj[i].split(\"\\r\")[0]\n if \"\\n\" in file_obj[i]:\n file_obj[i] = file_obj[i].split(\"\\n\")[0]\n file_obj[i] = file_obj[i].split(\",\")\n return file_obj\n\ndef write_file(file_name,header,data):\n\n\tfile_obj = open(file_name,\"w\")\n\tfile_obj.write(header+\"\\n\")\n\n\tfor line in data:\n\t\tline = [str(i) for i in line]\n\t\tline = \",\".join(line)+\"\\n\"\n\t\tfile_obj.write(line)\n\n\tfile_obj.close()\n'''\n\n\n\ndef compare_ground_truth(ground_truth_file, bus_stop_file,GROUND_TRUTH_THRESHOLD):\n\n\tgt=[]\n\tbus_stops=[]\n\ttext = open(ground_truth_file,\"r\")\n\tgt_1=text.readlines()[1:]\n\tfor gt_point in gt_1:\n\t\tgt_point=gt_point.replace('\\n','')\n\t\tk=gt_point.split(',')\n\t\t#print(k)\n\t\t#print('\\n')\n\t\tfor i in k:\n\t\t\tgt.append(float(i))\n\n\t#print(gt)\n\t#print('\\n')\n\n\ttext.close()\n\ttext = open(bus_stop_file,\"r\")\n\tbus_stops_1=text.readlines()[1:]\n\tfor gt_point in bus_stops_1:\n\t\tgt_point=gt_point.replace('\\n','')\n\t\tk=gt_point.split(',')\n\t\t#print(k)\n\t\t#print('\\n')\n\t\tcnt=0\n\t\tfor i in k:\n\t\t\tif cnt!=2:\n\t\t\t\tbus_stops.append(float(i))\n\t\t\t\tcnt+=1\n\t\t#bus_stops.append(k)\n\t#print(bus_stops)\n\ttext.close()\n\t#bus_stops = read_file()\n\tdf=pd.read_csv(bus_stop_file)\n\n\t#print \"GT: \",len(gt)\n\t#print \"all bus_stops: \",len(bus_stops)\n\t#print gt[0]\n\t#print bus_stops[0]\n\n\t#detected = []\n\t#false_negative=[]\n\n\t#gt_dict={}\n\tused=[]\n\tstop=[]\n\tl_gt=len(gt)\n\tl_bs=len(bus_stops)\n\tcnt_gt=0\n\t#for gt_point in gt:\n\twhile cnt_gt[-_.a-zA-Z0-9]+)\"))\n\n def __init__(self, frame, lineno):\n self.frame = frame\n self.filename = frame.f_code.co_filename\n self.lineno = lineno\n self.name = frame.f_code.co_name\n self.id = str(uuid.uuid4())\n\n @staticmethod\n def get_charset(filename):\n with open(filename, 'rb') as srcfile:\n for i in range(2):\n l = srcfile.readline()\n m = TbFrame.coding_regex.match(l)\n if m:\n return m.group('coding').decode('ascii')\n if six.PY2:\n return u'ascii'\n else:\n return u'utf-8'\n\n @property\n def code_fragment(self):\n fragment_length = 50\n start = max(1, self.lineno - fragment_length)\n stop = self.lineno + fragment_length\n lexer = lexers.Python3Lexer(stripnl=False)\n formatter = formatters.HtmlFormatter(full=False, linenos=False)\n\n loader = self.frame.f_globals.get('__loader__')\n module_name = self.frame.f_globals.get('__name__') or ''\n source = None\n if loader is not None and hasattr(loader, \"get_source\"):\n try:\n source = loader.get_source(module_name)\n except ImportError:\n pass\n if source is None:\n try:\n charset = self.get_charset(self.filename)\n with codecs.open(self.filename, 'r', encoding=charset) as infile:\n source = infile.read()\n except IOError:\n return\n\n try:\n for lineno, frag in enumerate(\n formatter._highlight_lines(\n formatter._format_lines(\n lexer.get_tokens(source))),\n start=1):\n if lineno >= start:\n 
yield self.CodeLine(\n lineno, frag[1].rstrip(),\n lineno == self.lineno,\n lineno <= self.lineno - 2 or lineno >= self.lineno + 2\n )\n if lineno >= stop:\n break\n except UnicodeDecodeError as e:\n yield self.CodeLine(None, six.u(str(e)), True, False)\n\n @property\n def loc_vars(self):\n lexer_text = lexers.TextLexer()\n lexer = lexers.Python3Lexer(stripnl=False)\n formatter = formatters.HtmlFormatter(full=False, linenos=False)\n for name, value in sorted(self.frame.f_locals.items()):\n try:\n value = pprint.pformat(value, indent=4)\n value = highlight(value, lexer, formatter)\n except Exception:\n try:\n value = six.u(repr(value))\n except Exception as e:\n try:\n value = six.u(e)\n except Exception:\n value = ''\n value = highlight(value, lexer_text, formatter)\n yield self.VarLine(name, value)\n\n\nclass Traceback(object):\n \"\"\"\n Expose one traceback to jinja2.\n \"\"\"\n\n def __init__(self, name, msg, tb):\n self.name = name\n self.msg = msg\n self.tb = tb\n\n def __iter__(self):\n tb = self.tb\n while tb:\n yield TbFrame(tb.tb_frame, tb.tb_lineno)\n tb = tb.tb_next\n\n\nclass TracebackHandler(list):\n \"\"\"\n Expose traceback list to jinja2.\n \"\"\"\n\n @staticmethod\n def get_msg(ev):\n if six.PY2:\n try:\n return six.binary_type(ev).decode('utf-8')\n except UnicodeDecodeError:\n try:\n return six.text_type(ev)\n except:\n return u\"encoding error while retreiving message\"\n else:\n try:\n return six.text_type(ev)\n except:\n return u\"encoding error while retreiving message\"\n\n def __init__(self, exc_info):\n etype, evalue, tb = exc_info\n if six.PY2:\n self.append(Traceback(evalue.__class__.__name__,\n self.get_msg(evalue), tb))\n else:\n while evalue:\n self.append(Traceback(evalue.__class__.__name__,\n self.get_msg(evalue),\n evalue.__traceback__))\n evalue = evalue.__context__\n self.reverse()\n\n\nclass TestIndex(dict):\n\n def __init__(self, name=None, status=None, url=None):\n self._name = name\n self._status = status\n self._url = url\n\n def append(self, name, status, url):\n toks = name.split('.', 1)\n if len(toks) == 1:\n self[name] = TestIndex(name, status, url)\n else:\n root, path = toks\n if root not in self:\n self[root] = TestIndex(root)\n self[root].append(path, status, url)\n\n def get_status(self):\n if self._status is None:\n status_count = {\n 'success': 0,\n 'fail': 0,\n 'error': 0,\n 'skip': 0,\n }\n for child in self.values():\n status_count[child.get_status()] += 1\n for name in ('error', 'fail', 'skip', 'success'):\n if status_count[name]:\n self._status = name\n break\n return self._status\n\n def as_json(self):\n return {\n 'title': self._name,\n 'url': self._url,\n 'status': self.get_status(),\n 'childs': [x[1].as_json() for x in sorted(self.items())]\n }\n\n\nclass ResultMixIn(object):\n \"\"\"\n Shared code of HtmlTestResult with nose plugin.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ResultMixIn, self).__init__(*args, **kwargs)\n self._results = []\n\n def add_result_method(self, status, test, exc_info=None, reason=None):\n \"\"\"\n Add test result.\n \"\"\"\n logs = {}\n if exc_info is not None:\n logs['tracebacks'] = TracebackHandler(exc_info)\n logs['reason'] = reason\n try:\n console = self._buffer_console.getvalue()\n except AttributeError:\n console = None\n logs['console'] = console\n try:\n log = self._buffer_log.getvalue()\n log = [json.loads(x) for x in log.splitlines()]\n except AttributeError:\n log = None\n if log:\n logs['log'] = log\n result = MethodResult(status, test, logs)\n 
stdout.write(result.status_color + \"\\n\")\n self._results.append(result)\n\n def startTest(self, test):\n stdout.write(\n \"Run test: %s.%s... \" %\n (test.__class__.__name__, test._testMethodName))\n # Capture stdout and stderr.\n self._old_stderr = sys.stderr\n self._old_stdout = sys.stdout\n self._buffer_console = StringIO()\n sys.stdout = sys.stderr = self._buffer_console\n\n # Capture logs\n self._old_handlers = []\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n self._old_handlers.append(handler)\n self._buffer_log = StringIO()\n handler = logging.StreamHandler(stream=self._buffer_log)\n handler.setFormatter(TestFormatter())\n handler.setLevel(logging.DEBUG)\n logging.root.addHandler(handler)\n\n def stopTest(self, test):\n # Restore stdout and stderr.\n sys.stdout = self._old_stdout\n sys.stderr = self._old_stderr\n self._buffer_console.close()\n self._buffer_console = None\n # Restore logs\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n for handler in self._old_handlers:\n logging.root.addHandler(handler)\n self._buffer_log.close()\n self._buffer_log = None\n\n def get_index(self):\n index = TestIndex()\n for result in self._results:\n index.append(result.name, result.status, result.url)\n return index\n\n\nclass HtmlTestResult(ResultMixIn, unittest.TestResult):\n\n def addError(self, test, err):\n super(HtmlTestResult, self).addError(test, err)\n self.add_result_method('error', test, exc_info=err)\n\n def addFailure(self, test, err):\n super(HtmlTestResult, self).addFailure(test, err)\n self.add_result_method('fail', test, exc_info=err)\n\n def addSuccess(self, test):\n super(HtmlTestResult, self).addSuccess(test)\n self.add_result_method('success', test)\n\n def addSkip(self, test, reason):\n super(HtmlTestResult, self).addSkip(test, reason)\n self.add_result_method('skip', test, reason=reason)\n\n def addExpectedFailure(self, test, err):\n super(HtmlTestResult, self).addExpectedFailure(self, test, err)\n self.add_result_method('fail', test, exc_info=err)\n\n def addUnexpectedSuccess(self, test):\n super(HtmlTestResult, self).addUnexpectedSuccess(self, test)\n self.add_result_method('fail', test)\n\n\nclass HtmlTestRunner(object):\n \"\"\"\n Alternative to standard unittest TextTestRunner rendering test with full\n logs, image, attached file, in a nice html way.\n\n Can be use:\n * standalone, just replace `python -m unittest` with `html-test`\n \"\"\"\n\n def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,\n failfast=False, buffer=False, resultclass=None):\n self.stream = stream\n self.descriptions = descriptions\n self.verbosity = verbosity\n self.failfast = failfast\n self.buffer = buffer\n self.resultclass = resultclass\n self.start_time = datetime.datetime.now()\n\n def run(self, test):\n result = HtmlTestResult(self.verbosity)\n test(result)\n self.stop_time = datetime.datetime.now()\n Report(result).make_report()\n print('Time Elapsed: %s' % (self.stop_time - self.start_time))\n return result\n\n\nclass Report(object):\n\n def __init__(self, result):\n self.result = result\n\n def make_report(self):\n \"\"\"\n Create html report for the tests results.\n \"\"\"\n # Create index data\n template = Template(\"var index = {{data|safe}};\")\n index_js = os.path.join(Config().dest_path, 'index.js')\n with open(index_js, 'w') as outfile:\n outfile.write(template.render({\n 'data': json.dumps(self.result.get_index().as_json(), indent=4)\n }))\n # Create index page\n template = Template(\n 
pkg_resources.resource_string(\n 'html_test',\n os.path.join('templates', 'test-case.html')).decode('utf-8')\n )\n filename = os.path.join(Config().dest_path, 'index.html')\n with codecs.open(filename, 'w', encoding=\"utf-8\") as outfile:\n outfile.write(template.render(Config().get_context()))\n","sub_path":"html_test/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":18154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"489664741","text":"# ----------------------------------------------------------------------------\n# Copyright (c) 2016--, gneiss development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\nimport unittest\nimport numpy as np\nimport pandas as pd\nimport pandas.util.testing as pdt\nfrom skbio.stats.composition import ilr_inv\nfrom skbio import TreeNode\nfrom skbio.util import get_data_path\nfrom gneiss.regression import ols\nimport numpy.testing as npt\n\n\nclass TestOLS(unittest.TestCase):\n def setUp(self):\n A = np.array # aliasing for the sake of pep8\n self.table = pd.DataFrame({\n 's1': ilr_inv(A([1., 1.])),\n 's2': ilr_inv(A([1., 2.])),\n 's3': ilr_inv(A([1., 3.])),\n 's4': ilr_inv(A([1., 4.])),\n 's5': ilr_inv(A([1., 5.]))},\n index=['a', 'b', 'c']).T\n self.tree = TreeNode.read(['(c, (b,a)Y2)Y1;'])\n self.unannotated_tree = TreeNode.read(['(c, (b,a));'])\n self.metadata = pd.DataFrame({\n 'lame': [1, 1, 1, 1, 1],\n 'real': [1, 2, 3, 4, 5]\n }, index=['s1', 's2', 's3', 's4', 's5'])\n\n np.random.seed(0)\n n = 15\n a = np.array([1, 4.2, 5.3, -2.2, 8])\n x1 = np.linspace(.01, 0.1, n)\n x2 = np.logspace(0, 0.01, n)\n x3 = np.exp(np.linspace(0, 0.01, n))\n x4 = x1 ** 2\n self.x = pd.DataFrame({'x1': x1, 'x2': x2, 'x3': x3, 'x4': x4})\n y = (a[0] + a[1]*x1 + a[2]*x2 + a[3]*x3 + a[4]*x4 +\n np.random.normal(size=n))\n sy = np.vstack((y, y/10)).T\n self.y = pd.DataFrame(ilr_inv(sy), columns=['a', 'b', 'c'])\n self.t2 = TreeNode.read([r\"((a,b)n,c);\"])\n\n\nclass TestOLSFunctions(TestOLS):\n\n def test_ols(self):\n res = ols('real', self.table, self.metadata, self.tree)\n res.fit()\n res_coef = res.coefficients()\n exp_coef = pd.DataFrame(\n {'Intercept': [0, 1.00],\n 'real': [1.0, 0]},\n index=['Y1', 'Y2'])\n\n pdt.assert_frame_equal(res_coef, exp_coef,\n check_exact=False,\n check_less_precise=True)\n # Double check to make sure the fit is perfect\n self.assertAlmostEqual(res.r2, 1)\n\n # Double check to make sure residuals are zero\n exp_resid = pd.DataFrame([[0., 0.],\n [0., 0.],\n [0., 0.],\n [0., 0.],\n [0., 0.]],\n index=['s1', 's2', 's3', 's4', 's5'],\n columns=['Y1', 'Y2'])\n pdt.assert_frame_equal(exp_resid, res.residuals())\n\n # make sure that it is a dataframe\n self.assertEqual(pd.DataFrame, type(res.basis))\n\n def test_ols_rename(self):\n res = ols('real', self.table, self.metadata,\n self.unannotated_tree)\n res.fit()\n res_coef = res.coefficients()\n exp_coef = pd.DataFrame(\n {'Intercept': [0, 1.00],\n 'real': [1.0, 0]},\n index=['y0', 'y1'])\n\n pdt.assert_frame_equal(res_coef, exp_coef,\n check_exact=False,\n check_less_precise=True)\n # Double check to make sure the fit is perfect\n self.assertAlmostEqual(res.r2, 1)\n\n # Double check to make sure residuals are zero\n exp_resid = pd.DataFrame([[0., 0.],\n [0., 0.],\n [0., 0.],\n [0., 0.],\n [0., 0.]],\n index=['s1', 's2', 's3', 's4', 's5'],\n 
columns=['y0', 'y1'])\n pdt.assert_frame_equal(exp_resid, res.residuals())\n\n def test_ols_immutable(self):\n A = np.array # aliasing for the sake of pep8\n table = pd.DataFrame({\n 's1': ilr_inv(A([1., 1.])),\n 's2': ilr_inv(A([1., 2.])),\n 's3': ilr_inv(A([1., 3.])),\n 's4': ilr_inv(A([1., 4.])),\n 's5': ilr_inv(A([1., 5.])),\n 's6': ilr_inv(A([1., 5.]))},\n index=['a', 'b', 'c']).T\n exp_table = pd.DataFrame({\n 's1': ilr_inv(A([1., 1.])),\n 's2': ilr_inv(A([1., 2.])),\n 's3': ilr_inv(A([1., 3.])),\n 's4': ilr_inv(A([1., 4.])),\n 's5': ilr_inv(A([1., 5.])),\n 's6': ilr_inv(A([1., 5.]))},\n index=['a', 'b', 'c']).T\n\n tree = TreeNode.read(['((c,d),(b,a));'])\n exp_tree = TreeNode.read(['((b,a)y1,c)y0;\\n'])\n metadata = pd.DataFrame({\n 'lame': [1, 1, 1, 1, 1],\n 'real': [1, 2, 3, 4, 5]\n }, index=['s1', 's2', 's3', 's4', 's5'])\n\n res = ols('real + lame', table, metadata, tree)\n res.fit()\n self.assertEqual(str(table), str(exp_table))\n self.assertEqual(str(exp_tree), str(res.tree))\n\n def test_ols_empty_table_error(self):\n A = np.array # aliasing for the sake of pep8\n table = pd.DataFrame({\n 's1': ilr_inv(A([1., 1.])),\n 's2': ilr_inv(A([1., 2.])),\n 's3': ilr_inv(A([1., 3.])),\n 's4': ilr_inv(A([1., 4.])),\n 's5': ilr_inv(A([1., 5.])),\n 's6': ilr_inv(A([1., 5.]))},\n index=['x', 'y', 'z']).T\n\n tree = TreeNode.read(['((c,d),(b,a)Y2)Y1;'])\n metadata = pd.DataFrame({\n 'lame': [1, 1, 1, 1, 1],\n 'real': [1, 2, 3, 4, 5]\n }, index=['s1', 's2', 's3', 's4', 's5'])\n with self.assertRaises(ValueError):\n res = ols('real + lame', table, metadata, tree)\n res.fit()\n\n def test_ols_empty_metadata_error(self):\n A = np.array # aliasing for the sake of pep8\n table = pd.DataFrame({\n 'k1': ilr_inv(A([1., 1.])),\n 'k2': ilr_inv(A([1., 2.])),\n 'k3': ilr_inv(A([1., 3.])),\n 'k4': ilr_inv(A([1., 4.])),\n 'k5': ilr_inv(A([1., 5.])),\n 'k6': ilr_inv(A([1., 5.]))},\n index=['a', 'b', 'c']).T\n\n tree = TreeNode.read(['((c,d),(b,a)Y2)Y1;'])\n metadata = pd.DataFrame({\n 'lame': [1, 1, 1, 1, 1],\n 'real': [1, 2, 3, 4, 5]\n }, index=['s1', 's2', 's3', 's4', 's5'])\n with self.assertRaises(ValueError):\n res = ols('real + lame', table, metadata, tree)\n res.fit()\n\n def test_ols_zero_error(self):\n table = pd.DataFrame({\n 's1': [0, 0, 0],\n 's2': [0, 0, 0],\n 's3': [0, 0, 0],\n 's4': [0, 0, 0],\n 's5': [0, 0, 0],\n 's6': [0, 0, 0]},\n index=['a', 'b', 'c']).T\n\n tree = TreeNode.read(['((c,d),(b,a)Y2)Y1;'])\n metadata = pd.DataFrame({\n 'lame': [1, 1, 1, 1, 1],\n 'real': [1, 2, 3, 4, 5]\n }, index=['s1', 's2', 's3', 's4', 's5'])\n with self.assertRaises(ValueError):\n res = ols('real + lame', table, metadata, tree)\n res.fit()\n\n def test_summary(self):\n A = np.array # aliasing for the sake of pep8\n table = pd.DataFrame({\n 's1': ilr_inv(A([1., 3.])),\n 's2': ilr_inv(A([2., 2.])),\n 's3': ilr_inv(A([1., 3.])),\n 's4': ilr_inv(A([3., 4.])),\n 's5': ilr_inv(A([1., 5.]))},\n index=['a', 'b', 'c']).T\n tree = TreeNode.read(['(c, (b,a)Y2)Y1;'])\n metadata = pd.DataFrame({\n 'lame': [1, 2, 1, 4, 1],\n 'real': [1, 2, 3, 4, 5]\n }, index=['s1', 's2', 's3', 's4', 's5'])\n\n np.random.seed(0)\n self.maxDiff = None\n model = ols('real', table, metadata, tree)\n model.fit()\n\n fname = get_data_path('exp_ols_results.txt')\n res = str(model.summary())\n with open(fname, 'r') as fh:\n exp = fh.read()\n self.assertEqual(res, exp)\n\n def test_summary_head(self):\n A = np.array # aliasing for the sake of pep8\n table = pd.DataFrame({\n 's1': ilr_inv(A([1., 3.])),\n 's2': ilr_inv(A([2., 2.])),\n 's3': 
ilr_inv(A([1., 3.])),\n 's4': ilr_inv(A([3., 4.])),\n 's5': ilr_inv(A([1., 5.]))},\n index=['a', 'b', 'c']).T\n tree = TreeNode.read(['(c, (b,a)Y2)Y1;'])\n metadata = pd.DataFrame({\n 'lame': [1, 2, 1, 4, 1],\n 'real': [1, 2, 3, 4, 5]\n }, index=['s1', 's2', 's3', 's4', 's5'])\n\n np.random.seed(0)\n self.maxDiff = None\n model = ols('real', table, metadata, tree)\n model.fit()\n\n fname = get_data_path('exp_ols_results2.txt')\n res = str(model.summary(ndim=1))\n with open(fname, 'r') as fh:\n exp = fh.read()\n self.assertEqual(res, exp)\n\n def test_loo(self):\n res = ols(formula=\"x1 + x2 + x3 + x4\",\n table=self.y, metadata=self.x, tree=self.t2)\n res.fit()\n exp_loo = pd.DataFrame([[0.66953263510975791, 10.994700550912553],\n [0.69679777354984163, 2.3613911713947062],\n [0.84934173316473072, 0.4057812892157881],\n [0.6990546679957772, 2.2872776593899351],\n [0.72855466737125463, 1.7615637744849277],\n [0.55998953661859308, 3.617823652256889],\n [0.81787392852582308, 0.72395497360494043],\n [0.8653549732546999, 0.17706927499520822],\n [0.86983181933002329, 0.1216027316667969],\n [0.87779006612352628, 0.028600627330344405],\n [0.86591226075609384, 0.16724511075065476],\n [0.7787232221539, 1.2820054843437292],\n [0.88032413856094505, 3.4113910096200831e-06],\n [0.83195133809800792, 0.62276589277034022],\n [0.85352707356786695, 1.4038585971691198]],\n columns=['mse', 'pred_err'],\n index=self.y.index)\n res_loo = res.loo().astype(np.float)\n # Precision issues ...\n # pdt.assert_frame_equal(exp_loo, res_loo, check_less_precise=True)\n npt.assert_allclose(exp_loo, res_loo, atol=1e-3, rtol=1e-3)\n\n def test_lovo(self):\n res = ols(formula=\"x1 + x2 + x3 + x4\",\n table=self.y, metadata=self.x, tree=self.t2)\n res.fit()\n exp_lovo = pd.DataFrame([[0.799364, 0.978214],\n [0.799363, 0.097355],\n [0.799368, 0.0973498],\n [0.799364, 0.097354],\n [0.799361, 0.0973575]],\n columns=['mse', 'Rsquared'],\n index=['Intercept', 'x1', 'x2', 'x3', 'x4'])\n res_lovo = res.lovo().astype(np.float)\n pdt.assert_frame_equal(exp_lovo, res_lovo, check_less_precise=True)\n\n def test_percent_explained(self):\n res = ols(formula=\"x1 + x2 + x3 + x4\",\n table=self.y, metadata=self.x, tree=self.t2)\n res.fit()\n res_perc = res.percent_explained()\n exp_perc = pd.Series({'y0': 0.009901,\n 'y1': 0.990099})\n pdt.assert_series_equal(res_perc, exp_perc)\n\n def test_mse(self):\n res = ols(formula=\"x1 + x2 + x3 + x4\",\n table=self.y, metadata=self.x, tree=self.t2)\n res.fit()\n self.assertAlmostEqual(res.mse, 0.79228890379010453, places=4)\n\n def test_write(self):\n res = ols(formula=\"x1 + x2 + x3 + x4\",\n table=self.y, metadata=self.x, tree=self.t2)\n res.fit()\n res.write_pickle('ols.pickle')\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"gneiss/regression/tests/test_ols.py","file_name":"test_ols.py","file_ext":"py","file_size_in_byte":12141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"516184102","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\npackages = find_packages()\n\nsetup(\n name = \"labm8\",\n version = \"2019.10.17\",\n description = \"Utility libraries for doing science\",\n long_description = open('README.md').read(),\n long_description_content_type=\"text/markdown\",\n classifiers = [\"Development Status :: 4 - Beta\", \"Environment :: Console\", \"Intended Audience :: Developers\", \"License :: OSI Approved :: Apache Software License\", \"Programming Language :: Python :: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language :: Python :: 3.8\"],\n keywords = \"utility library bazel protobuf\",\n url = \"https://github.com/ChrisCummins/labm8\",\n author = \"Chris Cummins\",\n author_email = \"chrisc.101@gmail.com\",\n license = \"Apache License, Version 2.0\",\n packages=packages,\n install_requires=[\"SQLAlchemy==1.3.10\", \"Send2Trash==1.5.0\", \"absl-py==0.7.0\", \"checksumdir==1.0.5\", \"cycler==0.10.0\", \"decorator==4.3.0\", \"grpcio==1.18.0\", \"humanize==0.5.1\", \"kiwisolver==1.0.1\", \"matplotlib==2.2.0rc1\", \"mysqlclient==1.4.2.post1\", \"networkx==2.2\", \"numpy==1.16.4\", \"pandas==0.24.1\", \"protobuf==3.6.1\", \"py==1.5.2\", \"pyparsing==2.2.0\", \"python-dateutil==2.6.1\", \"pytz==2018.3\", \"scipy==1.2.1\", \"six==1.11.0\"],\n zip_safe=False,\n)\n","sub_path":"pypi_install_script/labm8-2019.10.17.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"451427723","text":"import unittest as ut\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_almost_equal\n\nfrom pyqumo import arrivals as ar\nfrom pyqumo.distributions import Erlang\nfrom pyqumo import stats\n\n\nclass TestPoisson(ut.TestCase):\n\n def test_valid_creation(self):\n p1 = ar.PoissonProcess(1.0)\n p2 = ar.PoissonProcess(2.0)\n self.assertAlmostEqual(p1.rate, 1.0)\n self.assertAlmostEqual(p2.rate, 2.0)\n\n def test_invalid_creation(self):\n with self.assertRaises(ValueError):\n ar.PoissonProcess(0.0)\n with self.assertRaises(ValueError):\n ar.PoissonProcess(-1.0)\n with self.assertRaises(TypeError):\n ar.PoissonProcess('1.0')\n with self.assertRaises(TypeError):\n ar.PoissonProcess([1, 2])\n\n def test_moments(self):\n p = ar.PoissonProcess(2.0)\n self.assertAlmostEqual(p.mean(), 0.5)\n self.assertAlmostEqual(p.std(), 0.5)\n self.assertAlmostEqual(p.var(), 0.25)\n self.assertAlmostEqual(p.moment(1), 0.5)\n self.assertAlmostEqual(p.moment(2), 0.5)\n self.assertAlmostEqual(p.moment(3), 0.75)\n self.assertAlmostEqual(p.cv(), 1.0)\n\n def test_lag_is_zero(self):\n p = ar.PoissonProcess(2.0)\n self.assertAlmostEqual(p.lag(1), 0.0)\n self.assertAlmostEqual(p.lag(2), 0.0)\n\n # noinspection PyTypeChecker\n def test_generate(self):\n p = ar.PoissonProcess(3.0)\n samples = list(p.generate(20000))\n self.assertAlmostEqual(np.mean(samples), p.mean(), 1)\n self.assertAlmostEqual(np.std(samples), p.std(), 1)\n\n\nclass TestMAP(ut.TestCase):\n\n def test_valid_creation(self):\n d0 = [[-1, 0.5], [0.5, 
-1]]\n d1 = [[0, 0.5], [0.2, 0.3]]\n m1 = ar.MAP(d0, d1)\n assert_allclose(m1.generator, [[-1, 1], [0.7, -0.7]])\n\n def test_invalid_creation(self):\n # Off-diagonal D0 elements must be positive\n with self.assertRaises(ValueError):\n ar.MAP(\n d0=[[-1.0, -0.1], [0, -1]],\n d1=[[0, 1.1], [1., 0.]]\n )\n # D1 elements must be positive\n with self.assertRaises(ValueError):\n ar.MAP(\n d0=[[-1., 1.], [1.5, -1.]],\n d1=[[0., 0.], [-0.5, 0.0]]\n )\n # D0 + D1 must give infinitesimal matrix\n with self.assertRaises(ValueError):\n ar.MAP(\n d0=[[-1., 1.], [0.5, -1.0]],\n d1=[[0.01, 0.0], [0, 0.5]]\n )\n with self.assertRaises(ValueError):\n ar.MAP(\n d0=[[-1., 1.], [0.5, -1.0]],\n d1=[[0.0, 0.0], [0, 0.49]]\n )\n\n def test_erlang_constructor(self):\n m1 = ar.MAP.erlang(1, 1.0)\n m2 = ar.MAP.erlang(2, 5.0)\n m3 = ar.MAP.erlang(3, 10.0)\n\n assert_allclose(m1.D0, [[-1.0]])\n assert_allclose(m1.D1, [[1.0]])\n assert_allclose(m2.D0, [[-5, 5], [0, -5]])\n assert_allclose(m2.D1, [[0, 0], [5, 0]])\n assert_allclose(m3.D0, [[-10, 10, 0], [0, -10, 10], [0, 0, -10]])\n assert_allclose(m3.D1, [[0, 0, 0], [0, 0, 0], [10, 0, 0]])\n\n def test_exponential_constructor(self):\n m1 = ar.MAP.exponential(1.0)\n m2 = ar.MAP.exponential(2.0)\n\n assert_almost_equal(m1.D0, [[-1.0]])\n assert_almost_equal(m1.D1, [[1.0]])\n assert_almost_equal(m2.D0, [[-2.0]])\n assert_almost_equal(m2.D1, [[2.0]])\n\n with self.assertRaises(ValueError):\n ar.MAP.exponential(-1)\n\n with self.assertRaises(ValueError):\n ar.MAP.exponential(0.0)\n\n def test_moments_like_erlang(self):\n e1 = Erlang(1, 1.0)\n e2 = Erlang(2, 5.0)\n e3 = Erlang(3, 10.0)\n m1 = ar.MAP.erlang(e1.shape, e1.rate)\n m2 = ar.MAP.erlang(e2.shape, e2.rate)\n m3 = ar.MAP.erlang(e3.shape, e3.rate)\n\n for k in range(10):\n self.assertAlmostEqual(m1.moment(k), e1.moment(k))\n self.assertAlmostEqual(m2.moment(k), e2.moment(k))\n self.assertAlmostEqual(m3.moment(k), e3.moment(k))\n\n # noinspection PyTypeChecker\n def test_generate(self):\n D0 = [\n [-9.0, 0.0, 0.0, 0.0],\n [0.0, -9.0, 9.0, 0.0],\n [0.0, 0.0, -0.1, 0.0],\n [0.1, 0.0, 0.0, -0.1],\n ]\n D1 = [\n [8.0, 1.0, 0.00, 0.00],\n [0.0, 0.0, 0.00, 0.00],\n [0.0, 0.0, 0.09, 0.01],\n [0.0, 0.0, 0.00, 0.00],\n ]\n m = ar.MAP(D0, D1, check=True)\n NUM_SAMPLES = 25000\n samples = list(m.generate(NUM_SAMPLES))\n\n self.assertEqual(len(samples), NUM_SAMPLES)\n assert_allclose(np.mean(samples), m.mean(), rtol=0.1)\n assert_allclose(np.std(samples), m.std(), rtol=0.1)\n assert_allclose(np.var(samples), m.var(), rtol=0.1)\n assert_allclose(stats.lag(samples, 2), [m.lag(1), m.lag(2)], rtol=0.1)\n\n # noinspection PyTypeChecker\n def test_call(self):\n D0 = [\n [-99.0, 0.0, 0.0, 0.0],\n [0.0, -99.0, 99.0, 0.0],\n [0.0, 0.0, -0.01, 0.0],\n [0.01, 0.0, 0.0, -0.01],\n ]\n D1 = [\n [98.0, 1.00, 0.000, 0.000],\n [0.00, 0.00, 0.000, 0.000],\n [0.00, 0.00, 0.009, 0.001],\n [0.00, 0.00, 0.000, 0.000],\n ]\n m = ar.MAP(D0, D1, check=True)\n NUM_SAMPLES = 25000\n samples = [m() for _ in range(NUM_SAMPLES)]\n\n self.assertEqual(len(samples), NUM_SAMPLES)\n assert_allclose(np.mean(samples), m.mean(), rtol=0.2)\n assert_allclose(np.std(samples), m.std(), rtol=0.2)\n assert_allclose(np.var(samples), m.var(), rtol=0.2)\n assert_allclose(stats.lag(samples, 2), [m.lag(1), m.lag(2)], rtol=0.2)\n","sub_path":"tests/statistical_tests/test_arrivals.py","file_name":"test_arrivals.py","file_ext":"py","file_size_in_byte":5792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} 
+{"seq_id":"579381372","text":"\"\"\"\nCopyright (c) 2017-2019 Gyrfalcon Technology Inc. All rights reserved.\nSee LICENSE file in the project root for full license information.\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport os\nimport numpy as np\nimport caffe\nimport cv2\nimport gtilib\nimport json\nimport filecmp\nimport shutil\nimport caffe.proto.caffe_pb2 as caffe_pb2\nfrom google.protobuf import text_format\nfrom argparse import ArgumentParser\nfrom utils import make_call_with\n\nclass BitMatch():\n def __init__(self, args):\n caffe.set_mode_gpu()\n self.caffe_prototxt = args.caffe_prototxt\n self.caffe_model = args.caffe_model\n self.net_json = args.net_json\n self.gti_model = args.gti_model\n self.evaluate_path = args.evaluate_path\n self.output_dir = args.output_dir\n self.log_fname = args.log_fname\n self.img_bin = os.path.join(self.output_dir, \"image.bin\")\n\n self.caffe_net = caffe.Net(self.caffe_prototxt, self.caffe_model, caffe.TEST)\n with open(self.caffe_prototxt) as f:\n self.caffe_net_txt = caffe_pb2.NetParameter()\n text_format.Merge(f.read(), self.caffe_net_txt)\n\n with open(self.net_json) as j:\n self.net_config = json.load(j)\n\n self.GTI_IMAGE_WIDTH = self.GTI_IMAGE_HEIGHT = self.net_config['layer'][0]['image_size']\n self.INPUT_CHANNELS = self.net_config['layer'][0]['input_channels']\n\n self.caffe_output_dir = os.path.join(self.output_dir, \"caffe\")\n try:\n os.makedirs(self.caffe_output_dir)\n except OSError as e:\n pass\n \n self.chip_output_dir = os.path.join(self.output_dir, \"chip\")\n try:\n os.makedirs(self.chip_output_dir)\n except OSError as e:\n pass\n\n self.OUTPUT_CHANNELS = self.net_config['layer'][-1]['output_channels']\n self.OUTPUT_IMAGE_SIZE = self.net_config['layer'][-1]['image_size']\n if 'pooling' in self.net_config['layer'][-1] and self.net_config['layer'][-1]['pooling']:\n self.OUTPUT_IMAGE_SIZE >>= 1\n if 'upsample_enable' in self.net_config['layer'][-1] and self.net_config['layer'][-1]['upsample_enable']:\n self.OUTPUT_IMAGE_SIZE <<= 1\n\n self.caffe_layers = []\n self.chip_layers = []\n self.GTIMODEL = \"\"\n\n def get_caffe_layers(self):\n caffe_layers = []\n for idx, layer in enumerate(self.caffe_net.layers):\n if idx < len(self.caffe_net.layers) - 1:\n layer_name = self.caffe_net._layer_names[idx]\n layer_name_next = self.caffe_net._layer_names[idx + 1]\n if layer.type == \"QuantConvolution\":\n caffe_layers.append(layer_name_next) if self.caffe_net.layers[idx+1].type == \"Eltwise\" else caffe_layers.append(layer_name)\n return caffe_layers\n\n def get_chip_layers(self):\n chip_layers = []\n prefix = \"dump_sublayer\"\n for layer in self.net_config['layer']:\n major_layer = str(layer['major_layer'])\n for i in range(layer['sublayer_number']):\n sub_layer = str(i+1)\n chip_layers.append(prefix + major_layer + '-' + sub_layer)\n return chip_layers\n\n def get_last_out_layer(self):\n last_layer = self.caffe_net_txt.layer[0]\n for idx, layer in enumerate(self.caffe_net_txt.layer):\n if idx < len(self.caffe_net_txt.layer) - 2:\n if layer.type == \"QuantConvolution\":\n last_layer = self.caffe_net_txt.layer[idx].name\n if self.caffe_net_txt.layer[idx+1].type == \"Eltwise\" or self.caffe_net_txt.layer[idx+1].type == \"Pooling\":\n last_layer = self.caffe_net_txt.layer[idx+1].name\n elif self.caffe_net_txt.layer[idx+2].type == \"Pooling\" and self.caffe_net_txt.layer[idx+2].pooling_param.pool == 0:\n last_layer = self.caffe_net_txt.layer[idx+2].name\n return last_layer\n\n def is_fc_mode(self):\n return 
self.net_config['layer'][-1]['image_size'] == 14 and self.net_config['layer'][-1]['pooling']\n\n def forward_caffe(self, image_path, endlayer):\n img = cv2.imread(image_path)\n img = cv2.resize(img, (self.GTI_IMAGE_WIDTH, self.GTI_IMAGE_HEIGHT))\n b,g,r = cv2.split(img)\n b2 = np.concatenate((b, g, r))\n d2_in = np.reshape(b2, (3, self.GTI_IMAGE_WIDTH, self.GTI_IMAGE_WIDTH)).astype(np.uint8)\n d2_in_clip = np.clip(np.right_shift((np.right_shift(d2_in, 2) + 1),1), 0, 31)\n self.caffe_net.blobs['data'].data[...] = d2_in_clip\n self.caffe_net.forward(end=endlayer)\n caffe_out = np.where(self.caffe_net.blobs[endlayer].data>=0, np.clip(np.floor(self.caffe_net.blobs[endlayer].data+0.5),0, 31), 0)\n return caffe_out\n\n def forward_chip(self, image_path):\n img = cv2.imread(image_path)\n img = cv2.resize(img, (self.GTI_IMAGE_WIDTH, self.GTI_IMAGE_HEIGHT))\n b,g,r = cv2.split(img)\n b2 = np.concatenate((b, g, r))\n img_ary= np.asarray(b2).reshape(-1, self.GTI_IMAGE_HEIGHT)\n\n output_size = self.OUTPUT_CHANNELS * self.OUTPUT_IMAGE_SIZE * self.OUTPUT_IMAGE_SIZE\n chip_res = self.GTIMODEL.GtiEvaluate(img_ary, self.GTI_IMAGE_WIDTH,self.GTI_IMAGE_HEIGHT,self.INPUT_CHANNELS)\n chip_res = chip_res[:output_size]\n chip_out = np.reshape(chip_res, (self.OUTPUT_CHANNELS, self.OUTPUT_IMAGE_SIZE, self.OUTPUT_IMAGE_SIZE))\n return chip_out\n\n def dump_caffe_layers(self, image_path):\n self.caffe_layers = self.get_caffe_layers()\n self.forward_caffe(image_path, self.caffe_layers[-1])\n for layer in self.caffe_layers:\n layer_feature = np.where(self.caffe_net.blobs[layer].data>=0, np.clip(np.floor(self.caffe_net.blobs[layer].data+0.5),0, 31), 0)\n if self.is_fc_mode(): layer_feature *= 8\n layer_feature_flatten = layer_feature.flatten().astype(np.uint8)\n bin_file = os.path.join(self.caffe_output_dir, layer) + \".bin\"\n layer_feature_flatten.tofile(bin_file)\n\n def dump_chip_layers(self, image_path):\n img = cv2.imread(image_path)\n img = cv2.resize(img, (self.GTI_IMAGE_WIDTH, self.GTI_IMAGE_HEIGHT))\n b,g,r = cv2.split(img)\n b2 = np.concatenate((b, g, r))\n d2_in = np.reshape(b2, (3, self.GTI_IMAGE_WIDTH, self.GTI_IMAGE_WIDTH)).astype(np.uint8)\n d2_in.tofile(self.img_bin)\n self.chip_layers = self.get_chip_layers()\n call = make_call_with(self.log_fname)\n call(\"\"\"\n cd {} && \\\n GTI_LOG_LEVEL=9 \\\n {} \\\n {} \\\n {} \\\n >> {log} 2>>{log}\n \"\"\".format(\n self.chip_output_dir,\n os.path.join(os.path.dirname(os.path.realpath(os.path.abspath(__file__))), 'liteDemo'),\n self.gti_model,\n self.img_bin,\n log=self.log_fname)\n )\n\n def match_image(self):\n #check net_config for the learning modes\n for layer in self.net_config['layer']:\n if 'learning' not in layer or not layer['learning']:\n sys.exit(\"Please use the model with 'learning=true' enabled for all the layers in order to match layer by layer!\")\n self.dump_caffe_layers(self.evaluate_path)\n self.dump_chip_layers(self.evaluate_path)\n if len(self.caffe_layers) != len(self.chip_layers):\n sys.exit(\"caffe layers and chip layers not match! 
Please convert the model with 'learning=true' for all the layers.\")\n all_match = True\n for i in range(len(self.caffe_layers)):\n caffefile = os.path.join(self.caffe_output_dir, self.caffe_layers[i] + \".bin\")\n chipfile = os.path.join(self.chip_output_dir, self.chip_layers[i] + \".bin\")\n if not filecmp.cmp(caffefile, chipfile):\n all_match = False\n print(self.caffe_layers[i] + \" does not match chip output!\") \n if all_match:\n print(\"caffe output matches chip output for all the layers!\") \n\n def match_images(self):\n #check net_config for the learning modes\n for layer in self.net_config['layer']:\n if 'learning' in layer and layer['learning']:\n sys.exit(\"Please use the model with 'learning=false' for all the layers in order to run batch testing!\")\n self.GTIMODEL = gtilib.GtiModel(self.gti_model)\n endlayer = self.get_last_out_layer()\n match_count = 0\n image_count = 0\n for image_name in os.listdir(self.evaluate_path):\n image_path = os.path.join(self.evaluate_path, image_name)\n image_count += 1\n caffe_out = self.forward_caffe(image_path, endlayer)\n chip_out = self.forward_chip(image_path)\n bit_diff = (caffe_out == chip_out)\n if bit_diff.all():\n match_count += 1\n print(\"Comparing image %s: %r\"%(image_path, bit_diff.all()))\n else:\n output_size = self.OUTPUT_CHANNELS * self.OUTPUT_IMAGE_SIZE * self.OUTPUT_IMAGE_SIZE\n bit_match_ratio = np.sum(bit_diff)/float(output_size) \n print(\"Comparing image %s: %r(%3f match)\"%(image_path, bit_diff.all(), bit_match_ratio))\n print(\"Total Images: {:5d}, Match Count: {:.3f}, Match Ratio: {:.3f}\".format(image_count, match_count, float(match_count)/image_count))\n\n def convert_bin_txt(self, bin_file, dim):\n TILE_SIZE = 14\n x = np.fromfile(bin_file, dtype=np.uint8) \n channels, rows, cols = dim \n x = x.reshape(channels, rows, cols)\n txt_dir = bin_file.split(\".\")[0] \n if os.path.exists(txt_dir):\n shutil.rmtree(txt_dir)\n os.mkdir(txt_dir)\n for channel in range(channels):\n with open(os.path.join(txt_dir, \"c_{}.out\".format(channel+1)), \"w\") as f:\n row_idx = 0\n for row in range(0, rows, TILE_SIZE):\n col_idx = 0\n for col in range(0, cols, TILE_SIZE):\n f.write(\"blk_i = {:2d} blk_j = {:2d}\\n\".format(row_idx, col_idx)) \n np.savetxt(f, x[channel, row:row+TILE_SIZE, col:col+TILE_SIZE], fmt=\"%2d\")\n col_idx += 1\n row_idx += 1\n\ndef main(args):\n bitMatch = BitMatch(args)\n if os.path.isdir(args.evaluate_path):\n bitMatch.match_images()\n elif os.path.isfile(args.evaluate_path):\n bitMatch.match_image()\n else:\n sys.exit(\"evaluate path error!\")\n\ndef gen_txts_parse_args(argv):\n parser = ArgumentParser(description=\"pass arguments for bit matching\")\n parser.add_argument('caffe_prototxt', help='the net definition prototxt')\n parser.add_argument('caffe_model', help='the weights caffemodel')\n parser.add_argument('net_json', help='net.json')\n parser.add_argument('gti_model', help='the conversion out.model for chip use')\n parser.add_argument('evaluate_path', help='image or image dir path')\n parser.add_argument('output_dir', help='output directory')\n parser.add_argument('log_fname', help='output text log location, optional', nargs='?', default=None)\n args = parser.parse_args(['_' if x == '-o' else x for x in argv[1:]])\n return args\n\nif __name__ == '__main__':\n 
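# A single image file at evaluate_path runs per-layer bit matching; a directory runs batch matching.\n    # Hypothetical invocation (file names below are placeholders, not from the source):\n    #   python bit_match.py net.prototxt net.caffemodel net.json out.model ./images ./out run.log\n    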
main(gen_txts_parse_args(sys.argv))","sub_path":"release/conversion_tool/bit_match.py","file_name":"bit_match.py","file_ext":"py","file_size_in_byte":11465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"199455335","text":"#_*_ coding:utf-8 _*_\nimport requests\nfrom cfg import vcode,url\nimport pprint\nfrom robot.libraries.BuiltIn import BuiltIn\nclass Classroom():\n    def __init__(self):\n        self.vcode=vcode\n        self.url=url\n    def listClassroom(self,gradeid=None):\n        if gradeid==None:\n            param={\n                'vcode':self.vcode,\n                'action':'list_classes_by_schoolgrade'\n            }\n        else:\n            param = {\n                'vcode':self.vcode,\n                'action': 'list_classes_by_schoolgrade',\n                'gradeid':int(gradeid)\n            }\n        r = requests.get(self.url,params=param)\n        pprint.pprint(r.json(),indent=2)\n        return r.json()\n\n    def addClassroom(self,grade,classroomName,studentlimit,returnid=None):\n        data={\n            'vcode':self.vcode,\n            'action':'add',\n            'grade':int(grade),\n            'name':classroomName,\n            'studentlimit':int(studentlimit)\n        }\n        r= requests.post(self.url,data=data)\n        pprint.pprint(r.json(),indent=2)\n        aaa=r.json()\n\n        if returnid:\n            name=\"${%s}\"%returnid\n            print(name)\n            BuiltIn().set_global_variable(name,aaa['id'])\n        return aaa\n    def delClassroom(self,classid):\n        url='{0}/{1}'.format(self.url,classid)\n        data={\n            'vcode':self.vcode\n        }\n        a= requests.delete(url,data=data)\n        return a.json()\n    def delAllClassroom(self):\n        a= self.listClassroom()\n        if a['retlist']==[]:\n            print('all classrooms have already been deleted')\n        for i in a['retlist']:\n            self.delClassroom(i['id'])\n        b = self.listClassroom()\n        if b['retlist']:\n            raise Exception('failed to delete all classrooms')\n\n    def tshouldContain(self,more,name,grade,invite,limit,student,id):\n        now={\n            'name': name,\n            'grade__name': grade,\n            'invitecode': invite,\n            'studentlimit': int(limit),\n            'studentnumber': int(student),\n            'id': int(id),\n            'teacherlist': []\n        }\n        print(now)\n        print(more)\n        if more.count(now) != 1:\n            raise Exception(\"expected classroom entry not found exactly once\")\n\n    def modifyClassroom(self,classid,name,limit):\n        data={\n            'vcode':self.vcode,\n            'action':'modify',\n            'name':name,\n            'studentlimit':limit\n        }\n        url=\"{}/{}\".format(self.url,classid)\n        r=requests.put(url,data=data)\n        pprint.pprint(r.json())\n        return r.json()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"py-lib/classroom.py","file_name":"classroom.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"375161066","text":"from django.contrib import admin\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom .models import Cat\nfrom .models import User\nfrom .models import UserCatOwner\nfrom .models import Prey\nfrom .models import Hunting\nfrom .models import HuntingDetails\n\n\ndef boolean_icons(value):\n    icon_true = '✅'\n    icon_false = '❌'\n    html_icon = '
{}
'\n if value:\n return format_html(html_icon, mark_safe(icon_true))\n else:\n return format_html(html_icon, mark_safe(icon_false))\n\n\nclass UserAdmin(admin.ModelAdmin):\n list_display = ['id', 'name', 'email', 'cats_number']\n\n\nclass CatAdmin(admin.ModelAdmin):\n list_display = ['id', 'name', 'color', 'gender_type']\n\n def get_readonly_fields(self, request, obj=None):\n if obj:\n return ['gender']\n else:\n return []\n\n def gender_type(self, obj):\n return boolean_icons(obj.gender)\n\n\nclass PreyAdmin(admin.ModelAdmin):\n list_display = ['id', 'type']\n\n\nclass HuntingAdmin(admin.ModelAdmin):\n list_display = ['id', 'cat', 'duration']\n\n\nclass HuntingDetailsAdmin(admin.ModelAdmin):\n list_display = ['id', 'hunting', 'cat', 'prey']\n\n\nclass UserCatOwnerAdmin(admin.ModelAdmin):\n list_display = ['user', 'cat']\n\n\nadmin.site.register(Cat, CatAdmin)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(UserCatOwner,UserCatOwnerAdmin)\nadmin.site.register(Prey, PreyAdmin)\nadmin.site.register(Hunting, HuntingAdmin)\nadmin.site.register(HuntingDetails, HuntingDetailsAdmin)\n","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"212228038","text":"from datetime import date\nfrom decimal import Decimal\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models import signals\nfrom django.dispatch import receiver\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom djmoney.models.fields import CurrencyField, MoneyField\nfrom moneyed import Money\n\n\n@python_2_unicode_compatible\nclass GrainEvent(models.Model):\n CREATE, EDIT, DELETE = \"creation\", \"update\", \"deletion\"\n LOG_ACTIONS = (\n (CREATE, \"Create\"),\n (EDIT, \"Edit\"),\n (DELETE, \"Delete\")\n )\n\n action = models.CharField(choices=LOG_ACTIONS, max_length=20)\n model = models.CharField(max_length=30)\n object_pk = models.IntegerField()\n user = models.ForeignKey(User)\n time = models.DateField(auto_now_add=True)\n\n def __str__(self):\n return \"%s %s (%s)\" % (self.model, self.action, self.user)\n\n\n@python_2_unicode_compatible\nclass UserProfile(models.Model):\n \"\"\"Profile for Grain\n\n A profile will handle ingredients and meals in a single currency.\n \"\"\"\n user = models.ManyToManyField(User)\n note = models.CharField(max_length=24)\n currency = CurrencyField(default='GBP')\n\n def add_user(self, user):\n self.user.add(user)\n\n def __str__(self):\n return \"%s: %s\" % (self.currency, self.note)\n\n\n@python_2_unicode_compatible\nclass Consumer(models.Model):\n owner = models.ForeignKey(UserProfile)\n actual_user = models.ForeignKey(User, blank=True, null=True)\n name = models.CharField(max_length=20)\n\n def __str__(self):\n return self.name\n\n\n@python_2_unicode_compatible\nclass Unit(models.Model):\n \"\"\"Custom unit class for groceries\"\"\"\n short = models.CharField(\"abbreviation\", max_length=8)\n verbose = models.CharField(\"name (singular)\", max_length=20)\n plural = models.CharField(max_length=20)\n\n def __str__(self):\n return self.short\n\n\n@python_2_unicode_compatible\nclass IngredientCategory(models.Model):\n \"\"\"Cascading categories for ingredients\"\"\"\n parent = models.ForeignKey('self', default=None, blank=True, null=True)\n name = models.CharField(max_length=40)\n\n def get_parent_name_list(self):\n li = self.parent.get_parent_name_list() if self.parent else []\n 
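# walk up the parent chain first, then append this category's own name\n        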
li.append(self.name)\n return li\n\n class Meta:\n verbose_name_plural = \"ingredient categories\"\n\n def __str__(self):\n return self.name\n\n\n@python_2_unicode_compatible\nclass Vendor(models.Model):\n name = models.CharField(max_length=20)\n\n def __str__(self):\n return self.name\n\n\n@python_2_unicode_compatible\nclass Product(models.Model):\n \"\"\"Classes of ingredients, in specific units and packaging\"\"\"\n name = models.CharField(max_length=60)\n vendor = models.ForeignKey(Vendor, blank=True, null=True, default=None)\n category = models.ForeignKey(IngredientCategory)\n price = MoneyField(max_digits=10, decimal_places=2, default_currency='GBP')\n amount = models.FloatField()\n units = models.ForeignKey(Unit)\n fixed = models.BooleanField(default=True)\n\n def get_vendor(self):\n return self.vendor if self.vendor else \"Other\"\n\n def __str__(self):\n if not self.fixed:\n return \"%s %s\" % (self.get_vendor(), self.name)\n return \"%s %s (%g%s)\" % (self.get_vendor(), self.name, self.amount,\n self.units)\n\n\n@python_2_unicode_compatible\nclass Ingredient(models.Model):\n \"\"\"Specific instances of ingredients\"\"\"\n BEST_BEFORE, EXPIRES = \"BBF\", \"EXP\"\n EXP_CHOICES = (\n (BEST_BEFORE, \"best before\"),\n (EXPIRES, \"expires\")\n )\n\n owner = models.ForeignKey(UserProfile)\n product = models.ForeignKey(Product)\n price = MoneyField(max_digits=10, decimal_places=2, default_currency='GBP')\n amount = models.FloatField()\n used_amount = models.FloatField(default=0)\n best_before = models.DateField(blank=True)\n expiry_type = models.CharField(max_length=3, choices=EXP_CHOICES)\n purchase_date = models.DateField(default=date.today)\n exhausted = models.BooleanField(default=False)\n\n def update_usage(self, delta):\n assert not self.exhausted, \"Ingredient has been exhausted\"\n\n self.used_amount += delta\n if self.used_amount != 0:\n cpu = self.price * (1 / self.used_amount)\n else:\n cpu = Money(0, self.price.currency.code)\n for ticket in self.ticket_set.all():\n ticket.update_cost(cpu)\n self.save()\n return cpu\n\n def set_exhausted(self, exhausted):\n if exhausted != self.exhausted:\n for ticket in self.ticket_set.all():\n ticket.set_final(exhausted)\n self.exhausted = exhausted\n self.save()\n\n def __str__(self):\n return \"%s %s (%g %s:%s)\" % (self.product.get_vendor(),\n self.product.name, self.amount,\n self.product.units, self.used_amount)\n\n\n@python_2_unicode_compatible\nclass Meal(models.Model):\n # FIXME: include docstring\n BREAKFAST, LUNCH, DINNER, SUPPER, TEA, SNACK = 0, 1, 2, 3, 4, 5\n MEAL_CHOICES = (\n (BREAKFAST, \"Breakfast\"),\n (LUNCH, \"Lunch\"),\n (DINNER, \"Dinner\"),\n (SUPPER, \"Supper\"),\n (TEA, \"Tea\"),\n (SNACK, \"Snack\"),\n )\n owner = models.ForeignKey(UserProfile)\n time = models.DateTimeField()\n meal_type = models.IntegerField(choices=MEAL_CHOICES)\n cost_closed = MoneyField(max_digits=10, decimal_places=4)\n cost_open = MoneyField(max_digits=10, decimal_places=4)\n consumer = models.ForeignKey(Consumer)\n\n def cost_progress_breakdown(self):\n cost_tot, pc_closed, pc_open = self.cost_closed + self.cost_open, 0, 0\n if cost_tot:\n pc_closed = 100 * self.cost_closed.amount / cost_tot.amount\n pc_open = 100 * self.cost_open.amount / cost_tot.amount\n return (cost_tot, pc_closed, pc_open)\n\n def __str__(self):\n return \"%s on %s\" % (self.get_meal_type_display(), self.time.strftime(\"%F\"))\n\n\n@python_2_unicode_compatible\nclass Dish(models.Model):\n # FIXME: include docstring\n COOKING_STYLES = (\n ('frying', \"Fried\"),\n 
('boiling', \"Boiled\"),\n ('poaching', \"Poached\"),\n ('baking', \"Baked\"),\n ('roasting', \"Roasted\"),\n ('uncooked', \"Uncooked\"),\n ('instant', \"Microwaved\")\n )\n method = models.CharField(max_length=8, choices=COOKING_STYLES)\n meal = models.ForeignKey(Meal)\n cost_closed = MoneyField(max_digits=10, decimal_places=4)\n cost_open = MoneyField(max_digits=10, decimal_places=4)\n\n def costs_open_change(self, delta):\n self.cost_open += delta\n self.save()\n\n self.meal.cost_open += delta\n self.meal.save()\n\n def costs_close(self, delta):\n self.cost_closed += delta\n self.cost_open -= delta\n self.save()\n\n self.meal.cost_closed += delta\n self.meal.cost_open -= delta\n self.meal.save()\n\n def get_ticket_form(self, profile_pk=None):\n from .forms import TicketForm\n return TicketForm(profile_pk, initial={'dish': self})\n\n class Meta:\n verbose_name_plural = \"dishes\"\n\n def __str__(self):\n tickets = self.ticket_set.all().order_by('-cost')\n if tickets.distinct().count() == 0:\n return \"%s (empty)\" % self.method\n return \"%s %s\" % (self.get_method_display(), tickets[0])\n\n\nclass TicketManager(models.Manager):\n def create_ticket(self, ingredient, used_on_ticket, dish, currency,\n exhausted=False):\n assert used_on_ticket > 0, \"Must use positive quantity\"\n assert not ingredient.exhausted, \"Ingredient must not be exhausted\"\n\n ticket = self.create(ingredient=ingredient, used=0,\n dish=dish, cost=Money(0, currency))\n ticket.update_usage(used_on_ticket)\n if exhausted:\n ingredient.set_exhausted(True)\n return ticket\n\n\n@python_2_unicode_compatible\nclass Ticket(models.Model):\n # FIXME: include docstring\n objects = TicketManager()\n\n ingredient = models.ForeignKey(Ingredient)\n used = models.FloatField()\n cost = MoneyField(max_digits=10, decimal_places=4)\n final = models.BooleanField(default=False)\n dish = models.ForeignKey(Dish)\n\n def update_usage(self, delta):\n self.used += delta\n self.save()\n self.ingredient.update_usage(delta)\n\n def update_cost(self, cost_per_unit):\n assert not self.final, \"Cannot modify finalised tickets\"\n new_cost = self.used * cost_per_unit\n self.dish.costs_open_change(new_cost - self.cost)\n self.cost = new_cost\n self.save()\n\n def set_final(self, final):\n if final != self.final:\n if final:\n self.dish.costs_close(self.cost)\n else:\n self.dish.costs_close(-self.cost)\n self.final = final\n self.save()\n\n def __str__(self):\n return \"%s [%s]\" % (self.ingredient, self.used)\n\n\n@receiver(signals.pre_delete, sender=Ticket)\ndef clean_ticket(sender, **kwargs):\n ticket = kwargs.get('instance')\n was_final = ticket.final\n\n if was_final:\n ticket.ingredient.set_exhausted(False)\n ticket.final = False\n\n ticket.update_usage(-ticket.used)\n\n if was_final:\n ticket.ingredient.set_exhausted(True)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"209176190","text":"# -*- coding:utf-8 -*-\n__author__ = 'niean'\nfrom .bean import Bean\nfrom frame.config import ADMINS\nfrom frame.store import db\nfrom frame.params import required_chk\nimport time\nfrom datetime import datetime\n\nclass User(Bean):\n _tbl = 'user'\n _cols = 'id, name, sex, phone, birthday, card_id, card_ts, card_credit, comment, t_create, t_modify'\n _id = 'id'\n\n def __init__(self, _id, name, sex, phone, birthday, card_id, card_ts, card_credit, comment, t_create, t_modify):\n self.id = _id\n self.name = name\n 
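# the remaining attributes follow the column order declared in _cols\n        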
self.sex = sex\n        self.phone = phone\n        self.birthday = birthday\n        self.card_id = card_id\n        self.card_ts = card_ts\n        self.card_credit = card_credit\n        self.comment = comment\n        self.t_create = t_create\n        self.t_modify = t_modify\n\n    def writable(self, login_user):\n        if login_user in ADMINS:\n            return True\n\n        return False\n    \n    @classmethod\n    def query(cls, page, limit, query, today=None):\n        where = ''\n        params = []\n\n        if query:\n            where += ' and ' if where else ''\n            where += 'name like %s'\n            params.append('%' + query + '%')\n\n        vs = cls.select_vs(where=where, params=params, page=page, limit=limit, order='name')\n        total = cls.total(where, params)\n\n        if today is not None:\n            vs = [x for x in vs if x.birthday.strftime(\"%m-%d\")==today.strftime(\"%m-%d\")]\n            return vs, len(vs)\n        return vs, total\n\n    @classmethod\n    def insert_or_update(cls, _id, name, sex, phone, birthday, card_id, card_credit, comment):\n        if _id:\n            e = cls.get(_id)\n            if not e:\n                return 'no such user %s' % _id\n            cls.update_dict({'name': name, 'sex': sex, 'phone': phone, 'birthday':birthday, 'card_id':card_id, \n                'card_credit':card_credit,'comment':comment}, 'id=%s', [_id])\n            return ''\n        else:\n            bd = datetime.strptime(birthday, '%Y-%m-%d')\n            ts = time.strftime('%Y-%m-%d %H:%M:%S')\n            uid = cls.insert({'name': name, 'sex': sex, 'phone': phone, 'birthday':bd, 'card_id':card_id, \n                'card_ts':ts, 'card_credit':card_credit,'comment':comment, 't_create':ts})\n            if uid:\n                return ''\n            else:\n                return 'save user failed'\n\n\n    @classmethod\n    def create(cls, name, sex, phone, birthday, card_id, card_ts, card_credit, comment):\n        # check for a duplicate user (same name, sex and phone)\n        if cls.column('id', where='name=%s and sex=%s and phone=%s', params=[name, sex, phone]):\n            return -1\n\n        return cls.insert({'name': name, 'sex': sex, 'phone': phone, 'birthday':birthday, \n            'card_id':card_id, 'card_ts':card_ts, 'card_credit':card_credit,'comment':comment})\n    ","sub_path":"web/model/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"274027515","text":"import os\n\nimport pytest\nfrom httpie.compat import is_windows\n\nfrom .utils import TESTS_ROOT\n\n\nROOT = TESTS_ROOT.parent\nSOURCE_DIRECTORIES = [\n    'docs',\n    'extras',\n    'httpie',\n    'tests',\n]\n\n\ndef md_filenames():\n    yield from ROOT.glob('*.md')\n    for directory in SOURCE_DIRECTORIES:\n        yield from (ROOT / directory).glob('**/*.md')\n\n\nfilenames = sorted(md_filenames())\nassert filenames\n\n\n@pytest.mark.skipif(is_windows and 'CI' in os.environ,\n                    reason='Does not pass on GitHub.')\n@pytest.mark.parametrize('filename', filenames)\ndef test_md_file_syntax(filename):\n    mdformat = pytest.importorskip('mdformat._cli')\n    args = ['--end-of-line', 'lf', '--number']\n    err = f'Running \"python -m mdformat {\" \".join(args)} {filename}; git diff\" should help.'\n    assert mdformat.run(args + ['--check', str(filename)]) == 0, err\n","sub_path":"tests/test_docs.py","file_name":"test_docs.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"521482608","text":"# To run, execute these steps:\n# srun -p om_interactive -N1 -c2 --mem=8G --pty bash\n# source activate mathiasg_vd_env\n# module add openmind/xvfb-fix/0.1\n# export QT_API=pyqt\n# python plot_brain.py -i -o -c -r \n\n# https://github.com/cgoldberg/xvfbwrapper\n\ndef rotation_matrix(axis, theta):\n    \"\"\"\n    Return the rotation matrix associated with counterclockwise rotation 
about\n the given axis by theta radians.\n \"\"\" \n \n import numpy as np\n import math\n\n axis = np.asarray(axis)\n theta = np.asarray(theta)\n axis = axis/math.sqrt(np.dot(axis, axis))\n a = math.cos(theta/2)\n b, c, d = -axis*math.sin(theta/2)\n aa, bb, cc, dd = a*a, b*b, c*c, d*d\n bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d\n return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],\n [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],\n [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])\n\ndef useZstat(zstat, file_path_name_save, file_path_conte, file_path_name_resting_atlas):\n \"\"\"Plot and save the image.\n \n Arguments\n ---------\n zstat : string\n Full file path and name to nii to plot.\n \n file_path_name_save : string\n Full file path and name to png output. Output dir will be created if it doesn't exist.\n \n file_path_conte : string\n Full file path to Conte atlas\n \n file_path_name_resting_atlas : string\n \n Returns\n -------\n None. Normal error message: \n pixdim[1,2,3] should be non-zero; setting 0 dims to 1\n plot_brain.py: Fatal IO error: client killed\n \n Example\n -------\n python plot_brain.py -i /groupAnalysis/l2/zstat1_threshold.nii.gz -o /plots/l2test.png -c /git/bp2/32k_ConteAtlas_v2 -r rfMRI_REST1_LR_Atlas.dtseries.nii\n \n MIT OM Specific Tip\n -------------------\n Call this function from a shell script to run headerless BUT requires:\n source activate mathiasg_vd_env\n export QT_API=pyqt\n module add openmind/xvfb-fix/0.1\n\n #file_path_name=$1\n #file_path_name_save=$2\n #file_path_conte=$3\n #file_path_name_resting_atlas=$4\n python plot_brain.py \\\n -i $1 \\\n -o $2 \\\n -c $3 \\\n -r $4\n \n \"\"\"\n\n import matplotlib.pyplot as plt\n import os\n from glob import glob\n import numpy as np\n import nibabel as nb\n import nibabel.gifti as gifti\n\n # Crucial: xvfb must be imported and started before importing mayavi\n from xvfbwrapper import Xvfb\n print('XVb pre')\n vdisplay = Xvfb()\n vdisplay.start()\n\n print('pre maya')\n # Crashes on this line if run with plain python (not xvfb-run ... 
python) and if xvfbwrapper is after it.\n from mayavi import mlab\n print('post maya')\n from tvtk.api import tvtk\n print('post tvtk')\n import math\n\n print('display')\n mlab.options.offscreen = True #offscreen window for rendering\n\n img = nb.load(file_path_name_resting_atlas)\n #img = nb.load('/Users/MathiasMacbook/Desktop/rfMRI_REST1_LR_Atlas.dtseries.nii')\n mim = img.header.matrix.mims[1]\n #for idx, bm in enumerate(mim.brainModels):\n # print((idx, bm.indexOffset, bm.brainStructure))\n bm1 = mim.brainModels[0]\n lidx = bm1.vertexIndices.indices\n bm2 = mim.brainModels[1]\n ridx = bm1.surfaceNumberOfVertices + bm2.vertexIndices.indices\n bidx = np.concatenate((lidx, ridx))\n\n axis = [0, 0, 1]\n theta = np.pi\n\n inflated = True\n split_brain = True\n\n surf = gifti.read(file_path_conte + '/Conte69.L.midthickness.32k_fs_LR.surf.gii') \n verts_L_data = surf.darrays[0].data\n faces_L_data = surf.darrays[1].data\n\n surf = gifti.read(file_path_conte + '/Conte69.R.midthickness.32k_fs_LR.surf.gii') \n verts_R_data = surf.darrays[0].data\n faces_R_data = surf.darrays[1].data\n\n if inflated:\n surf = gifti.read(file_path_conte + '/Conte69.L.inflated.32k_fs_LR.surf.gii')\n verts_L_display = surf.darrays[0].data\n faces_L_display = surf.darrays[1].data\n surf = gifti.read(file_path_conte + '/Conte69.R.inflated.32k_fs_LR.surf.gii')\n verts_R_display = surf.darrays[0].data\n faces_R_display = surf.darrays[1].data\n else:\n verts_L_display = verts_L_data.copy()\n verts_R_display = verts_R_data.copy()\n faces_L_display = faces_L_data.copy()\n faces_R_display = faces_R_data.copy()\n\n verts_L_display[:, 0] -= max(verts_L_display[:, 0])\n verts_R_display[:, 0] -= min(verts_R_display[:, 0])\n verts_L_display[:, 1] -= (max(verts_L_display[:, 1]) + 1)\n verts_R_display[:, 1] -= (max(verts_R_display[:, 1]) + 1)\n\n faces = np.vstack((faces_L_display, verts_L_display.shape[0] + faces_R_display))\n\n if split_brain:\n verts2 = rotation_matrix(axis, theta).dot(verts_R_display.T).T\n else:\n verts_L_display[:, 1] -= np.mean(verts_L_display[:, 1])\n verts_R_display[:, 1] -= np.mean(verts_R_display[:, 1])\n verts2 = verts_R_display\n\n verts_rot = np.vstack((verts_L_display, verts2))\n verts = np.vstack((verts_L_data, verts_R_data))\n #print verts.shape\n #print faces.shape\n\n if not os.path.exists(os.path.split(file_path_name_save)[0]):\n os.makedirs(os.path.split(file_path_name_save)[0]) \n\n print('use zstat')\n img = nb.load(zstat)\n print('loaded img')\n \n threshold = 2.3 # 1000, lower limit\n display_threshold = 6 #8000, upper limit\n\n data = img.get_data()\n aff = img.affine\n indices = np.round((np.linalg.pinv(aff).dot(np.hstack((verts, \n np.ones((verts.shape[0], 1)))).T))[:3, :].T).astype(int)\n scalars2 = data[indices[:, 0], indices[:, 1], indices[:, 2]]\n scalars2[np.abs(scalars2) < threshold] = 0.\n scalars = np.zeros(verts.shape[0])\n scalars[bidx] = scalars2[bidx]\n\n negative = positive = False\n if np.any(scalars < 0):\n negative = True\n if np.any(scalars > 0):\n positive = True\n\n nlabels = 2\n vmin = 0\n vmax = 0\n if negative and positive:\n maxval = max(-scalars.min(), scalars.max())\n if maxval > display_threshold:\n maxval = display_threshold\n vmin = -maxval\n vmax = maxval\n nlabels = 3\n vmin = -display_threshold ######\n vmax = display_threshold ######\n elif negative:\n vmin = scalars.min()\n if vmin < -display_threshold:\n vmin = -display_threshold\n vmax = 0\n vmin = -display_threshold ######\n elif positive:\n vmax = scalars.max()\n if vmax > display_threshold:\n vmax 
= display_threshold\n        vmin = 0\n        vmax = display_threshold ######\n    #print zstat\n    \n    dual_split = True\n\n    fig1 = mlab.figure(1, bgcolor=(0, 0, 0))\n    mlab.clf()\n    mesh = tvtk.PolyData(points=verts_rot, polys=faces)\n    mesh.point_data.scalars = scalars\n    mesh.point_data.scalars.name = 'scalars'\n    surf = mlab.pipeline.surface(mesh, colormap='autumn', vmin=vmin, vmax=vmax)\n    if dual_split:\n        verts_rot_shifted = verts_rot.copy()\n        verts_rot_shifted = rotation_matrix(axis, theta).dot(verts_rot_shifted.T).T\n        verts_rot_shifted[:, 2] -= (np.max(verts_rot_shifted[:, 2]) - np.min(verts_rot_shifted[:, 2]))\n        verts_rot_shifted[:, 0] -= np.max(verts_rot_shifted[:, 0])\n        mesh2 = tvtk.PolyData(points=verts_rot_shifted, polys=faces)\n        mesh2.point_data.scalars = scalars\n        mesh2.point_data.scalars.name = 'scalars'\n        surf2 = mlab.pipeline.surface(mesh2, colormap='autumn', vmin=vmin, vmax=vmax)\n    colorbar = mlab.colorbar(surf, nb_labels=nlabels) #, orientation='vertical')\n    lut = surf.module_manager.scalar_lut_manager.lut.table.to_array()\n\n    if negative and positive:\n        half_index = lut.shape[0] // 2  # integer division keeps the slice indices ints on Python 3\n        index = int(half_index * threshold / vmax)\n        lut[(half_index - index + 1):(half_index + index), :] = 192\n        lut[(half_index + index):, :] = 255 * plt.cm.autumn(np.linspace(0, 255, half_index - index).astype(int))\n        lut[:(half_index - index), :] = 255 * plt.cm.cool(np.linspace(0, 255, half_index - index).astype(int))\n    elif negative:\n        index = int(lut.shape[0] * threshold / abs(vmin))\n        lut[(lut.shape[0] - index):, :] = 192\n        lut[:(lut.shape[0] - index), :] = 255 * plt.cm.cool(np.linspace(0, 255, lut.shape[0] - index).astype(int))\n    elif positive:\n        index = int(lut.shape[0] * threshold / vmax)\n        lut[:index, :] = 192\n        lut[index:, :] = 255 * plt.cm.autumn(np.linspace(0, 255, lut.shape[0] - index).astype(int))\n    lut[:, -1] = 255\n\n    surf.module_manager.scalar_lut_manager.lut.table = lut\n    if dual_split:\n        surf2.module_manager.scalar_lut_manager.lut.table = lut\n    surf.module_manager.scalar_lut_manager.show_scalar_bar = False\n    surf.module_manager.scalar_lut_manager.show_legend = False\n    surf.module_manager.scalar_lut_manager.label_text_property.font_size = 10\n    surf.module_manager.scalar_lut_manager.show_scalar_bar = True\n    surf.module_manager.scalar_lut_manager.show_legend = True\n    mlab.draw()\n\n    translate = [0, 0, 0]\n    if inflated:\n        zoom = -700\n    else:\n        zoom = -600\n    if dual_split:\n        if inflated:\n            translate = [0, 0, -104.01467148]\n        else:\n            translate = [0, 0, -54.76305802] \n        if inflated:\n            zoom = -750\n        else:\n            zoom = -570\n    \n    #mlab.view(0, 90.0, zoom, translate)\n    mlab.view(9, 90.0)\n\n    print(file_path_name_save)\n    \n    mlab.savefig(file_path_name_save, figure=fig1, magnification=5)\n\n    vdisplay.stop() \n\nif __name__ == '__main__':\n    import argparse\n    parser = argparse.ArgumentParser(prog='plot_brain.py',\n                                     description=__doc__)\n    parser.add_argument('-i', '--input', required=True, help='input full file path and name .nii.gz')\n    parser.add_argument('-o', '--output', required=True, help='output full file path and name .png')\n    parser.add_argument('-c', '--file_path_conte', required=True, help='file path to conte atlas folder')\n    parser.add_argument('-r', '--file_path_name_resting_atlas', required=True, help='resting atlas nii')\n    args = parser.parse_args()\n    \n    useZstat(args.input, args.output, args.file_path_conte, 
args.file_path_name_resting_atlas)\n\n\n\n\n\n\n\n\n","sub_path":"plot_brain.py","file_name":"plot_brain.py","file_ext":"py","file_size_in_byte":10316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"504838501","text":"#!/usr/bin/env python\n\nimport numpy as np\n\nfrom my_utils import *\nfrom sklearn import linear_model\nfrom sklearn.metrics import r2_score\n\ndef main():\n\n args, dbo_args, endpoint_args = parse_user_arguments()\n cal_expected_overlap_bcd_cnt(args, dbo_args, endpoint_args)\n return\n\ndef read_all_bcd12_files(bcd12_file_list):\n\n gc.enable()\n myprint ('reading bcd12 files')\n m1_all_list = list()\n m2_all_list = list()\n m0_all_list = list()\n d_all_list = list()\n\n for bcd12_file in bcd12_file_list:\n myprint ('current bcd12 file: %s' % bcd12_file)\n bcd12_fp = open(bcd12_file, 'r')\n while 1:\n line = bcd12_fp.readline()\n if not line:\n break\n line = line.strip().split(tab)\n m1_all_list.append(float(line[1]))\n m2_all_list.append(float(line[2]))\n m0_all_list.append(float(line[3]))\n d_all_list.append(float(line[4]) + float(line[5]))\n\n bcd12_fp.close()\n \n myprint ('finished reading bcd12 files')\n return m1_all_list, m2_all_list, m0_all_list, d_all_list\n\ndef cal_expected_overlap_bcd_cnt(args, dbo_args, endpoint_args):\n\n is_wgs = args.is_wgs\n bcd12_file_list = dbo_args.bcd12_file_list\n bcd13_file_list = dbo_args.bcd13_file_list\n \n m1_all_list, m2_all_list, m0_all_list, d_all_list = read_all_bcd12_files(bcd12_file_list)\n min_m1_value = 50\n min_m2_value = 50\n myprint ('filtering windows... min_m1_value=%.2f, min_m2_value=%.2f' %(min_m1_value, min_m2_value))\n m1list = list()\n m2list = list()\n m0list = list()\n dlist = list()\n\n for i in range(0, len(m1_all_list)): \n if m1_all_list[i] < min_m1_value or m2_all_list[i] < min_m1_value or m0_all_list[i] < 1:\n continue\n\n m1list.append(m1_all_list[i])\n m2list.append(m2_all_list[i])\n m0list.append(m0_all_list[i])\n dlist.append(d_all_list[i])\n\n del m1_all_list, m2_all_list, m0_all_list, d_all_list\n\n m0array = np.array(m0list)\n darray = np.array(dlist)\n\n logm1xm2array = np.log(m1list) + np.log(m2list)\n\n myprint ('fitting model')\n\n a = np.array([logm1xm2array, darray])\n x = np.asmatrix(a).transpose()\n\n b = np.asarray([m0array])\n y = np.asmatrix(b).transpose()\n logy = np.log(y)\n\n regr = linear_model.LinearRegression()\n regr.fit(x, logy)\n logy_predict = regr.predict(x)\n y_predict = np.exp(logy_predict) \n\n myprint ('finished fitting model')\n myprint ('Coefficients: \\n')\n myprint (regr.coef_)\n r2 = r2_score(logy, logy_predict)\n myprint ('R squired = %.4f' % (r2))\n \n predict_overlap_bcd_cnt(bcd12_file_list, regr, bcd13_file_list, min_m1_value, min_m2_value) \n\n myprint ('finished outputing bcd13 files')\n\n return min_m1_value, min_m2_value\n\ndef predict_overlap_bcd_cnt(bcd12_file_list, regr, bcd13_file_list, min_m1_value, min_m2_value):\n \n myprint ('calculating expected overlap barcode count...')\n for i in range(0, len(bcd12_file_list)):\n bcd12_file = bcd12_file_list[i]\n bcd13_file = bcd13_file_list[i]\n myprint ('current bcd12 file: %s, current output file: %s' %(bcd12_file, bcd13_file))\n bcd12_fp = open(bcd12_file, 'r')\n bcd13_fp = open(bcd13_file, 'w')\n\n pos_list = list()\n m1list = list()\n m2list = list()\n m0list = list()\n d1list = list()\n d2list = list()\n dlist = list()\n \n while 1:\n line = bcd12_fp.readline()\n if not line:\n break\n pos, m1, m2, m0, d1, d2 = line.strip().split(tab)\n\n pos = 
int(pos)\n m1 = float(m1) + 1e-6\n m2 = float(m2) + 1e-6\n m0 = float(m0)\n d1 = float(d1)\n d2 = float(d2)\n\n pos_list.append(pos)\n m1list.append(m1)\n m2list.append(m2)\n m0list.append(m0)\n d1list.append(d1)\n d2list.append(d2)\n dlist.append(d1+d2)\n\n\n m0array = np.array(m0list)\n darray = np.array(dlist)\n\n logm1xm2array = np.log(m1list) + np.log(m2list)\n\n a = np.array([logm1xm2array, darray])\n x = np.asmatrix(a).transpose()\n\n predict_logm0array = regr.predict(x) \n predict_m0_array = np.exp(predict_logm0array)\n \n for i in range(0, len(pos_list)):\n predict_m0 = predict_m0_array[i]\n min_m1_m2 = min(m1list[i], m2list[i])\n if predict_m0 > min_m1_m2:\n predict_m0 = float(min_m1_m2)\n if predict_m0 < 0:\n predict_m0 = 0.0\n\n m0 = m0list[i] \n if m0 == 0:\n m0 = 0.5\n if min_m1_m2 < 50:\n ratio = 1\n elif min_m1_m2 > 100:\n ratio = (predict_m0) / (m0)\n else:\n ratio1 = (predict_m0) / (m0) \n ratio2 = 1\n ratio = (2 * min_m1_m2 - 100) * ratio1 + (200 - 2 * min_m1_m2) * ratio2\n ratio = ratio / 100.0\n\n output = '%d\\t%.f\\t%.f\\t%.f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\n' %(pos_list[i], m1list[i]-1e-6, m2list[i]-1e-6, m0list[i], d1list[i], d2list[i], predict_m0, ratio)\n bcd13_fp.write(output)\n\n bcd12_fp.close()\n bcd13_fp.close()\n \n\n myprint ('finished output bcd13 files')\n return\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/cal_expected_overlap_value.py","file_name":"cal_expected_overlap_value.py","file_ext":"py","file_size_in_byte":5409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"643381617","text":"from odoo import api, fields, models\n\n\nclass SaleOrder(models.Model):\n _inherit = 'sale.order.line'\n\n discount_percentage = fields.Float('Discount %')\n discount_amount = fields.Float('Discount Amount')\n final_amount = fields.Float('Final Amount')\n section_prod = fields.Many2one('section.product', string=\"Testing Section\")\n\n @api.onchange('price_unit', 'product_uom_qty', 'discount')\n def compute_discount_amount(self):\n for val in self:\n val.discount_amount = (val.price_unit*val.product_uom_qty * val.discount) / 100\n val.final_amount = (val.price_unit*val.product_uom_qty- val.discount_amount)\n\n\nclass AddSection(models.Model):\n _name = 'section.product'\n\n name = fields.Char(\"Section Name\")\n\n# class AddSection(models.Model):\n# _name = 'section.product'\n#\n# name = fields.Char(\"Section Name\")\n\n\n","sub_path":"so_report_smc/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"362926956","text":"class Solution:\n # @param {string} s\n # @param {string} t\n # @return {boolean}\n def isIsomorphic(self, s, t):\n dictS = {}\n dictT = {}\n if s == t:\n return True\n\n for index, item in enumerate(s):\n if item in dictS:\n if t[index] != dictS[item]:\n return False\n else:\n dictS[item] = t[index]\n\n if t[index] in dictT:\n if item != dictT[t[index]]:\n return False\n else:\n dictT[t[index]] = item\n return True\n","sub_path":"practice/lc205.py","file_name":"lc205.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"189627655","text":"__author__ = 'Dragos'\n\nfrom rdf_test import rdf_test\nimport unittest\n\n\nclass MyTestCase(rdf_test.TestCase):\n \"\"\"\n Tests if the friendship relationship between two humans is symmetric.\n \"\"\"\n\n def 
setUp(self):\n self.change_working_directory(__file__)\n\n def test_human_example(self):\n\n graph = rdf_test.Graph()\n graph.create_model(\"../source/samAndMax.ttl\")\n construct_results = graph.construct(\"../complete.rq\")\n self.assert_ask_true(\"testSamFriendsWithMax.rq\", construct_results)\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"examples/human/test/test_human.py","file_name":"test_human.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"205838614","text":"from .blocks import (\n Block,\n)\nfrom .weave import (\n Weave,\n)\nfrom .query import (\n PyQueryUTF,\n)\nimport yaml\n\nclass Tangle(Weave):\n def tangle(self):\n \"\"\"Tangle non-code and code blocks.\"\"\"\n self.data, self.blocks = [\"\"\"\"\"\",[],]\n for child in self.query.children().items() if self.query.children() else self.query.items():\n self.blocks.append(Block(child,self.env))\n self.tangled.html(self.weave())\n return self.tangled.outer_html().decode('utf-8')\n\nclass Processor(Tangle):\n \"\"\"\n Slice front matter, tangle templates, and weave the code.\n \"\"\"\n templates=[]\n def __init__(self, raw):\n self.raw, self.frontmatter = [raw, {}]\n \"\"\"Split FrontMatter\"\"\"\n if raw.startswith('---\\n'):\n frontmatter, content = self.raw.lstrip('---').strip().split('---',1)\n if hasattr(self, 'widgets'):\n self.raw=content\n self.frontmatter=yaml.load(self.render(frontmatter))\n super(Processor,self).__init__()\n","sub_path":"literacy/tangle.py","file_name":"tangle.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"499776838","text":"import pygame, sys, math, time\nfrom pygame.locals import *\nimport random\npygame.init()\n\nscrnwdthx = 800\nscrnwdthy = 800\n\n# Game window\nsurface = pygame.display.set_mode((scrnwdthx, scrnwdthy,), 0, 32)\npygame.display.set_caption('Square Walking')\n\n#Set Font\nbasicFont = pygame.font.SysFont(None, 48)\n\n#Let's get some colours\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (128, 0, 0)\nGREEN = (0, 128, 0)\nBLUE = (0, 0, 128)\nORANGE = (255, 100, 0)\nGREY = (128, 128, 128)\nCOLOUR = (0, 0, 0)\n\n#Constants\nw = 50\nsleepTime = 0.01\n\n#Arrays\ni = 0\nx = []\ny = []\nxm = 0\nym = 0\nxmove = 0\nymove = 0\nrect = []\n\n\"\"\"Game loop----------------------------------------------------------------\"\"\"\n\n#Game loop and X quit\nwhile True:\n\tfor event in pygame.event.get():\n\t\tif event.type == QUIT:\n\t\t\tpygame.quit()\n\t\t\tsys.exit()\n\n\trand = random.randint(0, 1)\n\tif rand == 0:\n\t\txm += 1\n\t\txmove = xm*w\n\t\tif xmove > scrnwdthx:\n\t\t\txmove = 0\n\t\t\txm = 0\n\tif rand == 1:\n\t\tym += 1\n\t\tymove = ym*w\n\t\tif ymove > scrnwdthy:\n\t\t\tymove = 0\n\t\t\tym = 0\n\n\tx.append(xmove)\n\ty.append(ymove)\n\n\t#Refresh the surface\n\tsurface.fill(WHITE)\n\n\t#rect.append((x[i], y[i], w, w))\n\tfor j in range(i):\n\t\t#pygame.draw.rect(surface, pygame.Color(100, 100, 100, 128), (x[j], y[j], w, w))\n\t\t#Try drawing a separate surface instead\n\t\tsquare_Surface = (pygame.Surface((w, w), pygame.SRCALPHA))\n\t\tsquare_Surface.fill((0, 0, 0, 30))\n\t\tsurface.blit(square_Surface, (x[j], y[j]))\n\n\t#Update\n\ti += 
1\n\n\tpygame.display.update()\n\ttime.sleep(sleepTime)\n\n\n","sub_path":"SquareWalkingPyGame.py","file_name":"SquareWalkingPyGame.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"527210460","text":"#!/usr/bin/env python3\n\n\"\"\"\n@author: xi\n@since: 2018-03-03\n\"\"\"\n\nimport tensorflow as tf\n\nimport photinia as ph\n\n\nclass AlexNet(ph.Widget):\n\n def __init__(self, name='alexnet'):\n self._height = 227\n self._width = 227\n super(AlexNet, self).__init__(name)\n\n def _build(self):\n #\n # conv1 padding=VALID\n self._conv1 = ph.Conv2D(\n 'conv1',\n input_size=[self._height, self._width, 3],\n output_channels=96,\n filter_height=11, filter_width=11, stride_width=4, stride_height=4,\n padding='VALID'\n )\n self._pool1 = ph.Pool2D(\n 'pool1',\n input_size=self._conv1.output_size,\n filter_height=3, filter_width=3, stride_height=2, stride_width=2,\n padding='VALID',\n pool_type='max'\n )\n #\n # conv2, trained as a grouped (split) convolution\n self._conv2 = ph.GroupConv2D(\n 'conv2',\n input_size=self._pool1.output_size,\n output_channels=256,\n num_groups=2,\n filter_height=5, filter_width=5, stride_height=1, stride_width=1\n )\n self._pool2 = ph.Pool2D(\n 'pool2',\n input_size=self._conv2.output_size,\n filter_height=3, filter_width=3, stride_height=2, stride_width=2,\n padding='VALID',\n pool_type='max'\n )\n #\n # conv3\n self._conv3 = ph.Conv2D(\n 'conv3',\n input_size=self._pool2.output_size,\n output_channels=384,\n filter_width=3, filter_height=3, stride_width=1, stride_height=1\n )\n #\n # conv4, trained as a grouped (split) convolution\n self._conv4 = ph.GroupConv2D(\n 'conv4',\n input_size=self._conv3.output_size,\n output_channels=384,\n num_groups=2,\n filter_width=3, filter_height=3, stride_width=1, stride_height=1\n )\n #\n # conv5, trained as a grouped (split) convolution\n self._conv5 = ph.GroupConv2D(\n 'conv5',\n input_size=self._conv4.output_size,\n output_channels=256,\n num_groups=2,\n filter_width=3, filter_height=3, stride_width=1, stride_height=1\n )\n self._pool5 = ph.Pool2D(\n 'pool5',\n input_size=self._conv5.output_size,\n filter_height=3, filter_width=3, stride_height=2, stride_width=2,\n padding='VALID', pool_type='max'\n )\n #\n # fc layer\n self._fc6 = ph.Linear('fc6', input_size=self._pool5.flat_size, output_size=4096)\n self._fc7 = ph.Linear('fc7', input_size=self._fc6.output_size, output_size=4096)\n self._fc8 = ph.Linear(\n 'fc8',\n input_size=self._fc7.output_size, output_size=1000,\n w_init=ph.init.RandomNormal(stddev=1e-4)\n )\n\n def _setup(self, x):\n h = ph.setup(\n x,\n [self._conv1, tf.nn.relu, self._lrn, self._pool1,\n self._conv2, tf.nn.relu, self._lrn, self._pool2,\n self._conv3, tf.nn.relu,\n self._conv4, tf.nn.relu,\n self._conv5, tf.nn.relu, self._pool5,\n ph.ops.flatten,\n self._fc6, tf.nn.relu,\n self._fc7, tf.nn.relu]\n )\n y = self._fc8.setup(h)\n y = tf.nn.softmax(y)\n return y, h\n\n @staticmethod\n def _lrn(x):\n return tf.nn.local_response_normalization(\n x,\n depth_radius=1,\n alpha=1e-5,\n beta=0.75,\n bias=1.0\n )\n\n def load_pretrain(self, model_file='alexnet.pickle'):\n ph.io.load_model_from_file(self, model_file)\n","sub_path":"photinia/cnn/alexnet.py","file_name":"alexnet.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"306470140","text":"#!/usr/bin/env python3\nimport argparse\nimport glob\nimport itertools\nimport os\nimport sys\nimport warnings\n\nimport matplotlib.pyplot as plt\n\nimport artistools as 
at\n\n# import matplotlib.ticker as ticker\n\n\nwarnings.filterwarnings(action=\"ignore\", module=\"scipy\", message=\"^internal gelsd\")\n\n\ndef main():\n \"\"\"\n Plot ARTIS spectra and (optionally) reference spectra\n \"\"\"\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='Plot ARTIS model spectra by finding spec.out files '\n 'in the current directory or subdirectories.')\n parser.add_argument('modelpath', default=[], nargs='*',\n help='Paths to ARTIS folders with spec.out or packets files'\n ' (may include wildcards such as * and **)')\n parser.add_argument('--frompackets', default=False, action='store_true',\n help='Read packets files directly instead of exspec results')\n parser.add_argument('--emissionabsorption', default=False, action='store_true',\n help='Show an emission/absorption plot')\n parser.add_argument('-maxseriescount', type=int, default=9,\n help='Maximum number of plot series (ions/processes) for emission/absorption plot')\n parser.add_argument('-listtimesteps', action='store_true', default=False,\n help='Show the times at each timestep')\n parser.add_argument('-timestep', nargs='?',\n help='First timestep or a range e.g. 45-65')\n parser.add_argument('-timemin', type=float,\n help='Lower time in days to integrate spectrum')\n parser.add_argument('-timemax', type=float,\n help='Upper time in days to integrate spectrum')\n parser.add_argument('-xmin', type=int, default=2500,\n help='Plot range: minimum wavelength in Angstroms')\n parser.add_argument('-xmax', type=int, default=11000,\n help='Plot range: maximum wavelength in Angstroms')\n parser.add_argument('--normalised', default=False, action='store_true',\n help='Normalise the spectra to their peak values')\n parser.add_argument('-obsspec', action='append', dest='refspecfiles',\n help='Also plot reference spectrum from this file')\n parser.add_argument('-legendfontsize', type=int, default=8,\n help='Font size of legend text')\n parser.add_argument('-o', action='store', dest='outputfile',\n help='path/filename for PDF file')\n args = parser.parse_args()\n\n if len(args.modelpath) == 0:\n args.modelpath = ['.', '*']\n\n # combine the results of applying wildcards on each input\n modelpaths = list(itertools.chain.from_iterable([glob.glob(x) for x in args.modelpath if os.path.isdir(x)]))\n\n if args.emissionabsorption:\n if len(modelpaths) > 1:\n print(\"ERROR: emission/absorption plot can only take one input model\")\n sys.exit()\n else:\n if not args.outputfile:\n args.outputfile = \"plotspecemission.pdf\"\n make_plot(modelpaths, args)\n elif args.listtimesteps:\n at.showtimesteptimes(modelpaths[0])\n else:\n if not args.outputfile:\n args.outputfile = \"plotspec.pdf\"\n make_plot(modelpaths, args)\n\n\ndef plot_artis_spectra(axis, modelpaths, args, filterfunc=None):\n \"\"\"\n Plot ARTIS emergent spectra\n \"\"\"\n\n # dashesList = [(), (1.5, 2, 9, 2), (5, 1), (0.5, 2), (4, 2)]\n # dash_capstyleList = ['butt', 'butt', 'butt', 'round', 'butt']\n # colorlist = [(0, .8*158./255, 0.6*115./255), (204./255, 121./255, 167./255), (213./255, 94./255, 0.0)]\n # inputfiles.sort(key=lambda x: os.path.dirname(x))\n for index, modelpath in enumerate(modelpaths):\n modelname = at.get_model_name(modelpath)\n print(f\"====> {modelname}\")\n plotkwargs = {}\n # plotkwargs['dashes'] = dashesList[index]\n # plotkwargs['dash_capstyle'] = dash_capstyleList[index]\n plotkwargs['linestyle'] = ['-', '--'][int(index / 7) % 2]\n plotkwargs['linewidth'] = 2.5 - (0.2 * index)\n 
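# the style kwargs built above are assumed to be forwarded unchanged to Matplotlib's plot call inside artistools\n 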
at.spectra.plot_artis_spectrum(axis, modelpath, args=args, from_packets=args.frompackets, **plotkwargs)\n\n\ndef make_spectrum_plot(modelpaths, axis, filterfunc, args):\n \"\"\"\n Set up a matplotlib figure and plot observational and ARTIS spectra\n \"\"\"\n at.spectra.plot_reference_spectra(axis, [], [], args, flambdafilterfunc=filterfunc)\n plot_artis_spectra(axis, modelpaths, args, filterfunc)\n\n if args.normalised:\n axis.set_ylim(ymin=-0.1, ymax=1.25)\n axis.set_ylabel(r'Scaled F$_\\lambda$')\n\n\ndef make_emission_plot(modelpath, axis, filterfunc, args):\n from astropy import constants as const\n import pandas as pd\n maxion = 5 # must match sn3d.h value\n\n emissionfilename = os.path.join(modelpath, 'emissiontrue.out')\n if not os.path.exists(emissionfilename):\n emissionfilename = os.path.join(modelpath, 'emission.out')\n\n specfilename = os.path.join(modelpath, 'spec.out')\n specdata = pd.read_csv(specfilename, delim_whitespace=True)\n timearray = specdata.columns.values[1:]\n arraynu = specdata.loc[:, '0'].values\n arraylambda_angstroms = const.c.to('angstrom/s').value / arraynu\n\n (modelname, timestepmin, timestepmax,\n time_days_lower, time_days_upper) = at.get_model_name_times(\n specfilename, timearray, args.timestep, args.timemin, args.timemax)\n\n absorptionfilename = os.path.join(modelpath, 'absorption.out')\n contribution_list, maxyvalueglobal, array_flambda_emission_total = at.spectra.get_flux_contributions(\n emissionfilename, absorptionfilename, maxion, timearray, arraynu,\n filterfunc, args.xmin, args.xmax, timestepmin, timestepmax)\n\n at.spectra.print_integrated_flux(array_flambda_emission_total, arraylambda_angstroms)\n\n # print(\"\\n\".join([f\"{x[0]}, {x[1]}\" for x in contribution_list]))\n\n contributions_sorted_reduced = at.spectra.sort_and_reduce_flux_contribution_list(\n contribution_list, args.maxseriescount, arraylambda_angstroms)\n\n plotobjects = axis.stackplot(\n arraylambda_angstroms, [x.array_flambda_emission for x in contributions_sorted_reduced], linewidth=0)\n\n facecolors = [p.get_facecolor()[0] for p in plotobjects]\n\n axis.stackplot(\n arraylambda_angstroms, [-x.array_flambda_absorption for x in contributions_sorted_reduced],\n colors=facecolors, linewidth=0)\n\n plotobjectlabels = list([x.linelabel for x in contributions_sorted_reduced])\n\n at.spectra.plot_reference_spectra(axis, plotobjects, plotobjectlabels, args, flambdafilterfunc=None,\n scale_to_peak=(maxyvalueglobal if args.normalised else None), linewidth=0.5)\n\n axis.axhline(color='white', linewidth=0.5)\n\n plotlabel = f't={time_days_lower:.2f}d to {time_days_upper:.2f}d\\n{modelname}'\n axis.annotate(plotlabel, xy=(0.97, 0.03), xycoords='axes fraction',\n horizontalalignment='right', verticalalignment='bottom', fontsize=9)\n\n # axis.set_ylim(ymin=-0.05 * maxyvalueglobal, ymax=maxyvalueglobal * 1.3)\n\n return plotobjects, plotobjectlabels\n\n\ndef make_plot(modelpaths, args):\n import matplotlib.ticker as ticker\n\n fig, axis = plt.subplots(1, 1, sharey=True, figsize=(8, 5), tight_layout={\"pad\": 0.2, \"w_pad\": 0.0, \"h_pad\": 0.0})\n axis.set_ylabel(r'F$_\\lambda$ at 1 Mpc [erg/s/cm$^2$/$\\AA$]')\n\n # import scipy.signal\n #\n # def filterfunc(flambda):\n # return scipy.signal.savgol_filter(flambda, 5, 3)\n filterfunc = None\n if args.emissionabsorption:\n plotobjects, plotobjectlabels = make_emission_plot(modelpaths[0], axis, filterfunc, args)\n else:\n make_spectrum_plot(modelpaths, axis, filterfunc, args)\n plotobjects, plotobjectlabels = 
axis.get_legend_handles_labels()\n\n axis.legend(plotobjects, plotobjectlabels, loc='best', handlelength=2,\n frameon=False, numpoints=1, prop={'size': args.legendfontsize})\n\n # plt.setp(plt.getp(axis, 'xticklabels'), fontsize=fsticklabel)\n # plt.setp(plt.getp(axis, 'yticklabels'), fontsize=fsticklabel)\n # for axis in ['top', 'bottom', 'left', 'right']:\n # axis.spines[axis].set_linewidth(framewidth)\n\n axis.set_xlabel(r'Wavelength ($\\AA$)')\n axis.set_xlim(xmin=args.xmin, xmax=args.xmax)\n axis.xaxis.set_major_locator(ticker.MultipleLocator(base=1000))\n axis.xaxis.set_minor_locator(ticker.MultipleLocator(base=100))\n\n filenameout = args.outputfile\n fig.savefig(filenameout, format='pdf')\n # plt.show()\n print(f'Saved {filenameout}')\n plt.close()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"artistools/plot/spectrum.py","file_name":"spectrum.py","file_ext":"py","file_size_in_byte":8796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"1649316","text":"import sys\nimport os\nimport multiprocessing\nimport subprocess\nimport shlex\nimport numpy as np\n\n\ndef parse_sizefile(fn):\n return {line.strip().split()[0]: float(line.strip().split()[1])\n for line in open(fn) if line[0] != \"#\"}\n\n\ndef frac_off(est, corr):\n return float(abs(est - corr)) / corr\n\n\nclass DistData:\n def __init__(self, arr, names):\n self.arr = arr\n self.names = names\n\n\ndef parse_distfile(fn):\n fp = open(fn)\n header = next(fp)\n names = header.strip().split()[1:]\n arr = np.array([list(map(float,\n line.strip().split()[1:]))\n for line in fp], dtype=np.double)\n return DistData(arr, names)\n\n\ndef main():\n nthreads = multiprocessing.cpu_count()\n sketch_sizes = \"sketch_sizes.txt\"\n sketch_dists = \"sketch_dists.txt\"\n set_sizes = \"set_sizes.txt\"\n set_dists = \"set_dists.txt\"\n argc, argv = len(sys.argv), sys.argv\n for arg in argv:\n if \"-p\" in arg:\n nthreads = int(arg[2:])\n argv = [arg2 for arg2 in argv if arg2 != arg]\n print(argv)\n break\n ofp = sys.stdout\n ofw = ofp.write\n paths = argv[1:]\n for path in paths:\n if not os.path.isfile(path):\n raise RuntimeError(\"Path %s is not a file. 
Abort!\" % path)\n cstr = (\"flashdans dist -p%i -o %s \"\n \"-O %s %s\") % (nthreads, sketch_sizes,\n sketch_dists, \" \".join(paths))\n print(cstr)\n subprocess.check_call(shlex.split(cstr))\n cstr = (\"flashdans setdist -p%i -o %s \"\n \"-O %s %s\") % (nthreads, set_sizes, set_dists, \" \".join(paths))\n print(cstr)\n subprocess.check_call(shlex.split(cstr))\n estim_sizes = parse_sizefile(sketch_sizes)\n exact_sizes = parse_sizefile(set_sizes)\n ofw(\"#Name\\tEstim\\tExact\\tError\\n\")\n for pair in zip(estim_sizes.items(), exact_sizes.items()):\n ofw(\"%s.\\t%f.\\t%f\\t%f%%\\n\" % (pair[0][0], pair[0][1],\n pair[1][1],\n frac_off(pair[0][1],\n pair[1][1]) * 100))\n estim_dists, exact_dists = list(map(parse_distfile,\n [sketch_dists, set_dists]))\n names = estim_dists.names\n ofw(\"#Name1\\tName2\\tEstim\\tExact\\tError (relative)\\tError (absolute)\\n\")\n for i in range(len(estim_dists.arr)):\n for j in range(i + 1, len(estim_dists.arr)):\n ofw(\"%s\\t%s\\t%f\\t%f\\t%f%%\\t%f\\n\" % (names[i], names[j],\n estim_dists.arr[i, j], exact_dists.arr[i, j],\n frac_off(estim_dists.arr[i, j], exact_dists.arr[i, j]) * 100),\n abs(estim_dists.arr[i, j] - exact_dists.arr[i, j]))\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"python/compare_jaccard.py","file_name":"compare_jaccard.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"59555451","text":"import numpy as np\nimport cv2\nimport picamera\nimport picamera.array\nimport math\n\nwidth=1280\nheight=720\nx=0\ny=0\nw=1280\nh=height\n\ndef sortByX(coord):\n return coord[0]\n\ndef sortByY(coord):\n return coord[1]\n\nwith picamera.PiCamera() as camera:\n with picamera.array.PiRGBArray(camera) as stream:\n camera.resolution = (width, height)\n\n while True:\n camera.capture(stream, 'bgr', use_video_port=True)\n # stream.array now contains the image data in BGR order\n crop_img = stream.array[y:y+h, x:x+w]\n gray_img = cv2.cvtColor(crop_img, cv2.COLOR_RGB2GRAY)\n blur_img = cv2.GaussianBlur(gray_img, (5,5), 0)\n canny_img = cv2.Canny(blur_img, 100, 300)\n\n lines = cv2.HoughLinesP(\n cannyed_img,\n rho=6,\n theta=np.pi / 60,\n threshold=160,\n lines=np.array([]),\n minLineLength=40,\n maxLineGap=25)\n\n temp = []\n\n if (isinstance(lines, np.ndarray)):\n for line in lines:\n x1 = line[0][0]\n y1 = line[0][1]\n x2 = line[0][2]\n y2 = line[0][3]\n temp.append([x1, y1])\n temp.append([x2, y2])\n\n temp.sort(key=sortByY)\n highYCoords = temp[math.floor(len(temp)/2):]\n lowYCoords = temp[:math.floor(len(temp)/2)]\n\n highYCoords.sort(key=sortByX)\n lowYCoords.sort(key=sortByX)\n topScreenMedian = highYCoords[math.floor(len(highYCoords)/2)]\n bottomScreenMedian = lowYCoords[math.floor(len(lowYCoords)/2)]\n diffOfMedians = [topScreenMedian[0] - bottomScreenMedian[0],\n topScreenMedian[1] - bottomScreenMedian[1]]\n angle = math.degrees(math.atan(diffOfMedians[1] / diffOfMedians[0]))\n print(angle)\n\n cv2.imshow(\"cannied\", cannyed_img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n # reset the stream before the next capture\n stream.seek(0)\n stream.truncate()\n\n cv2.destroyAllWindows()","sub_path":"line_follow/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"469756322","text":"# Give the value printed by each of the following code fragments\n\n\ndef run_a():\n\tt = 9.0\n\twhile abs(t - 9.0 / t) > 
0.001:\n\t\tt = (9.0 / t + t) / 2.0\n\tprint(\"{0:.5f}\".format(t))\n\t# print t: 3.00009\n\n\ndef run_b():\n\ts = 0\n\tfor i in range(1, 1000):\n\t\tfor j in range(0, i):\n\t\t\ts += 1\n\tprint(s)\n\t# print s: 499500\n\n\ndef run_c():\n\ts = 0\n\tfor i in range(1, 1000, 2):\n\t\t# The line below cannot run since N is not defined\n\t\tfor j in range(0, N):\n\t\t\ts += 1\n\t\t\tprint(s)\n\n\nrun_a()\nrun_b()\n# run_c() raises a NameError because N is undefined, so the call stays commented out\n# run_c()\n","sub_path":"Chapter1/Section1/Regular/Exercise117.py","file_name":"Exercise117.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"575602392","text":"\"\"\"Contacts API endpoint wrapper\"\"\"\n\nimport requests\n\nfrom hubspyt.base_wrapper import BaseWrapper\nfrom hubspyt.models.contact import Contact\n\nclass Contacts(BaseWrapper):\n \"\"\"Wrapper for the contacts endpoint\"\"\"\n\n def _contacts_url(self, addon):\n \"\"\"Adds an addon URL to the base Contacts url\"\"\"\n\n contacts_url = self.contacts_base_url\n\n url = '{contacts_url}{addon}'.format(contacts_url=contacts_url, addon=addon)\n\n return url\n\n def all(self, count=None):\n \"\"\"Fetches all current contacts with an optional count\"\"\"\n\n contacts_all_url = self.contacts_all_url\n contact_list = []\n\n if count is None:\n has_more = True\n vid_offset = None\n\n while has_more is True:\n url = self._get_url(\n self._contacts_url(contacts_all_url),\n [\n {'name': 'count', 'value': 250},\n {'name': 'vidOffset', 'value': vid_offset},\n ],\n )\n\n response = requests.get(url).json()\n contacts = response.get('contacts')\n\n if response.get('has-more') is False:\n has_more = False\n\n vid_offset = response.get('vid-offset')\n\n for item in contacts:\n vid = item.get('vid')\n first_name = None\n last_name = None\n company = None\n\n if item.get('properties').get('firstname'):\n first_name = item.get('properties').get('firstname').get('value')\n\n if item.get('properties').get('lastname'):\n last_name = item.get('properties').get('lastname').get('value')\n\n if item.get('properties').get('company'):\n company = item.get('properties').get('company').get('value')\n\n contact = Contact(\n vid=vid,\n first_name=first_name,\n last_name=last_name,\n company=company,\n )\n\n contact_list.append(contact)\n else:\n # pass the requested count through to the API instead of ignoring it\n url = self._get_url(\n self._contacts_url(contacts_all_url),\n [\n {'name': 'count', 'value': count},\n ],\n )\n\n response = requests.get(url).json()\n contacts = response.get('contacts')\n\n for item in contacts:\n vid = item.get('vid')\n first_name = None\n last_name = None\n company = None\n\n if item.get('properties').get('firstname'):\n first_name = item.get('properties').get('firstname').get('value')\n\n if item.get('properties').get('lastname'):\n last_name = item.get('properties').get('lastname').get('value')\n\n if item.get('properties').get('company'):\n company = item.get('properties').get('company').get('value')\n\n contact = Contact(\n vid=vid,\n first_name=first_name,\n last_name=last_name,\n company=company,\n )\n\n contact_list.append(contact)\n\n return contact_list\n\n def batch_modify(self, modified):\n \"\"\"Send modified/created contacts to HubSpot's API\"\"\"\n\n contacts_batch_url = self.contacts_batch_url\n url = self._get_url(self._contacts_url(contacts_batch_url))\n\n modified_list = []\n\n for contact in modified:\n modified_list.append(contact.get_update())\n\n response = requests.post(url, json=modified_list)\n status = response.status_code\n\n if status == 202:\n return True\n\n print(status, response.json())\n\n return 
False\n","sub_path":"hubspyt/contacts.py","file_name":"contacts.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"186069791","text":"import os\nimport re\n\ndef createKeywordTags(sFile):\n \n return sFile\n\nsRoot = './content'\nfor sDir, subdirList, vFiles in os.walk(sRoot):\n print('Found directory: %s' % sDir)\n for sFile in vFiles:\n print('\\t%s' % sFile)\n f = open(os.path.join(sDir,sFile), 'r')\n sFile = f.read()\n f.close()\n \n sFile = createKeywordTags(sFile)\n print(sFile)","sub_path":"code/create_tags.py","file_name":"create_tags.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"63546286","text":"\"\"\"\nGuide for creating plots based on the Matplotlib Usage Guide at\nhttps://matplotlib.org/tutorials/introductory/usage.html#sphx-glr-tutorials-introductory-usage-py\n\nExamples are meant to be run with iPython so make sure interactive mode is\nturned on. Otherwise, plot figures will not show on screen. This mode can be\nenabled with plt.ion() or in the iPython profile.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.close('all') # close all previous figures\n\n# Data to plot\n# ---------------------------------------------------------------------------\n\nx = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\ny = np.array([5, 7, 8, 7, 9, 4, 4, 8, 10, 7])\n\n# Example 1\n# ---------------------------------------------------------------------------\n\nfig, ax = plt.subplots()\nax.plot(x, y)\nax.plot(x, y*1.2)\nax.set_xlabel('Time [s]')\nax.set_ylabel('Distance [m]')\n\nfig, (ax1, ax2) = plt.subplots(1, 2)\nax1.plot(x, y)\nax1.set_xlabel('Time [s]')\nax1.set_ylabel('Distance [m]')\nax2.plot(x, y*3)\nax2.set_xlabel('Time [s]')\n\n# Example 2\n# ---------------------------------------------------------------------------\n\nfig1, ax = plt.subplots()\nax.plot(x, y)\nax.set_xlabel('Time [s]')\nax.set_ylabel('Distance [m]')\nax.grid(alpha=0.5)\n\nfig2, ax = plt.subplots()\nax.plot(x, y)\nax.plot(x, y*1.2)\nax.set_xlabel('Time [s]')\nax.set_ylabel('Distance [m]')\n\n# save plots in this example to PDF (uncomment lines below to enable)\n# fig1.savefig('figure1.pdf', bbox_inches='tight')\n# fig2.savefig('figure2.pdf', bbox_inches='tight')\n\n# Example 3\n# ---------------------------------------------------------------------------\n\n\ndef style(axis):\n axis.grid(color='0.85')\n axis.set_frame_on(False)\n axis.tick_params(color='0.85')\n\n\nfig, ax = plt.subplots()\nax.plot(x, y, label='data')\nax.plot(x, y*1.2, label='model')\nax.legend(loc='best')\nax.set_title('Styled plot')\nax.set_xlabel('Time [s]')\nax.set_ylabel('Distance [m]')\nstyle(ax)\n","sub_path":"matplotlib_usage.py","file_name":"matplotlib_usage.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"453040979","text":"#\n# Copyright 2022 Ocean Protocol Foundation\n# SPDX-License-Identifier: Apache-2.0\n#\nfrom typing import List, Optional, Union\n\nfrom enforce_typing import enforce_types\nfrom web3.exceptions import BadFunctionCallOutput\n\nfrom ocean_lib.models.data_nft import DataNFT\nfrom ocean_lib.models.datatoken import Datatoken\nfrom ocean_lib.models.erc721_token_factory_base import ERC721TokenFactoryBase\nfrom ocean_lib.models.fixed_rate_exchange import FixedRateExchange\nfrom ocean_lib.structures.abi_tuples import 
MetadataProof, OrderData\nfrom ocean_lib.web3_internal.constants import MAX_UINT256\nfrom ocean_lib.web3_internal.contract_base import ContractBase\n\n\nclass DataNFTFactoryContract(ERC721TokenFactoryBase):\n CONTRACT_NAME = \"ERC721Factory\"\n\n @enforce_types\n def verify_nft(self, nft_address: str) -> bool:\n \"\"\"Checks that a token was registered.\"\"\"\n data_nft_contract = DataNFT(self.config_dict, nft_address)\n try:\n data_nft_contract.getId()\n return True\n except BadFunctionCallOutput:\n return False\n\n @enforce_types\n def start_multiple_token_order(\n self, orders: List[OrderData], transaction_parameters: dict\n ) -> str:\n \"\"\"An order contains the following keys:\n\n - tokenAddress, str\n - consumer, str\n - serviceIndex, int\n - providerFeeAddress, str\n - providerFeeToken, str\n - providerFeeAmount (in Wei), int\n - providerData, bytes\n - v, int\n - r, bytes\n - s, bytes\n \"\"\"\n # NamedTuple._replace returns a new tuple rather than mutating in place,\n # so collect the checksummed copies instead of discarding them\n checksummed_orders = []\n for order in orders:\n provider_fees = list(order.provider_fees)\n provider_fees[0] = ContractBase.to_checksum_address(provider_fees[0])\n provider_fees[1] = ContractBase.to_checksum_address(provider_fees[1])\n consume_fees = list(order.consume_fees)\n consume_fees[0] = ContractBase.to_checksum_address(consume_fees[0])\n consume_fees[1] = ContractBase.to_checksum_address(consume_fees[1])\n checksummed_orders.append(\n order._replace(\n token_address=ContractBase.to_checksum_address(order.token_address),\n consumer=ContractBase.to_checksum_address(order.consumer),\n provider_fees=tuple(provider_fees),\n consume_fees=tuple(consume_fees),\n )\n )\n\n return self.contract.startMultipleTokenOrder(checksummed_orders, transaction_parameters)\n\n @enforce_types\n def create_nft_with_erc20(\n self,\n nft_name: str,\n nft_symbol: str,\n nft_template: int,\n nft_token_uri: str,\n nft_transferable: bool,\n nft_owner: str,\n datatoken_template: int,\n datatoken_name: str,\n datatoken_symbol: str,\n datatoken_minter: str,\n datatoken_fee_manager: str,\n datatoken_publish_market_order_fee_address: str,\n datatoken_publish_market_order_fee_token: str,\n datatoken_publish_market_order_fee_amount: int,\n datatoken_bytess: List[bytes],\n transaction_parameters: dict,\n datatoken_cap: Optional[int] = None,\n ) -> str:\n if datatoken_template == 2 and not datatoken_cap:\n raise Exception(\"Cap is needed for Datatoken Enterprise token deployment.\")\n datatoken_cap = datatoken_cap if datatoken_template == 2 else MAX_UINT256\n return self.contract.createNftWithErc20(\n (\n nft_name,\n nft_symbol,\n nft_template,\n nft_token_uri,\n nft_transferable,\n ContractBase.to_checksum_address(nft_owner),\n ),\n (\n datatoken_template,\n [datatoken_name, datatoken_symbol],\n [\n ContractBase.to_checksum_address(datatoken_minter),\n ContractBase.to_checksum_address(datatoken_fee_manager),\n ContractBase.to_checksum_address(\n datatoken_publish_market_order_fee_address\n ),\n ContractBase.to_checksum_address(\n datatoken_publish_market_order_fee_token\n ),\n ],\n [datatoken_cap, datatoken_publish_market_order_fee_amount],\n datatoken_bytess,\n ),\n transaction_parameters,\n )\n\n @enforce_types\n def create_nft_erc20_with_fixed_rate(\n self,\n nft_name: str,\n nft_symbol: str,\n nft_template: int,\n nft_token_uri: str,\n nft_transferable: bool,\n nft_owner: str,\n datatoken_template: int,\n datatoken_name: str,\n datatoken_symbol: str,\n datatoken_minter: str,\n datatoken_fee_manager: str,\n datatoken_publish_market_order_fee_address: str,\n datatoken_publish_market_order_fee_token: str,\n 
datatoken_publish_market_order_fee_amount: int,\n datatoken_bytess: List[bytes],\n fixed_price_address: str,\n fixed_price_base_token: str,\n fixed_price_owner: str,\n fixed_price_publish_market_swap_fee_collector: str,\n fixed_price_allowed_swapper: str,\n fixed_price_base_token_decimals: int,\n fixed_price_datatoken_decimals: int,\n fixed_price_rate: int,\n fixed_price_publish_market_swap_fee_amount: int,\n fixed_price_with_mint: int,\n transaction_parameters: dict,\n datatoken_cap: Optional[int] = None,\n ) -> str:\n if datatoken_template == 2 and not datatoken_cap:\n raise Exception(\"Cap is needed for Datatoken Enterprise token deployment.\")\n datatoken_cap = datatoken_cap if datatoken_template == 2 else MAX_UINT256\n return self.contract.createNftWithErc20WithFixedRate(\n (\n nft_name,\n nft_symbol,\n nft_template,\n nft_token_uri,\n nft_transferable,\n ContractBase.to_checksum_address(nft_owner),\n ),\n (\n datatoken_template,\n [datatoken_name, datatoken_symbol],\n [\n ContractBase.to_checksum_address(datatoken_minter),\n ContractBase.to_checksum_address(datatoken_fee_manager),\n ContractBase.to_checksum_address(\n datatoken_publish_market_order_fee_address\n ),\n ContractBase.to_checksum_address(\n datatoken_publish_market_order_fee_token\n ),\n ],\n [datatoken_cap, datatoken_publish_market_order_fee_amount],\n datatoken_bytess,\n ),\n (\n ContractBase.to_checksum_address(fixed_price_address),\n [\n ContractBase.to_checksum_address(fixed_price_base_token),\n ContractBase.to_checksum_address(fixed_price_owner),\n ContractBase.to_checksum_address(\n fixed_price_publish_market_swap_fee_collector\n ),\n ContractBase.to_checksum_address(fixed_price_allowed_swapper),\n ],\n [\n fixed_price_base_token_decimals,\n fixed_price_datatoken_decimals,\n fixed_price_rate,\n fixed_price_publish_market_swap_fee_amount,\n fixed_price_with_mint,\n ],\n ),\n transaction_parameters,\n )\n\n @enforce_types\n def create_nft_erc20_with_dispenser(\n self,\n nft_name: str,\n nft_symbol: str,\n nft_template: int,\n nft_token_uri: str,\n nft_transferable: bool,\n nft_owner: str,\n datatoken_template: int,\n datatoken_name: str,\n datatoken_symbol: str,\n datatoken_minter: str,\n datatoken_fee_manager: str,\n datatoken_publish_market_order_fee_address: str,\n datatoken_publish_market_order_fee_token: str,\n datatoken_publish_market_order_fee_amount: int,\n datatoken_bytess: List[bytes],\n dispenser_address: str,\n dispenser_max_tokens: int,\n dispenser_max_balance: int,\n dispenser_with_mint: bool,\n dispenser_allowed_swapper: str,\n transaction_parameters: dict,\n datatoken_cap: Optional[int] = None,\n ) -> str:\n if datatoken_template == 2 and not datatoken_cap:\n raise Exception(\"Cap is needed for Datatoken Enterprise token deployment.\")\n datatoken_cap = datatoken_cap if datatoken_template == 2 else MAX_UINT256\n return self.contract.createNftWithErc20WithDispenser(\n (\n nft_name,\n nft_symbol,\n nft_template,\n nft_token_uri,\n nft_transferable,\n ContractBase.to_checksum_address(nft_owner),\n ),\n (\n datatoken_template,\n [datatoken_name, datatoken_symbol],\n [\n ContractBase.to_checksum_address(datatoken_minter),\n ContractBase.to_checksum_address(datatoken_fee_manager),\n ContractBase.to_checksum_address(\n datatoken_publish_market_order_fee_address\n ),\n ContractBase.to_checksum_address(\n datatoken_publish_market_order_fee_token\n ),\n ],\n [datatoken_cap, datatoken_publish_market_order_fee_amount],\n datatoken_bytess,\n ),\n (\n ContractBase.to_checksum_address(dispenser_address),\n 
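# dispenser tuple (semantics inferred from the parameter names, not from the contract ABI):\n # per-request token cap, max wallet balance, mint permission, allowed swapper\n 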
dispenser_max_tokens,\n dispenser_max_balance,\n dispenser_with_mint,\n ContractBase.to_checksum_address(dispenser_allowed_swapper),\n ),\n transaction_parameters,\n )\n\n @enforce_types\n def create_nft_with_metadata(\n self,\n nft_name: str,\n nft_symbol: str,\n nft_template: int,\n nft_token_uri: str,\n nft_transferable: bool,\n nft_owner: str,\n metadata_state: int,\n metadata_decryptor_url: str,\n metadata_decryptor_address: bytes,\n metadata_flags: bytes,\n metadata_data: Union[str, bytes],\n metadata_data_hash: Union[str, bytes],\n metadata_proofs: List[MetadataProof],\n transaction_parameters: dict,\n ) -> str:\n return self.contract.createNftWithMetaData(\n (\n nft_name,\n nft_symbol,\n nft_template,\n nft_token_uri,\n nft_transferable,\n ContractBase.to_checksum_address(nft_owner),\n ),\n (\n metadata_state,\n metadata_decryptor_url,\n metadata_decryptor_address,\n metadata_flags,\n metadata_data,\n metadata_data_hash,\n metadata_proofs,\n ),\n transaction_parameters,\n )\n\n @enforce_types\n def search_exchange_by_datatoken(\n self,\n fixed_rate_exchange: FixedRateExchange,\n datatoken: str,\n exchange_owner: Optional[str] = None,\n ) -> list:\n datatoken_contract = Datatoken(self.config_dict, datatoken)\n exchange_addresses_and_ids = datatoken_contract.getFixedRates()\n return (\n exchange_addresses_and_ids\n if exchange_owner is None\n else [\n exchange_address_and_id\n for exchange_address_and_id in exchange_addresses_and_ids\n if fixed_rate_exchange.getExchange(exchange_address_and_id[1])[0]\n == exchange_owner\n ]\n )\n\n @enforce_types\n def get_token_address(self, receipt):\n return receipt.events[\"NFTCreated\"][\"newTokenAddress\"]\n\n @enforce_types\n def check_datatoken(self, datatoken_address: str) -> bool:\n return self.contract.erc20List(datatoken_address)\n\n @enforce_types\n def check_nft(self, nft_address: str) -> bool:\n return self.contract.erc721List(nft_address) == nft_address\n","sub_path":"ocean_lib/models/data_nft_factory.py","file_name":"data_nft_factory.py","file_ext":"py","file_size_in_byte":12317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"38880467","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\nimport math\nfrom mainApp.forms import FunctionForm\nfrom mainApp.models import Functions, create_from_DF\nfrom mainApp.serializers.functionserializer import FunctionsSerializer\nfrom django.contrib.auth.decorators import login_required\nfrom django.template.response import TemplateResponse\nfrom mainApp.middlewares import checkInUrl\nfrom django.urls import reverse\nfrom django.http import HttpResponseRedirect\nfrom mainApp.viewapi.logs import createLog\nimport pandas as pd\nimport csv\nfrom django.http import HttpResponse\n\n@login_required(login_url='/login/')\ndef functionPagination_page(request, num=1, limit=10):\n \"\"\"\n Render the function page for the given page number and record limit (no data loaded yet)\n \"\"\"\n if checkInUrl(request, 'function') is False:\n listFunction = request.user.list_function()\n return HttpResponseRedirect(reverse(listFunction[0]))\n return TemplateResponse(request, 'adminuet/function.html', {'page': num, 'limit': limit})\n\n@login_required(login_url='/login/')\n@api_view(['GET'])\ndef function_getListForOffset(request, offset, limit):\n \"\"\"\n Return the function rows for the given offset,\n plus the total number of pages when split by limit\n \"\"\"\n if 
checkInUrl(request, 'function') is False:\n listFunction = request.user.list_function()\n return HttpResponseRedirect(reverse(listFunction[0]))\n if request.method == 'GET':\n functions = Functions.objects.order_by('-functionID').all()\n functionList = functions[offset:offset + limit]\n functionCount = functions.count()\n functionSerializer = FunctionsSerializer(functionList, many=True)\n page = math.ceil(functionCount/limit)\n data = {\n 'data': functionSerializer.data,\n 'numberOfPage': page,\n }\n createLog(request, 'VIEW - Function', '')\n return Response(data)\n\n@login_required(login_url='/login/')\n@api_view(['GET','POST'])\ndef function_form(request, id=0):\n \"\"\"\n Shared form for both Create and Edit\n Create uses POST\n Edit uses GET to also load the current row's data\n \"\"\"\n\n if checkInUrl(request, 'function') is False:\n listFunction = request.user.list_function()\n return HttpResponseRedirect(reverse(listFunction[0]))\n try:\n if request.method == 'GET':\n # Admin role\n if id == 0:\n functionForm = FunctionForm()\n else: # School-level administrator role\n fun = Functions.objects.get(pk=id)\n functionForm = FunctionForm(instance=fun)\n return TemplateResponse(request, 'adminuet/functionform.html', {'form': functionForm})\n else:\n contentLog = 'UPDATE - Function'\n contentMsg = 'Update successful.'\n # Admin role\n if id == 0:\n functionForm = FunctionForm(request.POST)\n contentLog = 'INSERT - Function'\n contentMsg = 'Create successful.'\n else: # School-level administrator role\n fun = Functions.objects.get(pk=id)\n functionForm = FunctionForm(request.POST, instance=fun)\n if functionForm.is_valid():\n functionNameNew = functionForm['functionName'].value()\n if not Functions.objects.filter(functionName=functionNameNew):\n functionForm.save()\n createLog(request, contentLog, '')\n messages.success(request, contentMsg)\n else:\n messages.error(request, 'Please change the function name. 
This function already exists.')\n return redirect('/adminuet/function-form/'+str(id))\n except Exception as error:\n print(error)\n messages.error(request, \"Operation failed.\")\n return redirect('/adminuet/function/')\n\n@login_required(login_url='/login/')\ndef function_delete(request, id):\n \"\"\"\n Delete a single function\n \"\"\"\n if checkInUrl(request, 'function') is False:\n listFunction = request.user.list_function()\n return HttpResponseRedirect(reverse(listFunction[0]))\n try:\n function = Functions.objects.get(pk=id)\n createLog(request, 'DELETE - Function', str(function.functionName))\n function.delete()\n messages.success(request, \"Delete successful.\")\n except Exception as error:\n print(error)\n messages.error(request, \"Operation failed.\")\n return redirect('/adminuet/function/')\n\n\n@login_required(login_url='/login/')\ndef export_page(request):\n \"\"\"\n Export the functions to a CSV file\n \"\"\"\n\n if checkInUrl(request, 'function') is False:\n listFunction = request.user.list_function()\n return HttpResponseRedirect(reverse(listFunction[0]))\n try:\n nameFileExport = 'attachment; filename=\"{}.csv\"'.format(\"ListFunction\")\n list_function = Functions.objects.all()\n rows = ([i+1, function.functionName] for function, i in zip(list_function, range(list_function.count())))\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = nameFileExport\n writer = csv.writer(response)\n writer.writerow(['stt', 'functionName'])\n for row in rows:\n writer.writerow([row[0], row[1]])\n createLog(request, 'EXPORT - Function', '')\n return response\n except Exception as error:\n print(error)\n messages.error(request, \"Operation failed.\")\n return redirect('/adminuet/function/')\n\n\n@login_required(login_url='/login/')\ndef import_page(request):\n \"\"\"Import functions from a CSV file\n Reads the stt and functionName columns from the CSV into the Functions model\n \"\"\"\n if checkInUrl(request, 'function') is False:\n listFunction = request.user.list_function()\n return HttpResponseRedirect(reverse(listFunction[0]))\n template = 'adminuet/functionimport.html'\n if request.method == 'GET':\n return TemplateResponse(request, template)\n try:\n csv_file = request.FILES['document']\n except Exception as error:\n messages.error(request,'Error: No data file selected.')\n return TemplateResponse(request, template)\n if not csv_file.name.endswith('.csv'):\n messages.error(request,'Error: Wrong file format. 
Please choose the file again.')\n return TemplateResponse(request, template)\n try:\n df = pd.read_csv(csv_file)\n create_from_DF(df=df, model=Functions, searching_cols=['functionName'])\n except Exception as error:\n print(error)\n messages.error(request,'Error: The data is not in the correct format.')\n return TemplateResponse(request, template)\n return redirect('/adminuet/function/')","sub_path":"mainApp/viewapi/functionview.py","file_name":"functionview.py","file_ext":"py","file_size_in_byte":7075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"489236956","text":"#!/usr/bin/env python2.7\n\nfrom flask import (Flask, render_template, request, redirect,\n url_for, flash, jsonify)\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Restaurant, MenuItem\n\napp = Flask(__name__)\n\n\nclass DB_Connection:\n def __init__(self):\n engine = create_engine('sqlite:///restaurantmenu.db')\n Base.metadata.bind = engine\n DBSession = sessionmaker(bind=engine)\n self.session = DBSession()\n\n def get_all_restaurants(self):\n return self.session.query(Restaurant)\n\n def get_restaurant(self, restaurant_id):\n return self.session.query(Restaurant).filter_by(\n id=restaurant_id).one()\n\n def get_menu(self, restaurant_id):\n return self.session.query(\n MenuItem).filter_by(\n restaurant_id=restaurant_id)\n\n def get_menu_item(self, menu_id):\n return self.session.query(MenuItem).filter_by(\n id=menu_id).one()\n\n\n@app.route('/')\n@app.route('/restaurants')\ndef showRestaurants():\n db = DB_Connection()\n restaurants = db.get_all_restaurants()\n return render_template('index.html', restaurants=restaurants)\n\n\n@app.route('/api/restaurants')\ndef showRestaurantsJson():\n db = DB_Connection()\n restaurants = db.get_all_restaurants()\n return jsonify(\n Restaurants=[restaurant.serialize for restaurant in restaurants])\n\n\n@app.route('/restaurants/new', methods=['GET', 'POST'])\ndef newRestaurant():\n db = DB_Connection()\n if request.method == 'POST':\n new_restaurant = Restaurant(\n name=request.form['name']\n )\n db.session.add(new_restaurant)\n db.session.commit()\n flash('New restaurant {} created!'.format(new_restaurant.name))\n return redirect(\n url_for('newMenuItem', restaurant_id=new_restaurant.id))\n else:\n return render_template('new-restaurant.html')\n\n\n@app.route('/restaurants//edit', methods=['GET', 'POST'])\ndef editRestaurant(restaurant_id):\n db = DB_Connection()\n restaurant = db.get_restaurant(restaurant_id)\n old_name = restaurant.name\n if request.method == 'POST':\n restaurant.name = request.form['name']\n db.session.add(restaurant)\n db.session.commit()\n flash('{0} has been renamed to {1}'.format(old_name, restaurant.name))\n return redirect(url_for('showRestaurants'))\n else:\n return render_template('edit-restaurant.html', restaurant=restaurant)\n\n\n@app.route('/restaurants//delete', methods=['GET', 'POST'])\ndef deleteRestaurant(restaurant_id):\n db = DB_Connection()\n restaurant = db.get_restaurant(restaurant_id)\n old_restaurant = restaurant.name\n if request.method == 'POST':\n db.session.delete(restaurant)\n db.session.commit()\n flash('{0} has been deleted'.format(old_restaurant))\n return redirect(url_for('showRestaurants'))\n else:\n return render_template('delete-restaurant.html', restaurant=restaurant)\n\n\n@app.route('/restaurants//menu')\ndef showMenu(restaurant_id):\n db = DB_Connection()\n restaurant = db.get_restaurant(restaurant_id)\n menu = 
db.get_menu(restaurant_id)\n appetizer = any(item.course == \"Appetizer\" for item in menu)\n entree = any(item.course == \"Entree\" for item in menu)\n dessert = any(item.course == \"Dessert\" for item in menu)\n beverage = any(item.course == \"Beverage\" for item in menu)\n other = any(item.course not in [\"Appetizer\",\"Entree\",\"Dessert\",\"Beverage\"] for item in menu)\n return render_template(\n 'menu.html',\n restaurant=restaurant,\n menu=menu,\n appetizer=appetizer,\n entree=entree,\n dessert=dessert,\n beverage=beverage,\n other=other)\n\n\n@app.route('/api/restaurants//menu')\ndef showMenuJson(restaurant_id):\n db = DB_Connection()\n menu = db.get_menu(restaurant_id)\n return jsonify(MenuItem=[item.serialize for item in menu])\n\n\n@app.route('/api/restaurants//menu/')\ndef showMenuItemJson(restaurant_id, menu_id):\n db = DB_Connection()\n menu_item = db.get_menu_item(menu_id)\n return jsonify(MenuItem=menu_item.serialize)\n\n\n@app.route(\n '/restaurants//menu/new', methods=['GET', 'POST'])\ndef newMenuItem(restaurant_id):\n db = DB_Connection()\n restaurant = db.get_restaurant(restaurant_id)\n if request.method == 'POST':\n menu_item = MenuItem(\n course=request.form['course'],\n name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'],\n restaurant_id=restaurant.id\n )\n db.session.add(menu_item)\n db.session.commit()\n flash(\"Added {0} to {1}\".format(menu_item.name, restaurant.name))\n return redirect(url_for('showMenu', restaurant_id=restaurant_id))\n return render_template('new-menu-item.html', restaurant=restaurant)\n\n\n@app.route(\n '/restaurants//menu//edit',\n methods=['GET', 'POST'])\ndef editMenuItem(restaurant_id, menu_id):\n db = DB_Connection()\n restaurant = db.get_restaurant(restaurant_id)\n menu_item = db.get_menu_item(menu_id)\n old_name = menu_item.name\n old_price = menu_item.price\n if request.method == 'POST':\n menu_item.course = request.form['course']\n menu_item.name = request.form['name']\n menu_item.description = request.form['description']\n menu_item.price = request.form['price']\n db.session.add(menu_item)\n db.session.commit()\n flash(\"Updated {0}. 
It's price was {1}\".format(old_name, old_price))\n return redirect(url_for('showMenu', restaurant_id=restaurant_id))\n else:\n return render_template(\n \"edit-menu-item.html\", restaurant=restaurant, menu_item=menu_item)\n\n\n@app.route(\n '/restaurants//menu//delete',\n methods=['GET', 'POST'])\ndef deleteMenuItem(restaurant_id, menu_id):\n db = DB_Connection()\n restaurant = db.get_restaurant(restaurant_id)\n menu_item = db.get_menu_item(menu_id)\n item_name = menu_item.name\n if request.method == 'POST':\n db.session.delete(menu_item)\n db.session.commit()\n flash(\"Deleted menu item {0}\".format(item_name))\n return redirect(url_for('showMenu', restaurant_id=restaurant_id))\n else:\n return render_template(\n 'delete-menu-item.html',\n restaurant=restaurant, menu_item=menu_item)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.secret_key = 'R34d23zDSdXT2NH00RLHBK6JetPtDxsg'\n app.run(host='0.0.0.0', port=5000)\n","sub_path":"vagrant/course/Lesson4/finalProject.py","file_name":"finalProject.py","file_ext":"py","file_size_in_byte":6758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"10345137","text":"import ldap\nimport ldap.filter\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\nfrom django.conf import settings\n#from accounts import settings\nfrom django.views.decorators.csrf import requires_csrf_token\nfrom django.http import HttpResponse, HttpResponseNotAllowed\n\nclass LDAPSearchResult(object):\n \"\"\"\n Convenience bag class to help organize the results of an LDAP search.\n \"\"\"\n def __init__(self, search_result):\n self.dn = search_result[0]\n self.cn = search_result[1]['cn'][0]\n\ndef search(canonical_name):\n \"\"\"\n Searches the LDAP server for all Organizational Units (OUs) whose Canonical\n Name (CN) contains the text argument canonical_name passed into the\n function.\n \"\"\"\n ldap.set_option(ldap.OPT_REFERRALS, 0)\n l = ldap.initialize(settings.LDAP_URL)\n l.set_option(ldap.OPT_PROTOCOL_VERSION, 3)\n binddn = ''\n try:\n binddn = \"%s@%s\" % (settings.BIND_USER, settings.NT4_DOMAIN)\n except AttributeError:\n binddn = settings.BIND_USER\n l.simple_bind_s(binddn, settings.BIND_PASSWORD)\n base = settings.SEARCH_DN\n scope = ldap.SCOPE_SUBTREE\n retrieve_attributes = ['cn']\n\n filtered_name = ldap.filter.escape_filter_chars(canonical_name)\n filter = 'cn=*%s*' % filtered_name\n\n results = l.search_s(base, scope, filter, retrieve_attributes)\n\n #result_objects = [LDAPSearchResult(result) for result in results]\n result_objects = []\n for result in results:\n if result[0]:\n result_objects.append(LDAPSearchResult(result))\n return result_objects\n\n@requires_csrf_token\ndef ldap_search(request):\n \"\"\"\n This view provides a JSON response of the Distinguished Names (DNs) returned\n by searching the LDAP server for OU name fragments. 
One can search for e.g.\n 'django' and retrieve all DNs with 'django' in the Canonical Name (CN).\n \"\"\"\n if request.method != 'POST':\n return HttpResponseNotAllowed('POST')\n req_cn = None\n if 'req_cn' in request.POST:\n req_cn = request.POST['req_cn']\n results = search(req_cn)\n res_json = json.dumps([res.dn for res in results], ensure_ascii=False)\n\n return HttpResponse(res_json, mimetype='application/json')\n","sub_path":"extension_cord/ldap_groups/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"541264724","text":"import unittest\nfrom .ds import ListNode\n\nclass Solution(unittest.TestCase):\n def deleteDuplicates(self, head: ListNode) -> ListNode:\n \"\"\"\nGiven a sorted linked list, delete all nodes that have duplicate numbers, leaving only distinct numbers from the original list.\n\nExample 1:\n\nInput: 1->2->3->3->4->4->5\nOutput: 1->2->5\nExample 2:\n\nInput: 1->1->1->2->3\nOutput: 2->3\n\n---\nBasic idea: add a new head, after process, return head.next. During process, use lastValid to store last valid node\n\n \"\"\"\n if not head or not head.next:\n return head\n\n newHead = ListNode(-1)\n newHead.next = head\n head = newHead\n\n pre, curr = head.next, head.next.next\n lastValid = head\n while curr:\n if curr.val != pre.val:\n if lastValid.next == pre:\n # no break, then pre is valid\n lastValid = pre\n else:\n # pre is invalid (coming from last round), connect to curr\n lastValid.next = curr\n else:\n # when duplication, break the linked list until next valid node found\n lastValid.next = None\n pre = curr\n curr = curr.next\n\n return head.next\n\n def testDeleteDuplicates(self):\n self.assertEqual(self.deleteDuplicates(ListNode.fromList([1,2,3,3,4,4,5])).toList(), [1,2,5])\n self.assertEqual(self.deleteDuplicates(ListNode.fromList([1,1,1,2,3])).toList(), [2,3])\n self.assertEqual(self.deleteDuplicates(ListNode.fromList([1,1,1,2,2])), None)\n","sub_path":"src/main/python/remove_duplicates_from_sorted_list_ii.py","file_name":"remove_duplicates_from_sorted_list_ii.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"526681685","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n\nif __name__ == \"__main__\":\n # df = pd.read_csv('pretrained_losses_epochs200_lr0.1__21_15_37.csv', index_col=False)\n df = pd.read_csv('pretrained_losses_epochs1000_lr0.1__23_15_40.csv', index_col=False)\n # df = df.reset_index(drop=True, inplace=True)\n # df = df.drop(df.columns[0], axis=1, inplace=True)\n # plt.title('Pretraining of the MLP, epochs = 200, batch_size = 50, learning_rate = 0.1')\n # plt.xlabel('observations')\n # plt.ylabel('MSE')\n df.columns = [0, 'pretraining loss']\n\n df.plot(y=df.columns[1], figsize=(6, 2), xlim=[0, len(df)], lw=0.03)\n\n\n # plt.savefig(\"20_04.jpg\")\n plt.show()\n","sub_path":"test/results/models/plot_pretraining_losses.py","file_name":"plot_pretraining_losses.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"380328793","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 18 13:13:45 2019\r\n\r\n@author: dell\r\n\"\"\"\r\n\r\nimport os\r\nimport numpy as np\r\nimport SimpleITK as itk\r\nimport torch\r\nimport torchvision\r\nimport torchvision.transforms as transforms\r\nimport torchvision.datasets as 
datasets\r\nfrom torch.utils.data import Dataset, DataLoader\r\nimport PIL.Image as Image\r\nimport random\r\nfrom tqdm import tqdm\r\n\r\n\r\nclass my_dataset(Dataset):\r\n def __init__(self, filepath):\r\n self.filepath = filepath\r\n self.src_path = filepath + \"image/\"\r\n self.tgt_path = filepath + \"mha/\"\r\n self.src_list = os.listdir(self.src_path)\r\n self.tgt_list = os.listdir(self.tgt_path)\r\n\r\n def __getitem__(self, index):\r\n img_name = self.src_list[index]\r\n img_name = self.src_path + img_name\r\n img = Image.open(img_name).convert('L')\r\n # img = trans(img)\r\n img = np.array(img)\r\n img = torch.Tensor(img).cuda()\r\n img = img.unsqueeze(0)\r\n\r\n mha_name = self.tgt_list[index]\r\n mha_name = self.tgt_path + mha_name\r\n sitkimage = itk.ReadImage(mha_name)\r\n volume = itk.GetArrayFromImage(sitkimage)\r\n volume = torch.Tensor(volume).cuda()\r\n\r\n return img, volume\r\n\r\n def __len__(self):\r\n return len(self.src_list)\r\n\r\ndef split(full_dataset, ratio=0.8):\r\n train_size = int(len(full_dataset) * ratio)\r\n test_size = len(full_dataset) - train_size\r\n train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])\r\n return train_dataset, test_dataset\r\n\r\n\r\ndef file_list(path, data_len, if_random=True):\r\n image_path = path + 'image/'\r\n volume_path = path + 'mha/'\r\n \r\n filelist1 = os.listdir(image_path)\r\n filelist1 = filelist1[:data_len]\r\n for i in range(len(filelist1)):\r\n filelist1[i] = image_path + filelist1[i]\r\n filelist2 = os.listdir(volume_path)\r\n filelist2 = filelist2[:data_len]\r\n for i in range(len(filelist2)):\r\n filelist2[i] = volume_path + filelist2[i]\r\n \r\n if if_random:\r\n randnum = random.randint(0, 100)\r\n random.seed(randnum)\r\n random.shuffle(filelist1)\r\n random.seed(randnum)\r\n random.shuffle(filelist2)\r\n \r\n# print(filelist1)\r\n# print(filelist2)\r\n \r\n return filelist1, filelist2 \r\n \r\ndef load_data(filelist_src, filelist_tgt): \r\n data_src = []\r\n data_tgt = []\r\n \r\n trans=transforms.Compose([transforms.ToTensor()])\r\n \r\n for f in filelist_src:\r\n path = f\r\n img = Image.open(path).convert('L')\r\n img = trans(img)\r\n img = np.array(img)\r\n data_src.append(img)\r\n \r\n for f in filelist_tgt:\r\n path = f\r\n sitkimage = itk.ReadImage(path)\r\n volume = itk.GetArrayFromImage(sitkimage)\r\n data_tgt.append(volume)\r\n \r\n data_src = np.array(data_src)\r\n data_src = torch.Tensor(data_src)\r\n \r\n data_tgt = np.array(data_tgt)\r\n data_tgt = torch.Tensor(data_tgt)\r\n \r\n return data_src.cuda(), data_tgt.cuda()\r\n\r\nif __name__ == '__main__':\r\n # src, tgt = file_list('../data/', 10)\r\n # d1, d2 = load_data(src, tgt)\r\n full_dataset = my_dataset(filepath=\"../data/\")\r\n train_dataset, test_dataset = split(full_dataset)\r\n train_loader = DataLoader(train_dataset, shuffle=True, batch_size=1)\r\n test_loader = DataLoader(test_dataset, shuffle=False, batch_size=1)\r\n for i in tqdm(train_loader):\r\n img, mha = i\r\n\r\n","sub_path":"3D reconstruction for X-rays/code/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"125682557","text":"#!/usr/bin/python3\n\nimport argparse\nimport calendar\nimport csv\nimport json\nimport logging\nimport os\nimport requests\nimport time\n\n# Create id->name dict of tasks in a given workspace\n\n\n\ndef main():\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n 
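# flow: parse CLI args, fetch every task in the workspace from the Toggl v8 API,\n # then dump an id->name lookup as JSON (see tasks2json_dict below)\n 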
toggl_fetcher = TogglFetcher()\n    toggl_fetcher.get_args()\n    task_list = toggl_fetcher.get_workspace_tasks()\n    toggl_fetcher.tasks2json_dict(task_list)\n\n\nclass TogglFetcher:\n    def __init__(self):\n        self.apitoken = os.environ['TOGGL_APITOKEN']\n        self.basepath = 'https://www.toggl.com/api/v8'\n        self.workspace = {\"id\": 1954870, \"name\": \"Rainer Hoerbe's workspace\"}\n        self.tasks = {}\n\n    def get_args(self):\n        self.parser = argparse.ArgumentParser(description='Toggl task name fetcher')\n        self.parser.add_argument('-d', '--outdir', dest='outdir',\n                                 default='/data_team', help='output directory')\n        self.args = self.parser.parse_args()\n\n    def get_workspace_tasks(self):\n        urlargs = 'workspaces/{}/tasks?active=both'.format(self.workspace[\"id\"])\n        return self.get_resource(urlargs)\n\n    def get_resource(self, urlargs):\n        self.uri = self.basepath + '/' + urlargs\n        logging.info(self.uri)\n        response = requests.get(self.uri, auth=(self.apitoken, 'api_token'))\n        logging.info(response.status_code)\n        if response.status_code >= 400:\n            msg = 'Request to %s failed: %s' % (self.uri, response.text)\n            logging.error(msg)\n            raise Exception(msg)\n        try:\n            item_dict = json.loads(response.text)\n            return item_dict\n        except json.decoder.JSONDecodeError as e:\n            logging.error('Error on accessing %s. Response:\\n%s' % (self.uri, response.text))\n            logging.error(str(e))\n            raise Exception\n\n    def tasks2json_dict(self, task_list):\n        outpath = os.path.join(self.args.outdir, 'workspaces/{}/tasks.json'.format(self.workspace[\"id\"]))\n        os.makedirs(os.path.dirname(outpath), exist_ok=True)\n        with open(outpath, 'w', encoding='utf-8', newline='') as fd:\n            task_dict = {}\n            for t in task_list:\n                tid = t[\"id\"]\n                tname = t[\"name\"]\n                task_dict[tid] = tname\n            tasks_json = json.dumps(task_dict)\n            fd.write(tasks_json)\n\n\nmain()\n","sub_path":"install/scripts/get_tasks.py","file_name":"get_tasks.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"155879701","text":"import os, time\nfrom selenium import webdriver\n\ndef create_img(driver, filename):\n    \"\"\"\n    Screenshot function: capture the current page and save it to a file\n    :param driver:\n    :param filename:\n    :return:\n    \"\"\"\n    # directory where the screenshots are stored\n    base_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '\\\\Report\\\\screenshot')\n\n    photofile = os.path.join(base_dir, filename)\n\n    driver.get_screenshot_as_file(photofile)\n\n\n\nif __name__ == \"__main__\":\n    driver = webdriver.Chrome()\n    driver.get('http://www.baidu.com')\n    time.sleep(2)\n    create_img(driver, '百度一下.png')\n    driver.quit()","sub_path":"appium_project/Public/img.py","file_name":"img.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"587942716","text":"from gcloud import pubsub\nimport time\nfrom random import randint\nimport csv\nimport requests\nimport json\nfrom firebase import Firebase\nfrom datetime import datetime\nfrom time import mktime\nfrom datetime import timedelta\nimport os\nimport sys\nimport math\nfrom multiprocessing import Process\n\n\nif datetime.now().isoweekday() > 5:\n    sys.exit(0)\n\n\n# get prices and trends snapshot\npricesResults = {}\n\nwhile True:\n\n    try:\n        # get prices and trends snapshot\n        prices = Firebase(\"https://sv2.firebaseio.com/etf_prices\")\n        pricesResults = prices.get()\n        print(\"Got Prices\")\n\n        break\n    except:\n        print(\"Download Error\")\n#\n# for price in pricesResults:\n#\n#     while True: #delete previous days data\n#         try:\n#             
Firebase(\"https://seagleall.firebaseio.com/\" + price).remove()\n# break\n# except:\n# print(\"delete error\")\n\nclient = pubsub.Client.from_service_account_json('/banshee/credentials.json', 'money-maker-1236')\ntopic = client.topic('hotalgos')\n\nfor stock in pricesResults:\n print(stock)\n with topic.batch() as batch:\n for i in range(1,8): #change to 7 days (1,8)\n while True:\n try:\n batch.publish('another stock to compute', stock=stock, days=str(i), algo=\"twilight\")\n break\n except:\n print(\"publish error\")\n\n while True:\n try:\n batch.publish('another stock to compute', stock=stock, days=str(i), algo=\"sage\")\n break\n except:\n print(\"publish error\")\n\n while True:\n try:\n batch.publish('another stock to compute', stock=stock, days=str(i), algo=\"wizard\")\n break\n except:\n print(\"publish error\")\n\n\n\n\n# # #ensure pipeline is ready\n# notReady = True\n#\n# while (notReady):\n# while True:\n# try:\n# pipeline = Firebase(\"https://sv2.firebaseio.com/pipeline\")\n# pipelineResults = pipeline.get()\n# if (\"google\" in pipelineResults and \"twitter\" in pipelineResults and \"wikipedia\" in pipelineResults and \"stocks\" in pipelineResults):\n# notReady = False\n# print(\"Ready to start\")\n# else:\n# time.sleep(180)\n#\n#\n# break\n# except:\n# print (\"Download error...trying again!\")\n\n\n# while True:\n# try:\n# pipeline = Firebase(\"https://sv2.firebaseio.com/pipeline\")\n# pipeline.remove()\n# break\n# except:\n# print(\"Download error\")\n\n\n\n# #start up all instances\n# for i in range(1,24):\n# while True:\n# try:\n# os.system(\"gcloud compute instances start etfshadow-\" + str(i) + \" --zone us-central1-b\")\n# break\n# except:\n# print(\"startup error\")\n#\n#\n# #check ready\n# time.sleep(60 * 55)\n#\n# pipelineResults = {}\n# notReady = True\n# itemsNotReady = 0\n# timesNotReadyWithCount = 0\n# itemsNeedToReplace = {}\n# while(notReady):\n# while True:\n# try:\n# pipeline = Firebase(\"https://sv2.firebaseio.com/hotpipeline\")\n# pipelineResults = pipeline.get()\n# notReady = False\n# newTryItemsNotReady = 0\n# newTryItems = {}\n# for i in range(1,8): #(1,8)\n#\n# for stock in pricesResults:\n#\n# if (\"sage:\" + stock + \":\" + str(i)) not in pipelineResults:\n# notReady = True\n# newTryItemsNotReady += 1\n# newTryItems[\"sage:\" + stock + \":\" + str(i)] = {\n# 'stock':stock, 'algo':'sage', 'days':str(i)\n# }\n# print(\"not ready to start\")\n# if (\"wizard:\" + stock + \":\" + str(i)) not in pipelineResults:\n# notReady = True\n# newTryItemsNotReady += 1\n# newTryItems[\"wizard:\" + stock + \":\" + str(i)] = {\n# 'stock':stock, 'algo':'wizard', 'days':str(i)\n# }\n# print(\"not ready to start\")\n# if (\"twilight:\" + stock + \":\" + str(i)) not in pipelineResults:\n# notReady = True\n# newTryItemsNotReady += 1\n# newTryItems[\"twilight:\" + stock + \":\" + str(i)] = {\n# 'stock':stock, 'algo':'twilight', 'days':str(i)\n# }\n# print(\"not ready to start\")\n#\n# if notReady == False:\n# break\n#\n# if newTryItemsNotReady != itemsNotReady:\n# itemsNotReady = newTryItemsNotReady\n# timesNotReadyWithCount = 1\n# itemsNeedToReplace = newTryItems\n#\n# if newTryItemsNotReady == itemsNotReady:\n# timesNotReadyWithCount += 1\n# itemsNeedToReplace = newTryItems\n#\n# if timesNotReadyWithCount > 5:\n#\n# timesNotReadyWithCount = 0\n#\n# #error detected as pipeline never finished\n# for i in range(1,24):\n# while True:\n# try:\n# os.system(\"gcloud compute instances stop etfshadow-\" + str(i) + \" --zone us-central1-b\")\n# break\n# except:\n# print(\"startup 
error\")\n#\n# time.sleep(30)\n#\n# while True:\n# try:\n# with topic.batch() as batch:\n# for item in itemsNeedToReplace:\n# while True:\n# try:\n# batch.publish('another stock to compute', stock=itemsNeedToReplace[item][\"stock\"], days=itemsNeedToReplace[item][\"days\"], algo=itemsNeedToReplace[item][\"algo\"])\n# break\n# except:\n# print(\"publish error\")\n# break\n# except:\n# print(\"publish error\")\n#\n# time.sleep(10)\n#\n#\n#\n# #start up first instance\n#\n# while True:\n# try:\n# os.system(\"gcloud compute instances start etfshadow-1 --zone us-central1-b\")\n# break\n# except:\n# print(\"startup error\")\n#\n# time.sleep(60 * 12) #wait 12 minutes\n#\n#\n# break\n# except:\n# print(\"Download Error\")\n#\n# if notReady:\n# time.sleep(180)\n#\n# print(\"READY!\")\n# #\n# while True:\n# try:\n# pipeline = Firebase(\"https://sv2.firebaseio.com/hotpipeline\")\n# pipeline.remove()\n# # pipeline.update({\"market_ready\":\"market_ready\"})\n#\n# #can kick subsequent analysis to start\n#\n# status = Firebase(\"https://sv2.firebaseio.com/status\")\n# status.update({\"etf_decision_data\":datetime.now().strftime('%Y-%m-%d %H:%M:%S')})\n# break\n# except:\n# print(\"Upload Error\")\n#\n# # make sure all machines are stopped\n# for i in range(1,24):\n# while True:\n# try:\n# os.system(\"gcloud compute instances stop etfshadow-\" + str(i) + \" --zone us-central1-b\")\n# break\n# except:\n# print(\"startup error\")\n#\n\n\n#add stocks to pipeline\n\n# client = pubsub.Client.from_service_account_json('/banshee/credentials.json', 'money-maker-1236')\n# topic = client.topic('hotstocks')\n# while True:\n# try:\n# with topic.batch() as batch:\n# for stock in pricesResults:\n# print(stock)\n#\n#\n# batch.publish('another stock to compute', stock=stock)\n#\n# break\n# except:\n# print(\"publish error\")\n\n#\n#\n#spawn stock processors (shouldn't be too many machines)\n#start up all instances\n# for i in range(1,2):\n# while True:\n# try:\n# os.system(\"gcloud compute instances start hot-shadow-stocks-\" + str(i) + \" --zone us-central1-b\")\n# break\n# except:\n# print(\"startup error\")\n#\n# pipelineResults = {}\n#\n#\n# time.sleep(180)\n# notReady = True\n# while(notReady):\n# while True:\n# try:\n# pipeline = Firebase(\"https://sv2.firebaseio.com/hotpipeline\")\n# pipelineResults = pipeline.get()\n# notReady = False\n#\n# for stock in pricesResults:\n#\n# if stock not in pipelineResults:\n# notReady = True\n# print(\"not ready to start\")\n#\n# break\n# except:\n# print(\"Download Error\")\n#\n# if notReady:\n# time.sleep(180)\n#\n# while True:\n# try:\n# pipeline = Firebase(\"https://sv2.firebaseio.com/hotpipeline\")\n# pipeline.remove()\n# # pipeline.update({\"market_ready\":\"market_ready\"})\n#\n# #can kick subsequent analysis to start\n#\n# status = Firebase(\"https://sv2.firebaseio.com/status\")\n# status.update({\"hot_market_ready\":datetime.now().strftime('%Y-%m-%d %H:%M:%S')})\n# break\n# except:\n# print(\"Upload Error\")\n#\n# print(\"READY!\")\n#\n#\n\n","sub_path":"addToPubSubHotAll.py","file_name":"addToPubSubHotAll.py","file_ext":"py","file_size_in_byte":9629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"481920004","text":"from django.conf.urls import patterns, include, url\nfrom .views import IndexView, PerfilView, LoginView, PreinscripcionView\n\nurlpatterns = patterns('',\n \n url(r'^index/', IndexView.as_view()),\n url(r'^$', LoginView.as_view()),\n url(r'^preinscripcion/$', PreinscripcionView.as_view(), 
name='preinscripcion'),\n url(r'^perfil/$', PerfilView.as_view(), name='perfil'),\n \n\n \n)\n","sub_path":"ega/SistemaEGA/apps/alumno/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"489511679","text":"import argparse\nimport os\nimport uuid\nfrom pathlib import Path\n\n\ndef flatten_dir(input_dir: Path, output_dir: Path, extension: str, use_hash: bool):\n \"\"\"\n Flatten dir\n root/file1.txt → output/parent_dir_root/file1.txt\n root/dir1/file1.txt → output/dir1/file1.txt\n root/dir1/file2.txt → output/dir1/file2.txt\n root/dir1/dir1_1/file1.txt → output/dir1/dir1_1_file2.txt\n root/dir2/file1.txt → output/dir2/file1.txt\n root/dir2/dir2_1/file1.txt → output/dir2/dir2_1_file2.txt\n root/dir2/dir2_2/file1.txt → output/dir2/dir2_2_file2.txt\n \"\"\"\n output_dir.mkdir(parents=True, exist_ok=True)\n parent_cache = {}\n for file in input_dir.rglob(extension):\n if file.is_dir():\n continue\n\n relative_path = file.relative_to(input_dir)\n\n try:\n parent = relative_path.parent.parts[0]\n first_level_dir_relative = file.relative_to(input_dir / parent)\n except IndexError:\n parent = 'parent_dir_root'\n first_level_dir_relative = file.relative_to(input_dir)\n\n if use_hash:\n new_filename = f'{uuid.uuid4().hex}{file.suffix}'\n\n if parent not in parent_cache:\n parent_cache[parent] = uuid.uuid4().hex\n new_path = output_dir / parent_cache[parent] / new_filename\n else:\n new_filename = str(first_level_dir_relative).replace(os.path.sep, '_')\n new_path = output_dir / parent / new_filename\n\n print(file, '→', new_path)\n new_path.parent.mkdir(parents=True, exist_ok=True)\n try:\n file.rename(new_path)\n except FileNotFoundError:\n \"\"\"Probaly path too long (>260 chars)\"\"\"\n print('Too long path, processing...')\n file = Path(u'\\\\\\\\?\\\\' + str(file.absolute()))\n new_path = Path(u'\\\\\\\\?\\\\' + str(new_path.absolute()))\n file.rename(new_path)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('input_dir', help=\"Input dir\")\n parser.add_argument('output_dir', help=\"Output dir\")\n parser.add_argument(\"--ext\", default='*', help=\"Move only files with extension\")\n parser.add_argument(\"--hash\", help=\"Use hash instead of filenames\", action=\"store_true\")\n args = parser.parse_args()\n flatten_dir(Path(args.input_dir), Path(args.output_dir), args.ext, args.hash)\n","sub_path":"audiobook_sort.py","file_name":"audiobook_sort.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"66467580","text":"\"\"\"Various constants used in the system\"\"\"\n\n# system constants\nAPP_NAME = \"SelfStabilizingReconfiguration\"\n\n# module constants\nRUN_SLEEP = 1\nINTEGRATION_RUN_SLEEP = 0.05\n\n# communication constants\nFD_SLEEP = 0.25\nFD_TIMEOUT = 5\nMAX_QUEUE_SIZE = 10 # Max allowed amount of messages in send queue\n","sub_path":"helpers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"61449950","text":"# problem using backtracking and heuristic \r\nfrom datetime import datetime\r\n\r\nprint(f'please enter country name :')\r\n\r\nstart_time = datetime.now()\r\ncountry = input()\r\nnoofbacktracks=0\r\nclass Backtrackingheuristic(): \r\n\r\n\tdef __init__(self, nodes):\r\n\t\tself.V = 
nodes\r\n\t\tself.graph = [[0 for column in range(nodes)]\r\n\t\t\t\t\t\t\tfor row in range(nodes)]\r\n\r\n\t## heuristic functions\r\n\r\n\tdef LCV(self, domain_dictionary, colours):\r\n\t\tMin_degree_constraint = 0\r\n\t\tmin_degree_constraint_state = -1\r\n\r\n\t\tfor v in range(self.V):\r\n\t\t\tif colours[v]!=0:\r\n\t\t\t\tcontinue\r\n\t\t\tcount = 0\r\n\t\t\tfor i in range(self.V):\r\n\t\t\t\tif self.graph[v][i] == 0:\r\n\t\t\t\t\tcount = count + 1\r\n\t\t\tif count > Min_degree_constraint:\r\n\t\t\t\tMin_degree_constraint = count\r\n\t\t\t\tmin_degree_constraint_state = v\r\n\r\n\t\treturn min_degree_constraint_state\r\n\r\n\t# MRV = Minimum Remaining Values\r\n\tdef MRV(self, domain_dictionary, colours):\r\n\t\tminimum_values = {0:[],1:[],2:[],3:[],4:[]}\r\n\r\n\t\tfor key, value in domain_dictionary.items():\r\n\t\t\tif len(value)==0 and colours[key-1] == 0:\r\n\t\t\t\tminimum_values[0].append(key-1)\r\n\r\n\t\t\telif(len(value)==1 and colours[key-1]==0):\r\n\t\t\t\tminimum_values[1].append(key-1)\r\n\r\n\t\t\telif(len(value)==2 and colours[key-1]==0):\r\n\t\t\t\tminimum_values[2].append(key-1)\r\n\r\n\t\t\telif(len(value)==3 and colours[key-1]==0):\r\n\t\t\t\tminimum_values[3].append(key-1)\r\n\r\n\t\t\telif(len(value)==4 and colours[key-1]==0):\r\n\t\t\t\tminimum_values[4].append(key-1)\r\n\r\n\t\tif len(minimum_values[0])>0:\r\n\t\t\treturn minimum_values[0]\r\n\r\n\t\telif len(minimum_values[1])>0:\r\n\t\t\treturn minimum_values[1]\r\n\r\n\t\telif len(minimum_values[2])>0:\r\n\t\t\treturn minimum_values[2]\r\n\r\n\t\telif len(minimum_values[3])>0:\r\n\t\t\treturn minimum_values[3]\r\n\t\telse:\r\n\t\t\treturn minimum_values[4]\r\n\r\n\tdef degree_constraint(self, domain_dictionary, colours):\r\n\t\tmaxdegree_constraint = 0\r\n\t\tmax_degree_constraint_state = -1\r\n\r\n\t\tfor v in range(self.V): \r\n\t\t\tif colours[v]!=0:\r\n\t\t\t\tcontinue\r\n\t\t\tcount = 0\r\n\t\t\tfor i in range(self.V):\r\n\t\t\t\tif self.graph[v][i] == 1:\r\n\t\t\t\t\tcount = count + 1\r\n\t\t\tif count > maxdegree_constraint:\r\n\t\t\t\tmaxdegree_constraint = count\r\n\t\t\t\tmax_degree_constraint_state = v\r\n\t\treturn max_degree_constraint_state\r\n\r\n\r\n\r\n\tdef get_next_state(self, domain_dictionary, colours):\r\n\t\tnext_state = 0\r\n\r\n\t\tnext_MRV_states = self.MRV(domain_dictionary, colours)\r\n\t\tnext_degree_constraint_states = self.degree_constraint(domain_dictionary, colours)\r\n\t\tnext_LCV_states = self.LCV(domain_dictionary, colours)\r\n\r\n\t\tif (len(next_MRV_states)==1):\r\n\t\t\tnext_state = next_MRV_states[0]\r\n\t\telif(next_degree_constraint_states!=-1):\r\n\t\t\tnext_state = next_degree_constraint_states\r\n\t\telse:\r\n\t\t\tnext_state = next_LCV_states\r\n\r\n\t\treturn next_state\r\n\r\n\r\n\r\n\tdef is_safe(self, v, colour, c):\r\n\t\tfor i in range(self.V): \r\n\t\t\tif self.graph[v][i] == 1 and colour[i] == c: \r\n\t\t\t\treturn False\r\n\t\treturn True\r\n\t\r\n\tdef get_neighbors(self, state):\r\n\t\tneighbours = []\r\n\t\tfor i in range(self.V):\r\n\t\t\tif self.graph[state][i] == 1:\r\n\t\t\t\tneighbours.append(i)\r\n\t\treturn neighbours\r\n\r\n\r\n\tdef is_coloured(self, colors):\r\n\t\ttotalvertex = 0\r\n\r\n\t\tfor color in colors:\r\n\t\t\tif color != 0:\r\n\t\t\t\ttotalvertex = totalvertex + 1\r\n\r\n\t\tif totalvertex == self.V:\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\r\n \r\n\tdef graph_color(self, m, colour, v):\r\n\t\tglobal noofbacktracks\r\n\t\ttry:\r\n\t\t\tif self.is_coloured(colour):\r\n\t\t\t\treturn True\r\n\t\t\tif v == self.V: 
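# ran past the last vertex, so every vertex already has a colour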
\r\n\t\t\t\treturn True\r\n\r\n\t\t\tif not domain_dictionary[v+1]: \r\n\t\t\t\treturn False\r\n\r\n\t\t\tfor c in domain_dictionary[v+1]:\r\n\t\t\t\tif self.is_safe(v, colour, c) == True:\r\n\t\t\t\t\tcolour[v] = c \r\n\t\t\t\t\tneighbors = self.get_neighbors(v)\r\n\t\t\t\t\tnext_state = self.get_next_state(domain_dictionary, colour)\r\n\t\t\t\t\tif next_state != -1:\r\n\t\t\t\t\t\tif self.graph_color(m, colour, next_state) == True:\r\n\t\t\t\t\t\t\treturn True\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tif self.graph_color(m, colour, v+1) == True:\r\n\t\t\t\t\t\t\treturn True\r\n\t\t\t\t\tcolour[v] = 0\r\n\t\t\t\tnoofbacktracks=noofbacktracks+1\r\n\t\texcept Exception as e:\r\n\t\t\tprint(\"something wrong\", e)\r\n\r\n\tdef graph_colouring(self, m): \r\n\t\tcolour = [0] * self.V \r\n\t\tif self.graph_color(m, colour, 0) == False: \r\n\t\t\treturn False\r\n\r\n\t\t# Print the solution \r\n\t\tprint(\"Graph is consistent:\")\r\n\t\tfor idx, val in enumerate(colour): \r\n\t\t\t result_dictionary[state_dictionary[str(idx+1)]] = color_dictionary[str(val)]\r\n\t\treturn True\r\n\r\n\r\n\r\n\r\n\r\ndef createdomain_dictionary(n):\r\n\tfor key, value in enumerate(state_dictionary):\r\n\t\tintegers = list(range(1,n))\r\n\t\tdomain_dictionary[key+1] = integers\r\n\r\nif country == 'America':\r\n change_position=[[1,9,33,40,42],[],[5,6,28,31,44],[18,24,25,36,42,43],[3,28,37],[3,16,27,31,34,36,50],[21,32,39],[20,30,38],[1,10],[9,10,24,42],[],[26,28,37,44,47,50],[14,15,17,22,25,49],[13,17,22,35],[13,23,25,27,41,49],[6,25,27,36],[13,14,25,35,42,46,48],[4,24,43],[29],[8,38,46,48],[7,29,32,39,45],[13,14,23,35,49],[15,22,34,41,49],[1,18,42,43],[4,13,15,16,17,27,36,42],[12,34,41,50],[6,15,16,25,41,50],[3,5,12,37,44],[19,21,45],[8,32,38],[3,6,36,43,44],[7,21,30,38,39,45],[10,23,26,40,41,42,46],[23,26,41],[4,6,16,25,31,43],[14,17,22,38,48],[5,12,28,47],[8,20,30,32,35,48],[7,21,32],[10,33],[15,23,26,27,34,50],[1,4,10,17,24,25,33],[4,18,31,36],[3,6,12,28,31,50],[21,29,32],[17,20,33,42,48],[12,37],[17,20,35,38,46],[6,12,26,27,41,44],[13,15,20,22,23]]\r\n\r\n positions = []\r\n for i in range(0,50) :\r\n individual=[]\r\n for j in range(0,50) :\r\n individual.append(0)\r\n for j in change_position[i] :\r\n individual[j-1] =1\r\n positions.append(individual)\r\n\r\n states = states=['Georgia','Alaska','Arizona','Arkansas','California','Colorado','Connecticut','Delaware','Florida','Alabama','Hawaii','Idaho','Illinois','Indiana','Iowa','Kansas','Kentucky','Louisiana','Maine','Maryland','Massachusetts','Michigan','Minnesota','Mississippi','Missouri','Montana','Nebraska','Nevada','New Hampshire','New Jersey','New Mexico','New York','North Carolina','North Dakota','Oklahoma','Ohio','Oregon','Pennsylvania','Rhode Island','South Carolina','South Dakota','Tennessee','Texas','Utah','Vermont','Virginia','Washington','West Virginia','Wyoming','Wisconsin']\r\n i=1\r\n state_dictionary = {}\r\n for state in states:\r\n state_dictionary[str(i)] = state\r\n i=i+1\r\n color_dictionary = {\"1\":\"red\", \"2\":\"green\", \"3\":\"yellow\", \"4\":\"blue\"}\r\n result_dictionary = {}\r\n domain_dictionary = {}\r\n\r\n createdomain_dictionary(5)\r\n\r\n\r\n # Driver Code\r\n g = Backtrackingheuristic(50) #number of states 48\r\n\r\n g.graph = positions\r\n\r\n m=4 ## chromataic number\r\n\r\nif country == 'Australia':\r\n change_position = [[3,4,6],[1,2,3,4,6,7],[1,2,4],[3,4,7],[],[2,4],[1,4]]\r\n positions = []\r\n for i in range(0,7) :\r\n individual=[]\r\n for j in range(0,7) :\r\n individual.append(0)\r\n for j in change_position[i] :\r\n 
individual[j-1] =1\r\n positions.append(individual)\r\n\r\n states= ['New South Wales','South Australia','Queensland','Northern Territory','Tasmania','Westren Australia','Victoria']\r\n i=1\r\n state_dictionary = {}\r\n for state in states:\r\n state_dictionary[str(i)]=state\r\n i=i+1\r\n color_dictionary = {\"1\": \"red\", \"2\": \"green\", \"3\": \"blue\"}\r\n result_dictionary = {}\r\n domain_dictionary = {}\r\n\r\n createdomain_dictionary(4)\r\n\r\n\r\n # Driver Code\r\n g = Backtrackingheuristic(7) #number of states\r\n\r\n g.graph = positions\r\n\r\n m=3 ## chromataic number\r\n\r\n\r\n\r\ng.graph_colouring(m) \r\n\r\nend_time = datetime.now()\r\n\r\nTime_difference = end_time - start_time\r\nprint(f'THE TOTAL EXECUTION TIME -->',str(Time_difference.total_seconds()))\r\nprint(f'Number of backtracks : {noofbacktracks}')\r\nfor key, value in result_dictionary.items():\r\n\tprint(f'{key} --> {value}')\r\n\r\n\r\n","sub_path":"MapColoring_Project3/Code/Heuristic/backtrackingheuristic.py","file_name":"backtrackingheuristic.py","file_ext":"py","file_size_in_byte":7639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"128653064","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 13 21:08:18 2020\n\n@author: rpira\n\"\"\"\nimport pandas as pd\nfrom sklearn.svm import SVR\nimport math\nimport numpy as np\nfrom random import choice\nimport csv\n# data for crossfold, feature, portion,K_fold\ndataset = pd.read_csv('DataI300Q7893.csv') # read data set using pandas\nFeature = 'Energy'\nprint('Output=', Feature)\nPortion = 1\nK_fold = 3\nMetrcis = Feature+'MAPEmse.csv'\n# Defining the dictionary\nDict = {}\nDict['Energy'] = {\"kernel\": ['rbf'], \"C\": [1, 10, 100, 1000, 10000],\n \"gamma\": [1e-4, 1e-3, 0.01, 0.1, 0.2, 0.5, 0.6, 0.9],\n \"epsilon\": [0.1, 1e-2, 1e-3, 1e-4]}\n\nDict['Error'] = {\"kernel\": ['linear', 'poly', 'rbf'],\n \"C\": [1, 10, 100, 1000, 10000],\n \"gamma\": [1e-4, 1e-3, 0.01, 0.1, 0.2, 0.5, 0.6, 0.9],\n \"epsilon\": [0.1, 1e-2, 1e-3, 1e-4]}\nDict['Time'] = {\"kernel\": ['linear', 'poly', 'rbf'],\n \"C\": [1, 10, 100, 1000, 10000],\n \"gamma\": [1e-4, 1e-3, 0.01, 0.1, 0.2, 0.5, 0.6, 0.9],\n \"epsilon\": [0.1, 1e-2, 1e-3, 1e-4]}\n\nDict['P'] = {\"kernel\": ['linear', 'poly', 'rbf'],\n \"C\": [1, 10, 100, 1000, 10000],\n \"gamma\": [1e-4, 1e-3, 0.01, 0.1, 0.2, 0.5, 0.6, 0.9],\n \"epsilon\": [0.1, 1e-2, 1e-3, 1e-4]}\n\nDict['D'] = {\"kernel\": ['linear', 'poly', 'rbf'],\n \"C\": [1, 10, 100, 1000, 10000],\n \"gamma\": [1e-4, 1e-3, 0.01, 0.1, 0.2, 0.5, 0.6, 0.9],\n \"epsilon\": [0.1, 1e-2, 1e-3, 1e-4]}\n\n# Defining MAPE\n\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n# Defining the model\n\n\ndef Model(train_dataset, train_labels, test_dataset, test_labels, index):\n print(\"Ind\", index)\n# The SVR Model\n regressor = SVR(kernel=Dict[Feature]['kernel'][index[0]],\n C=Dict[Feature]['C'][index[1]],\n gamma=Dict[Feature]['gamma'][index[2]],\n epsilon=Dict[Feature]['epsilon'][index[3]])\n# Fitting the lodel\n regressor.fit(train_dataset, train_labels)\n# Prediction from the model\n y_pred = regressor.predict(test_dataset)\n# Obtaining the MSE and MAPE\n MSE = ((y_pred-test_labels)**2).mean()\n MAPE = mean_absolute_percentage_error(test_labels, y_pred)\n return MAPE, MSE\n\n# Function for giving us the input and output based on the selected output\n\n\ndef Output_moddel_Data(CrossCount):\n # 
Deviding the data into the portion\n ppData = len(dataset)\n PortionData = math.floor(ppData*Portion)\n DataPortion = dataset[0:int(PortionData)]\n # Making the input\n InputData = DataPortion.copy()\n InputData.pop('P')\n InputData.pop('D')\n InputData.pop('Time')\n InputData.pop('Error')\n InputData.pop('Energy')\n # Making the output\n Out = DataPortion.copy()\n Output = Out[Feature]\n pp = len(InputData)\n counter = math.floor(pp/K_fold)\n # Making the data\n IndEnd = (CrossCount+1)*counter\n IndBeging = CrossCount*counter\n test_dataset = InputData[int(IndBeging):int(IndEnd)]\n train_dataset = InputData.drop(test_dataset.index)\n test_labels = Output[int(IndBeging):int(IndEnd)]\n train_labels = Output.drop(test_labels.index)\n return train_dataset, test_dataset, train_labels, test_labels\n# K-fold\n\n\ndef CrossOver(index):\n ScoresMAPE = []\n ScoresMSE = []\n for CrossCount in range(0, K_fold):\n [train_dataset, test_dataset, train_labels,\n test_labels] = Output_moddel_Data(CrossCount)\n mape, mse = Model(train_dataset, train_labels,\n test_dataset, test_labels, index)\n\n ScoresMAPE.append(mape)\n ScoresMSE.append(mse)\n print('MAPE=', ScoresMAPE)\n print('MSE=', ScoresMSE)\n return np.mean(ScoresMAPE), np.mean(ScoresMSE)\n\n\nCrossMape = []\nCrossMSE = []\nindex = []\nq1 = len(Dict[Feature]['kernel'])\nq2 = len(Dict[Feature]['gamma'])\nq3 = len(Dict[Feature]['C'])\nq4 = len(Dict[Feature]['epsilon'])\n# Defining the optimization treashold parameters\nMapeOpt = 1000000\nTreasholdCount = 5\nTreasureholdAccuracy = .5\ncount = 0\nk = 0\nm = 0\nz = 0\n\ntotMAPE = []\ntotMSE = []\nsize = q1*q2*q3*q4\nprint('Size=', size)\nfor CountParam in range(0, size):\n index = [choice(range(q1)), choice(range(q3)),\n choice(range(q2)), choice(range(q4))]\n count = count+1\n mape, mse = CrossOver(index)\n print(\"One crossOver\")\n if MapeOpt-mape > TreasureholdAccuracy:\n count = 0\n z = z+1\n TreasholdCount = 10*z+TreasholdCount\n if mape < MapeOpt:\n print(\"diff=\", MapeOpt-mape)\n MapeOpt = mape\n totMAPE.append(MapeOpt)\n totMSE.append(mse)\n m = m+1\n print(\"mape=\", mape)\n print(\"\")\n print(\"mse=\", mse)\n print(\"\")\n bestParam = index\n print(\"Model Number=\", bestParam)\n if count > TreasholdCount:\n break\n\nprint(\"\")\nprint(\"Resetting the counter=\", z)\nprint(\"Number of iteration MAPE reduces=\", m)\nprint(\"Number of iteration MAPE reduces without resettin=\", m-z)\nprint(\"Opt Iteration=\", z+count)\nprint(\"Treashold Count=\", TreasholdCount)\nprint(\"Count=\", count)\nInfo = {\"MAPE\": totMAPE, \"MSE\": totMSE, \"bestParam\": bestParam}\nwith open(Metrcis, 'w') as f: # You will need 'wb' mode in Python 2.x\n w = csv.DictWriter(f, Info.keys())\n w.writeheader()\n w.writerow(Info)\n\nprint(\"mape Total=\", totMAPE)\n","sub_path":"SVR/SVROptRandomized.py","file_name":"SVROptRandomized.py","file_ext":"py","file_size_in_byte":5453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"188551340","text":"n, m = map(int, input().split())\na_list = [[int(i) for i in input().split()] for i in range(n)]\nif n == 1:\n print(a_list[0][0])\nelse:\n cnt_list = []\n for i in range(1, m+1):\n cnt = 0\n for a_sub_list in a_list:\n cnt += a_sub_list[1:].count(i)\n cnt_list.append(cnt)\n print(len(list(filter(lambda x: x == n, cnt_list))))","sub_path":"src/ABC118/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"30808204","text":"# 205. Isomorphic Strings\n\n# Given two strings s and t, determine if they are isomorphic.\n# Two strings s and t are isomorphic if the characters in s can be replaced to get t.\n# All occurrences of a character must be replaced with another character while preserving the order of characters. No two characters may map to the same character, but a character may map to itself.\n\n# Example 1:\n# Input: s = \"egg\", t = \"add\"\n# Output: true\n\n# Example 2:\n# Input: s = \"foo\", t = \"bar\"\n# Output: false\n\n# Example 3:\n# Input: s = \"paper\", t = \"title\"\n# Output: true\n\n\nclass Solution(object):\n def isIsomorphic(self, s, t):\n dic1 = {}\n dic2 = {}\n\n for i in range(len(s)):\n if not s[i] in dic1:\n dic1[s[i]] = i\n if not t[i] in dic2:\n dic2[t[i]] = i\n\n if len(dic1) != len(dic2):\n return False\n\n if dic1[s[i]] != dic2[t[i]]:\n return False\n\n return True\n","sub_path":"Map/205.py","file_name":"205.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"440787871","text":"import time\nimport numpy as np\nimport tensorflow as tf\nimport isaac as sc\nimport timeit\nfrom tensorflow.python.client import timeline\n\nisaac = tf.load_op_library(sc.tensorflow)\n\n# Session\nsess = tf.Session()\noptions = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\nrun_metadata = tf.RunMetadata()\n\n# Shapes to benchmark\nshapes = [([7, 7], 512, 8, 512, [3, 3])]\nprint('Shapes\\t\\t\\t\\tISAAC\\tcuDN\\tError')\nfor shape in shapes:\n # Extract shapes\n Ishape, C, N, K, Fshape = shape\n dim = len(Ishape)\n fmt = {2: 'NCHW', 3: 'NCDHW'}[dim]\n tf_op = {2: tf.nn.conv2d, 3: tf.nn.conv3d}[dim]\n sc_op = {2: isaac.conv2d, 3: isaac.conv3d}[dim]\n # Graph\n A = tf.Variable(tf.random_uniform(shape=[N, C] + Ishape, seed=1), dtype=tf.float32)\n filters = tf.Variable(tf.random_uniform(shape= Fshape + [C, K], seed=1), dtype=tf.float32)\n trans_filters = tf.Variable(tf.transpose(filters, [dim] + list(range(dim)) + [dim + 1]))\n y_tf = tf_op(input=A, filter=filters, strides=[1]*(2 + dim), padding=\"SAME\", data_format=fmt)\n y_sc = sc_op(input=A, filter=trans_filters, strides=[1]*(2 + dim), padding=\"SAME\", data_format=fmt)\n # Initialize\n sess.run(tf.global_variables_initializer())\n # Compute\n z_sc = sess.run(y_sc)\n z_tf = sess.run(y_tf)\n error = np.linalg.norm(z_tf - z_sc) / np.linalg.norm(z_tf)\n t_sc = timeit.repeat(lambda: sess.run(tf.group(y_sc)), repeat=10, number=1)\n t_tf = timeit.repeat(lambda: sess.run(tf.group(y_tf)), repeat=10, number=1)\n # Log\n num_ops = 2*C*N*K*np.prod(Ishape)*np.prod(Fshape)*1e-12\n print('{}\\t{:.2f}\\t{:.2f}\\t{:.3f}'.format(shape, min(t_sc)*1e3, min(t_tf)*1e3, error))\n\n","sub_path":"python/examples/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"480875589","text":"# coding: UTF-8\nimport time\nimport torch\nimport pickle\nimport numpy as np\nfrom Bert.train_eval import train, init_network\nfrom importlib import import_module\nimport argparse\nfrom tqdm import tqdm\nfrom Bert.utils import build_iterator, get_time_dif\nPAD, CLS = '[PAD]', '[CLS]'\n\nparser = argparse.ArgumentParser(description='Chinese Text Classification')\nparser.add_argument('--model', type=str, required=True, help='choose a model: Bert, ERNIE')\nargs = parser.parse_args()\n\n\ndef evaluate(config, model, data_set, test=False):\n 
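# encode the whole data_set batch by batch with gradients disabled and stack the outputs\n    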
model.eval()\n loss_total = 0\n predict_all = np.array([], dtype=int)\n labels_all = np.array([], dtype=int)\n batch_size = 10\n b_len = len(data_set) // batch_size\n data_set = build_iterator(data_set, config)\n embeddings = []\n with torch.no_grad():\n for batch_data, label in data_set:\n outputs = model(batch_data)\n embeddings.append(outputs.cpu().detach().numpy())\n\n return np.concatenate(embeddings, 0)\n\ndef test(config, model, test_set):\n # test\n #model.load_state_dict(torch.load(config.save_path))\n model.eval()\n start_time = time.time()\n hidden, pooled = evaluate(config, model, test_set, test=True)\n return hidden, pooled\n\n\ndef build_dataset(config, train_data, test_data, id2word):\n\n def load_dataset(data, pad_size=256):\n contents = []\n data = data.astype(np.int64).tolist()\n for line in tqdm(data):\n for k in range(len(line)):\n line[k] = id2word[line[k]]\n for k in range(len(line)-1, -1, -1):\n if line[k] == '_END_':\n del line[k]\n line_str = ''\n for word in line:\n line_str += (word)\n\n token = config.tokenizer.tokenize(line_str)\n token = [CLS] + token\n seq_len = len(token)\n mask = []\n token_ids = config.tokenizer.convert_tokens_to_ids(token)\n\n if pad_size:\n if len(token) < pad_size:\n mask = [1] * len(token_ids) + [0] * (pad_size - len(token))\n token_ids += ([0] * (pad_size - len(token)))\n else:\n mask = [1] * pad_size\n token_ids = token_ids[:pad_size]\n seq_len = pad_size\n contents.append((token_ids, int(0), seq_len, mask))\n return contents\n train = load_dataset(train_data[0:5000], pad_size=config.pad_size)\n test = load_dataset(test_data[0:1000], pad_size=config.pad_size)\n return train, test\n\nif __name__ == '__main__':\n f = open('./data/word2idx_chinese.pickle', 'rb')\n idx2word = {}\n word2idx = pickle.load(f)\n for key, value in word2idx.items():\n idx2word[value] = key\n train_data = np.load('./data/train_data_c.npy')\n test_data = np.load('./data/test_data_c.npy')\n dataset = 'THUCNews' # 数据集\n model_name = 'bert'\n x = import_module('models.' 
+ model_name)\n config = x.Config(dataset)\n train_data, test_data = build_dataset(config, train_data, test_data, idx2word)\n np.random.seed(1)\n torch.manual_seed(1)\n torch.cuda.manual_seed_all(1)\n torch.backends.cudnn.deterministic = True # 保证每次结果一样\n\n start_time = time.time()\n print(\"Loading data...\")\n time_dif = get_time_dif(start_time)\n print(\"Time usage:\", time_dif)\n\n # train\n model = x.Model(config).to(config.device)\n hidden, pooled = test(config, model, train_data)\n print(hidden)\n print(pooled)\n\n","sub_path":"Bert/get_embedding.py","file_name":"get_embedding.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"571228878","text":"\"\"\"\nThis module contains consolidated metrics\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\ntry:\n\timport pyfolio as pf\nexcept ImportError:\n\tprint('pyfolio not installed')\n\ndef spread_test(data, periods=['Y','Q','M']):\n\t\"\"\"\n\tTest whether the returns are spread over the entire period\n\tor consolidated in a single period\n\tdata\n\t\treturns/pnl as series with date as index\n\tperiods\n\t\tperiods to check as list.\n\t\tall valid pandas date offset strings accepted\n\n\treturns a dataframe with periods as index and \n\tprofit/loss count and total payoff\n\t\"\"\"\n\tcollect = []\n\tfor period in periods:\n\t\trsp = data.resample(period).sum()\n\t\tgt = rsp[rsp >= 0]\n\t\tlt = rsp[rsp < 0]\n\t\tvalues = (len(gt), gt.sum(), len(lt), lt.sum())\n\t\tcollect.append(values)\n\treturn pd.DataFrame(collect, index=periods, \n\t\tcolumns=['num_profit', 'profit', 'num_loss', 'loss'])\n\ndef shuffled_drawdown(data, capital=1000):\n\t\"\"\"\n\tCalculate the shuffled drawdown for the given data\n\t\"\"\"\n\tnp.random.shuffle(data)\n\tcum_p = data.cumsum() + capital\n\tmax_p = np.maximum.accumulate(cum_p)\n\tdiff = (cum_p - max_p)/capital\n\treturn diff.min()\n\n\t","sub_path":"src/fastbt/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"458306946","text":"#!/usr/bin/python\n#coding:utf8\n \nimport cgi\nimport os\nimport sys\n#import RPi.GPIO as GPIO\n\n#html ='''Cotent-Type:text/html\n\n#\n#\n#button %s\n#\n#\n#'''\n\nos.system(\"gpio mode 3 out\")\nform = cgi.FieldStorage()\n\n#GPIO.setmode(GPIO.BCM)\n#GPIO.setup(22,GPIO.OUT)\n\n#if form.getfirst(\"btn1\"):\nif form[\"id\"].value == \"btn1\":\n\tbtn = \"1\"\n\tos.system(\"gpio write 3 1\")\n#GPIO.output(22, True)\n#elif form.getfirst(\"btn2\"):\nelif form[\"id\"].value == \"btn2\":\n\tbtn = \"2\"\n\tos.system(\"gpio write 3 0\")\n#\tGPIO.output(22, False)\n\n#print html % btn\n","sub_path":"cgi-bin/led.py","file_name":"led.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"507330342","text":"##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.0 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"File content component\n\n$Id$\n\"\"\"\nfrom zope.app.file.interfaces import IFile\nfrom zope.app.index.interfaces.text import ISearchableText\nfrom zope.interface import implements\n\n# XXX need a test here!\n\nclass SearchableText(object):\n    \"\"\"Make File objects searchable.\"\"\"\n\n    implements(ISearchableText)\n    __used_for__ = IFile\n\n    def __init__(self, file):\n        self.file = file\n\n    def getSearchableText(self):\n        if self.file.contentType == \"text/plain\":\n            return [unicode(self.file.data)]\n        else:\n            return None\n","sub_path":"Zope3/tags/ZopeX3-3.0.0a2/src/zope/app/file/textindex/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"259183181","text":"import shutil\r\nimport os\r\nimport time\r\n\r\ndef main():\r\n    path=\"path_to_delete\"\r\n    days=1\r\n    seconds=time.time()-(days*24*60*60)\r\n    folderCount=0\r\n    fileCount=0\r\n    isExist=os.path.exists(path)\r\n\r\n    if not isExist:\r\n        print(path,\" Not found\")\r\n        fileCount+=1\r\n    else:\r\n        for root,folders,files in os.walk(path):\r\n            \r\n            if seconds>=getAgeOfFile(root):\r\n                removeFolder(root)\r\n                folderCount+=1\r\n                break\r\n            else:\r\n                for folder in folders:\r\n                    fp=os.path.join(root,folder)\r\n                    if seconds>=getAgeOfFile(fp):\r\n                        removeFolder(fp)\r\n                        folderCount+=1\r\n                \r\n                for file in files:\r\n                    filePath=os.path.join(root,file)\r\n                    if seconds>=getAgeOfFile(filePath):\r\n                        removeFile(filePath)\r\n                        fileCount+=1\r\n        else:\r\n            if seconds>=getAgeOfFile(path):\r\n                removeFile(path)\r\n                fileCount+=1\r\n    print(\"Total Folders Deleted: \",folderCount)\r\n    print(\"Total Files Deleted: \",fileCount)\r\n\r\n\r\n\r\n\r\ndef getAgeOfFile(path):\r\n    ctime=os.stat(path).st_ctime\r\n    return ctime\r\n\r\ndef removeFolder(path):\r\n    if not shutil.rmtree(path):\r\n        print(path,\" removed successfully\")\r\n    \r\n    else:\r\n        print(\"Unable to delete the \",path)\r\n\r\ndef removeFile(path):\r\n    if not os.remove(path):\r\n        print(path,\" removed successfully\")\r\n    else:\r\n        print(\"Unable to delete the \",path)\r\n\r\n\r\nif __name__==\"__main__\":\r\n    main()","sub_path":"hwc99/Removefiles.py","file_name":"Removefiles.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"226839910","text":"import cv2\nimport sys\n\n'''define face_cascade variable'''\n'''load the classifier, add the path of cascade classifier which does classification'''\nface_cascade=cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\nvideo_capture = cv2.VideoCapture(0)\n\n'''frame for capturing frame by frame when video capture is on'''\nwhile True:\n    '''capture frame by frame'''\n\n    retval,frame = video_capture.read()\n\n    '''convert the captured image to grayscale'''\n\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n    '''detects specific features specified in the haarcascade by using these 3 parameters'''\n\n    faces = face_cascade.detectMultiScale(\n        gray,\n        scaleFactor=1.1,\n        minNeighbors=5,\n        minSize=(35, 35)\n\n    )\n\n    '''for each face we draw the rectangle for some 
height and width'''\n # Draw a rectangle around recognized faces and color for the box is specified as (50,50,200)\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x + w, y + h), (50, 50, 200), 2)\n\n # Display the resulting frame\n cv2.imshow('Video', frame)\n\n # Exit the camera view\n if cv2.waitKey(1) & 0xFF == ord('q'):\n sys.exit()\n\n\n\n\n","sub_path":"face_rec.py","file_name":"face_rec.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"518801171","text":"import hug\nimport modelo_bayesiano\nimport requests\nimport random\n\n\ndef mensajesInicioPregunta(requestM):\n mensajesInicioPreguntas = []\n for i in requestM.json():\n if(i[\"classMessage\"] == \"GenericoInicio\" and i[\"isQorA\"]\n == \"Pregunta\"):\n mensajesInicioPreguntas.append(i)\n return mensajesInicioPreguntas\n\n\ndef mensajesFinalesPregunta(requestM):\n mensajesFinalesPreguntas = []\n for i in requestM.json():\n if(i[\"classMessage\"] == \"GenericoFinal\" and i[\"isQorA\"]\n == \"Pregunta\"):\n mensajesFinalesPreguntas.append(i)\n return mensajesFinalesPreguntas\n\n\ndef mensajespreguntas(idpaciente, requestM, requestG, reputacionPaciente):\n Preguntas = []\n for j in requestG.json():\n if(j[\"pat\"] == idpaciente and j[\"state\"] == \"2\"):\n MInicio = mensajesInicioPregunta(requestM)\n MFinales = mensajesFinalesPregunta(requestM)\n MInicioKindorAcertive = []\n MFinalesKindorAcertive = []\n cont1 = 0\n cont2 = 0\n cont3 = 0\n cont4 = 0\n if(reputacionPaciente >= 0.40):\n for i in MInicio:\n if(i[\"typeMessage\"] == \"kind\"):\n MInicioKindorAcertive.append(MInicio[cont1])\n cont1 = cont1 + 1\n for i in MFinales:\n if(i[\"typeMessage\"] == \"kind\"):\n MFinalesKindorAcertive.append(MFinales[cont2])\n cont2 = cont2 + 1\n else:\n for i in MInicio:\n if(i[\"typeMessage\"] == \"assertive\"):\n MInicioKindorAcertive.append(MInicio[cont3])\n cont3 = cont3 + 1\n for i in MFinales:\n if(i[\"typeMessage\"] == \"assertive\"):\n MFinalesKindorAcertive.append(MFinales[cont4])\n cont4 = cont4 + 1\n if(len(MInicioKindorAcertive) != 0 and len(MFinales) != 0):\n Preguntas.append({\"pregunta\": MInicioKindorAcertive[random.randint(\n 0, len(MInicioKindorAcertive)-1)]\n [\"description\"]+\" \" + j[\"description\"]+\" \" +\n MFinalesKindorAcertive[random.randint(0,\n len(MFinalesKindorAcertive)\n - 1)]\n [\"description\"],\n \"idmeta\": j[\"_id\"]})\n return Preguntas\n\n\ndef mensajesRespuestaPositiva(requestM):\n mensajesRespuestasPositivas = []\n for i in requestM.json():\n if(i[\"classMessage\"] == \"GenericoInicio\" and i[\"isQorA\"]\n == \"RespuestaPositiva\"):\n mensajesRespuestasPositivas.append(i)\n return mensajesRespuestasPositivas\n\n\ndef mensajesRespuestaNegativa(requestM):\n mensajesRespuestasNegativas = []\n for i in requestM.json():\n if(i[\"classMessage\"] == \"GenericoInicio\" and i[\"isQorA\"]\n == \"RespuestaNegativa\"):\n mensajesRespuestasNegativas.append(i)\n return mensajesRespuestasNegativas\n\n\ndef mensajesBienvenida(requestM, reputacionDelModelo):\n mensajesBienvenidas = []\n if(reputacionDelModelo >= 0.40):\n for i in requestM.json():\n if(i[\"classMessage\"] == \"GenericoInicio\" and i[\"isQorA\"] == \"Saludos\" and\n i[\"typeMessage\"] == \"kind\"):\n mensajesBienvenidas.append(i)\n else:\n for i in requestM.json():\n if(i[\"classMessage\"] == \"GenericoInicio\" and i[\"isQorA\"] == \"Saludos\" and\n i[\"typeMessage\"] == \"assertive\"):\n mensajesBienvenidas.append(i)\n saludos = 
mensajesBienvenidas[random.randint(0, len(mensajesBienvenidas) - 1)]\n return saludos\n\n\ndef mensajesDespedida(requestM, reputacionDelModelo):\n mensajesDespedidas = []\n if(reputacionDelModelo >= 0.40):\n for i in requestM.json():\n if(i[\"classMessage\"] == \"GenericoInicio\" and i[\"isQorA\"] == \"Despedidas\" and\n i[\"typeMessage\"] == \"kind\"):\n mensajesDespedidas.append(i)\n else:\n for i in requestM.json():\n if(i[\"classMessage\"] == \"GenericoInicio\" and i[\"isQorA\"] == \"Despedidas\" and\n i[\"typeMessage\"] == \"assertive\"):\n mensajesDespedidas.append(i)\n despedida = mensajesDespedidas[random.randint(0, len(mensajesDespedidas) - 1)]\n return despedida\n\n\ndef mensajesRespuesta(idpaciente, requestM, requestG, reputacionDelModelo):\n RPositivas = []\n RNegativas = []\n for J in requestG.json():\n if(idpaciente == J[\"pat\"] and J[\"state\"] == \"2\"):\n mensajesrespuestapositiva = mensajesRespuestaPositiva(requestM)\n mensajesrespuestanegativa = mensajesRespuestaNegativa(requestM)\n MRPKindorAssertive = []\n MRNKindorAssertive = []\n if(reputacionDelModelo >= 0.40):\n for i in mensajesrespuestapositiva:\n if(i[\"typeMessage\"] == \"kind\"):\n MRPKindorAssertive.append(i)\n\n for i in mensajesrespuestanegativa:\n if(i[\"typeMessage\"] == \"kind\"):\n MRNKindorAssertive.append(i)\n\n else:\n for i in mensajesrespuestapositiva:\n if(i[\"typeMessage\"] == \"assertive\"):\n MRPKindorAssertive.append(i)\n\n for i in mensajesrespuestanegativa:\n if(i[\"typeMessage\"] == \"assertive\"):\n MRNKindorAssertive.append(i)\n if(len(MRPKindorAssertive) != 0):\n RPositivas.append({\"RespuestaPositiva\":\n MRPKindorAssertive[random.randint(0,\n len(MRPKindorAssertive)\n - 1)][\"description\"],\n \"idmeta\": J[\"_id\"]})\n if(len(MRNKindorAssertive) != 0):\n RNegativas.append({\"RespuestaNegativa\":\n MRNKindorAssertive[random.randint(0,\n len(MRNKindorAssertive)\n - 1)][\"description\"],\n \"idmeta\": J[\"_id\"]})\n\n return RPositivas, RNegativas\n\n\n@hug.get('/getmessages/{id}')\ndef messages(id: str):\n requestM = requests.get(\"https://api-rest-botic.herokuapp.com/api/messages\")\n responseG = requests.get(\"https://api-rest-botic.herokuapp.com/api/goals\")\n if(len(responseG.json()) != 0):\n reputacionP = modelo_bayesiano.getreputation(id)\n else:\n reputacionP = 0.40\n MPreguntas = mensajespreguntas(id, requestM, responseG, reputacionP)\n MRespuestasP, MRespuestasN = mensajesRespuesta(id, requestM, responseG, reputacionP)\n saludos = mensajesBienvenida(requestM, reputacionP)\n despedida = mensajesDespedida(requestM, reputacionP)\n return MPreguntas, MRespuestasN, MRespuestasP, saludos, despedida\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"254134322","text":"# Copyright (c) 2018 LAC Co.,Ltd.\r\n# All rights reserved.\r\n#\r\n# This software is released under the BSD License.\r\n# https://opensource.org/licenses/BSD-2-Clause\r\n\r\n# define constants\r\n\r\n# API key dictionary.\r\n# key: user account(mail address)\r\n# value: apikey\r\nMISP_APIKEYS = {\r\n\t'sample@user.email': 'authkey'\r\n}\r\n\r\n# MISP URL\r\nMISP_URL = 'https://example.misp'\r\n\r\n# distribution\r\n# your_organization = 0\r\n# this_community = 1\r\n# connected_communities = 2\r\n# all_communities = 3\r\n# sharing_group = 4\r\nDISTRIBUTION = '0'\r\n\r\n# threat level\r\n# high = 1\r\n# medium = 2\r\n# low = 3\r\n# undefined = 4\r\nTHREAT_LEVEL = '2'\r\n\r\n# 
analysis level\r\n# initial = 0 \r\n# ongoing = 1\r\n# completed = 2\r\nANALYSIS_LEVEL = '0'\r\n","sub_path":"const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"536144500","text":"#!/usr/bin/python3.4\n# -*- coding:utf-8 -*-\n\nimport urllib.request\nimport re\n\nuser_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'\n\n\ndef download(url):\n    print('Downloading {}'.format(url))\n    req = urllib.request.Request(url)\n    req.add_header('Referer', url)\n    req.add_header('User_agent', user_agent)\n    html = None\n    try:\n        response = urllib.request.urlopen(req)\n        html = response.read()\n    except urllib.error.HTTPError as e:\n        html = None\n        print(e.code)\n        print(e.read().decode(\"utf-8\"))\n        return None;\n    return html\n\ndef get_links(html):\n    \"\"\" Return a list of links from html\n    \"\"\"\n    webpage_regex = re.compile('<a[^>]+href=[\"\\'](.*?)[\"\\']', re.IGNORECASE)\n    return webpage_regex.findall(str(html))\n\ndef link_crawler(seed_url, link_regex):\n    crawl_queue = [seed_url]\n    seen = set(crawl_queue)\n    while crawl_queue:\n        url = crawl_queue.pop()\n        html = download(url)\n        for link in get_links(html):\n            if re.match(link_regex, link):\n                link = urllib.parse.urljoin(seed_url, link)\n                if link not in seen:\n                    seen.add(link)\n                    crawl_queue.append(link)\n\n\nif __name__ == \"__main__\":\n    url = \"https://www.runoob.com\"\n    url = \"http://example.webscraping.com/\"\n    html = download(url)\n    link_crawler(url, '/(index|view)')\n#    print(html.decode('utf-8'))\n\n\n","sub_path":"python/spider/simple/simple_regex.py","file_name":"simple_regex.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"291569656","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport matplotlib.pyplot as plt\n\nfrom framework.mongo import database\nfrom website import settings\n\nfrom utils import plot_dates, mkdirp\n\n\nnode_collection = database['node']\n\nFIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'features')\nmkdirp(FIG_PATH)\n\n\ndef main():\n    dates = [\n        record['date_created']\n        for record in node_collection.find(\n            {\n                'is_collection': True,\n                'is_bookmark_collection': {'$ne': True},\n            },\n            {'date_created': True},\n        )\n    ]\n    if not dates:\n        return\n    plot_dates(dates)\n    plt.title('folders ({0} total)'.format(len(dates)))\n    plt.savefig(os.path.join(FIG_PATH, 'folder-actions.png'))\n    plt.close()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"scripts/analytics/folders.py","file_name":"folders.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"454984384","text":"#!/usr/bin/python3\n\"\"\"\nThe Square class module\n\"\"\"\n\n\nfrom models.rectangle import Rectangle\n\n\nclass Square(Rectangle):\n    \"\"\"\n    The Square class\n    \"\"\"\n\n    def __init__(self, size, x=0, y=0, id=None):\n        \"\"\" width, height, x, y, id - from rectangle \"\"\"\n        super().__init__(size, size, x, y, id)\n\n    \"\"\" STR \"\"\"\n\n    def __str__(self):\n        \"\"\"\n        Returns the string version of the square\n        \"\"\"\n        return \"[Square] ({:d}) {:d}/{:d} - {:d}\".format(self.id, self.x,\n                                                         self.y, self.width)\n\n    \"\"\" SIZE \"\"\"\n    @property\n    def size(self):\n        \"\"\"\n        Returns the 
size of the square. Using width property from rectangle\n        \"\"\"\n        return self.width\n\n    @size.setter\n    def size(self, value):\n        \"\"\"\n        Setter for size. It tries to set the values of width and height from\n        rectangle. It has to pass their setter and getter checks to be\n        assigned.\n        \"\"\"\n        self.width = value\n        self.height = value\n\n    \"\"\" UPDATE \"\"\"\n\n    def update(self, *args, **kwargs):\n        \"\"\"\n        Update the square's properties by checking args first, then kwargs.\n        \"\"\"\n        if args and len(args) > 0:\n            for ct, arg in enumerate(args):\n                if ct == 0:\n                    self.id = arg\n                if ct == 1:\n                    self.height = arg\n                    self.width = arg\n                if ct == 2:\n                    self.x = arg\n                if ct == 3:\n                    self.y = arg\n        else:\n            if kwargs is not None:\n                for key, value in kwargs.items():\n                    if key == \"id\":\n                        self.id = value\n                    if key == \"size\":\n                        self.width = value\n                        self.height = value\n                    if key == \"x\":\n                        self.x = value\n                    if key == \"y\":\n                        self.y = value\n    \"\"\" DICTIONARY \"\"\"\n    def to_dictionary(self):\n        \"\"\"\n        Return the dictionary representation of the square\n        \"\"\"\n        dict = {}\n        dict[\"x\"] = self.x\n        dict[\"y\"] = self.y\n        dict[\"id\"] = self.id\n        dict[\"size\"] = self.height\n        return dict\n","sub_path":"0x0C-python-almost_a_circle/models/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"195994954","text":"#!/usr/bin/env python3\n\nimport csv\nimport os\nimport time\n\n\ndef find_max_price(datafile):\n    f = open(datafile)\n    dr = csv.DictReader(f, ['time', 'price', 'UNKNOWN'])  # NOQA\n    result = None\n    try:\n        price_BTC_highest = 0\n        time_BTC_highest = 0\n        for row in dr:\n            price = float(row['price'])\n            if price > price_BTC_highest:\n                price_BTC_highest = price\n                time_BTC_highest = time.gmtime(int(row['time']))\n        time_BTC_highest = time.strftime(\"%Y-%m-%d\", time_BTC_highest)\n        result = (time_BTC_highest, price_BTC_highest)\n        return result\n\n    finally:\n        f.close()\n\n    return\n\n\ndef solve():\n    '''Find the day the BTC price was highest. Return a tuple with the date\n    formatted as YYYY-mm-dd (e.g. 2017-06-19) and the VND price of 1 BTC\n    '''\n    # http://api.bitcoincharts.com/v1/csv/\n    datafile = 'localbtcVND.csv'\n    exdir = os.path.dirname(__file__)\n    datapath = os.path.join(exdir, datafile)\n    result = find_max_price(datapath)\n    return result\n\n\ndef main():\n    now = time.gmtime(int(time.time()))\n    print(now.tm_year, now.tm_mon, now.tm_mday)\n    print(solve())\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"pymi/exercises/ex6_3.py","file_name":"ex6_3.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"456873675","text":"import planner\nfrom node import Node\nfrom plot_path import *  # TA code\nfrom sys import argv\nfrom time import time\n\ndef main():\n    goal = Node('123456780')  # specify the puzzle's goal configuration\n    start = Node(argv[1])  # read the puzzle's initial configuration from the command line\n\n    if start.isSolvable():\n        t0 = time()\n        plan, *_ = planner.BFS(start, goal)  # solve the puzzle using brute force search\n        t1 = time() \n        print(f'Puzzle is solved in {t1-t0} s\\n')\n        logResults(plan)\n        visualizePath()\n    else:\n        print('Puzzle is unsolvable')\n\ndef logResults(plan):\n    with open('nodePath.txt', 'w') as file:\n        for node in plan:\n            tiles = restructure(node.state)\n            for i in range(9):\n                file.write(f'{tiles[i]} ')\n            file.write('\\n')\n\n    with open('plan.txt', 'w') as file:\n        file.write('State\\t\\tAction\\n')\n        for node in plan:\n            file.write(f'{node.state}\\t')\n            file.write(f'{node.action}\\n')\n\ndef restructure(state):\n    ''' column-wise representation of the state '''\n    state = list(state)\n    state[1], state[3] = state[3], state[1]\n    state[2], state[6] = state[6], state[2]\n    state[5], state[7] = state[7], state[5]\n\n    return state\n\nif __name__ == '__main__':\n    main()","sub_path":"puzzle.py","file_name":"puzzle.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"157750799","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\njam.py\n\nCreated by James DESTINE on 20-03-03.\nCopyright (c) 2020 Copyright Holder. 
All rights reserved.\n\"\"\"\n\nfrom flask import Flask, jsonify\nfrom flask import request, render_template\nimport requests\napp = Flask(__name__)\n\n\n@app.route('/index', methods=['GET', 'POST'])\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n    return render_template('index.html')\n    \n@app.route('/me') \ndef me():\n    link = \"http://data.fixer.io/api/latest?\" \\\n           \"access_key=1b304350ccec957819f26e4c2003f74e\"\n    req = requests.get(link+'&base=EUR')\n\n    euro = None\n    if req.status_code == 200:\n        euro = req.json()\n        return jsonify(euro)\n    return {}\n    ","sub_path":"jam.py","file_name":"jam.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"390285819","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport random\n\ndef win():\n    # copy in the code of the function with the same name from section 5\n    return\n    \ndef lose():\n    # copy in the code of the function with the same name from section 5\n    return\n    \ndef get_ch_table(line):\n    ch_table = []\n    for ch in line:\n        if ch not in ch_table:\n            ch_table.append(ch)\n    return ch_table\n\ndef idiom_robot(file_name):\n    with open(file_name) as fh:\n        text = fh.read()\n    idioms = text.split()\n    idiom = random.choice(idioms)\n    chs = get_ch_table(text.replace('\\n', ''))\n\n    guess_ch_table = [ch for ch in idiom]\n    while len(guess_ch_table) < 6:\n        ch = random.choice(chs)\n        if ch not in guess_ch_table:\n            guess_ch_table.append(ch)\n    \n    random.shuffle(guess_ch_table)\n    \n    for i in range(0,6,2):\n        print(guess_ch_table[i], guess_ch_table[i+1])\n    \n    return idiom\n\ndef main():\n    filename = r'd:\\\temp\\\idioms_correct.txt'\n    score = 10\n    while score >= 0:\n        real_idiom = idiom_robot(filename)\n        answer_idiom = input('Enter your guess for the idiom (press Enter on an empty line to quit): ')\n        if answer_idiom == real_idiom:\n            print('Correct, +10 points')\n            score += 10\n            print('Your current score is:', score)\n            if score == 100:\n                win()\n                return\n        elif answer_idiom == '':\n            print('Exiting the game.')\n            print('Your final score is:', score)\n            return\n        else:\n            score -= 10\n            print('Wrong, -10 points')\n            print('The idiom actually was:', real_idiom)\n            print('Your current score is:', score)\n    else:\n        lose()\n        return\n\nif __name__ == '__main__':\n    main()\n","sub_path":"8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"123158868","text":"from __future__ import print_function\n#===========================================================\n# this module cuts the density maps around the ligands/peptide.\n#===========================================================\n\nfrom builtins import range\nimport os,sys, math,shutil\nimport config, util, tls\nimport cifparse as cif\n\n\n##########################################################\ndef cut_map_around_ligand_peptide(dccfile, dic, mapfile_in, xyzfile_in):\n    '''It generates a complete set for the ligand (map, html, jmol).\n    dccfile: the density file by dcc.\n    dic: a dictionary holding all the files for the webpage (url below).\n    mapfile_in: an input map file.\n    xyzfile_in: an input coordinate file.\n    '''\n    \n    print('Cutting the density maps for ligands/peptide')\n    \n    tmpxyz=xyzfile_in\n    if util.is_cif(xyzfile_in): tmpxyz= cif.cif2pdb(xyzfile_in)\n    pdbfile = os.path.basename(dic['pdbfile']) + '_new'\n    if pdbfile !=tmpxyz : shutil.copy(tmpxyz,pdbfile) \n    \n    mapfile=os.path.basename(dic['pdbfile']) + '_2fofc.map'\n    if dic['ligmapcif'] : mapfile=dic['xyzfile_orig'] + '_2fofc.map'\n    shutil.move(mapfile_in, mapfile)\n\n    if dic['ligmapcif'] : #pre-parse the cif file.\n        dic['cif']=1\n        \n        ciffile=dic['xyzfile_orig']\n\n        flist=open(ciffile, 'r').readlines()\n        cell_items,values = 
cif.cifparse(flist, '_cell.')\n cell=cif.get_rows(cell_items, values)\n dic['cell_items'], dic['lig_cell']=cell_items, cell \n\n sym_items,values = cif.cifparse(flist, '_symmetry.')\n sym=cif.get_rows(sym_items, values)\n dic['sym_items'], dic['lig_sym']=sym_items, sym\n \n items,values = cif.cifparse(flist, '_atom_site.')\n comp=cif.parse_values(items,values,\"_atom_site.auth_comp_id\")\n asym=cif.parse_values(items,values,\"_atom_site.auth_asym_id\")\n seq=cif.parse_values(items,values,\"_atom_site.auth_seq_id\")\n alt=cif.parse_values(items,values,\"_atom_site.label_alt_id\")\n ins=cif.parse_values(items,values,\"_atom_site.pdbx_PDB_ins_code\");\n mod=cif.parse_values(items,values,\"_atom_site.pdbx_PDB_model_num\")\n row=cif.get_rows(items, values)\n \n dic['items'], dic['comp1'],dic['asym'],dic['seq']=items,comp,asym,seq\n dic['alt'],dic['ins'],dic['mod'],dic['row']=alt,ins,mod,row\n\n fw_itool=open('LIG_PEPTIDE.cif','w') #a cif file contains table, filenames\n fw_itool.write('data_lig_peptide\\n')\n fw_itool.write('\\n# A \"!\" will be given if the residue is bad with real_space_R.\\n')\n fw_itool.write('\\n# Criteria: (CC<0.7 and R>0.4) or CC<0.5 or R>0.5\\n')\n\n url='http://sf-tool.wwpdb.org/users_data/dir_%s/' %dic['dir']\n #url=os.environ['THIS_SERVICE_URL__FIX_ME'] + '/users_data/dir_%s/' %dic['dir']\n\n ch_pep,chr_pep, ch_lig,chr_lig, ch_wat,chr_wat=tls.chain_res_range(pdbfile)\n\n ligpdb=non_poly_pdb(ch_pep, ch_lig, pdbfile) #get non-poly xyz file\n dcc=get_dcc(dccfile) #get a list for dcc of each residue\n\n if not dcc:\n util.perror('Warning: Failed to parse EDS values! No ligand/peptide maps will be generated. ')\n\n for k, v in ch_pep.items(): \n if len(v)< 15: #length of peptide\n if not dic['sdsc_map'] : map_around_peptide(fw_itool,dic, mapfile, ligpdb, dcc, ch_pep, url)\n break\n\n if ch_lig : map_around_ligand(fw_itool,dic, mapfile, ligpdb, dcc, ch_lig, url)\n\n get_html_table_baddcc_general(mapfile, dcc) #for polymer/lig/peptide\n \n fw_itool.close()\n\n if dic['sdsc_map'] :\n arg='rm -f %s %s %s LIG_PEPTIDE.cif ' %(mapfile, mapfile_in, xyzfile_in)\n arg=arg + ' %s_rcc_sum.cif.mtz %s_2fofc.map_all.html ' %(dic['pdbfile'], dic['pdbfile'])\n os.system(arg)\n\n# util.delete_file(pdbfile) \n return\n\n##########################################################\ndef non_poly_pdb(ch_pep, ch_lig, pdbfile):\n '''get the xyz for peptides and ligands\n ch_lig={'ch':[...], }; ch_pep={'ch':[...], }\n '''\n\n ligpdb=[]\n pdb=open(pdbfile, 'r').readlines()\n \n for x in pdb: #\n if ('CRYST1' in x[:6] or 'SCALE' in x[:5]) :\n ligpdb.append(x)\n if ('ATOM' in x[:4] or 'HETATM' in x[:6]) : break\n \n \n for k, v in ch_pep.items(): #k=chain, v=list of resi number\n if len(v)>= 15: continue #length of peptide\n for x in pdb:\n if (('ATOM' in x[:4] or 'HETATM' in x[:6]) and\n k==x[20:22].strip() and int(x[22:26]) in v):\n ligpdb.append(x)\n\n for k, v in ch_lig.items(): \n for x in pdb:\n if (('ATOM' in x[:4] or 'HETATM' in x[:6]) and\n k==x[20:22].strip() and int(x[22:26]) in v):\n ligpdb.append(x)\n\n return ligpdb\n\n##########################################################\ndef map_around_compound(mapfile, coord, compid):\n '''cut the ASU map to the residue level.\n compid: model_compound_chainID_resnumber_alt_insertion.\n mapfile: the CCP4 map in ASU.\n coord: the coordinate file for the map (in cif/pdb)\n '''\n\n if (not util.check_file(100, mapfile) or not util.check_file(100,coord)):\n err='Error: Either mapfile or coordinate file does not exist.'\n config.ERRLOG.append(err)\n 
print(err)\n return\n \n \n xyzlim,xyzcomp=find_xyzlim_compound(compid, coord) \n \n if(len(xyzlim.strip())<2):\n err='Error: compound boundary in fraction not found. check with compound ID'\n config.ERRLOG.append(err)\n print(err)\n return\n\n# below is cut map and get jmol\n t=compid.split('_')\n comp='_'.join([t[0], t[1], t[2]])\n mapout = comp + '_cut.map'\n maphtml = comp + '.html'\n \n mapscr=cut_map_bylimit(xyzlim)\n util.delete_file(mapout)\n arg = mapfile + ' ' + ' ' + mapout\n command=\"chmod +x %s ; ./%s \" %(mapscr, mapscr) + arg \n os.system(command)\n\n min,max,mean,sigma=map_info(mapfile)\n min1,max1,mean1,sigma1=map_info(mapout) \n cont={'0.5':0.5, '0.7':0.7, '1.0':1.0, '1.5':1.5, '2.0':2.0} #contour 4 map in asu.\n cont1=cont #contour 4 sub map.\n\n scale=1.0\n if(float(sigma1)>0) :\n scale = float(sigma)/float(sigma1)\n for z in list(cont.keys()): cont1[z]=cont[z]*scale\n\n maphtml = get_html4jmol(comp, xyzcomp, mapout, cont1)\n\n return maphtml , mapout\n \n##########################################################\ndef find_xyzlim_compound(compid, coord):\n '''find xyzlimit used by mapmask, and write the coord in cif or pdb format.\n compid: atom_group_id (model_compound_chainID_resnumber_alter_insertion)\n coord: the coordinate file\n idd = 0, cif format; =1, the pdb format\n '''\n\n comp='XXXX'\n t1=compid.split(':')\n for i,x in enumerate(t1):\n t=x.split('_')\n if i==0: comp='_'.join([t[0], t[1], t[2], t[3]])\n \n if len(t)!=6:\n print('Error: in group-id (%d). it should be (model_compound_chainID_resnumber_alter_insertion).' %(i+1))\n return '',''\n\n idd=util.is_cif(coord)\n xyzcomp=comp + '.pdb'\n if idd==1: xyzcomp=comp + '.cif'\n \n fw=open(xyzcomp, 'w')\n\n border=1 #extend a little to cover more density \n xx,yy,zz=[],[],[] \n if idd==1: #input cif format\n fw.write('data_xyzcomp\\n#\\n')\n\n flist=open(coord, 'r').readlines()\n items,values = cif.cifparse(flist, '_cell.')\n fw.write('\\n#\\n')\n for m, p in enumerate (items): fw.write(\"%s %s\\n\" %(p, values[m]))\n\n cell=cif.get_cell(flist)\n \n items,values = cif.cifparse(flist, '_atom_site.')\n comp=cif.parse_values(items,values,\"_atom_site.auth_comp_id\");\n asym=cif.parse_values(items,values,\"_atom_site.auth_asym_id\");\n seq=cif.parse_values(items,values,\"_atom_site.auth_seq_id\");\n alt=cif.parse_values(items,values,\"_atom_site.label_alt_id\");\n ins=cif.parse_values(items,values,\"_atom_site.pdbx_PDB_ins_code\");\n x=cif.parse_values(items,values,\"_atom_site.Cartn_x\");\n y=cif.parse_values(items,values,\"_atom_site.Cartn_y\");\n z=cif.parse_values(items,values,\"_atom_site.Cartn_z\");\n model=cif.parse_values(items,values,\"_atom_site.pdbx_PDB_model_num\");\n\n if(not (alt and comp and ins and asym and seq and x and y and z)): \n print('Error: not enough infor. extraced from (%s). Check ciftokens'%coord)\n sys.exit()\n \n fw.write('\\n#\\nloop_\\n')\n for p in items: fw.write(\"%s\\n\" %p)\n row=cif.get_rows(items, values)\n\n for i in range(len(x)):\n alter, inst, mod ='.', '.', '1'\n if model and util.is_number(model[i]): mod=model[i]\n if alt and alt[i] != '?' : alter = alt[i]\n if ins and ins[i] != '?' 
: inst = ins[i]\n \n id1='_'.join([mod, comp[i], asym[i], seq[i], alter, inst])\n \n if id1 in compid:\n xx.append(float(x[i]))\n yy.append(float(y[i]))\n zz.append(float(z[i]))\n \n for m in row[i] : fw.write(\"%s \" %m)\n fw.write('\\n')\n\n \n else: #pdb format\n fp=open(coord,'r')\n for x1 in fp:\n \n if ('CRYST1' in x1[:6] ):\n fw.write(x1)\n cell=[float(p) for p in x1[8:54].split()]\n \n elif ('ATOM' in x1[:4] or 'HETATM' in x1[:6] ):\n alt=x1[16:17]\n if alt.isspace() : alt='.'\n ins=x1[26:27]\n if ins.isspace() : ins='.'\n resname, chid, resnum = x1[17:20].strip(), x1[20:22].strip(), x1[22:26].strip()\n resid='_'.join([resname, chid, resnum, alt, ins])\n \n if resid in compid:\n fw.write(x1) #only write the selected section \n xx.append(float(x1[30:38]))\n yy.append(float(x1[38:46]))\n zz.append(float(x1[46:54]))\n fp.close()\n\n \n if not xx or not yy or not zz :\n print('Error: %s can not be found in the coordinate. try a new id. ' %(compid))\n return '',''\n \n frac,orth=util.frac_orth_matrix(cell) #get matrix\n border=2.0\n xx_min,xx_max =min(xx)-border, max(xx)+border\n yy_min,yy_max =min(yy)-border, max(yy)+border\n zz_min,zz_max =min(zz)-border, max(zz)+border\n \n xf_min = util.matrix_prod(frac,[xx_min,yy_min,zz_min])\n xf_max = util.matrix_prod(frac,[xx_max,yy_max,zz_max])\n \n xyzlim='%.3f %.3f %.3f %.3f %.3f %.3f' %(xf_min[0],xf_max[0], xf_min[1],xf_max[1],xf_min[2],xf_max[2])\n\n fw.close()\n return xyzlim, xyzcomp\n\n##########################################################\ndef parse_xyz_compound(compid, coord, idd):\n '''generate a file used by mapmask\n '''\n \n compdb=compid.split(':')[0] + '.pdb'\n fw=open(compdb,'w')\n \n if idd==0:\n tmpfile = cif.cif2pdb(coord)\n fp=open(tmpfile,'r')\n os.remove(tmpfile)\n else:\n fp=open(coord,'r')\n \n for x in fp:\n\n if ('CRYST1' in x[:6] or 'SCALE' in x[:5]):\n fw.write(x)\n \n elif ('ATOM' in x[:4] or 'HETATM' in x[:6] ):\n alt=x[16:17]\n if alt.isspace() : alt='.'\n ins=x[26:27]\n if ins.isspace() : ins='.'\n resname, chid, resnum = x[17:20].strip(), x[20:22].strip(), x[22:26].strip()\n resid='_'.join([resname, chid, resnum, alt, ins])\n if resid in compid: fw.write(x)\n \n \n fp.close() \n fw.close()\n \n return compdb\n\n##########################################################\ndef remove_ligand(pdbfile):\n '''\n '''\n \n newpdb='%s_NOLIG' %pdbfile\n fr=open(pdbfile, 'r')\n fw=open(newpdb, 'w')\n \n ch_pep,chr_pep,ch_lig,chr_lig,ch_wat,chr_wat=tls.chain_res_range(pdbfile)\n\n for x in fr:\n if (('ATOM' in x[:4] or 'HETA' in x[:4] or 'ANISOU' in x[:6]) ):\n ch=x[20:22].strip()\n nres=int(x[22:26])\n if ch in ch_lig and nres in ch_lig[ch]: continue\n fw.write(x)\n\n fw.close()\n fr.close()\n# shutil.move(newpdb,pdbfile)\n return newpdb\n \n##########################################################\ndef get_html_table_baddcc_general(mapfile, dcc):\n '''\n '''\n \n if not dcc: return\n \n html_table = mapfile + '_all.html'\n fw=open(html_table, 'w')\n \n fw.write(' \\n ')\n fw.write(\"\\n\")\n \n html_table_head(fw, 'bad density correlation/real space R factor', 0)\n for x in dcc:\n \n if ((float(x[4])<0.7 and float(x[5])>0.4) or\n float(x[4])<0.5 or float(x[5])>0.5):\n tmp=x[3]\n if '.' in x[3]: tmp='_'\n idd=x[1] + x[0] + tmp\n ss=[x[2], idd, x[4], x[5], x[6], x[7]]\n s1=' '\n for y in ss: s1= s1 + '
' %y\n all = '' + s1 + '\\n'\n fw.write(all)\n\n \n fw.write(' %s \\n \\n')\n\n ss= ' Go Back
\\n \\n'\n# fw.write(ss)\n \n fw.close()\n return\n \n##########################################################\ndef get_dcc(dccfile):\n '''put dccfile as list of list\n '''\n\n dcc=[]\n if(util.check_file(100, dccfile)==0): return dcc\n flist=open(dccfile, 'r').readlines()\n items,values = cif.cifparse(flist, '_pdbx_rscc_mapman.')\n nseq=cif.parse_values(items,values,\"_pdbx_rscc_mapman.auth_seq_id\");\n chid=cif.parse_values(items,values,\"_pdbx_rscc_mapman.auth_asym_id\");\n comp=cif.parse_values(items,values,\"_pdbx_rscc_mapman.auth_comp_id\");\n alt=cif.parse_values(items,values,\"_pdbx_rscc_mapman.label_alt_id\");\n ins=cif.parse_values(items,values,\"_pdbx_rscc_mapman.label_ins_code\");\n cc=cif.parse_values(items,values,\"_pdbx_rscc_mapman.correlation\");\n rsr=cif.parse_values(items,values,\"_pdbx_rscc_mapman.real_space_R\");\n zrsr=cif.parse_values(items,values,\"_pdbx_rscc_mapman.real_space_Zscore\");\n biso=cif.parse_values(items,values,\"_pdbx_rscc_mapman.Biso_mean\");\n occ=cif.parse_values(items,values,\"_pdbx_rscc_mapman.occupancy_mean\");\n #model=cif.parse_values(items,values,\"_pdbx_rscc_mapman.model_id\");\n pdbid=cif.parse_values(items,values,\"_pdbx_rscc_mapman.pdb_id\");\n if not items: return dcc \n for i in range(len(chid)):\n a=[nseq[i], chid[i], comp[i], alt[i], cc[i], rsr[i], biso[i], occ[i],pdbid[i]]\n dcc.append(a)\n return dcc\n\n##########################################################\ndef html_table_head(fw, title, idd):\n ''' title: name in caption; idd==0, do not give map\n '''\n \n map1 = ' electron desity map '\n if idd==0: map1 = ''\n fw.write('
\\n' )\n fw.write(' \\n' %title )\n fw.write(' \\\n %s \\n' %map1 )\n \n##########################################################\ndef html_table_content(fw, idd, y, url, maphtml):\n '''Write a row in the table of HTML.\n idd: number, y: resid, id, rho, dcc, biso, occ\n '''\n warn=' . '\n\n if float(y[2])< -9.0:\n t='Warning: resid=%s_%s: No real_spaceR calculated (maybe 0 occupancy)\\n' %(y[0], y[1])\n util.perror(t)\n elif ((float(y[2])<0.7 and float(y[3])>0.4) or float(y[2])<0.5 or float(y[3])>0.5):\n t='Warning: resid=%s_%s: bad real_spaceR (%s) or density correlation (%s)\\n' %(y[0], y[1], y[3], y[2])\n config.ERRLOG.append(t)\n warn=' ! '\n \n \n ss=' '\n if len(y)<4: return\n for z in y:\n if ((float(y[2])<0.7 and float(y[3])>0.4) or\n float(y[2])<0.5 or float(y[3])>0.5):\n ss= ss + ' ' %z\n else:\n ss= ss + '' %z\n\n if idd ==0:\n map1='view map' %(url,maphtml)\n ss=ss+ '' %map1\n else:\n ss=ss+ ' '\n\n all = '' + ss + '\\n'\n fw.write(all)\n\n return warn\n#\n\n\n##########################################################\ndef get_subpdb(pdb, k, v, idd):\n '''pick the atoms in pdb (a list) for chain k in residue number v (a list)\n '''\n\n peppdb='%s.pdb' %(idd)\n fwt = open(peppdb, 'w')\n\n natom=0\n for x in pdb:\n if ('CRYST1' in x[:6] or 'SCALE' in x[:5]):\n fwt.write(x)\n elif (('ATOM' in x[:6] or 'HETATM' in x[:6]) and\n k==x[20:22].strip() and int(x[22:26]) in v):\n fwt.write(x)\n natom=natom+1\n \n fwt.close()\n if natom <2 : util.delete_file(peppdb) # ion \n return natom, peppdb\n\n##########################################################\ndef get_subcif(dic, k, v):\n '''k is chainID; v is list of residue number\n return idd, natom, ciffile name\n '''\n '''\n ciffile=dic['pdbfile']\n \n flist=open(ciffile, 'r').readlines()\n\n cell_items,values = cif.cifparse(flist, '_cell.')\n cell=cif.get_rows(cell_items, values)\n\n sym_items,values = cif.cifparse(flist, '_symmetry.')\n sym=cif.get_rows(sym_items, values)\n\n \n items,values = cif.cifparse(flist, '_atom_site.')\n comp=cif.parse_values(items,values,\"_atom_site.auth_comp_id\")\n asym=cif.parse_values(items,values,\"_atom_site.auth_asym_id\")\n seq=cif.parse_values(items,values,\"_atom_site.auth_seq_id\")\n alt=cif.parse_values(items,values,\"_atom_site.label_alt_id\")\n ins=cif.parse_values(items,values,\"_atom_site.pdbx_PDB_ins_code\");\n mod=cif.parse_values(items,values,\"_atom_site.pdbx_PDB_model_num\") \n row=cif.get_rows(items, values)\n \n '''\n cell_items, cell =dic['cell_items'], dic['lig_cell']\n sym_items, sym = dic['sym_items'], dic['lig_sym']\n items,comp,asym,seq,alt,ins,mod,row=dic['items'], dic['comp1'],dic['asym'],dic['seq'],dic['alt'],dic['ins'],dic['mod'],dic['row'] \n\n idd, natom, atom='1_X_X_X__', 0, []\n\n for i in range(len(asym)):\n if asym and asym[i]==k and seq and int(seq[i]) in v:\n natom=natom+1\n #print(i, natom, k, v[0],asym[i], seq[i], comp[i])\n alter, inst = '',''\n if alt and alt[i] != '?' and alt[i] != '.' : alter=alt[i]\n if ins and ins[i] != '?' and ins[i] != '.' 
: inst=ins[i]\n atom.append(row[i])\n if natom==1 and mod and comp:\n \n idd='_'.join([mod[i], asym[i], comp[i], seq[i], inst, alter])\n\n \n subpep='%s.cif' %(idd) #write subcif name\n fw = open(subpep, 'w')\n\n fw.write('data_%s\\n#\\n' %idd)\n for i, p in enumerate(cell_items):\n if ' ' in cell[0][i]:\n fw.write(\"%s '%s'\\n\" %(p, cell[0][i]))\n else:\n fw.write(\"%s %s\\n\" %(p, cell[0][i]))\n \n fw.write('#\\n')\n for i, p in enumerate(sym_items):\n if ' ' in sym[0][i]:\n fw.write(\"%s '%s'\\n\" %(p, sym[0][i]))\n else:\n fw.write(\"%s %s\\n\" %(p, sym[0][i]))\n\n \n fw.write('\\n#\\nloop_\\n')\n for p in items: fw.write(\"%s\\n\" %p)\n for x in atom:\n for m in x: fw.write(\"%s \" %m)\n fw.write('\\n')\n \n fw.close()\n if natom <2 : util.delete_file(subpep) # ion \n return idd, natom, subpep\n \n##########################################################\ndef map_around_peptide(fw_itool,dic, mapfile, pdb, dcc, ch_pep, url):\n '''mapfile: the map in cell; pdb: a list; dcc: a list of list; \n ch_pep: a dict for peptide {'ch': [n1,n2 ..]}; url: the url for html\n '''\n \n if not ch_pep: return\n\n cif1='''\nloop_\n_dcc_peptide.id\n_dcc_peptide.residue_name\n_dcc_peptide.chain_id\n_dcc_peptide.dcc_correlation \n_dcc_peptide.real_space_R\n_dcc_peptide.Biso_mean \n_dcc_peptide.occupancy_mean\n_dcc_peptide.warning\n_dcc_peptide.file_name_map_html\n_dcc_peptide.file_name_pdb\n_dcc_peptide.file_name_map\n_dcc_peptide.file_name_jmol\n_dcc_peptide.full_map_sigma\n_dcc_peptide.sub_map_sigma\n'''\n fw_itool.write(cif1)\n \n html_table = mapfile + '_pep.html'\n fw=open(html_table, 'w')\n html_table_head(fw, 'Peptides (or nucleic acid)', 1)\n\n min, max,mean,sigma=map_info(mapfile)\n cont={'0.5':0.5, '0.7':0.7, '1.0':1.0, '1.5':1.5, '2.0':2.0} #contour 4 map in asu.\n cont1=cont #contour 4 sub map.\n \n pepid=0\n for k, v in ch_pep.items(): #k: chainID; v: a list of residue number\n if len(v)> 15: continue\n \n pep=get_table_value(pdb,k, v, dcc)\n idd=pep[0][1] #ligid_alt_chid_resn\n\n if dic['cif'] :\n idd, natom, peppdb=get_subcif(dic, k, v)\n else:\n natom, peppdb=get_subpdb(pdb, k, v, idd)\n \n if natom<2 : continue\n\n mapout = cut_map_around_xyz(mapfile, peppdb, idd)\n min1, max1,mean1,sigma1=map_info(mapout)\n print('%s: natom=%d: FullMap-sigma=%s: PepMap-sigma=%s'%(idd,natom,sigma,sigma1)) \n\n scale=1.0\n if(float(sigma1)>0) :\n scale = float(sigma)/float(sigma1)\n for z in list(cont.keys()): cont1[z]=cont[z]*scale\n \n maphtml = get_html4jmol(idd, peppdb, mapout, cont1)\n \n\n pepid=pepid+1\n pp=pep[0][1]\n filename =' %s.html %s.pdb %s_cut.map %s_com %s %s' %(pp, pp, pp, pp, sigma, sigma1)\n if dic['cif'] :\n filename =' %s.html %s.cif %s_cut.map %s_com %s %s' %(idd, idd, idd, idd, sigma, sigma1)\n \n for i,y in enumerate(pep):\n warn=html_table_content(fw, i, y, url, maphtml) \n ss= '%d '%pepid + ' '.join(y) + warn + filename + '\\n'\n fw_itool.write('%s' %ss)\n \n fw.write('
Summary of %s Name ID density correlation real_space_R Biso Occ %s%s%s
\\n')\n fw.close()\n return\n \n\n##########################################################\ndef map_around_ligand(fw_itool, dic, mapfile, pdb, dcc, ch_lig, url):\n '''mapfile: the map in cell; pdb: a list (non-poly); dcc: a list of list;\n ch_lig: a dict for ligand {'ch': [n1,n2 ..]}; url: the url for html\n '''\n \n if (not ch_lig) or (not dcc) : return\n\n cif1='''\nloop_\n_dcc_ligand.id\n_dcc_ligand.residue_name\n_dcc_ligand.chain_id\n_dcc_ligand.dcc_correlation \n_dcc_ligand.real_space_R\n_dcc_ligand.Biso_mean \n_dcc_ligand.occupancy_mean\n_dcc_ligand.warning\n_dcc_ligand.file_name_map_html\n_dcc_ligand.file_name_pdb\n_dcc_ligand.file_name_map\n_dcc_ligand.file_name_jmol\n_dcc_ligand.full_map_sigma\n_dcc_ligand.sub_map_sigma\n'''\n \n if not dic['sdsc_map'] : \n fw_itool.write(cif1)\n html_table = mapfile + '_lig.html'\n fw=open(html_table, 'w')\n html_table_head(fw, 'Ligands', 1)\n \n min, max,mean,sigma=map_info(mapfile)\n cont={'0.5':0.5, '0.7':0.7, '1.0':1.0, '1.5':1.5, '2.0':2.0} #contour 4 map in asu.\n cont1={'0.5':0.5, '0.7':0.7, '1.0':1.0, '1.5':1.5, '2.0':2.0} #contour 4 sub map.\n contlist=['0.5','0.7', '1.0', '1.5', '2.0']\n lig_sdsc, level_sdsc, jmol_sdsc=[],[],[]\n\n nlig, ncov=0,0\n for k, v in ch_lig.items(): #k: chainID; v: a list of residue number\n \n nres_list = isolate_connect_ligand(pdb, k, v) \n ligid=0\n for ii, x in enumerate(nres_list):\n \n pep=get_table_value(pdb,k, x, dcc) #look through dcc table natom>=2\n \n if not pep : continue\n \n if dic['cif'] :\n idd, natom, ligpdb=get_subcif(dic, k, x)\n else:\n idd=pep[0][1] #ligid_alt_chid_resn\n natom, ligpdb=get_subpdb(pdb, k, x, idd)\n\n if natom<2 : continue\n \n mapout = cut_map_around_xyz(mapfile, ligpdb, idd)\n min1,max1,mean1,sigma1=map_info(mapout) #for \n print('%s: natom=%d: FullMap-sigma=%s: LigMap-sigma=%s'%(idd,natom,sigma,sigma1)) \n if len(x)>1: # exist of covelently bonded ligands\n ncov=ncov+1\n s1=[]\n for z in pep: s1.append(z[1])\n ss='\",\"'.join(s1)\n \n sss=' {\"id\":\"composite_%d\",\"ligands\":[\"'%ncov +ss + '\"]},' \n else:\n sss=' {\"id\":\"' + pep[0][1] + '\",\"ligands\":[\"' + pep[0][1] + '\"]},'\n \n lig_sdsc.append([sss])\n scale=1.0\n if(float(sigma1)>0) :\n scale = float(sigma)/float(sigma1)\n for z in list(cont.keys()): cont1[z]=cont[z]*scale\n else:\n util.perror('Warning: Negative sigma scale, possible no map cut. Check needed.') \n \n if dic['sdsc_map'] : \n level, jmol=gen_ligmap_sdsc(idd, contlist, cont1, cont)\n level_sdsc.append(level)\n jmol_sdsc.append(jmol)\n continue\n\n maphtml = get_html4jmol(idd, ligpdb, mapout, cont1)\n \n ligid=ligid + 1\n filename =' %s.html %s.pdb %s_cut.map %s_com %s %s' %(idd,idd,idd,idd, sigma,sigma1)\n if dic['cif'] :\n filename =' %s.html %s.cif %s_cut.map %s_com %s %s' %(idd,idd,idd,idd,sigma,sigma1)\n \n for i,y in enumerate(pep):\n warn=html_table_content(fw, i, y, url, maphtml)\n ss= '%d '%ligid + ' '.join(y) + warn + filename + '\\n'\n fw_itool.write('%s' %ss)\n nlig=nlig+1\n\n if not dic['sdsc_map'] and nlig==0 : fw_itool.write('? ? ? ? ? ? ? ? ? ? ? ? ? ? 
\\n')\n \n if dic['sdsc_map'] >0:\n fw=open('ERF_table.json', 'w')\n if len(level_sdsc)<=0:\n fw.close()\n return\n fw.write('{\\n \"components\":[\\n')\n write_sdsc_map(fw,lig_sdsc)\n fw.write(' ],\\n')\n\n fw.write('\\n \"ligmap\":[\\n')\n write_sdsc_map(fw,level_sdsc)\n fw.write(' ],\\n')\n\n fw.write('\\n \"contour_level\":[\\n')\n write_sdsc_map(fw,jmol_sdsc)\n fw.write(' ]\\n}\\n')\n \n fw.close()\n \n else:\n fw.write('\\n')\n fw.close()\n\n##########################################################\ndef write_sdsc_map(fw,alist):\n '''remove the last comma\n '''\n all_1=[]\n for x in alist :\n for y in x:\n all_1.append(y.rstrip())\n \n all_1[-1]=all_1[-1][0:-1] \n for x in all_1 : fw.write('%s\\n'%x)\n \n \n##########################################################\ndef isolate_connect_ligand(pdb, k, v):\n '''separate isolated and connected ligands\n pdb: the list of coordinate (all atoms)\n k: the chainID; v: the list of residue numbers. \n '''\n \n if len(v)==1: return [v]\n\n idd = '%s_%s_all' %(k, v[0])\n natom, pdb_lig=get_subpdb(pdb, k, v, idd)\n\n tmp=[v[0]]\n for i,x in enumerate(v):\n if i==0 : continue\n n1, n2 = i-1, i\n nc=connect(pdb_lig, n1, n2, k, v)\n if not nc: tmp.append(99999)\n tmp.append(x)\n \n ss='' \n for x in tmp: ss = ss + ' %d' %x\n t1=(ss.split('99999'))\n nres_list=[]\n for x in t1:\n tt=x.split()\n nres_list.append([int(i) for i in tt])\n \n util.delete_file(pdb_lig)\n# print(nres_list)\n return nres_list\n\n##########################################################\ndef get_table_value(pdb, ch, nres, dcc):\n '''dcc is a list of list; nres is a list of residue number\n '''\n\n \n pep=[]\n for x in nres: #residue number\n for y in dcc:\n if ch == y[1] and x == int(y[0]):\n \n natom=0 #\n for z in pdb:\n if (('ATOM' in z[:6] or 'HETATM' in z[:6]) and\n ch==z[20:22].strip() and x==int(z[22:26])):\n natom=natom+1\n if natom<2: continue\n \n s='%s_%s_%s_%d' %(y[2], y[3],y[1], x)\n pep.append([y[2], s, y[4],y[5],y[6],y[7]])\n break\n\n return pep\n\n##########################################################\ndef connect(pdb_ligfile, n1, n2, k, dic):\n '''check if residue n1 and n2 is connected. k is chainID\n pdb_lig is a pdb file\n \n '''\n \n cutoff=1.6\n \n if not os.path.exists(pdb_ligfile) or os.path.getsize(pdb_ligfile)<10 : return \n# if util.check_file(30, pdb_ligfile)==0: return\n \n pdb_lig=open(pdb_ligfile, 'r').readlines()\n data1, data2 = [], []\n for x in pdb_lig:\n if k==x[20:22].strip() :\n if dic[n1]==int(x[22:26]):\n data1.append(x)\n elif dic[n2]==int(x[22:26]):\n data2.append(x)\n conn=0 \n if data1 and data2:\n for x in data1:\n if ('ATOM' not in x[:6] and 'HETATM' not in x[:6]): continue\n x1,y1,z1=float(x[28:38]), float(x[38:46]), float(x[46:54])\n \n for y in data2:\n if ('ATOM' not in y[:6] and 'HETATM' not in y[:6]): continue\n x2,y2,z2=float(y[28:38]), float(y[38:46]), float(y[46:54])\n \n d=math.sqrt((x1-x2)**2 + (y1-y2)**2 + (z1-z2)**2)\n if d\n\n\n\n \n \n \n\n\tDisplaying Electron Density Map by Jmol\n\n\n\n\n\n
\n\n\n\n \n\n    \n\n\n Go Back
\n\n\n\n\"\"\" %(jmol_pth,jmol_pth, jmol_com_name, cont['0.5'],mapout, cont['0.7'],mapout,\n cont['1.0'], mapout, cont['1.5'],mapout, cont['2.0'],mapout)\n \n\n fw=open(maphtml, \"w\")\n fw.write (\"%s\" % html )\n fw.close()\n\n mapscr=\"\"\"\nload %s; spacefill off; rotate x 90;\n\nisosurface s_one color [x00AB24] within 2.0 {*} \"%s\" mesh nofill;\n#isosurface cutoff 0.2\nset echo e1 [0 35];\n#echo cutoff 0.2\necho \"2mFo-DFc\"\ncolor echo magenta\n\"\"\" %(ligpdb, mapout) \n\n fw=open(jmol_com_name, \"w\")\n fw.write (\"%s\" %mapscr )\n fw.close()\n\n return maphtml\n \n \n#####################################################################\ndef cut_map_scr():\n \n \n csh_script=\"\"\"#!/bin/csh -f\n\n######################################################################\n# This script is used to cut ASU CCP4 map to a small size around a \n# selected part of the model \n# (created 10/31/2011)\n#=================== Usage ========================== \n# mapcut mapfile pdbfile mapout\n######################################################################\n\nset mapfile=$1\nset pdbfile=$2\nset mapout=$3\nset maptmp=${1}_tmp\n\nif( $#argv == 2 ) then\n set mapout=\"${mapfile}_cut\"\nendif\n\n\n#cut map around the selected volume (pdbfile) \n# Border can not be smaller that the value set for the big map (>4.)\nmapmask MAPIN $mapfile xyzin $pdbfile MAPOUT $mapout <& /dev/null\nBorder 4.0\nSCALE FACTOR 1.0 0.0\nMODE mapin\nEOF\n\n\"\"\"\n\n mapscr=\"mapcut_TMP.csh\"\n fw=open(mapscr, \"w\")\n fw.write (\"%s\" % csh_script )\n fw.close()\n\n return mapscr\n\n#####################################################################\ndef cut_map_bylimit(xyzlim):\n \n csh_script=\"\"\"#!/bin/csh -f\n\n######################################################################\n# This script is used to cut ASU CCP4 map to a small size around a \n# selected xyz limit in fraction (2013-02-03)\n#\n#---------------------------- Usage ----------------------------------\n# mapcut mapfile mapout\n#######################################################################\n\nset mapfile=$1\nset mapout=$2\nset maptmp=${1}_tmp\n\nif( $#argv == 1 ) then\n set mapout=\"${mapfile}_cut\"\nendif\n\n#cut map around the selected box. No Border should be given!!\nmapmask MAPIN $mapfile MAPOUT $mapout <& /dev/null\nXYZLIM %s\nSCALE FACTOR 1.0 0.0\nMODE mapin\nEOF\n\n\n\"\"\" %(xyzlim)\n\n mapscr=\"mapmask_TMP.csh\"\n fw=open(mapscr, \"w\")\n fw.write (\"%s\" % csh_script )\n fw.close()\n\n return mapscr\n\n##########################################################\ndef map_info(mapfile):\n '''get the min, max, mean, sigma from the map\n '''\n\n min, max, mean, sigma='-1', '-1','-1','-1'\n log=mapfile + '_header'\n scr=mapfile + '.sh'\n \n arg='mapdump mapin %s <%s \\neof\\n' %(mapfile, log)\n \n fw = open(scr, 'w')\n fw.write(arg)\n fw.close()\n os.system('chmod +x %s ; ./%s' %(scr,scr))\n\n\n if not util.check_file(10,log): return min, max, mean, sigma\n fp=open(log,'r')\n for x in fp:\n if 'Minimum density ...' in x:\n min=x.rstrip().split( ' ')[-1]\n elif 'Maximum density ...' in x:\n max=x.rstrip().split( ' ')[-1]\n elif ' Mean density ..' in x:\n mean=x.rstrip().split( ' ')[-1]\n \n elif 'deviation from mean density ..' 
in x:\n sigma=x.rstrip().split( ' ')[-1]\n\n util.delete_file(scr, log)\n \n return min, max, mean, sigma \n","sub_path":"sf-valid/dccpy/ligand.py","file_name":"ligand.py","file_ext":"py","file_size_in_byte":36683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"236689901","text":"from statistics import mean\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nstyle.use('ggplot')\n\nxs = np.array([1,2,3,4,5], dtype=np.float64)\nys = np.array([5,4,6,5,6], dtype=np.float64)\n\ndef best_fit_slope_and_intercpt (xs,ys):\n m = (((mean(xs)*mean(ys)) - mean(xs*ys)) /\n ((mean(xs)**2) - mean(xs**2)))\n b = mean(ys)-m*mean(xs)\n return m,b\n\nm,b = best_fit_slope_and_intercpt(xs,ys)\n\nregression_line = []\nfor x in xs:\n regression_line.append((m*x)+b)\n\npredict_x =np.array([3,8,9], dtype=np.float64)\npredict_y = (m*predict_x)+b\n\nplt.scatter(xs,ys,color='#003F72')\nplt.scatter(predict_x,predict_y,color='#003F72')\nplt.plot(xs, regression_line)\nplt.show()","sub_path":"solveLinear.py","file_name":"solveLinear.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"362720746","text":"import subprocess\n\nprint('############ SHOW CONTAINER ############')\npscmd = 'docker ps -a'\nsubprocess.call(pscmd)\n\n\nprint('############ STOP CONTAINER ############')\ngetid = 'docker ps -a -q'\nshowid = subprocess.Popen(getid, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\nstd_out, std_err = showid.communicate()\nctnids = std_out.decode('utf-8').rstrip().split('\\n')\n\nprint('--- start ---')\nfor ctnid in ctnids:\n stopctn = \"docker stop %s\" % (ctnid)\n subprocess.call(stopctn)\nelse:\n print('--- end ---')\n\n\nprint('############ REMOVE CONTAINER ############') \ngetid = 'docker ps -a -q'\nshowid = subprocess.Popen(getid, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\nstd_out, std_err = showid.communicate()\nctnids = std_out.decode('utf-8').rstrip().split('\\n')\n\nprint('--- start ---')\nfor ctnid in ctnids:\n rmctn = \"docker rm %s\" % (ctnid)\n subprocess.call(rmctn)\n # rmexec = subprocess.call(rmcmd)\nelse:\n print('--- end ---')\n\n\nprint('############ SHOW CONTAINER ############')\npscmd = 'docker ps -a'\nsubprocess.call(pscmd)\n\n\nprint('############ START CONTAINER ############')\n\nrun1 = 'docker run --hostname=\"slave1\" --privileged -d -it -p 8081:80 -p 2223:22 --name slave1 slave1:0005 /sbin/init'\nsubprocess.call(run1)\n\nrun2 = 'docker run --hostname=\"slave2\" --privileged -d -it -p 8082:80 -p 2224:22 --name slave2 slave2:0005 /sbin/init'\nsubprocess.call(run2)\n\nrun3 = 'docker run --hostname=\"slave3\" --privileged -d -it -p 8083:80 -p 2225:22 --name slave3 slave3:0005 /sbin/init'\nsubprocess.call(run3)\n\nrun4 = 'docker run --hostname=\"master\" --privileged -d -it -p 8080:80 -p 2222:22 --name master --link slave1:s1 --link slave2:s2 --link slave3:s3 master:0006 /sbin/init'\nsubprocess.call(run4)\n","sub_path":"docker-restart.py","file_name":"docker-restart.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"463533486","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 24 13:31:48 2019\n\n@author: InsomniaMe\n\"\"\"\n\ndef count_letters(string, letter, location=0):\n count = 0\n start = location\n while True:\n start = string.find(letter, start) + 1\n if start == 0:\n break\n 
count = count + 1\n if count == 0:\n return -1\n return count\n\ncount_letters('sesset', 's', 2)\n\npara = '''Assign to a variable in your program a triple-quoted string that contains your favourite\nparagraph of text—perhaps a poem, a speech, instructions to bake a cake, some inspirational\nverses, etc.'''\n\ndef func_1(string):\n import string\n new_para = ''\n for i in para:\n if i not in string.punctuation:\n new_para = new_para + i\n word_list = new_para.split()\n count_1 = len(word_list)\n count_2 = 0\n for i in word_list:\n if 'e' in i:\n count_2 = count_2 + 1\n print('Your text contains {} words, of which {} ({:.2f}%) contain an \"e\".'.format(count_1, count_2, count_2/count_1*100))\n \nfunc_1(para) \n\nfor i in range(1, 13):\n for j in range(1, i+1):\n print(i*j, end=' ')\n print()\n\ndef reverse(string):\n new_string = ''\n for i in range(len(string)):\n new_string = new_string + string[-(i+1)]\n return new_string\n\n\ndef mirror(string):\n return string + reverse(string)\n\ndef remove_letter(letter, string):\n new_string = ''\n for i in string:\n if i != letter:\n new_string = new_string + i\n return new_string\n\ndef is_palindrome(string):\n n = len(string)\n if n % 2 != 0:\n return False\n else:\n front = string[:int(n/2)]\n back = string[int(n/2):]\n if front == reverse(back):\n return True\n else:\n return False\nstring = 'banana'\nx = 2\n\ndef count_how_many(word, string):\n x=len(word)\n string_list = [string[i:x+i] for i in range(len(string)-x+1)]\n count = 0\n for i in string_list:\n if i == word:\n count = count + 1\n return count\n\ndef remove(word, string):\n if word not in string:\n return string\n else:\n x=len(word)\n string_list = [string[i:x+i] for i in range(len(string)-x+1)]\n new_string = ''\n for i, letter in enumerate(string_list):\n if letter == word:\n break\n new_string = new_string + letter[0]\n new_string = new_string + string[i+x:]\n return new_string\n \ndef remove_2(word, string):\n n = string.find(word)\n x = len(word)\n if n == -1:\n return string\n else:\n return string[:n] + string[n+x:]\n \ndef remove_all(word, string):\n temp = string\n while True:\n temp = remove_2(word, temp)\n if temp.find(word) == -1:\n break\n return temp\n\nremove_all('an', 'banananana')\n\nlist(range(10))\n\nthis = [\"I\", \"am\", \"not\", \"a\", \"crook\"]\nthat = [\"I\", \"am\", \"not\", \"a\", \"crook\"]\nprint(\"Test 1: {0}\".format(this is that))\nthat = this\nprint(\"Test 2: {0}\".format(this is that))\n \n\n ","sub_path":"chapter_8.py","file_name":"chapter_8.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"41447034","text":"#!/usr/bin/python\nimport globalVars\nimport RPi.GPIO as GPIO\nimport time\nimport socket\nimport datetime\nimport os\nimport shutil\nimport subprocess\n\nCHECK_SECONDS = 0.3;\npirPIN = 11; # GPIO17\n\ndef initGPIO():\n global pirPIN;\n\n try:\n GPIO.setwarnings(False);\n GPIO.setmode(GPIO.BOARD);\n GPIO.setup(pirPIN, GPIO.IN)\n return 1;\n except Exception as e:\n print('Error inicializando GPIO: ' + str(e) +'\\n');\n return 0;\n\ndef isPIRActive():\n global pirPIN;\n pirActive = GPIO.input(pirPIN);\n if (pirActive):\n return True;\n else:\n return False;\n\n\nok =initGPIO();\nif (ok ==1):\n while (True):\n pirActive = isPIRActive();\n if (pirActive):\n print ('Activo!');\n else:\n print ('nada');\n 
time.sleep(CHECK_SECONDS);\n","sub_path":"raspiWeb/backoffice/test/pirtest.py","file_name":"pirtest.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"495114542","text":"data = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]\n\ndef mean(listVar):\n total = 0 \n\n for i in listVar: #Each element from list gets added to find total\n total += i\n\n return total/len(listVar)\n\nx = mean(data)\nprint(x)\n","sub_path":"Core/Mean.py","file_name":"Mean.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"177417646","text":"def dataaccess(EPICID):\n print(EPICID)\n\ndef getdata(EPICID):\n\n import numpy as np\n import urllib \n\n resultdict={}\n \n campaigndict = {'06':'GO6082/','08':'GO8037/','102':'GO10037/','12':'GO12111/','13':'GO13111/','14':'GO14058/','15':'GO15058/','16':'GO16058/','17':'GO17033/','18':'GO18033/'}\n for eachcampaign in campaigndict:\n try:\n #if True:\n url = 'https://raw.githubusercontent.com/zabop/chameleon/master/data/'+campaigndict[eachcampaign]+'ktwo'+str(EPICID)+'-c'+eachcampaign+'_lpd-targ.fits_massaap_lc_th8_k2sc.lc'\n data = urllib.request.urlopen(url)\n data = np.loadtxt(data,skiprows=1)\n \n time = data[:,0]\n flux = data[:,1]\n \n print('retrieving data from campaign '+eachcampaign)\n print(url)\n print('Run:\\ntime, flux = data[\\''+eachcampaign+'\\']\\nto access time and flux of the target in campaign ' + eachcampaign+'\\n')\n \n resultdict[eachcampaign]=[time, flux]\n \n except urllib.error.HTTPError:\n pass \n \n return resultdict\n","sub_path":"src/dataaccess.py","file_name":"dataaccess.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"195844540","text":"# Jack12\n\nimport os\nimport pathlib\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n\nimport numpy as np\nimport torch\nfrom scipy import linalg\nfrom torch.nn.functional import adaptive_avg_pool2d\n\nfrom PIL import Image\n\nimport pickle\nfrom .inception import InceptionV3 \nfrom .fid_score import calculate_frechet_distance\n\ntry:\n from tqdm import tqdm\nexcept ImportError:\n # If not tqdm is not available, provide a mock version of it\n def tqdm(x): return x\n \ndef norm_ip(img, _min, _max):\n img.clamp_(min=_min, max=_max)\n img.add_(-_min).div_(_max - _min + 1e-5)\n\ndef norm_range(t, _range):\n if range is not None:\n norm_ip(t, _range[0], _range[1])\n else:\n norm_ip(t, float(t.min()), float(t.max()))\n\nclass FidSolver():\n\n def __init__(self, dims, batch, cuda=True,mu2_path='./ffhq_mu512.npy', sigma2_path='ffhq_sigma512.npy' ):\n block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]\n self.model = InceptionV3([block_idx])\n \n if (cuda):\n self.model.cuda()\n self.dims = dims\n self.batch = batch\n\n self.m2 = np.load(mu2_path)\n self.s2 = np.load(sigma2_path)\n\n def set_arr(self, length):\n self.pred_arr = np.empty((length, self.dims))\n self.model.eval()\n \n def cal_frechet_distance(self, mu1, sigma1, mu2, sigma2, eps=1e-6):\n return calculate_frechet_distance(mu1, sigma1, mu2, sigma2)\n\n def cal_get_pred(self, idx, _in):\n \n _in = _in.clone()\n # print(\"Before: {}\".format(_in.shape))\n norm_range(_in, [-1, 1])\n #print(\"after normalization: {}\".format(_in) )\n pred = self.model(_in)[0]\n\n # If model output is not scalar, apply global spatial average pooling.\n # This 
happens if you choose a dimensionality not equal 2048.\n \n if pred.size(2) != 1 or pred.size(3) != 1:\n pred = adaptive_avg_pool2d(pred, output_size=(1, 1))\n\n start = idx\n end = idx + self.batch\n \n\n t = pred.cpu().data.numpy().reshape(pred.size(0), -1)\n\n # print('pred arr size: '.format(self.pred_arr[start:end]))\n # print('res size: {}'.format(t))\n self.pred_arr[start:end] = t\n\n\n def get_frechet(self):\n\n m1 = np.mean(self.pred_arr, axis=0)\n s1 = np.cov(self.pred_arr, rowvar=False)\n\n fid_value = calculate_frechet_distance(m1, s1, self.m2, self.s2)\n\n return fid_value","sub_path":"FidSolver.py","file_name":"FidSolver.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"202025680","text":"# Tensorflow Computer Vision Helper\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport glob\nimport os\n\ndef plot_convolution(data,t,title=''):\n fig, ax = plt.subplots(2,len(data)+1,figsize=(8,3))\n fig.suptitle(title,fontsize=16)\n tt = np.expand_dims(np.expand_dims(t,2),2)\n for i,im in enumerate(data):\n ax[0][i].imshow(im)\n ximg = np.expand_dims(np.expand_dims(im,2),0)\n cim = tf.nn.conv2d(ximg,tt,1,'SAME')\n ax[1][i].imshow(cim[0][:,:,0])\n ax[0][i].axis('off')\n ax[1][i].axis('off')\n ax[0,-1].imshow(t)\n ax[0,-1].axis('off')\n ax[1,-1].axis('off')\n #plt.tight_layout()\n plt.show()\n\ndef plot_results(hist):\n fig,ax = plt.subplots(1,2,figsize=(15,3))\n ax[0].set_title('Accuracy')\n ax[1].set_title('Loss')\n for x in ['acc','val_acc']:\n ax[0].plot(hist.history[x])\n for x in ['loss','val_loss']:\n ax[1].plot(hist.history[x])\n plt.show()\n\ndef display_dataset(dataset, labels=None, n=10, classes=None):\n fig,ax = plt.subplots(1,n,figsize=(15,3))\n for i in range(n):\n ax[i].imshow(dataset[i])\n ax[i].axis('off')\n if classes is not None and labels is not None:\n ax[i].set_title(classes[labels[i][0]])\n\ndef check_image(fn):\n try:\n im = Image.open(fn)\n im.verify()\n return im.format=='JPEG'\n except:\n return False\n \ndef check_image_dir(path):\n for fn in glob.glob(path):\n if not check_image(fn):\n print(\"Corrupt image or wrong format: {}\".format(fn))\n os.remove(fn)\n\ndef load_cats_dogs_dataset(batch_size=64):\n if not os.path.exists('data/PetImages'):\n print(\"Extracting the dataset\")\n with zipfile.ZipFile('data/kagglecatsanddogs_3367a.zip', 'r') as zip_ref:\n zip_ref.extractall('data')\n print(\"Checking dataset\")\n check_image_dir('data/PetImages/Cat/*.jpg')\n check_image_dir('data/PetImages/Dog/*.jpg')\n data_dir = 'data/PetImages'\n print(\"Loading dataset\")\n ds_train = keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split = 0.2,\n subset = 'training',\n seed = 13,\n image_size = (224,224),\n batch_size = batch_size\n )\n ds_test = keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split = 0.2,\n subset = 'validation',\n seed = 13,\n image_size = (224,224),\n batch_size = batch_size\n )\n return ds_train,ds_test\n","sub_path":"computer-vision-tf/tfcv.py","file_name":"tfcv.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"360755308","text":"import math\n\ndef mergeSort(array):\n if len(array) == 1:\n return array\n\n center = math.floor(len(array)/2)\n left = array[:center]\n right = array[center:]\n\n return 
merge(mergeSort(left), mergeSort(right))\n\n\n\ndef merge(left, right):\n results = []\n while len(left) and len(right):\n if left[0] < right[0]:\n results.append(left[0])\n del left[0]\n else:\n results.append(right[0])\n del right[0]\n\n for i in range (0, len(left)):\n results.append(left[i])\n for i in range (0, len(right)):\n results.append(right[i])\n\n return results\narr1 = [1,5,6, 7,8]\narr2 = [-1,5,2,9,11,6]\nif __name__ == \"__main__\":\n #res = merge(arr1, arr2)\n #print(res)\n\n print(mergeSort(arr2))\n","sub_path":"codes/job_interview/mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"549316311","text":"import numpy as np\r\n\r\ndef bytestoint(byte_in):\r\n \"\"\"convert byte(0~255) to int\"\"\"\r\n return ord(byte_in)\r\n\r\ndef load_radiance(filepath):\r\n \"\"\"load radiance map from input path and return Mat\"\"\"\r\n with open(filepath, mode='rb') as rad_file:\r\n while 1:\r\n rawline = rad_file.readline()\r\n if rawline[0] == '-' or rawline[0] == '+':\r\n splitted = rawline.split()\r\n size_y = int(splitted[1].decode(\"utf-8\"))\r\n size_x = int(splitted[3].decode(\"utf-8\"))\r\n break\r\n rgbe = np.zeros((size_y, size_x, 4), np.uint8, 'C')\r\n for row_idx in range(size_y):\r\n buf = rad_file.read(4)\r\n if len(buf) != 4:\r\n break\r\n for channel_idx in range(4):\r\n index_cnt = 0\r\n while index_cnt < size_x:\r\n tmp = rad_file.read(1)\r\n runlength = bytestoint(tmp)\r\n if runlength <= 128:\r\n for _ in range(runlength):\r\n pix_in = bytestoint(rad_file.read(1))\r\n rgbe[row_idx, index_cnt, channel_idx] = pix_in\r\n index_cnt = index_cnt + 1\r\n else:\r\n pix_in = bytestoint(rad_file.read(1))\r\n rgbe[row_idx, index_cnt:index_cnt+runlength-128, channel_idx] = pix_in #129 to 128\r\n index_cnt = index_cnt + runlength -128\r\n hdrimage = np.zeros((size_y, size_x, 3), float)\r\n value = rgbe[:, :, 3] # get exponent\r\n value = value.astype(int)\r\n value = value - 128\r\n value_normalize = np.ldexp(1.0/256.0,value)\r\n for _ in range(3):\r\n hdrimage[:, :, _] = (rgbe[:, :, _] + 0.5)*value_normalize\r\n select = rgbe[:, :, 3] == 0 # 2D true-false table\r\n hdrimage[select] = [0, 0, 0] # if select's pixel is false, then let RGB value equal to zero\r\n return hdrimage\r\n\r\ndef PSNR_UCHAR3(input_1, input_2, peak=255):\r\n [row,col,channel] = input_1.shape\r\n if input_1.shape != input_2.shape:\r\n print(\"Warning!! 
Two image have different shape!!\")\r\n return 0\r\n mse = ((input_1 - input_2)**2).sum() / (row * col * channel)\r\n \r\n return 20*np.log10(peak) - 10*np.log10(mse)","sub_path":"HW1/code/load_radiance_and_psnr.py","file_name":"load_radiance_and_psnr.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"305514126","text":"import pytesseract\nfrom PIL import Image, ImageFilter, ImageEnhance\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport glob, os\nimport io\n\nSIZE = 9\n\n\ndef open_txt(tc):\n \"\"\"\n fungsi untuk membuka file txt\n \"\"\"\n file = \"../test/tc\" + str(tc) + \".txt\"\n board = [[0 for i in range(SIZE)] for j in range(SIZE)]\n with open(file,'r') as f:\n text = f.read()\n for i,line in enumerate(text.split('\\n')):\n board[i] = [(int(i) if i != '#' else 0) for i in line.split(' ')]\n return board\n\n\ndef open_image(number) :\n \"\"\"\n fungsi untuk membuka file img (jpg atau png) \n dan dikonversi ke matriks\n \"\"\"\n # Inisialisasi\n board = [[0 for i in range(SIZE)] for j in range(SIZE)]\n \n # Open image\n path = \"../test/preprocess_\" + str(number) + \".png\"\n image = Image.open(path)\n\n # Mendapatkan lebar dan tinggi tiap grid\n row, col = image.size\n height = row/SIZE\n width = col/SIZE\n\n for i in range(SIZE) :\n for j in range(SIZE) :\n # Crop grid\n cropped = image.crop((4 + width*j, 4 + height*i, width*(j+1) - 4, height*(i+1) - 3))\n np_img = np.array(cropped)\n if(np_img.mean() > 20): \n cropped = cropped.convert('RGB')\n cropped = cropped.filter(ImageFilter.MedianFilter())\n enhancer = ImageEnhance.Contrast(cropped)\n cropped = enhancer.enhance(2)\n # Konversi ke string dan menangani kasus \n # kesalahan konversi untuk angka 5,2,8,9\n result = pytesseract.image_to_string(cropped, config='--psm 6')\n if result not in ['1','2','3','4','5','6','7','8','9']:\n if result == '':\n result = result.replace('','0')\n if result == 'S' :\n result = result.replace('S','0')\n if result == '&' or result == 'g' :\n result = '8'\n if result == 'q' :\n result = '9'\n if result == '>' :\n result = '2'\n\n final = int(result)\n board[i][j] = final\n else:\n board[i][j] = 0\n if(board[0][3] == 9):\n board[0][3] = 2\n return board\n\ndef preprocess_image(file):\n \"\"\"\n fungsi untuk menghilangkan garis tebal agar memudahkan cropping\n \"\"\"\n path = \"test/image\" + str(file) + \".png\"\n image = cv2.imread(path)\n kernel_vertical = cv2.getStructuringElement(cv2.MORPH_RECT, (1,50))\n temp1 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel_vertical)\n horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50,1))\n temp2 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, horizontal_kernel)\n temp3 = cv2.add(temp1, temp2)\n result = cv2.add(temp3, image)\n cv2.imwrite('test/preprocess_'+str(file)+'.png',result)\n return result\n\ndef write_result(res,tc,is_image):\n \"\"\"\n Menulis hasil pemrosesan ke text file\n \"\"\"\n fn=\"\"\n path = \"../result/\"\n if is_image:\n path = path + \"image\" + str(tc) + \"-ans.txt\"\n fn = \"result/image\" + str(tc) + \"-ans.txt\"\n else :\n path = path + \"text\" + str(tc) + \"-ans.txt\"\n fn = \"result/text\" + str(tc) + \"-ans.txt\"\n\n f = open(path, \"w\",encoding=\"utf-8\")\n\n print(\"Muhammad Farid Adilazuarda/13518040\",file=f)\n print(\"12/06/2020\", file=f)\n\n print(\"\\n\",file=f)\n print(\" SUDOKU SOLVER\",file=f)\n for i in range(len(res)) :\n if i%3 == 0 :\n if i == 0:\n print(\" 
┎─────────┰─────────┰─────────┒\",file=f)\n else:\n print(\" ┠─────────╂─────────╂─────────┨\",file=f)\n\n for j in range(len(res[0])):\n if j%3 == 0:\n print(\" ┃ \", end=\" \",file=f)\n if j == 8:\n print(res[i][j], \" ┃\", file=f)\n else:\n print(res[i][j], end=\" \",file=f)\n\n print(\" ┖─────────┸─────────┸─────────┚\",file=f)\n\n print(\"\", file = f)\n print(\"Lokasi para 5 : \", end=\"\\n\",file = f)\n for i in range(SIZE) :\n for j in range(SIZE) :\n if res[i][j] == 5 :\n print(\"[ \" + str(i) + \" , \" + str(j) + \" ]\",end=\"\\n\", file=f)\n\n print()\n print(\"Hasil tersimpan pada file \"+fn)\n f.close()\n\ndef preprocess_warning():\n \"\"\"\n fungsi untuk image preprocessing\n \"\"\"\n for infile in glob.glob(\"../test/*.png\"):\n im = Image.open(infile)\n im.save(infile)\n\n return 0","sub_path":"src/IOProcess.py","file_name":"IOProcess.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"441006469","text":"import urllib, urllib2\nfrom xml.dom.minidom import parseString\n\ndef get_node_value(parent, name, ns=\"\"):\n if ns:\n if parent.getElementsByTagNameNS(ns, name) and parent.getElementsByTagNameNS(ns, name)[0].childNodes:\n return parent.getElementsByTagNameNS(ns, name)[0].childNodes[0].data\n else:\n if parent.getElementsByTagName(name) and parent.getElementsByTagName(name)[0].childNodes:\n return parent.getElementsByTagName(name)[0].childNodes[0].data\n return None\n\ndef get_thumbnail_value(parent):\n if parent.getElementsByTagName('media:thumbnail') and len(parent.getElementsByTagName('media:thumbnail')) > 1:\n return parent.getElementsByTagName('media:thumbnail')[1].attributes.getNamedItem('url').value\n return None\n\ndef load_xml(url):\n try:\n return parseString(urllib2.urlopen(url).read())\n\n except (urllib2.HTTPError, urllib2.URLError) as ex:\n #xbmcgui.Dialog().ok( __language__(30301), __language__(30600))\n raise ex\n\n except Exception as ex:\n #xbmc.log( \"An unhandled exception was triggered in the the Areena addon.\" )\n raise ex\n\ndef parse_xml(doc):\n elements = []\n for item in doc.getElementsByTagName('item'):\n title = unicode(get_node_value(item, 'title')).encode('utf-8')\n link = unicode(get_node_value(item, 'link')).encode('utf-8')\n thumbnail = unicode(get_thumbnail_value(item)).encode('utf-8')\n element = [title, link, thumbnail]\n elements.append(element)\n return elements\n","sub_path":"plugin.video.yle/xmlparser.py","file_name":"xmlparser.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"86069090","text":"# encoding = utf-8\n__author__ = 'wy'\n\nimport os\n\ndef getWriter(file_name,output_dir=None,encoding=\"utf-8\",mode=\"w\"):\n if output_dir is None:\n fname=file_name\n else:\n fname=os.path.join(output_dir,file_name)\n writer=open(fname,mode=mode)\n return writer","sub_path":"netty/src/main/python/common/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"152283381","text":"from django.conf.urls import url\n\nfrom . 
import views\n\napp_name = 'polls'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^login', views.login, name='login'),\n url(r'^index', views.index, name='index'),\n url(r'^AgriculturaPrecisao', views.agricprecisao, name='agricprecisao'),\n url(r'^appkey1', views.appkey1, name='appkey1'),\n url(r'^alert', views.alert, name='alert'),\n url(r'^userProfile', views.userProfile, name='userProfile'),\n url(r'^dashboard', views.dashboard, name='dashboard'),\n url(r'^validate_login', views.validate_login, name='validate_login'),\n url(r'logout', views.logout, name='logout'),\n url(r'teste', views.teste, name='teste'),\n url(r'collapse', views.collapse, name='collapse'),\n #url(r'collapse/(?P[0-9])', views.collapse, name='collapse1'),\n]\n","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"457155949","text":"# Copyright (C) 2018 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\"\"\"Page objects for child elements of pages\"\"\"\n# pylint: disable=too-few-public-methods\n\n\nclass RelatedUrls(object):\n \"\"\"Represents reference / evidence url section on info widgets\"\"\"\n\n def __init__(self, descendant_el, label):\n self._root = descendant_el.element(\n class_name=\"related-urls__title\", text=label).parent(\n class_name=\"related-urls\")\n self.add_button = self._root.button(class_name=\"related-urls__toggle\")\n\n\nclass CommentArea(object):\n \"\"\"Represents comment area (form and mapped comments) on info widget\"\"\"\n\n def __init__(self, descendant_el):\n self.add_section = descendant_el.element(\n class_name=\"comment-add-form__section\")\n","sub_path":"test/selenium/src/lib/page/widget/page_elements.py","file_name":"page_elements.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"549740319","text":"import torch\nfrom torch import optim\nfrom torch.nn import Module, ModuleList, Dropout\n\nfrom graphgallery.nn.models import TorchKeras\nfrom graphgallery.nn.layers.pytorch.get_activation import get_activation\nfrom graphgallery.nn.metrics.pytorch import Accuracy\n\nfrom dgl.nn.pytorch import GATConv\n\n\nclass GAT(TorchKeras):\n def __init__(self,\n in_channels,\n out_channels,\n hids=[8],\n num_heads=[8],\n acts=['elu'],\n dropout=0.6,\n weight_decay=5e-4,\n lr=0.01):\n\n super().__init__()\n\n layers = ModuleList()\n paras = []\n\n inc = in_channels\n pre_head = 1\n for hid, num_head, act in zip(hids, num_heads, acts):\n layer = GATConv(inc * pre_head,\n hid,\n activation=get_activation(act),\n num_heads=num_head,\n feat_drop=dropout,\n attn_drop=dropout)\n layers.append(layer)\n paras.append(\n dict(params=layer.parameters(), weight_decay=weight_decay))\n inc = hid\n pre_head = num_head\n\n layer = GATConv(inc * pre_head,\n out_channels,\n num_heads=1,\n feat_drop=dropout,\n attn_drop=dropout)\n layers.append(layer)\n # do not use weight_decay in the final layer\n paras.append(dict(params=layer.parameters(), weight_decay=0.))\n\n self.layers = layers\n self.dropout = Dropout(dropout)\n self.compile(loss=torch.nn.CrossEntropyLoss(),\n optimizer=optim.Adam(paras, lr=lr),\n metrics=[Accuracy()])\n\n def forward(self, x, g):\n for layer in self.layers[:-1]:\n x = layer(g, x).flatten(1)\n x = self.dropout(x)\n\n x = self.layers[-1](g, x).mean(1)\n return 
x\n","sub_path":"graphgallery/nn/models/dgl_torch/gat.py","file_name":"gat.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"3938166","text":"from socket import AF_INET, socket, SOCK_STREAM\nfrom threading import Thread\nimport json\n\nclients = {}\naddressesses = {}\nusernames = []\nconnections_allowed = 5\n\nhost = ''\nport = 1134\nbuffer_size = 1024\naddress = (host, port)\nserver = socket(AF_INET, SOCK_STREAM)\nserver.bind(address)\n\n\ndef accept_connections():\n while True:\n client, client_addressess = server.accept()\n addressesses[client] = client_addressess\n Thread(target=handle_client, args=(client,)).start()\n\n\ndef message_handler(msg, client):\n try:\n j_obj = json.loads(msg)\n message_return = {}\n print(j_obj)\n username = j_obj.get(\"sender\")\n if len(j_obj) == 2 and j_obj.get(\"disconnect\") is True:\n message_return[\"disconnect\"] = True\n message_json = json.dumps(message_return)\n broadcast_disonnect = (\"{} has disconnected\".format(username))\n client.send(bytes(message_json, \"utf8\"))\n broadcast(broadcast_disonnect, client)\n client.close()\n usernames.remove(username)\n del clients[client]\n else:\n broadcast(msg, client)\n except OSError: # disconnected client\n pass\n\n\ndef connection_check(username, client):\n message_return = {}\n username_jobj = json.loads(username)\n username = username_jobj.get(\"username\")\n # handling username requets\n if len(username_jobj) == 1 and username is not None:\n if username in usernames:\n message_return[\"isConnect\"] = False\n message_return[\"errorCode\"] = 1\n message_json = json.dumps(message_return)\n client.send(bytes(message_json, \"utf8\"))\n client.close()\n del clients[client]\n usernames.remove(username)\n elif len(clients) > connections_allowed:\n message_return[\"isConnect\"] = False\n message_return[\"errorCode\"] = 2\n message_json = json.dumps(message_return)\n client.send(bytes(message_json, \"utf8\"))\n client.close()\n del clients[client]\n usernames.remove(username)\n else:\n message_return[\"isConnect\"] = True\n message_return[\"errorCode\"] = -1\n message_json = json.dumps(message_return)\n client.send(bytes(message_json, \"utf8\"))\n usernames.append(username)\n\n\ndef handle_client(client): # Takes client socket as argument.\n name = client.recv(buffer_size).decode(\"utf8\")\n connection_check(name, client)\n clients[client] = name\n while True:\n msg = client.recv(buffer_size).decode(\"utf8\")\n message_handler(msg, client)\n\n\ndef broadcast(msg, client):\n # print(clients)\n for sock in clients:\n if sock is client:\n pass\n else:\n sock.send(bytes(msg, \"utf8\"))\n\n\nif __name__ == \"__main__\":\n server.listen(connections_allowed) # Listens for 5 connections at max.\n print(\"Waiting for connection...\")\n connection_thread = Thread(target=accept_connections)\n connection_thread.start() # Starts the infinite loop.\n connection_thread.join()\n server.close()\n","sub_path":"Chatroom/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"650255254","text":"import unittest\n\nfrom model.Business import Business\nfrom proto.business_pb2 import Business as BusinessProtobuf\n\n\nclass BusinessTestCase(unittest.TestCase):\n\n def testBusinessProtobufSerialization(self):\n key = \"AAAAAAA\"\n name = \"Acme s.p.a.\"\n transport_enabled = True\n\n object = Business()\n 
+{"seq_id":"650255254","text":"import unittest\n\nfrom model.Business import Business\nfrom proto.business_pb2 import Business as BusinessProtobuf\n\n\nclass BusinessTestCase(unittest.TestCase):\n\n    def testBusinessProtobufSerialization(self):\n        key = \"AAAAAAA\"\n        name = \"Acme s.p.a.\"\n        transport_enabled = True\n\n        business = Business()\n        business.public_key = key\n        business.name = name\n        business.transport_enabled = transport_enabled\n\n        object_protobuf = Business.toProtobufObject(business)\n\n        self.assertEqual(object_protobuf.public_key, key)\n        self.assertEqual(object_protobuf.name, name)\n        self.assertEqual(object_protobuf.transport_enabled, transport_enabled)\n\n    def testBusinessProtobufDeserialization(self):\n        key = \"AAAAAAA\"\n        name = \"Acme s.p.a.\"\n        transport_enabled = False\n\n        object_protobuf = BusinessProtobuf()\n        object_protobuf.public_key = key\n        object_protobuf.name = name\n        object_protobuf.transport_enabled = transport_enabled\n\n        business = Business.from_protobuf_object(object_protobuf)\n\n        self.assertEqual(business.public_key, key)\n        self.assertEqual(business.name, name)\n        self.assertEqual(business.transport_enabled, transport_enabled)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"contracts/test/test_business.py","file_name":"test_business.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"14228051","text":"import ConfigParser\nimport sys\n\nclass FileConfig:\n    _parser = ConfigParser.SafeConfigParser()\n\n    @classmethod\n    def parse_file(cls, path):\n        cls._parser.read(path)\n\n    @classmethod\n    def has_section(cls, section_name):\n        return section_name in cls._parser.sections()\n\n    @classmethod\n    def get_attrs(cls, section_name):\n        attr_map = dict()\n        if cls.has_section(section_name):\n            for name, value in cls._parser.items(section_name):\n                attr_map[name] = value\n        return attr_map\n\n    @classmethod\n    def get_attr(cls, section_name, name):\n        return cls._parser.get(section_name, name)\n\n    @classmethod\n    def print_all(cls):\n        for section_name in cls._parser.sections():\n            print('Section: %s' % section_name)\n            print(' Options: %s' % cls._parser.options(section_name))\n            for name, value in cls._parser.items(section_name):\n                print(' %s = %s' % (name, value))\n            print('\\n')\n\nif __name__ == '__main__':\n    path = 'example.ini'\n    if len(sys.argv) >= 2:\n        path = sys.argv[1]\n    FileConfig.parse_file(path)\n    print('')\n    print('mysql config: %s' % FileConfig.get_attrs('mysql'))\n    print('all config: ')\n    FileConfig.print_all()\n\n\n","sub_path":"python/configparser/fileconfig2.py","file_name":"fileconfig2.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"40045670","text":"def main():\n    a = input()\n    b = input()\n    n = len(a)\n\n    p1 = 0\n    p2 = n - 1\n    while p1 < p2 and a[p1] == b[p1]:\n        p1 += 1\n    while p1 < p2 and a[p2] == b[p2]:\n        p2 -= 1\n\n    chunk = ''.join(reversed(b[p1:p2 + 1]))\n    # print(a[p1:p2 + 1])\n    # print(chunk)\n    if a[p1:p2 + 1] != chunk:\n        print(0)\n        return\n\n    op1 = p1\n    p1 -= 1\n    p2 += 1\n    while p1 >= 0 and p2 < n and a[p1] == a[p2]:\n        p1 -= 1\n        p2 += 1\n\n    print(op1 - p1)\n\nmain()\n","sub_path":"kattis/orderlyclass.py","file_name":"orderlyclass.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"238981535","text":"\"\"\"\nA Scalable Classifier for processing Big Data Streams\nAuthors: Kiran Sudhir, Mayanka Pachaiyappa and Varun Bezzam\nSri Sivasubramaniya Nadar College of Engineering\nKalavakkam, Chennai, Tamil Nadu\n\"\"\"\n\n#Standard imports for machine learning\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom datetime import datetime\n\n#Imports for the particular algorithm being implemented\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\nfrom sklearn import tree\nfrom sklearn.cross_validation import StratifiedShuffleSplit\n\n#Test data and labels are loaded\ndef load_test_data():\n\t#DataTable creation using pandas\n\ttable = pd.read_csv(\"C:/Users/HP-PC/Desktop/final-year-project/data/poker-hand-training-true.data\")\n\ttable.columns = [\"Suit1\",\"Card1\",\"Suit2\",\"Card2\",\"Suit3\",\"Card3\",\"Suit4\",\"Card4\",\"Suit5\",\"Card5\",\"Poker Hand\"]\n\tX = table.loc[:,[\"Suit1\",\"Card1\",\"Suit2\",\"Card2\",\"Suit3\",\"Card3\",\"Suit4\",\"Card4\",\"Suit5\",\"Card5\"]]\n\tY = table.loc[:,[\"Poker Hand\"]]\n\tprint(\"Test data loaded...\")\n\treturn [X,Y]\n\n#The training dataset is created\ndef load_training_data():\n\ttraining_data = pd.read_csv(\"C:/Users/HP-PC/Desktop/final-year-project/data/poker-hand-testing.data\")\n\ttraining_data.columns = [\"Suit1\",\"Card1\",\"Suit2\",\"Card2\",\"Suit3\",\"Card3\",\"Suit4\",\"Card4\",\"Suit5\",\"Card5\",\"Poker Hand\"]\n\ttraining_labels = training_data.loc[:,[\"Poker Hand\"]]\n\ttraining_data = training_data.loc[:,[\"Suit1\",\"Card1\",\"Suit2\",\"Card2\",\"Suit3\",\"Card3\",\"Suit4\",\"Card4\",\"Suit5\",\"Card5\"]]\n\tprint(\"Training data loaded...\")\n\treturn [training_data,training_labels]\n\n#Randomized tree ensemble using scikit-learn (ExtraTreesClassifier, a Random Forest variant)\ndef classify(X,Y,test_data,test_labels):\n\tprint(\"Building the model for random forests...\")\n\tY = np.ravel(Y)\n\ttest_labels = np.ravel(test_labels)\n\tclf = ExtraTreesClassifier(n_estimators=10)\n\tclf = clf.fit(X,Y)\n\tprint(\"Classification Score using Random Forests:\" + str(clf.score(test_data,test_labels)))\n\toutput = clf.predict(test_data)\n\treturn output\n\n#Decision Tree using scikit-learn\n#Usual score is 66% for 999999 records and time to execute is 10.96 seconds\ndef classify_dtree(X,Y,test_data,test_labels):\n\tprint(\"Building the model for decision trees...\")\n\tstart_time = datetime.now()\n\tprint(start_time)\n\tclf = tree.DecisionTreeClassifier()\n\tclf = clf.fit(X,Y)\n\tend_time = datetime.now()\n\tprint(end_time)\n\tprint(\"Classification Score using Decision Tree:\" + str(clf.score(test_data,test_labels)))\n\n#Reservoir Sampling method\n#Score using reservoir sampled data of 100000 records is 56% and time to execute is 0.6 seconds\n#Score using reservoir sampled data of 300000 records is 59% and time to execute is 2.95 seconds\ndef reservoir_sampler(data,labels):\n\tSAMPLE_COUNT = 300000\n\t# Force the value of the seed so the results are repeatable\n\trandom.seed(12345)\n\tsampled_data = pd.DataFrame()\n\tsampled_labels = pd.DataFrame()\n\t#Generate the reservoir\n\tsampled_data = sampled_data.append(data.loc[0:SAMPLE_COUNT-1])\n\tsampled_labels = sampled_labels.append(labels.loc[0:SAMPLE_COUNT-1])\n\n\tfor i in range(SAMPLE_COUNT,999998):\n\t\t# Randomly replace elements in the reservoir\n\t\t# with a decreasing probability.\n\t\t# Choose an integer between 0 and index (inclusive)\n\t\tprint(i)\n\t\tr = random.randint(0,i)\n\t\tif r < SAMPLE_COUNT:\n\t\t\tsampled_data.loc[r] = data.loc[i]\n\t\t\tsampled_labels.loc[r] = labels.loc[i]\n\tsampled_data.to_csv(\"reservoir_sampled_data.csv\")\n\tsampled_labels.to_csv(\"reservoir_sampled_labels.csv\")\n\treturn [sampled_data,sampled_labels]\n\n#Random Sampling Method\n#Score using random sampled data of 100000 records is 38% and time to execute is 0.72 seconds\n#Score using random sampled data of 300000 records is 60% and time to execute is 2.24 seconds\ndef random_sampler(data,labels):\n\tNO_OF_SAMPLES = 99999\n\tsampled_data = pd.DataFrame()\n\tsampled_labels = pd.DataFrame()\n\trandom.seed(12345)\n\tfor i in range(0,NO_OF_SAMPLES):\n\t\tprint(i)\n\t\tr = random.randint(0,999998)\n\t\tsampled_data = sampled_data.append(data.loc[r])\n\t\tsampled_labels = sampled_labels.append(labels.loc[r])\n\tsampled_data.to_csv(\"random_sampled_data.csv\")\n\tsampled_labels.to_csv(\"random_sampled_labels.csv\")\n\treturn [sampled_data,sampled_labels]\n\n#Stratified Sampling Method\n#Score using stratified sampled data of 100000 records is 56% and time to execute is 0.74 seconds\n#Score using stratified sampled data of 300000 records is 57% and time to execute is 2.80 seconds\ndef stratified_sampler():\n\ttable = pd.read_csv(\"C:/Users/HP-PC/Desktop/final-year-project/data/poker-hand-testing.data\")\n\ttable.columns = [\"Suit1\",\"Card1\",\"Suit2\",\"Card2\",\"Suit3\",\"Card3\",\"Suit4\",\"Card4\",\"Suit5\",\"Card5\",\"Poker Hand\"]\n\ttarget = table[\"Poker Hand\"]\n\ttable = table.drop(\"Poker Hand\", axis=1)\n\tsss = StratifiedShuffleSplit(target,test_size=0.3)\n\tfor train_index, test_index in sss:\n\t\txtrain,xtest = table.loc[train_index],table.loc[test_index]\n\t\tytrain,ytest = target[train_index],target[test_index]\n\treturn [xtrain,xtest,ytrain,ytest]\n\t# Check target series for distribution of classes\n\t#ytrain.value_counts()\n\t#ytest.value_counts()\n\n#Main method\ndef main():\n\t[X,Y] = load_training_data()\n\t[td,tl] = load_test_data()\n\top = classify(X,Y,td,tl)\n\tclassify_dtree(X,Y,td,tl)\n\n#Run the pipeline when executed as a script\nif __name__ == '__main__':\n\tmain()\n","sub_path":"final_project.py","file_name":"final_project.py","file_ext":"py","file_size_in_byte":5172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"70455614","text":"# 126: Extra challenge: create a script with a while loop; outside it there will be two lists,\n# one with unconfirmed users and another with confirmed users.\n# Inside the while loop you will have a variable that receives the last user of the list on each pass,\n# which makes every user go through a verification step\n# and then be moved to the confirmed list. After that, write a for loop\n# that displays each confirmed user, and explain.\n\nusers_confirmados = []\n\nusers_nao_confirmados = ['Joao', 'Jose', 'Maria', 'Pedro', 'Mateus', 'Marcos', 'Lucas', 'Timoteo']\n\nwhile users_nao_confirmados:\n    user = users_nao_confirmados.pop()\n\n    print(\"Verifying user\", user, \"...\")\n\n    users_confirmados.append(user)\n\nprint(\"Confirmed users:\\n\")\nfor user in users_confirmados:\n    print(\"\\t\", user)\n\n","sub_path":"python_crash_course/chapter_07_user_input_while/desafio_0126.py","file_name":"desafio_0126.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"598948584","text":"\"\"\" Script for creating an inventory yaml \"\"\"\n\nimport os\n\nfrom src import utils\n\n\ndef prepare_cert_path(certification, certification_dir):\n    \"\"\" Prepare the path for a specific certification \"\"\"\n    if not certification_dir:\n        certification_dir = 'exports/certifications/'\n    return os.path.join(certification_dir, '{0}.yaml'.format(certification))\n\n\ndef prepare_output_path(output_path):\n    \"\"\" Set output_path and create a content dir if needed \"\"\"\n    if not output_path:\n        output_path = 'exports/inventory'\n    utils.create_dir(output_path)\n    return output_path\n\n\ndef analyze_attribute(attribute):\n    \"\"\" Check how many elements an attribute has, otherwise return \"Missing\" \"\"\"\n    if attribute:\n        return len(attribute)\n    return \"Missing\"\n\n\ndef analyze_component(component):\n    \"\"\" Analyze a component to find gaps in governors and references \"\"\"\n    return {\n        'references': analyze_attribute(component.get('references')),\n        'governors': analyze_attribute(component.get('governors')),\n        'documentation_completed': component.get('documentation_complete'),\n    }\n\n\ndef catalog_control(inventory, control, standard_key, control_key):\n    \"\"\" Adds all the components in the control into the inventory\n    while determining the gaps \"\"\"\n    if 'justifications' in control:\n        for component in control['justifications']:\n            system = component.get('system', 'No System')\n            name = component.get('name', 'No Name')\n            # Catalog component in certification inventory\n            if system not in inventory[standard_key][control_key]:\n                inventory[standard_key][control_key][system] = []\n            inventory[standard_key][control_key][system].append(name)\n            # Catalog component in component inventory\n            analysis = analyze_component(component)\n            if system not in inventory['components']:\n                inventory['components'][system] = {}\n            inventory['components'][system][name] = analysis\n    else:\n        inventory[standard_key][control_key] = \"Missing Justifications\"\n\n\ndef build_inventory(certification_path):\n    \"\"\" Create an inventory of components for a specific certification \"\"\"\n    certification = utils.yaml_loader(certification_path)\n    inventory = {\n        'certification': certification.get('name'),\n        'components': {}\n    }\n    for standard_key in certification['standards']:\n        inventory[standard_key] = {}\n        for control_key in certification['standards'][standard_key]:\n            inventory[standard_key][control_key] = {}\n            control = certification['standards'][standard_key][control_key]\n            catalog_control(inventory, control, standard_key, control_key)\n    return inventory\n\n\ndef create_inventory(certification, certification_dir, output_path):\n    \"\"\" Creates an inventory yaml \"\"\"\n    certification_path = prepare_cert_path(certification, certification_dir)\n    if not os.path.exists(certification_path):\n        return None, \"{} certification not found\".format(certification)\n    output_path = prepare_output_path(output_path)\n    inventory = build_inventory(certification_path)\n    inventory_path = os.path.join(\n        output_path,\n        inventory.get('certification') + '.yaml'\n    )\n    utils.yaml_writer(inventory, inventory_path)\n    return inventory_path, None\n","sub_path":"src/renderers/inventory_builder.py","file_name":"inventory_builder.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"559329257","text":"import logging\nfrom dataclasses import dataclass\nfrom enum import Enum, auto\nfrom typing import Optional\n\nfrom pydantic import BaseModel\nfrom stories import Failure, Success, arguments, story\n\nfrom transit_odp.bods.domain import commands\nfrom transit_odp.bods.domain.entities import Organisation, Publication, Publisher, User\nfrom transit_odp.bods.interfaces.notifications import INotifications\nfrom transit_odp.bods.interfaces.unit_of_work import IUnitOfWork\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass SendFeedback:\n    \"\"\"Send feedback from the Consumer to the Publisher\"\"\"\n\n    def __call__(self, command):\n        with self.uow:\n            result = self.story.run(command=command)\n            if result.is_success:\n                self.uow.commit()\n\n    @story\n    @arguments(\"command\")\n    def story(I): # noqa\n        I.fetch_publication\n        I.check_publication_exists\n        I.fetch_organisation\n        I.check_organisation_exists_and_is_active\n        I.fetch_sender\n        I.get_sender_email\n        I.fetch_recipient\n        I.check_recipient_exists\n        I.send_notification\n\n    # Dependencies\n    uow: IUnitOfWork\n    notifications: INotifications\n\n    def fetch_publication(self, ctx):\n        ctx.publication = self.uow.publications.find(\n            publication_id=ctx.command.publication_id\n        )\n        return Success()\n\n    def check_publication_exists(self, ctx):\n        if ctx.publication is None:\n            return Failure(Errors.publication_not_found)\n        return Success()\n\n    def fetch_organisation(self, ctx):\n        ctx.organisation = self.uow.organisations.find(\n            organisation_id=ctx.publication.organisation_id\n        )\n        return Success()\n\n    def check_organisation_exists_and_is_active(self, ctx):\n        if ctx.organisation is None:\n            return Failure(Errors.organisation_not_found)\n        elif not ctx.organisation.is_active:\n            return Failure(Errors.organisation_is_inactive)\n        return Success()\n\n    def fetch_sender(self, ctx):\n        ctx.sender = self.uow.users.find(user_id=ctx.command.sender_id)\n        return Success()\n\n    def get_sender_email(self, ctx):\n        if ctx.command.anonymous:\n            ctx.sender_email = None\n        elif ctx.sender:\n            ctx.sender_email = ctx.sender.email\n        else:\n            return Failure(Errors.sender_not_found)\n        return Success()\n\n    def fetch_recipient(self, ctx):\n        ctx.recipient = self.uow.users.find(user_id=ctx.publication.contact_user_id)\n        return Success()\n\n    def check_recipient_exists(self, ctx):\n        if ctx.recipient is None:\n            return Failure(Errors.recipient_not_found)\n        return Success()\n\n    def send_notification(self, ctx):\n        self.notifications.send_feedback_notification(\n            dataset_id=ctx.publication.get_id(),\n            contact_email=ctx.recipient.email,\n            dataset_name=ctx.publication.live.dataset.name,\n            feedback=ctx.command.feedback,\n            developer_email=ctx.sender_email,\n        )\n        return Success()\n\n\n@SendFeedback.story.contract\nclass Context(BaseModel):\n    # Arguments\n    command: commands.SendFeedback\n\n    # State\n    publication: Publication\n    organisation: Optional[Organisation]\n    sender: User\n    sender_email: Optional[str]\n    recipient: Publisher\n\n\n@SendFeedback.story.failures\nclass Errors(Enum):\n    publication_not_found = auto()\n    organisation_not_found = auto()\n    organisation_is_inactive = auto()\n    sender_not_found = auto()\n    recipient_not_found = auto()\n","sub_path":"transit_odp/bods/service_layer/usecases/consumer/send_feedback.py","file_name":"send_feedback.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"216960991","text":"def fib(n):\n    if n == 0:\n        return 0\n    elif n == 1:\n        return 1\n    else:\n        return fib(n-1) + fib(n-2)\n\nprint(fib(10))\n\ndef ifib(n):\n    new, old = 1, 0\n    for i in range(n - 1):\n        new, old = old + new, new\n    return new\n\nprint(ifib(10))\n","sub_path":"bodenseo/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"136294568","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 6 10:20:35 2017\n\n@author: hannah.li\n\"\"\"\nimport pandas as pd\ns=0\nn=10\ndf = pd.read_csv('/home/hannah.li/Yuqing/Q_15_test_mw.csv')\n# print the dataframe in chunks of n rows\nfor i in range(int(len(df)/10)):\n    print(df[s:s+n])\n    s=s+n","sub_path":"testforpandas.py","file_name":"testforpandas.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"472327417","text":"\n'''\nseventh question\n\nreference for the distance calculation used in the Dist function:\nhttp://carlosdelfino.eti.br/cursoarduino/geoprocessamento/calculando-distancias-com-base-em-coordenadas-de-gps/\n\nthe menorDistancia function returns a sorted list of distances\n\nworking... it can be adapted to return whatever is needed, such as the LocalizacaoGeografia or an object for the corresponding health unit\n'''\n\nfrom math import sqrt\n\n\ndef menorDistancia(unit_health_ref, unit_health):\n    dist = []\n\n    for unit in unit_health:\n        DLA = abs(unit_health_ref.getLatitude() - unit.getLatitude())\n        DLO = abs(unit_health_ref.getLongitude() - unit.getLongitude())\n        DT = sqrt((DLA * 1.852) ** 2 + (DLO * 1.852) ** 2)\n        if DT > 0:\n            dist.append(DT)\n\n    # list.sort() sorts in place and returns None, so return a sorted copy instead\n    return sorted(dist)","sub_path":"pythoncourse/src/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"213260480","text":"\n# # D1: Scrape High-Resolution Mars’ Hemisphere Images and Titles\n\n# Imports and browser setup assumed by this script: Splinter for browsing\n# and BeautifulSoup (aliased as soup, matching the calls below) for parsing\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as soup\n\nbrowser = Browser('chrome')\n\n# 1. Use browser to visit the URL\nurl = 'https://marshemispheres.com/'\nbrowser.visit(url)\nhtml = browser.html\nmars_hemi_soup = soup(html, 'html.parser')\n\n\n# 2. Create a list to hold the images and titles.\nhemisphere_image_urls = []\n\n# 3. Write code to retrieve the image urls and titles for each hemisphere.\nparent_elem = mars_hemi_soup.find_all('div', class_='description')\n\nfor elem in parent_elem:\n\n    # get title\n    title = elem.find('h3').get_text()\n    # get link to the details page for each image\n    img_url_partial = elem.find('a').get('href')\n    img_url_complete = url + img_url_partial\n    # visit each link, parse and use soup for each site opened.\n    browser.visit(img_url_complete)\n    html = browser.html\n    img_new_page_soup = soup(html, 'html.parser')\n    # retrieve image url\n    parent_elem_new_site = img_new_page_soup.find('div', class_='downloads')\n    img_high_resol_href = parent_elem_new_site.ul.li.select_one('a').get('href')\n    img_url = url + img_high_resol_href\n\n    browser.back()\n\n    # insert information in hemisphere list\n    hemisphere_image_urls.append({'img_url': img_url, 'title': title})\n\n\n# 4. Print the list that holds the dictionary of each image url and title.\nprint(hemisphere_image_urls)\n\n# 5. Quit the browser\nbrowser.quit()\n\n\n\n","sub_path":"challenge_10/Mission_to_Mars_Challenge.py","file_name":"Mission_to_Mars_Challenge.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}