diff --git "a/6286.jsonl" "b/6286.jsonl" new file mode 100644--- /dev/null +++ "b/6286.jsonl" @@ -0,0 +1,680 @@ +{"seq_id":"498185797","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport os\nimport ROOT\n\n# ------------------------------------------------------------------------------\n# this class can be used to apply and additional cut during sys step\n# ------------------------------------------------------------------------------\nclass Skim(object):\n\n def __init__(self, cut):\n self.cut = cut\n self.cutFormula = None\n self.debug = 'XBBDEBUG' in os.environ\n\n # return True => keep event, return False => discard event\n def processEvent(self, tree):\n\n # initialize formula on the first event\n if self.cutFormula is None:\n self.tree = tree\n self.cutFormula = ROOT.TTreeFormula(\"sysnew_skim_cut\", self.cut, self.tree)\n if self.debug:\n print(\"DEBUG: initialized TTreeFormula for addtional cut during sys: {cut}\".format(cut=self.cut))\n\n # apply cut\n if self.cutFormula.GetNdata():\n return True if self.cutFormula.EvalInstance() else False\n else:\n return False\n\n","sub_path":"python/myutils/Skim.py","file_name":"Skim.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"467621398","text":"import RPi.GPIO as GPIO\nimport time, asyncio\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(37, GPIO.OUT)\n\nasync def blink(intensity, cycle_time):\n blinktime = intensity/100*cycle_time\n GPIO.output(37, GPIO.HIGH)\n time.sleep(float(blinktime))\n GPIO.output(37, GPIO.LOW)\n time.sleep(cycle_time-blinktime)","sub_path":"led_blink.py","file_name":"led_blink.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"442773255","text":"import pytest\n\nfrom openapi_core.schema.parameters.exceptions import (\n EmptyParameterValue, InvalidParameterValue,\n)\nfrom openapi_core.schema.parameters.enums import ParameterStyle\nfrom openapi_core.schema.parameters.models import Parameter\nfrom openapi_core.schema.schemas.models import Schema\n\n\nclass TestParameterInit(object):\n\n def test_path(self):\n param = Parameter('param', 'path')\n\n assert param.allow_empty_value is False\n assert param.style == ParameterStyle.SIMPLE\n assert param.explode is False\n\n def test_query(self):\n param = Parameter('param', 'query')\n\n assert param.allow_empty_value is False\n assert param.style == ParameterStyle.FORM\n assert param.explode is True\n\n def test_header(self):\n param = Parameter('param', 'header')\n\n assert param.allow_empty_value is False\n assert param.style == ParameterStyle.SIMPLE\n assert param.explode is False\n\n def test_cookie(self):\n param = Parameter('param', 'cookie')\n\n assert param.allow_empty_value is False\n assert param.style == ParameterStyle.FORM\n assert param.explode is True\n\n\nclass TestParameterCast(object):\n\n def test_deprecated(self):\n param = Parameter('param', 'query', deprecated=True)\n value = 'test'\n\n with pytest.warns(DeprecationWarning):\n result = param.cast(value)\n\n assert result == value\n\n def test_query_empty(self):\n param = Parameter('param', 'query')\n value = ''\n\n with pytest.raises(EmptyParameterValue):\n param.cast(value)\n\n def test_query_valid(self):\n param = Parameter('param', 'query')\n value = 'test'\n\n result = param.cast(value)\n\n assert result == value\n\n\nclass TestParameterUnmarshal(object):\n\n def 
test_query_valid(self):\n param = Parameter('param', 'query')\n value = 'test'\n\n result = param.unmarshal(value)\n\n assert result == value\n\n def test_query_allow_empty_value(self):\n param = Parameter('param', 'query', allow_empty_value=True)\n value = ''\n\n result = param.unmarshal(value)\n\n assert result == value\n\n def test_query_schema_type_invalid(self):\n schema = Schema('integer', _source={'type': 'integer'})\n param = Parameter('param', 'query', schema=schema)\n value = 'test'\n\n with pytest.raises(InvalidParameterValue):\n param.unmarshal(value)\n\n def test_query_schema_custom_format_invalid(self):\n def custom_formatter(value):\n raise ValueError\n schema = Schema(\n 'string',\n schema_format='custom',\n _source={'type': 'string', 'format': 'custom'},\n )\n custom_formatters = {\n 'custom': custom_formatter,\n }\n param = Parameter('param', 'query', schema=schema)\n value = 'test'\n\n with pytest.raises(InvalidParameterValue):\n param.unmarshal(value, custom_formatters=custom_formatters)\n","sub_path":"tests/unit/schema/test_parameters.py","file_name":"test_parameters.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"480871098","text":"# 2018/07/11\n# 2018-09-01 15:19\n# 运行有问题\n\n\nimport tensorflow as tf\nimport os # 用于访问系统的文件名和路径\nimport numpy as np\nimport matplotlib.image as mpimg # mpimg 用于读取图片\nimport tensorflow\n\ndata_path = './data/' # 这里可能有问题。 ./当前文件夹路径. 注意:最后面有一个/\n\ndef read_img(path):\n \"\"\"\n :param path: 图片文件夹路径\n :return: 图片像素点数组,图片标签数组\n \"\"\"\n imgs = [] # 二维数组,每一个元素为一个一维数组,表示图片的像素\n labels = [] # 一维数组,每一个元素表示一个标签\n cate = [path+x for x in os.listdir(path) if os.path.isdir(path+x)] #判断是不是文件夹\n print(cate)\n # os.listdir() 方法用于返回指定的文件夹包含的文件或文件夹的名字的列表\n # os.path.isdir()函数判断某一路径是否为目录\n for idx,i in enumerate(cate):\n for j in os.listdir(i):\n im = mpimg.imread(i+'/'+j) # 读入文件\n img = np.reshape(im, (28 * 28)) / 255. 
# 转换为1*(28*28)的一维数组\n # 归一化,非常重要,防止基数爆照,数小方便处理,会跑飞了,内存炸了\n # reshape变为矩阵,方便相乘\n imgs.append(img)\n labels.append(idx)\n\n # 将读取到的图片列表和标签列表转化为数组类型,并指定数据类型\n return np.asarray(imgs,np.float32),np.asarray(labels,np.int32)\n\ndata,label = read_img(data_path)\nlabel = np.reshape(label,(len(label),1)) # 转换为一个列向量\n\ndata = np.concatenate([data, label], axis = 1) #784变成785列。axis = 1表示在列的方向上拼接\n# 为什么要和标签拼接在一起?0到784表示数据,第785列表示标签\nprint(\"data的维度:\", np.shape(data))\n# 这里已经正确了!\n\n\n# 数据处理:\n# 打乱数据顺序\n# 将数据集分割为两部分,一部分用于训练,另一部分用于验证模型。\n\nnum_example = data.shape[0] # 表示一共有多少个训练数据\narr = np.arange(num_example)\nnp.random.shuffle(arr) # 打乱顺序\ndata = data[arr]\n\nratio = 0.8\ns = np.int(num_example*ratio)\n# 训练集\ntrain = data[:s]\n# 验证集?测试集\nval = data[s:]\n\n\n# 定义产生batch的生成器\ndef gen_batch(dataset, batchsize):\n # 非常建议自己写,数据处理要自己做,方便回头查错。经常手敲,尽量用低级用法,犯的错越多越好。用手以后再用高级用法\n \"\"\"根据设定的batchsize大小产生mini batch\n Args:\n dataset: 数据集\n batchsize: batchsize\n Generates:\n x: 输入\n y:输出\n \"\"\"\n for i in range(np.shape(dataset)[0] // batchsize):\n pos = i * batchsize\n x = dataset[pos:pos + batchsize, 0:-1] # 0到-1列\n y = dataset[pos:pos + batchsize, -1] # -1列为标签\n yield x, y\n\n # 数据集中剩余的部分\n remain = np.shape(dataset)[0] % batchsize\n if remain != 0: # 取余操作\n x, y = dataset[-remain:, 0:-1], dataset[-remain:, -1]\n yield x, y\n\n\n## 超参数设置\nlr = 1e-3\nbatchsize = 128\nepoch = 100\n\n# 建立计算图\ngraph = tf.Graph()\nwith graph.as_default():\n X = tf.placeholder(shape=(None, 784), dtype = tf.float32, name = \"X\" )\n Y = tf.placeholder(shape=(None, 1), dtype = tf.float32, name = \"Y\" )\n\n W = tf.Variable(tf.random_uniform([784,1], -1.0, 1.0), name = \"WeightMatrix\" ) # 维度为 1*2\n lgt = tf.matmul(X,W)\n output = tf.sigmoid(lgt)\n\n loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = Y, logits= lgt), name = \"calculate_loss\")\n opt = tf.train.GradientDescentOptimizer(lr).minimize(loss)\n\n prediction_result = tf.cast(tf.greater(output,0.5), dtype=tf.float32)\n error_rate = 1 - tf.reduce_mean(tf.cast(tf.equal(prediction_result, Y), dtype = tf.float32))\n\n# 建立计算图\nwith tf.Session(graph=graph) as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n\n step = 0\n for i in range(epoch):\n for x_,y_ in gen_batch(train,batchsize): # 循环的次数为train的大小整除batchsize\n Loss, err_rate,_ = sess.run([loss, error_rate, opt], {X:x_, Y:np.reshape(y_,(-1,1))}) # p.reshape(y_,(-1,1)) 行数自动确定,列数为一列\n if step % 50 == 0:\n print(\"Step: {}, loss: {}, error_rate: {}\".format(step, Loss, err_rate))\n step +=1\n print('train over')\n print(\"W的优化值为:\", sess.run(W))\n\n for x_, y_ in gen_batch(val, batchsize):\n Loss, err_rate = sess.run([loss, error_rate],{X:x_,Y:np.reshape(y_,(-1,1))})\n print( \"Loss: {}, error_rate: {}\".format(Loss, err_rate))\n\n\n# 定义进程并进行运算\n# 这一步将准备好的���据输入给运算图,对模型中的变量赋予了初值,进行计算和优化,训练,并且训练完成以后利用测试数据集对模型进行评估。\n\n\n\nsess = tf.Session()\nW = np.array(W) # W为模型训练得到的参数\nim = input(\"Please input picture's path:\") # 注意这里是输入路径,字符串??\nim = mpimg.imread(im)\nimgs = np.reshape(im, (1,784)) / 255. 
# 注意这里的维度,为什么是两个1??\nX = np.asarray(imgs, np.float32)\nlgt = tf.matmul(X, W)\noutput = tf.sigmoid(lgt)\nprediction = tf.cast(tf.greater(output, 0.5), dtype = tf.int32)\nprint(\"图片属于第{}类:\".format(sess.run(prediction)))","sub_path":"Logistic_regression_2.2.py","file_name":"Logistic_regression_2.2.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"84051588","text":"import pandas as pd\n\nfibonacci = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]\nseries_no_label = pd.Series(fibonacci)\nprint(series_no_label)\nprint(series_no_label[5]) \n\nnums = [10.1, 10.2, 10.3]\nlabels = [\"one\", \"two\", \"three\"]\nseries_labels = pd.Series(nums, index = labels)\nprint(series_labels)\nprint(series_labels['two'])\n\ncalories = {\"cheesecake\": 420, \"sorbet\": 380, \"cream\": 390}\nseries_dict = pd.Series(calories)\nprint(series_dict)","sub_path":"src/lessonXX.Pandas/sample_series.py","file_name":"sample_series.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"42920498","text":"#!/usr/bin/python\n\n# (c) 2020, NetApp, Inc\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'certified'}\n\n\nDOCUMENTATION = '''\nmodule: na_um_list_volumes\nshort_description: NetApp Unified Manager list volumes.\nextends_documentation_fragment:\n - netapp.um_info.netapp.um\nversion_added: '20.6.0'\nauthor: NetApp Ansible Team (@carchi8py) \n\ndescription:\n- List Volumes on AIQUM/OCUM.\n'''\n\nEXAMPLES = \"\"\"\n- name: List Volumes\n na_um_list_volumes:\n hostname: \"{{ hostname }}\"\n username: \"{{ username }}\"\n password: \"{{ password }}\"\n\"\"\"\n\nRETURN = \"\"\"\nrecords:\n description: Returns list of Volumes information\n returned: always\n type: list\n sample: [{'style': '...',\n 'svm':\n {'_links':\n {'self': {...}\n },\n '...'\n },\n 'qos': {...},\n 'name': '...',\n 'language': '...',\n 'space': {...},\n 'aggregates':\n [\n {...}\n ],\n 'tiering': {...},\n 'autosize': {...},\n 'cluster': {...},\n 'state': '...',\n 'create_time': '...',\n '_links':\n {'self':\n {'href': '...'\n }\n },\n 'key': '...',\n 'snapmirror': {...},\n 'snapshot_policy': {...},\n 'type': '...',\n 'uuid': '...'\n }]\n\"\"\"\n\nfrom ansible.module_utils.basic import AnsibleModule\nimport ansible_collections.netapp.um_info.plugins.module_utils.netapp as netapp_utils\nfrom ansible_collections.netapp.um_info.plugins.module_utils.netapp_module import NetAppModule\nfrom ansible_collections.netapp.um_info.plugins.module_utils.netapp import UMRestAPI\n\n\nclass NetAppUMVolume(object):\n ''' volumes initialize and class methods '''\n\n def __init__(self):\n self.argument_spec = netapp_utils.na_um_host_argument_spec()\n self.module = AnsibleModule(\n argument_spec=self.argument_spec,\n supports_check_mode=True\n )\n\n self.na_helper = NetAppModule()\n self.parameters = self.na_helper.set_parameters(self.module.params)\n\n self.restApi = UMRestAPI(self.module)\n\n def get_volumes(self):\n \"\"\"\n Fetch details of volumes.\n :return:\n Dictionary of current details if volumes found\n None if volumes is not found\n \"\"\"\n data = {}\n api = \"datacenter/storage/volumes\"\n message, error = self.restApi.get(api, data)\n if error:\n 
self.module.fail_json(msg=error)\n if message['total_records'] != 0:\n return message['records']\n return []\n\n def apply(self):\n \"\"\"\n Apply action to the volumes listing\n :return: None\n \"\"\"\n current = self.get_volumes()\n if current is not None:\n self.na_helper.changed = True\n self.module.exit_json(changed=self.na_helper.changed, msg=current)\n\n\ndef main():\n \"\"\"\n Create Volume class instance and invoke apply\n :return: None\n \"\"\"\n list_volumes_obj = NetAppUMVolume()\n list_volumes_obj.apply()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ansible_collections/netapp/um_info/plugins/modules/na_um_list_volumes.py","file_name":"na_um_list_volumes.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"231181835","text":"import pygame, random, time, sys\npygame.init()\n\nclass Level:\n def __init__(self, size):\n self.size = size\n self.map = []\n for y in range(size[1]):\n row = []\n for x in range(size[0]):\n row.append([\"#\", x, y, \"SOLID\"])\n\n self.map.append(row)\n\n def drunkGen(self):\n place = [random.randint(1, self.size[0] - 3), random.randint(1, self.size[1] - 3)]\n self.map[place[1]][place[0]] = [\"*\", place[0], place[1]]\n count = round((self.size[0] * self.size[1]) / 5)\n while count > 0:\n count -= 1\n move = [random.randint(-1, 1), random.randint(-1, 1)]\n while place[0] + move[0] in [0, self.size[0] - 1] or place[1] + move[1] in [0, self.size[1] - 1]:\n move = [random.randint(-1, 1), random.randint(-1, 1)]\n\n place[0] += move[0]\n place[1] += move[1]\n\n if self.map[place[1]][place[0]] == [\"*\", place[0], place[1]]:\n count += 1\n \n self.map[place[1]][place[0]] = [\"*\", place[0], place[1]]\n\nlevel = Level([15, 15])\nlevel.drunkGen()\n\nsize = [14, 22]\n# 7:11\n# 1:1.57142857143\n# 14:22\n\ntext = pygame.transform.scale(pygame.image.load(\"Text.png\"), [size[0] * 10, size[1] * 10])\n\ndef create(x, y):\n global size\n surf = pygame.Surface(size)\n surf.blit(text, [0, 0], [x * size[0], y * size[1], size[0], size[1]])\n return surf\n\nchars = {\"A\" : create(0, 0),\n \"B\" : create(1, 0),\n \"C\" : create(2, 0),\n \"D\" : create(3, 0),\n \"E\" : create(4, 0),\n \"F\" : create(5, 0),\n \"G\" : create(6, 0),\n \"H\" : create(7, 0),\n \"I\" : create(8, 0),\n \"J\" : create(9, 0),\n \"K\" : create(0, 1),\n \"L\" : create(1, 1),\n \"M\" : create(2, 1),\n \"N\" : create(3, 1),\n \"O\" : create(4, 1),\n \"P\" : create(5, 1),\n \"Q\" : create(6, 1),\n \"R\" : create(7, 1),\n \"S\" : create(8, 1),\n \"T\" : create(9, 1),\n \"U\" : create(0, 2),\n \"V\" : create(1, 2),\n \"W\" : create(2, 2),\n \"X\" : create(3, 2),\n \"Y\" : create(4, 2),\n \"Z\" : create(5, 2),\n \"0\" : create(6, 2),\n \"1\" : create(7, 2),\n \"2\" : create(8, 2),\n \"3\" : create(9, 2),\n \"4\" : create(0, 3),\n \"5\" : create(1, 3),\n \"6\" : create(2, 3),\n \"7\" : create(3, 3),\n \"8\" : create(4, 3),\n \"9\" : create(5, 3),\n \"#\" : create(6, 3),\n \"+\" : create(7, 3),\n \"-\" : create(8, 3),\n \"*\" : create(9, 3)}\n\nscreen = pygame.display.set_mode([1200, 800])\n\nwhile True:\n for row in level.map:\n for tile in row:\n screen.blit(chars[tile[0]], [tile[1] * size[0], tile[2] * size[1]])\n\n pygame.display.flip()\n\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n pygame.display.quit()\n 
sys.exit()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"264436096","text":"\r\n\r\ndef sumWithoutHighest(values):\r\n highest = values[0]\r\n total = 0\r\n for v in values:\r\n if v > highest:\r\n highest = v\r\n total += v\r\n return total - highest \r\n\r\n\r\n \r\nnums = [10,10,10,10,10,11] \r\ntotalLessHighest = sumWithoutHighest(nums)\r\nprint(\"Total of list less Highest value: \" , totalLessHighest)\r\n\r\n\r\n\r\n","sub_path":"python/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"631520718","text":"from .lib import *\n\n# ----- index function render the index.html template and give context struct ----- #\n@login_required\ndef index(request):\n config = get_config()\n Raspb_list = get_rasb_list(config, 0)\n context = {\n 'Raspb_list' : Raspb_list[config.offset:config.offset+20],\n 'Config_list' : Config.objects.all(),\n 'form' : ConfigSelectForm,\n 'current_config' : config.name\n }\n return (render(request, 'ooperbox/index.html', context)) #render index page\n\n\n# ----- function RaspbUpdateView to Handle Raspb's Form ----- #\nclass RaspbUpdateView(BSModalUpdateView): \n template_name = 'ooperbox/create_raspb.html'\n model = Raspb\n form_class = RaspbForm\n success_message = 'Success!'\n success_url = reverse_lazy('index')\n\n\n# ----- function ConfigCreateView to Handle Raspb's Form ----- #\nclass ConfigCreateView(BSModalCreateView):\n template_name = 'ooperbox/create_config.html'\n form_class = ConfigForm\n sucess_message = \"Config succesfully created\"\n success_url = reverse_lazy('index')\n\n\n# ----- reboot_rasp is the Reboot Function ----- #\ndef reboot_rasp(request, pk): \n rasp = Raspb.objects.get(pk=pk)\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(0.1)\n sock.sendto(\"reboot\".encode(), (rasp.ip, rasp.port))\n except:\n return(\"Fail\")\n rasp.is_playing=\"OFF\"\n rasp.save()\n return HttpResponseRedirect('/')\n\n# ----- Function just halt the stream, then call function play_vid to start it again ----- #\ndef reboot_strm(request, pk):\n rasp = Raspb.objects.get(pk=pk)\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(0.1)\n sock.sendto(\"kill\".encode(), (rasp.ip, rasp.port))\n except:\n return(\"Fail\")\n play_vid(rasp)\n return HttpResponseRedirect('/')\n\n# ----- restart_all_stream function call reboot_strm function on every Raspb in the list ----- #\ndef restart_all_stream(request):\n config = get_config()\n Raspb_list = Raspb.objects.all()\n i = 0\n while i < 20:\n if Raspb_list[config.offset+i].is_playing == \"LIVE\" or Raspb_list[config.offset+i].is_playing == \"READY\":\n reboot_strm(request, Raspb_list[config.offset+i].pk)\n i = i + 1\n return HttpResponseRedirect('/')\n\ndef delete_conf(request):\n config = Config.objects.get(pk=request.POST.get('name')) #Query the config that correspond to the Name selected\n i = 0\n Raspb_list = Raspb.objects.all()\n while i < 20:\n Raspb_list[config.offset].delete()\n i = i + 1\n i = 0\n config.delete()\n conf_list = Config.objects.all()\n for c in conf_list:\n c.offset = i * 20\n c.save()\n i = i + 1\n sel_c = Selected_config.objects.first() #if we dont have one we create new\n sel_c.config_id = 0\n sel_c.save()\n\ndef get_selected_conf(request):\n config = 
Config.objects.get(pk=request.POST.get('name')) #Query the config that correspond to the Name selected\n conf_list = Config.objects.all()\n conf_id = -1\n while conf_id < conf_list.count():\n conf_id = conf_id + 1\n if conf_list[conf_id].pk == config.pk:\n break\n sel_c = Selected_config.objects.all().first() #query the Selected config\n if not sel_c:\n sel_c = Selected_config(config_id=conf_id) #if we dont have one we create new\n sel_c.config_id = conf_id #change the Selected config to match the new Config selected by user\n sel_c.save()\n\n# ----- select_config function is used to handle Config selection ----- #\ndef select_config(request):\n if request.method == 'POST':\n if (request.POST.get(\"action\") == 'delete') :\n delete_conf(request)\n return HttpResponseRedirect('/')\n else:\n get_selected_conf(request)\n return HttpResponseRedirect('/')\n\n# ----- get_status_ajax function is called by ajax in template index.html -----#\n# ----- Fucntion below Are parsing function used in the play_vid fct ----- #\n# ----- it return a string with the status using \"/\" as separator ----- #\n@csrf_exempt\ndef get_status_ajax(request):\n config = get_config()\n rasp_list = get_rasb_list(config, 1)\n result = \"\"\n i = 0;\n while i < 20:\n result += rasp_list[config.offset+i].is_playing+\"/\"\n i += 1\n print(result)\n return HttpResponse(result)\n","sub_path":"oopercast/ooperbox_control/ooperbox/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"392759215","text":"import bisect\r\ntest_case = int(input())\r\ndef lower_bound(arr,val):\r\n mid = len(arr)//2\r\n if(arr[mid][0]>val and arr[mid-1][0]val):\r\n return lower_bound(arr[:mid],val)\r\n else:\r\n return lower_bound(arr[mid:],val)\r\ndef lower_bound2(arr,start,end,val):\r\n mid = (start+end)//2\r\n if(arr[mid][0]==val):\r\n return mid \r\n elif(arr[mid][0]>val):\r\n return lower_bound2(arr,start,mid,val)\r\n else:\r\n return lower_bound2(arr,mid+1,end,val)\r\n\r\n\r\ndef finder(arr,arr2,time):\r\n arr.sort()\r\n arr2.sort()\r\nmy_arr = [1,3,5,6,8,9]\r\ni = bisect.bisect_left(my_arr,2)\r\nprint(i)\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n# for i in range(test_case):\r\n# n_m = list(map(int,input().split()))\r\n# in_times = []\r\n# out_times = []\r\n# for j in range(n_m[0]):\r\n# temp = list(map(int,input().split()))\r\n# in_times.append(temp)\r\n# for j in range(n_m[1]):\r\n# finder(in_times,int(input()))\r\n \r\n","sub_path":"searching and sorting/chef_restraunt.py","file_name":"chef_restraunt.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"19942689","text":"import sys\ninput = sys.stdin.readline\n\nn, k = map(int, input().split()) # 크기, 말의 개수\ncolor = [list(map(int, input().split())) for _ in range(n)] # 보드 색깔\nhorseinfo = {} \nboard = [[[] for _ in range(n)] for _ in range(n)] \n\nfor i in range(k):\n x, y, d = map(int, input().split())\n board[x-1][y-1].append(i) # 말의 위치\n horseinfo[i] = [x-1, y-1, d] # 말의 좌표와 방향\n\ndx, dy = [0, 0, 0, -1, 1], [0, 1, -1, 0, 0]\n \ndef changedirection(d): # 방향 바꾸기\n if(d == 1):\n d = 2\n elif(d == 2):\n d = 1\n elif(d == 3):\n d = 4\n else:\n d = 3\n return d\n\ndef red(nx, ny, movehorse): # 빨간색\n global horseinfo, board\n board[nx][ny] += movehorse[::-1] # 뒤집어서 올리기\n for i in range(len(movehorse)):\n horseinfo[movehorse[i]][0] = nx\n horseinfo[movehorse[i]][1] = ny\n\ndef blue(x, y, h, 
movehorse): # 파란색\n global horseinfo, board\n horseinfo[h][2] = changedirection(horseinfo[h][2])\n d = horseinfo[h][2]\n nx = x + dx[d] # 거꾸로 좌표\n ny = y + dy[d]\n if(0<=nx= 4): # 길이가 4 이상\n print(cnt+1)\n return\n\n cnt += 1 # 1턴 증가\n\n if(cnt > 1000):\n print(-1)\n return\n\nsolution()","sub_path":"minjoo/삼성 SW 역량 테스트 기출 문제/17837.py","file_name":"17837.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"275144423","text":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for input_pipeline_ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.input_pipeline.python.ops import input_pipeline_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\nclass InputPipelineOpsTest(test.TestCase):\n\n def testObtainNext(self):\n with self.test_session():\n var = state_ops.variable_op([], dtypes.int64)\n state_ops.assign(var, -1).op.run()\n c = constant_op.constant([\"a\", \"b\"])\n sample1 = input_pipeline_ops.obtain_next(c, var)\n self.assertEqual(b\"a\", sample1.eval())\n self.assertEqual(0, var.eval())\n sample2 = input_pipeline_ops.obtain_next(c, var)\n self.assertEqual(b\"b\", sample2.eval())\n self.assertEqual(1, var.eval())\n sample3 = input_pipeline_ops.obtain_next(c, var)\n self.assertEqual(b\"a\", sample3.eval())\n self.assertEqual(0, var.eval())\n\n def testSeekNext(self):\n string_list = [\"a\", \"b\", \"c\"]\n with self.test_session() as session:\n elem = input_pipeline_ops.seek_next(string_list)\n session.run([variables.global_variables_initializer()])\n self.assertEqual(b\"a\", session.run(elem))\n self.assertEqual(b\"b\", session.run(elem))\n self.assertEqual(b\"c\", session.run(elem))\n # Make sure we loop.\n self.assertEqual(b\"a\", session.run(elem))\n\n # Helper method that runs the op len(expected_list) number of times, asserts\n # that the results are elements of the expected_list and then throws an\n # OutOfRangeError.\n def _assert_output(self, expected_list, session, op):\n for element in expected_list:\n self.assertEqual(element, session.run(op))\n with self.assertRaises(errors.OutOfRangeError):\n session.run(op)\n\n def testSeekNextLimitEpochs(self):\n string_list = [\"a\", \"b\", \"c\"]\n with self.test_session() as session:\n elem = input_pipeline_ops.seek_next(string_list, num_epochs=1)\n session.run([\n variables.local_variables_initializer(),\n variables.global_variables_initializer()\n ])\n self._assert_output([b\"a\", b\"b\", b\"c\"], session, elem)\n\n def 
testSeekNextLimitEpochsTwo(self):\n string_list = [\"a\", \"b\", \"c\"]\n with self.test_session() as session:\n elem = input_pipeline_ops.seek_next(string_list, num_epochs=2)\n session.run([\n variables.local_variables_initializer(),\n variables.global_variables_initializer()\n ])\n # Expect to see [a, b, c] two times.\n self._assert_output([b\"a\", b\"b\", b\"c\"] * 2, session, elem)\n\n\nif __name__ == \"__main__\":\n test.main()\n","sub_path":"Tensorflow_OpenCV_Nightly/source/tensorflow/contrib/input_pipeline/python/ops/input_pipeline_ops_test.py","file_name":"input_pipeline_ops_test.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"475159088","text":"from copy import deepcopy\nimport numpy as np\nimport torch\nimport random\n\nfrom torch import nn\nfrom torch.nn.functional import softmax\nfrom torch.autograd import Variable\n\n#from agent.lstm_forward import DuelingDQN\nfrom agent.forward import DuelingDQN\n\nfrom agent.access import Access\nfrom collections import deque\nfrom params import *\n\nimport sys\nsys.path.append(\"..\")\n\n# Ensure values are greater than epsilon to avoid numerical instability.\n_EPSILON = 1e-6\n\n\nclass Agent(object):\n def __init__(self, image_shape, output_size, is_training = True, capacity=int(6e6)):\n self.output_size = output_size\n self.access = Access(capacity)\n self.is_training = is_training\n self.value_net = DuelingDQN(image_shape, output_size)\n self.target_net = DuelingDQN(image_shape, output_size)\n self.value_net = nn.DataParallel(self.value_net)\n self.target_net = nn.DataParallel(self.target_net)\n self.i = 0\n \n # 自动使用gpu\n self.gpu = torch.cuda.is_available()\n if self.gpu:\n self.value_net.cuda()\n self.target_net.cuda()\n\n self.optimizer = torch.optim.Adam(self.value_net.parameters(),lr = 1e-4)\n \n def get_deterministic_policy(self, x):\n x = Variable(torch.from_numpy(x.astype(np.float32)))\n if not self.gpu:\n out = self.value_net(x).data.numpy()\n return np.argmax(out, axis=1)\n else:\n x = x.cuda()\n \n if self.is_training == True:\n self.value_net.train(mode = True)\n else:\n self.value_net.eval()\n \n out = self.value_net(x)\n out = out.cpu().data.numpy()\n self.i += 1\n #print(self.i)\n #print(out)\n return np.argmax(out, axis=1) \n\n def get_epsilon_policy(self, x, epsilon = 0.9):\n if np.random.uniform() > epsilon:\n return np.random.randint(self.output_size)\n else:\n return self.get_deterministic_policy(x)\n\n def optimize(self):\n batch = self.sample(BATCH_SIZE)\n if self.gpu:\n state, action, reward, done, next_state = [Variable(torch.from_numpy(np.float32(i))).cuda() for i in batch]\n action = action.type(torch.LongTensor).cuda()\n else:\n state, action, reward, done, next_state = [Variable(torch.from_numpy(np.float32(i))) for i in batch]\n action = action.type(torch.LongTensor)\n \n if self.is_training == True:\n self.value_net.train(mode = True)\n else:\n self.value_net.eval()\n\n value = self.value_net(state).gather(1, action.unsqueeze(1))\n #print(value)\n \n next_value = self.target_net(next_state)\n next_value = next_value.max(1)[0].view([-1, 1])\n \n value = value.squeeze(1)\n next_value = next_value.squeeze(1)\n \n target = done * reward + (1 - done) * (reward + GAMMA * next_value)\n \n #print(target)\n \n loss = (value - target.detach()).pow(2).mean()\n #print(loss)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n \n return loss\n\n def _update_target(self):\n # update target network 
parameters\n for t, s in zip(self.target_net.parameters(), self.value_net.parameters()):\n t.data.copy_(s.data)\n\n def append(self, *args):\n self.access.append(*args)\n\n def sample(self, batch_size = BATCH_SIZE):\n return self.access.sample(batch_size)\n\n def get_cache_len(self):\n return self.access.length\n \n def save_model(self , i = 0):\n value_net_path = './model/value_net' + str(i) + '.pkl'\n target_net_path = './model/target_net' + str(i) + '.pkl'\n \n torch.save(self.value_net, value_net_path)\n torch.save(self.target_net, target_net_path)\n\n #torch.save(self.value_net.state_dict(), './model/value_net_params.pkl')\n #torch.save(self.target_net.state_dict(), './model/target_net_params.pkl')\n\n def load_model(self, i =0):\n value_net_path = './model/value_net' + str(i) + '.pkl'\n target_net_path = './model/target_net' + str(i) + '.pkl'\n \n self.value_net = torch.load(value_net_path)\n self.target_net = torch.load(target_net_path)\n\n \n\n","sub_path":"agent/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"406602321","text":"import itertools\nN=int(input())\nl=[0]*5\nm=list('MARCH')\nfor i in range(N):\n S=input()\n if S[0] in m:\n l[m.index(S[0])]+=1\nans=0\nfor i in list(itertools.combinations(l,3)):\n m=1\n for j in i:\n m*=j\n ans+=m\nprint(ans)","sub_path":"Python_codes/p03425/s820464861.py","file_name":"s820464861.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"272191322","text":"import sys\r\nimport math\r\n\r\nlines = open(sys.argv[1], 'r')\r\nfor line in lines:\r\n line = line.replace('\\n', '').replace('\\r', '')\r\n if len(line) > 0:\r\n num = float(line)\r\n value1 = int(num)\r\n num = (num - value1) * 60.0\r\n value2 = int(num)\r\n num = (num - value2) * 60.0\r\n value3 = int(num)\r\n print(\"%d.%02d'%02d\\\"\" % (value1, value2, value3))\r\n \r\n\r\nlines.close()\r\n","sub_path":"Easy/Nice Angles/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"324104274","text":"from loader import dp, bot\nfrom FSM import user_states\nfrom keyboards.default import find_keyboard\nfrom utils import get_data\n\nfrom aiogram import types\nimport json\n\n@dp.message_handler(text=\"Поиск 🔎\", state=user_states.countries_page)\nasync def get_find_page(message: types.Message):\n await user_states.find_page.set()\n await bot.send_message(message.from_user.id,\n \"Введите название страны 🔽\",\n reply_markup=find_keyboard)\n\n\n@dp.message_handler(state=user_states.find_page)\nasync def find_statistic(message: types.Message):\n country = message.text.lower()\n with open(\"utils/countries_translate.json\") as file:\n countries = json.load(file);\n flag = False\n if country in countries.keys() or country in countries.values():\n flag = True\n if country == \"date\":\n flag = False\n\n if flag == True:\n if country.lower() in countries.keys():\n country = countries[country.lower()]\n await bot.send_message(message.from_user.id,\n get_data(country, message.text))\n else:\n await bot.send_message(message.from_user.id,\n \"У меня нет такой информации 
😔\")\n\n","sub_path":"src/handlers/userHandlers/find_country_handler.py","file_name":"find_country_handler.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"547672397","text":"from django.shortcuts import render, redirect\nfrom common.models import Language, Content\nfrom profiles.models import Profile\nfrom services.models import Contract, Job\n# Create your views here.\ndef create_translator(request):\n if request.user.is_superuser and request.user.is_active:\n languages = Language.objects.all()\n content_types = Content.objects.all()\n return render(request, 'administrator/create_employee.html',{\"languages\":languages, \"content_types\":content_types})\n else:\n return redirect(\"/login\") \n\ndef view_employee(request):\n if request.user.is_superuser and request.user.is_active:\n all_profiles = Profile.objects.all()\n return render(request, 'administrator/view_employee.html', {\"all_profiles\":all_profiles})\n else:\n return redirect(\"/login\") \n\ndef available_jobs(request):\n if request.user.is_superuser and request.user.is_active:\n active_jobs = Job.objects.all().filter(accepted=True, paid=True).exclude(status__status_name=\"DN\")\n return render(request, 'administrator/available_jobs.html', {\"active_jobs\":active_jobs})\n else:\n return redirect(\"/login\") \n\ndef available_jobs_details(request, job_id):\n if request.user.is_superuser and request.user.is_active:\n job_obj = Job.objects.get(job_id=job_id)\n contracts = Contract.objects.all().filter(job=job_obj)\n all_employees = Profile.objects.all().filter(title=\"TR\")\n return render(request, 'administrator/available_jobs_details.html', {\"contracts\":contracts, \"all_employees\":all_employees})\n else:\n return redirect(\"/login\") \n\ndef accepted_contracts(request):\n if request.user.is_superuser and request.user.is_active:\n accepted_contracts = Contract.objects.all().filter(is_signed=True, completed=False)\n print(accepted_contracts)\n return render(request, 'administrator/accepted_contracts.html', {\"accepted_contracts\":accepted_contracts})\n else:\n return redirect(\"/login\") \n\ndef completed_contracts(request):\n if request.user.is_superuser and request.user.is_active:\n payment_completed = Contract.objects.all().filter(completed=True, paid=True)\n payment_due = Contract.objects.all().filter(completed=True, paid=False)\n return render(request, 'administrator/completed_contracts.html', {\"payment_completed\":payment_completed, \"payment_due\":payment_due})\n else:\n return redirect(\"/login\") \n\ndef translator_completed_contracts_due(request, username):\n if request.user.is_superuser and request.user.is_active:\n from users.models import User\n if User.objects.filter(username=username).exists():\n user_obj = User.objects.get(username=username)\n completed_contracts_due = Contract.objects.all().filter(completed=True, paid=False, profile=user_obj)\n return render(request, 'administrator/completed_contracts_due.html', {\"completed_contracts_due\":completed_contracts_due, \"username\":user_obj.get_full_name()})\n else:\n return render(request, 'base/404.html')\n else:\n return redirect(\"/login\") \n\ndef translator_completed_contracts_paid(request, username):\n if request.user.is_superuser and request.user.is_active:\n from users.models import User\n if User.objects.filter(username=username).exists():\n user_obj = User.objects.get(username=username)\n completed_contracts_paid = 
Contract.objects.all().filter(paid=True, profile=user_obj)\n return render(request, 'administrator/completed_contracts_paid.html', {\"completed_contracts_paid\":completed_contracts_paid, \"username\":user_obj.get_full_name()})\n else:\n return render(request, 'base/404.html')\n else:\n return redirect(\"/login\") \n\ndef translator_completed_contracts_all(request, username):\n if request.user.is_superuser and request.user.is_active:\n from users.models import User\n if User.objects.filter(username=username).exists():\n user_obj = User.objects.get(username=username)\n completed_contracts_all = Contract.objects.all().filter(profile=user_obj)\n return render(request, 'administrator/translator_completed_contracts.html', {\"completed_contracts_all\":completed_contracts_all, \"username\":user_obj.get_full_name()})\n else:\n return render(request, 'base/404.html')\n else:\n return redirect(\"/login\") \n\ndef translator_accepted_contracts(request, username):\n if request.user.is_superuser and request.user.is_active:\n from users.models import User\n if User.objects.filter(username=username).exists():\n user_obj = User.objects.get(username=username)\n accepted_contracts = Contract.objects.all().filter(is_signed=True, completed=False, profile=user_obj)\n return render(request, 'administrator/translator_accepted_contracts.html', {\"accepted_contracts\":accepted_contracts, \"username\":user_obj.get_full_name()})\n else:\n return render(request, 'base/404.html')\n else:\n return redirect(\"/login\") \n\ndef view_employee_details(request, username):\n if request.user.is_active:\n from users.models import User\n if User.objects.filter(username=username).exists():\n user_obj = User.objects.get(username=username)\n profile_obj = Profile.objects.get(user=user_obj)\n return render(request, 'administrator/view_employee_details.html', {\"profile_obj\":profile_obj})\n else:\n return render(request, 'base/404.html')\n else:\n return redirect(\"/login\") \n\ndef edit_employee(request):\n if request.user.is_superuser and request.user.is_active:\n return render(request, 'administrator/edit_employee.html')\n \n else:\n return redirect(\"/login\") \n","sub_path":"administrator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"174695462","text":"class Node:\n\n # A node have these attributes:\n # leaf - a boolean that says if the node is a leaf or not\n # keys - a list of keys in this node\n # children - a list of children of this node\n # parent - the Node object that is parent of this object\n # next - used only in leaf nodes. 
A \"pointer\" to the next leaf on sequence set\n\n def __init__(self, leaf=False, parent=None, nex=None):\n self.leaf = leaf\n self.keys = []\n self.children = []\n self.parent = parent\n self.next = nex\n\n # A way to show the node in print\n def __str__(self):\n if self.leaf:\n return \"[Leaf] Keys: {0}, Parent: {1}\\n\".format(self.keys, self.parent.keys) + ''.join([child.__str__() for child in self.children])\n else:\n return \"[Node] Keys: {0}, Parent: {1}\\n\".format(self.keys, self.parent.keys) + ''.join([child.__str__() for child in self.children])\n","sub_path":"BPlusTree/Node.py","file_name":"Node.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"133861793","text":"# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom apps.centre.forms import CentreForm\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\nfrom apps.centre.models import Centre\n\ndef Edit(request,centre_id):\n template_file = \"centre-edit.html\"\n context = RequestContext(request)\n\n obj = Centre.objects.get(pk=centre_id)\n\n form = CentreForm(request.POST or None, request.FILES or None,instance=obj)\n if form.is_valid():\n mymodel = form.save()\n\n msg = u\"Centre saved successfully\"\n messages.add_message(request, messages.SUCCESS, msg)\n return redirect(\"centre:home\")\n\n params = {\n 'form': form,\n 'centre_id': centre_id,\n }\n\n return render_to_response (\n template_file,\n params,\n context_instance = context\n )\n","sub_path":"backup/apps/centre/views/edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"158133404","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport shutil\nimport cv2\nimport glob\n\n\n# 批量缩放文件夹下的图片\ndef resize_all_file(folder_path, width, height, copy_to_dst_folder=None, printing=False):\n file_list = [file for file in glob.glob(\"{}/*.jpg\".format(folder_path))]\n index = 0\n for image_path in file_list:\n image_name = os .path.basename(image_path)\n index += 1\n # print(file_name)\n image = cv2.imread(image_path)\n image = cv2.resize(image, dsize=(width, height))\n if copy_to_dst_folder is None:\n rename = os.path.join(folder_path, \"{}\".format(image_name))\n else:\n if not os.path.exists(copy_to_dst_folder):\n os.makedirs(copy_to_dst_folder)\n rename = os.path.join(copy_to_dst_folder, \"{}\".format(image_name))\n cv2.imwrite(rename, image)\n if printing:\n print(\"resize {:s} \".format(image_name))\n\n\ndef main():\n folder_path = r\"./images\"\n save_folder = r\"./re_image\"\n resize_all_file(folder_path, 160, 144, save_folder, printing=True)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/dataset_process/resize_image.py","file_name":"resize_image.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"75695269","text":"import os\nfrom solver import print_matrix\n\ndef accepted():\n ans = input()\n if( ans == 'yes' or ans == 'y' or ans == 'Y' or ans == 'Yes'):\n return True\n return False\n\ndef read_matrix(file):\n f = open(file,'r')\n sudoku = []\n for line in f:\n line = line[1:len(line)-2]\n fields = line.split(', ')\n row = []\n row = [int(x) for x in fields]\n sudoku.append(row)\n f.close()\n return sudoku\n\n\ndef check_sudoku(sudoku):\n print(\"Is the 
following detect matrix correct ?\\n(Yes/No)\")\n print_matrix(sudoku)\n if not accepted():\n print(\"Please edit the matrix in the opened .txt file and then save\")\n while (True):\n f = open(\"temp.txt\",\"w\")\n for line in sudoku:\n f.write(str(line))\n f.write('\\n')\n f.close()\n os.system(\"gedit temp.txt\")\n print(\"Entered matrix is : \")\n sudoku = read_matrix(\"temp.txt\")\n print_matrix(sudoku)\n print(\"Do you wish to edit above matrix ? \\n (Yes/No)\")\n if not accepted():\n break\n os.system(\"rm temp.txt\")\n return sudoku","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"73984771","text":"#!/usr/bin/env python\n\n# Name: getmagiccardprices.py\n# Authors: Geoff, Matthew Sheridan\n# Date: 04 October 2012\n# Revision: 01 May 2016\n# Copyright: (c) Geoff 2012\n# Licence: \n\n\"\"\"Usage:\n getmagiccardprices.py [-do] []\n getmagiccardprices.py -h | --help\n getmagiccardprices.py --version\n\nArguments:\n input Input file. A list of exact card names and quantities, delimited\n by semicolon. By default, this is exported from a deckstats.net\n collection as a CSV file.\n output Output file. Will be a CSV formatted for Excel and will contain\n the list of card names; quantities; and low, mid, and high\n prices. This file will be appended if it already exists.\n [default:INPUT_out.csv]\n\nOptions:\n -d Enable debugging output.\n -h --help Show this help message.\n -o Overwrite OUTPUT file instead of appending.\n --version Display program version number.\n\"\"\"\n\n__authors__ = \"Geoff, Matthew Sheridan\"\n__credits__ = [\"Geoff\", \"Matthew Sheridan\"]\n__date__ = \"28 March 2016\"\n__version__ = \"0.4e\"\n__status__ = \"Development\"\n\nimport os\nimport sys\nimport codecs\nfrom mtgs_error import *\nfrom mtgs_getprices import GetPrices\nfrom docopt import docopt\n\nDEBUG_FILENAME = \"debug.log\"\n\nif __name__ == \"__main__\":\n debug = False\n overwrite = False\n\n # Process arguments and check for errors.\n args = docopt(__doc__, help=True, version=__version__)\n\n if args[\"-d\"]:\n debug = True\n if args[\"-o\"]:\n overwrite = True\n\n # Input file:\n read_path = os.path.normpath(os.getcwd() + \"/\" + args[\"\"])\n if not os.path.isfile(read_path):\n raise InvalidFileError(read_path)\n\n # Output file; optional arg, default \"_out.csv\":\n if args[\"\"]:\n write_path = os.path.normpath(os.getcwd() + \"/\" + args[\"\"])\n else:\n write_path = os.path.splitext(read_path)[0] + \"_out.csv\"\n\n # Get prices!\n gp = GetPrices(debug)\n gp.get_prices(read_path, write_path, overwrite)\n print(gp.summary())\n\n exit(0)\n","sub_path":"getmagiccardprices.py","file_name":"getmagiccardprices.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"374021241","text":"\nimport webapp2\nimport views\n\nconfig = {'webapp2_extras.jinja2': {'template_path': 'app/templates'}}\n\napp = webapp2.WSGIApplication(\n routes=[\n ('/', views.Home),\n ('/about', views.About),\n ('/newpost', views.NewPost),\n ('/post/([0-9]+)', views.PostPage)],\n config=config,\n debug=False)\n\nadmin = webapp2.WSGIApplication(\n routes=[\n ('/admin', views.AdminPage),\n ('/admin/delete/([0-9]+)', views.DeletePost),\n ('/admin/edit/([0-9]+)', views.EditPost)],\n config=config,\n 
debug=False)\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"51806649","text":"from collections import deque\nimport itertools\n\nwith open(\"day9_input.txt\", \"r\") as puzzle_input:\n intcode_input = [int(c) for c in puzzle_input.read().split(\",\")]\n\nopcode_size = [0, 4, 4, 2, 2, 3, 3, 4, 4, 2]\n\ndef get_param_index(intcode, index, mode, relative_base):\n if mode == 0:\n actual_index = intcode[index]\n elif mode == 1:\n actual_index = index\n elif mode == 2:\n actual_index = relative_base + intcode[index]\n\n return actual_index\n\ndef set_output(intcode, output, end, value):\n while end <= output:\n intcode.append(0)\n end += 1\n\n intcode[output] = value\n\n return end\n\ndef run_program(intcode, input_generator):\n i = 0\n end = len(intcode)\n params = [0, 0, 0]\n output_params = [0, 0, 0]\n relative_base = 0\n\n while i < end:\n mode_flags, opcode = divmod(intcode[i], 100)\n\n # 99 = BREAK\n if opcode == 99:\n break\n\n for j in range(1, opcode_size[opcode]):\n # Get the param index\n param_index = get_param_index(intcode, i + j, mode_flags % 10, relative_base)\n\n # For parameters, support reading outside the bounds of the program by inserting 0\n params[j - 1] = 0 if param_index >= end else intcode[param_index]\n\n # For output params, we want to store the index, not get the value at that index\n output_params[j - 1] = param_index\n mode_flags //= 10\n\n # ADD\n if opcode == 1:\n end = set_output(intcode, output_params[2], end, params[0] + params[1])\n # MULTIPLY\n elif opcode == 2:\n end = set_output(intcode, output_params[2], end, params[0] * params[1])\n # INPUT\n elif opcode == 3:\n end = set_output(intcode, output_params[0], end, next(input_generator))\n # OUTPUT\n elif opcode == 4:\n yield params[0]\n # JUMP IF TRUE\n elif opcode == 5:\n if params[0] != 0:\n i = params[1]\n continue\n # JUMP IF FALSE\n elif opcode == 6:\n if params[0] == 0:\n i = params[1]\n continue\n # LESS THAN\n elif opcode == 7:\n end = set_output(intcode, output_params[2], end, int(params[0] < params[1]))\n # EQUAL\n elif opcode == 8:\n end = set_output(intcode, output_params[2], end, int(params[0] == params[1]))\n # RELATIVE BASE\n elif opcode == 9:\n relative_base += params[0]\n else:\n print(\"Unhandled opcode \", opcode)\n break\n\n i += opcode_size[opcode]\n\ndef part1():\n for value in run_program(intcode_input.copy(), (n for n in [1])):\n output_value = value\n\n return output_value\n\ndef part2():\n for value in run_program(intcode_input.copy(), (n for n in [2])):\n output_value = value\n\n return output_value\n\nprint(\"Part 1 Result: \", part1())\nprint(\"Part 2 Result: \", part2())","sub_path":"day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"218155901","text":"from suzieq.sqobjects.basicobj import SqObject\nimport pandas as pd\nfrom suzieq.utils import humanize_timestamp\n\n\nclass BgpObj(SqObject):\n def __init__(self, **kwargs):\n super().__init__(table='bgp', **kwargs)\n self._valid_get_args = ['namespace', 'hostname', 'columns', 'state',\n 'vrf', 'peer', 'asn', 'query_str']\n self._valid_arg_vals = {\n 'state': ['Established', 'NotEstd', 'dynamic', ''],\n 'status': ['all', 'pass', 'fail'],\n }\n self._valid_assert_args = ['namespace', 'hostname', 'vrf', 'status']\n\n def aver(self, **kwargs):\n \"\"\"Assert that 
the BGP state is OK\"\"\"\n\n if not self.ctxt.engine:\n raise AttributeError('No analysis engine specified')\n try:\n self.validate_assert_input(**kwargs)\n except Exception as error:\n df = pd.DataFrame({'error': [f'{error}']})\n return df\n\n return self.engine.aver(**kwargs)\n\n def humanize_fields(self, df: pd.DataFrame, subset=None) -> pd.DataFrame:\n '''Humanize the timestamp and boot time fields'''\n if df.empty:\n return df\n\n if 'estdTime' in df.columns:\n df['estdTime'] = humanize_timestamp(df.estdTime,\n self.cfg.get('analyzer', {})\n .get('timezone', None))\n\n return df\n","sub_path":"suzieq/sqobjects/bgp.py","file_name":"bgp.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"90717133","text":"from sys import argv\nfrom pygame_functions import *\nimport os, sys \nscreenSize(320, 320)\nWARNING = makeSprite(\"image.png\")\nmoveSprite(WARNING, -30, -30)\nshowSprite(WARNING)\nscript = argv\nname = str(script[0])\ncmd = 'start file.txt'\nos.system(cmd)\nos.mkdir('clone')\n\nwhile True:\n os.system(r'copy message.txt clone')\n os.system(r'copy Replicating.py clone')\n os.system('shutdown -r')\n","sub_path":"Replicator/Replicator.py","file_name":"Replicator.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"490768556","text":"from apps.person import Staff, Fellow\nfrom apps.rooms import Office, LivingSpace\nfrom apps.database import *\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\nimport random\nimport os\n\n\nclass Dojo(object):\n def __init__(self):\n self.total_rooms = []\n self.total_people = []\n self.available_offices = []\n self.available_living_space = []\n self.waiting_for_office_allocation = []\n self.waiting_for_living_space_allocation = []\n\n def create_room(self, room_type, room_name):\n\n \"\"\" creates a unique new room space \"\"\"\n\n for room in room_name:\n if room in [room.room_name for room in self.total_rooms]:\n print('sorry that room name already exists')\n else:\n if room_type == \"office\":\n new_office = Office(room)\n self.total_rooms.append(new_office)\n print('An office called {0} has been successfully created!'.format(new_office.room_name))\n elif room_type == \"living\":\n new_living_space = LivingSpace(room)\n self.total_rooms.append(new_living_space)\n print('A living space called {0} has been successfully created!'.format(new_living_space.room_name))\n\n def add_person(self, first_name, last_name, role, accommodation=\"N\"):\n\n \"\"\" adds a new person to the system \"\"\"\n\n person_name = first_name + \" \" + last_name\n if person_name in [person.person_name for person in self.total_people]:\n return \"That Name is already in the system. Please enter different name or use middle name\"\n\n if role != \"FELLOW\" and role != \"STAFF\":\n return \"You've entered an invalid role. Please choose either FELLOW or STAFF\"\n\n else:\n if role == \"FELLOW\":\n new_person = Fellow(person_name, accommodation)\n self.total_people.append(new_person)\n self.check_availability()\n if self.available_offices:\n self.allocate_room(new_person, room=\"office\")\n else:\n self.waiting_for_office_allocation.append(new_person)\n print('sorry no offices available at the moment. 
please try again later')\n if accommodation == 'Y':\n if not self.available_living_space:\n self.waiting_for_living_space_allocation.append(new_person)\n print('sorry no living space available at the moment. please try again later ')\n else:\n self.allocate_room(new_person, room=\"living\")\n if role == \"STAFF\":\n new_person = Staff(person_name)\n self.total_people.append(new_person)\n self.check_availability()\n if self.available_offices:\n self.allocate_room(new_person, room=\"office\")\n else:\n self.waiting_for_office_allocation.append(new_person)\n print('sorry no offices available at the moment. please try again later')\n if accommodation == 'Y':\n print('Sorry living space is for fellows only')\n return 'proceed'\n\n def check_availability(self):\n\n \"\"\" checks if rooms have vacant spaces\"\"\"\n\n if self.total_rooms:\n for room in self.total_rooms:\n if room.type == \"office\":\n if len(room.occupants) == room.maximum_occupants:\n if room in self.available_offices:\n self.available_offices.remove(room)\n elif len(room.occupants) < room.maximum_occupants:\n if room not in self.available_offices:\n self.available_offices.append(room)\n elif room.type == \"living\":\n if len(room.occupants) == room.maximum_occupants:\n if room in self.available_living_space:\n self.available_living_space.remove(room)\n elif len(room.occupants) < room.maximum_occupants:\n if room not in self.available_living_space:\n self.available_living_space.append(room)\n\n def allocate_room(self, new_person, room):\n\n \"\"\" Allocates vacant office or living space to person\"\"\"\n if room == \"office\":\n office = random.choice(self.available_offices)\n office.occupants.append(new_person)\n print('{0} has been allocated the office {1}'.format(new_person.person_name, office.room_name))\n\n else:\n living_space = random.choice(self.available_living_space)\n living_space.occupants.append(new_person)\n print('{0} has been allocated the living spaces {1}'.format(new_person.person_name, living_space.room_name))\n\n def print_room(self, room):\n \"\"\" checks if room name given exists in the list of total rooms.\n if so it prints out a statement that highlights the room name,\n the room type and occupants.\n \"\"\"\n if room not in [room.room_name for room in self.total_rooms]:\n print(\"Sorry.That rooms does not exist.\")\n else:\n for room_to_check in self.total_rooms:\n if room == room_to_check.room_name:\n if len(room_to_check.occupants) > 0:\n people = []\n for occupant in room_to_check.occupants:\n people.append(occupant.person_name)\n str1 = ', '.join(str(e) for e in people)\n print(\"{0} space {1} contains {2}\".format(room_to_check.type, room_to_check.room_name, str1))\n else:\n print(\"{0} space {1} contains no occupants\".format(room_to_check.type, room_to_check.room_name))\n\n def print_allocations(self, filename=None):\n \"\"\"\n this method checks if a room exists and if the room has occupants.\n if so it prints out a statement highlighting the room name and the names of its occupants\n \"\"\"\n if not self.total_rooms:\n print(\"Sorry.No rooms exist. 
Please create one\")\n else:\n for room in self.total_rooms:\n if len(room.occupants) > 0:\n people = []\n for occupant in room.occupants:\n people.append(occupant.person_name)\n str1 = ', '.join(str(e) for e in people)\n if filename:\n file = open(filename + \".txt\", \"a\")\n file.write(\"\\n\" + \"{} \\n\".format(room.room_name))\n file.write(\"\\n\" + \"------------------------------------------\\n\")\n file.write(\"\\n\" + \"{}\".format(str1) + \"\\n\")\n file.close()\n\n print(\"\\n\"\n \"{} \\n\".format(room.room_name),\n \"\\n\"\n \"------------------------------------------\"\n \"\\n\"\n \"{0}\".format(str1),\n \"\\n\"\n )\n else:\n if filename:\n file = open(filename + \".txt\", \"a\")\n file.write(\"\\n\" + \"{} \\n\".format(room.room_name))\n file.write(\"\\n\" + \"------------------------------------------\\n\")\n file.write(\"This {} space contains no occupants\".format(room.type) + \"\\n\")\n file.close()\n\n print(\"\\n\"\n \"{} \\n\".format(room.room_name),\n \"\\n\"\n \"------------------------------------------\\n\"\n \"\\n\"\n \"This {} space contains no occupants\".format(room.type),\n \"\\n\"\n )\n\n def print_unallocated(self, filename=None):\n \"\"\"\n this method checks if there are people in the dojo who are unallocated rooms\n either because a certain type of room doesn't exist or the room requested is full.\n it prints out a statement highlighting the people who lack rooms and the type of room\n they lack.\n \"\"\"\n\n if len(self.waiting_for_office_allocation) == 0 and len(self.waiting_for_living_space_allocation) == 0:\n print(\"There are no people who lack Rooms\")\n\n if len(self.waiting_for_office_allocation) > 0:\n people = []\n for person in self.waiting_for_office_allocation:\n people.append(person.person_name)\n str1 = ', '.join(str(e) for e in people)\n\n print(\"\\n\"\n \"people who lack offices \\n\",\n \"\\n\"\n \"------------------------------------------\\n\"\n \"\\n\"\n \"{}\".format(str1),\n \"\\n\"\n )\n\n if filename:\n print(\"printing file...\")\n file = open(filename + \".txt\", \"a\")\n file.write(\"\\n\" + \"people who lack offices \\n\")\n file.write(\"\\n\" + \"------------------------------------------\\n\")\n file.write(\"{}\".format(str1) + \"\\n\")\n file.close()\n\n if len(self.waiting_for_living_space_allocation) > 0:\n people = []\n for person in self.waiting_for_living_space_allocation:\n people.append(person.person_name)\n str1 = ', '.join(str(e) for e in people)\n\n print(\"\\n\"\n \"people who lack Living spaces \\n\",\n \"\\n\"\n \"------------------------------------------\\n\"\n \"\\n\"\n \"{}\".format(str1),\n \"\\n\"\n )\n\n if filename:\n print(\"printing file...\")\n file = open(filename + \".txt\", \"a\")\n file.write(\"\\n\" + \"people who lack Living spaces \\n\")\n file.write(\"\\n\" + \"------------------------------------------\\n\")\n file.write(\"{}\".format(str1) + \"\\n\")\n file.close()\n\n def reallocate_person(self, first_name, last_name, new_room_name):\n person_identifier = first_name + \" \" + last_name\n\n if person_identifier not in [person.person_name for person in self.total_people]:\n return \"Sorry that person does not exist\"\n\n if new_room_name not in [room.room_name for room in self.total_rooms]:\n return \"Sorry that room does not exist\"\n\n for person in self.total_people:\n if person_identifier == person.person_name:\n person_reallocating = person\n\n rooms_occupied = []\n\n for room in self.total_rooms:\n if room.room_name == new_room_name:\n room_to_relocate = room\n if 
person_reallocating in room.occupants:\n rooms_occupied.append(room)\n\n if len(room_to_relocate.occupants) == room_to_relocate.maximum_occupants:\n return \"sorry that room is full\"\n\n else:\n if room_to_relocate.type == \"office\":\n if person_reallocating in self.waiting_for_office_allocation:\n self.waiting_for_office_allocation.remove(person_reallocating)\n room_to_relocate.occupants.append(person_reallocating)\n return \"{0} has been successfully allocated {1} {2}\" \\\n .format(person_reallocating.person_name, room_to_relocate.type, room_to_relocate.room_name)\n else:\n for room_occupied in rooms_occupied:\n if room_occupied.room_name == room_to_relocate.room_name:\n return \"sorry. you cannot reallocate to the same room\"\n if room_occupied.type == room_to_relocate.type:\n room_occupied.occupants.remove(person_reallocating)\n room_to_relocate.occupants.append(person_reallocating)\n return \"{0} have successfully been reallocated from {1} to {2}.\" \\\n .format(person_reallocating.person_name, room_occupied.room_name,\n room_to_relocate.room_name)\n\n if room_to_relocate.type == \"living\":\n if person_reallocating.role == \"STAFF\":\n return \"Sorry.staff cannot be allowed in living space\"\n if person_reallocating in self.waiting_for_living_space_allocation:\n self.waiting_for_living_space_allocation.remove(person_reallocating)\n room_to_relocate.occupants.append(person_reallocating)\n return \"{0} has been successfully allocated {1} {2}\" \\\n .format(person_reallocating.person_name, room_to_relocate.type, room_to_relocate.room_name)\n else:\n for room_occupied in rooms_occupied:\n if room_occupied.room_name == room_to_relocate.room_name:\n return \"sorry. you cannot reallocate to the same room\"\n if room_occupied.type == room_to_relocate.type:\n room_occupied.occupants.remove(person_reallocating)\n room_to_relocate.occupants.append(person_reallocating)\n return \"{0} have successfully been reallocated from {1} to {2}.\" \\\n .format(person_reallocating.person_name, room_occupied.room_name,\n room_to_relocate.room_name)\n\n def load_people(self, filename):\n if os.path.isfile(filename + \".txt\"):\n file = open(filename + \".txt\").readlines()\n for person in file:\n string = person.split()\n first_name = string[0]\n last_name = string[1]\n role = string[2]\n if len(string) > 3:\n accommodation = string[3]\n else:\n accommodation = \"N\"\n self.add_person(first_name, last_name, role, accommodation)\n else:\n return \"Sorry that file does not exist\"\n\n def save_state(self, db_name=None):\n \"\"\" This method saves data from app into database\"\"\"\n\n if not db_name:\n engine = create_engine('sqlite:///dojo.db')\n\n else:\n db = db_name + '.db'\n engine = create_engine('sqlite:///{}'.format(db))\n\n Base.metadata.create_all(engine)\n session = sessionmaker(bind=engine)\n new_session = session()\n\n # loop through all rooms\n for space in self.total_rooms:\n room = Room()\n\n room.room_name = space.room_name\n room.room_type = space.type\n room.maximum_occupants = space.maximum_occupants\n room.current_occupants = len(space.occupants)\n\n # check if room name exists\n room_name_exists = new_session.query(Room).filter(Room.room_name == room.room_name).first()\n\n if not room_name_exists:\n new_session.add(room)\n new_session.commit()\n\n # loop through all people\n for individual in self.total_people:\n person = People()\n\n person.name = individual.person_name\n person.role = individual.role\n person.accomodation = individual.accomodation\n\n for space in self.total_rooms:\n 
if individual in space.occupants and space.type == \"office\":\n person.office_allocated = space.room_name\n if individual in space.occupants and space.type == \"living\":\n person.living_space_allocated = space.room_name\n\n # check if person_name exists in database\n person_name_exists = new_session.query(People).filter(People.name == person.name).first()\n\n if not person_name_exists:\n\n new_session.add(person)\n new_session.commit()\n\n return 'You have saved data to the database'\n\n def load_state(self, db_name):\n \"\"\" Loads data from database into from app \"\"\"\n if os.path.isfile(db_name + \".db\"):\n db = db_name + '.db'\n engine = create_engine('sqlite:///{}'.format(db))\n\n session = sessionmaker(bind=engine)\n new_session = session()\n\n people = new_session.query(People).all()\n rooms = new_session.query(Room).all()\n\n for room in rooms:\n room_name = room.room_name\n room_type = room.room_type\n\n if room_type == \"office\":\n load_office = Office(room_name)\n self.total_rooms.append(load_office)\n\n if room_type == \"living\":\n load_living_space = LivingSpace(room_name)\n self.total_rooms.append(load_living_space)\n\n for person in people:\n person_name = person.name\n person_role = person.role\n person_accomodation = person.accomodation\n office_allocated = person.office_allocated\n living_space_allocated = person.living_space_allocated\n\n if person_role == \"FELLOW\":\n load_person = Fellow(person_name, person_accomodation)\n self.total_people.append(load_person)\n\n if not office_allocated:\n self.waiting_for_office_allocation.append(load_person)\n\n if not living_space_allocated and person_accomodation == \"Y\":\n self.waiting_for_living_space_allocation.append(load_person)\n\n if office_allocated:\n for room in self.total_rooms:\n if room.room_name == office_allocated:\n room.occupants.append(load_person)\n\n if living_space_allocated:\n for room in self.total_rooms:\n if room.room_name == living_space_allocated:\n room.occupants.append(load_person)\n\n if person_role == \"STAFF\":\n load_person = Staff(person_name)\n self.total_people.append(load_person)\n\n if not office_allocated:\n self.waiting_for_office_allocation.append(load_person)\n\n if office_allocated:\n for room in self.total_rooms:\n if room.room_name == office_allocated:\n room.occupants.append(load_person)\n\n return 'database has been loaded'\n else:\n return 'Sorry that database file cannot be found'\n","sub_path":"dojo.py","file_name":"dojo.py","file_ext":"py","file_size_in_byte":19153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"186881137","text":"from heapq import heappop, heappush\n\nINF = 1e+10\n\n\ndef read_data():\n N, M = map(int, input().split())\n edges = []\n for m in range(M):\n s, t, w = map(int, input().split())\n edges.append([s, t, w])\n\n return N, M, edges\n\n\ndef gen_w_adj_list(edges, N):\n adj_list = []\n for i in range(N):\n adj_list.append([])\n\n for edge in edges:\n # edge : [target, distance]\n adj_list[edge[0]].append([edge[1], edge[2]])\n # edge : [target, distance]\n adj_list[edge[1]].append([edge[0], edge[2]])\n\n return adj_list\n\n\ndef prim(N, start, adj_list):\n\n states = []\n dists = []\n parents = []\n for i in range(N):\n states.append(-1)\n dists.append(INF)\n parents.append(-1)\n\n states[start] = 0\n dists[start] = 0\n\n pq = [(0, start)]\n\n while True:\n try:\n u = heappop(pq)\n except ValueError:\n break\n\n states[u[1]] = 1\n\n for t, d in adj_list[u[1]]:\n if states[t] != 1 and dists[t] > 
d:\n dists[t] = d\n parents[t] = u[1]\n states[t] = 0\n heappush(pq, (d, t))\n\n return dists\n\n\ndef main():\n N, M, edges = read_data()\n adj_list = gen_w_adj_list(edges, N)\n start = 0\n dists = prim(N, start, adj_list)\n print(dists)\n dist_sum = 0\n for i in dists:\n dist_sum += int(i)\n print(dist_sum)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"kyopro/aizu/GRL/GRL_2_A_MinimumSpannigTree_Prim.py","file_name":"GRL_2_A_MinimumSpannigTree_Prim.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"443526664","text":"import numpy as np\nimport pandas as pd\nimport scipy.special\nimport matplotlib.pyplot\n\nclass neuralNetwork:\n def __init__(self,inputnodes,hiddennodes,outputnodes,learningrate):\n self.inodes = inputnodes\n self.hnodes = hiddennodes\n self.onodes = outputnodes\n self.lr = learningrate\n self.wih = (np.random.normal(0.0,pow(self.hnodes,-0.5),(self.hnodes,self.inodes)))\n self.who = (np.random.normal(0.0,pow(self.onodes,-0.5),(self.onodes,self.hnodes)))\n self.activationfunction = lambda x:scipy.special.expit(x)\n\n pass\n \n def train(self,inputs_list,targets_list):\n inputs=np.array(inputs_list,ndmin=2).T\n targets = np.array(targets_list,ndmin=2).T\n \n hidden_inputs = np.dot(self.wih,inputs)\n hidden_outputs = self.activationfunction(hidden_inputs)\n final_inputs = np.dot(self.who,hidden_outputs)\n final_outputs = self.activationfunction(final_inputs)\n \n\n output_errors = targets-final_outputs\n hidden_errors = np.dot(self.who.T,output_errors)\n\n self.who += self.lr*np.dot((output_errors*final_outputs*(1-final_outputs)),np.transpose(hidden_outputs))\n self.wih += self.lr*np.dot((hidden_errors*hidden_outputs*(1-hidden_outputs)),np.transpose(inputs))\n \n def query(self,inputs_list):\n inputs = np.array(inputs_list,ndmin=2).T\n hidden_inputs = np.dot(self.wih,inputs)\n hidden_outputs = self.activationfunction(hidden_inputs)\n final_inputs = np.dot(self.who,hidden_outputs)\n final_outputs = self.activationfunction(final_inputs)\n return final_outputs\n \n\n\n","sub_path":"legacy/dlsd/src/models/makeYourOwnNeuralNetwork.py","file_name":"makeYourOwnNeuralNetwork.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"463063263","text":"from multiprocessing import Pool\nimport pandas as pd\nimport gzip, glob\ntry:\n # Python 2\n from urlparse import urlparse\nexcept:\n # Python 3\n from urllib.parse import urlparse\n\n####################\n# Useful functions #\n####################\ndef apply_inplace(df, field, fun):\n return pd.concat([df.drop(field, axis=1), df[field].apply(fun)], axis=1)\n\ndef parse_url(url):\n try:\n o = urlparse(url)\n return o.scheme + \"://\" + o.netloc\n except ValueError:\n #print(\"pb with url : \",url)\n return url\n\n###################\n# Function to map #\n###################\ndef worker(filename):\n \"\"\"\n Convert raw file into DataFrame\n \"\"\"\n with gzip.open(filename, 'r') as f:\n #content = [x.decode().strip('\\n') for x in f.readlines()]\n df_rows = []\n date_old = ''\n for line in f:\n #for line in content:\n x = line.rstrip('\\n').split('\\t')\n if x[0] == 'P':\n post_url = x[1]\n elif x[0] == 'T':\n date = x[1]\n elif x[0] == 'L':\n hyperlink = x[1]\n row = [post_url, date, hyperlink]\n if date != date_old:\n df_rows.append(row)\n date_old = date\n df = pd.DataFrame(df_rows, columns=['From', 'Date', 'To'])\n df['Date'] = 
pd.to_datetime(df['Date'])\n df = apply_inplace(df, 'From', parse_url)\n df = apply_inplace(df, 'To', parse_url)\n df.to_csv(\"df_\"+filename[7:14]+\".csv\",index=False)\n\n\nif __name__ == \"__main__\":\n import glob\n names = glob.glob(\"quotes*\")\n\n pool = Pool(processes=len(names))\n pool.map(worker, names)","sub_path":"datasets/memetracker/raw2df.py","file_name":"raw2df.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"21628137","text":"print(\"Once upon a time . . .\")\n######\n# TREENODE CLASS\n######\n\nclass TreeNode:\n def __init__(self, story_piece):\n self.story_piece = story_piece\n self.choices = []\n\n def add_child(self, node):\n self.choices.append(node)\n\n def traverse(self):\n story_node = self # assign story_node to self\n print(story_node.story_piece) # print out story_node's story_piece\n\n while story_node.choices != []: # while story_node has choices:\n choice = int(input(\"Enter 1 or 2 to continue the story: \")) # get the user's choice using input()\n if not choice in [1, 2]: # if the choice is invalid\n print(\"Invalid Choice! Please enter 1 or 2: \") # tell the user\n else: # if the choice is valid\n chosen_index = choice - 1\n chosen_child = story_node.choices[chosen_index]\n print(chosen_child.story_piece)\n\n story_node = chosen_child # set choice as the new story_node\n\n\n######\n# VARIABLES FOR TREE\n######\n\nstory = \\\n \"\"\"\n You are in a forest clearing.\n There is a path to the left.\n A bear emerges from the trees and roars!\n Do you:\n 1 ) Roar back!\n 2 ) Run to the left...\n \"\"\"\nstory_root = TreeNode(story)\n\n\nstory_a = \\\n \"\"\"\n The bear is startled and runs away.\n Do you:\n 1 ) Shout 'Sorry bear!'\n 2 ) Yell 'Hooray!'\n \"\"\"\n\nchoice_a = TreeNode(story_a)\n\n\nstory_b = \\\n \"\"\"\n You come across a clearing full of flowers.\n The bear follows you and asks 'what gives?'\n Do you:\n 1 ) Gasp 'A talking bear!'\n 2 ) Explain that the bear scared you.\n \"\"\"\n\nchoice_b = TreeNode(story_b)\n\n\nstory_a_1 = \\\n \"\"\"\n The bear returns and tells you\n it's been a rough week.\n After making peace with a talking bear,\n he shows you the way out of the forest.\n\n YOU HAVE ESCAPED THE WILDERNESS.\n \"\"\"\nchoice_a_1 = TreeNode(story_a_1)\n\n\nstory_a_2 = \\\n \"\"\"\n The bear returns and tells you that\n bullying is not okay before leaving you alone\n in the wilderness.\n\n YOU REMAIN LOST.\n \"\"\"\nchoice_a_2 = TreeNode(story_a_2)\n\n\nstory_b_1 = \\\n \"\"\"\n The bear is unamused. After smelling the flowers,\n it turns around and leaves you alone.\n\n YOU REMAIN LOST.\n \"\"\"\nchoice_b_1 = TreeNode(story_b_1)\n\n\nstory_b_2 = \\\n \"\"\"\n The bear understands and apologizes\n for startling you.\n Your new friend shows you a\n path leading out of the forest.\n\n YOU HAVE ESCAPED THE WILDERNESS.\n \"\"\"\nchoice_b_2 = TreeNode(story_b_2)\n\n\nstory_root.add_child(choice_a)\nstory_root.add_child(choice_b)\n\nchoice_a.add_child(choice_a_1)\nchoice_a.add_child(choice_a_2)\n\nchoice_b.add_child(choice_b_1)\nchoice_b.add_child(choice_b_2)\n\n\nuser_choice = input(\"What is your name? 
\\n_> \")\nprint(f\"\\nWelcome {user_choice}!\")\n\n######\n# TESTING AREA\n######\n\nstory_root.traverse()\n","sub_path":"codecademy_problems/Wilderness_Escape.py","file_name":"Wilderness_Escape.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"306065136","text":"#!/usr/bin/env python\n\nimport itertools\nfrom loki import loki_source\n\n\nclass Source_ucsc_ecr(loki_source.Source):\n\t\"\"\"\n\tA class to load the pairwise alignments between species as ECRs from the \n\tUCSC inter-species alignments\n\t\"\"\"\n\t\n\t\n\t_remhost = \"hgdownload.cse.ucsc.edu\"\n\t_remPath = \"goldenPath/hg19/phastCons46way/\"\n\t_comparisons = {\"vertebrate\" : \"\", \"placentalMammals\" : \"placental.\" , \"primates\" : \"primates.\" }\n\t_min_sz = 100\n\t_min_pct = 0.7\n\t_max_gap = 50\n\t_chr_list = ('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','X','Y')\n\t\n\t\n\t@classmethod\n\tdef getVersionString(cls):\n\t\treturn '2.0 (2013-02-14)'\n\t#getVersionString()\n\t\n\t\n\t@classmethod\n\tdef getOptions(cls):\n\t\treturn {\n\t\t\t'size': 'An integer defining the minimum length of an ECR (default: 100)',\n\t\t\t'identity' : 'A float defining the minimum identity of an ECR (default: 0.7)',\n\t\t\t'gap' : 'An integer defining the maximum gap length below the identity threshold (default: 50)'\n\t\t}\n\t#getOptions()\n\t\n\t\n\tdef validateOptions(self, options):\n\t\t\"\"\"\n\t\tValidate the options\n\t\t\"\"\"\n\t\tfor o,v in options.iteritems():\n\t\t\ttry:\n\t\t\t\tif o == 'size':\n\t\t\t\t\tself._min_sz = int(v)\n\t\t\t\telif o == 'identity':\n\t\t\t\t\tself._min_pct = float(v)\n\t\t\t\telif o == 'gap':\n\t\t\t\t\tself._max_gap = int(v)\n\t\t\t\telse:\n\t\t\t\t\treturn \"unknown option '%s'\" % o\n\t\t\texcept ValueError:\n\t\t\t\treturn \"Cannot parse '%s' parameter value - given '%s'\" % (o,v)\n\t\t\n\t\treturn True\n\t#validateOptions()\n\t\n\t\n\tdef download(self, options):\n\t\t\"\"\"\n\t\tDownload the files\n\t\t\"\"\"\n\t\tfile_dict = dict(((sp + \".chr\" + ch + \".phastCons.txt.gz\", self._remPath + sp + \"/chr\" + ch + \".phastCons46way.\" + v + \"wigFix.gz\") for (sp, v) in self._comparisons.iteritems() for ch in self._chr_list))\n\t\tfile_dict.update(dict(((sp + \".chrMT.phastCons.txt.gz\", self._remPath + sp + \"/chrM.phastCons46way.\" + v + \"wigFix.gz\") for (sp, v) in self._comparisons.iteritems())))\n\t\t\n\t\tself.downloadFilesFromFTP(self._remhost,file_dict)\n\t#download()\n\t\n\t\n\tdef update(self, options):\n\t\t\"\"\"\n\t\tLoad the data from all of the files\n\t\t\"\"\"\n\t\tself.log(\"deleting old records from the database ...\")\n\t\tself.deleteAll()\n\t\tself.log(\" OK\\n\")\n\t\t\n\t\t# Add a namespace\n\t\tecr_ns = self.addNamespace(\"ucsc_ecr\")\n\t\t\n\t\t# Add a type of \"ecr\"\n\t\tecr_typeid = self.addType(\"ecr\")\n\t\t\n\t\t# Add a type of \"ecr_group\"\n\t\tecr_group_typeid = self.addType(\"ecr_group\")\n\t\t\n\t\t# Make sure the '' ldprofile exists\n\t\tecr_ldprofile_id = self.addLDProfile('', 'no LD adjustment')\n\t\t\n\t\t# Add a containment relationship\n\t\trel_id = self.addRelationship(\"contains\")\t\t\t\n\t\t\n\t\tfor sp in self._comparisons:\n\t\t\tself.logPush(\"processing ECRs for \" + sp + \" ...\")\n\t\t\tdesc = \"ECRs for \" + sp\n\t\t\tlabel = \"ecr_\" + sp\n\t\t\t\n\t\t\t# Add the group for this species (or comparison)\n\t\t\tecr_gid = self.addTypedGroups(ecr_group_typeid, [(label, 
desc)])[0]\n\t\t\tself.addGroupNamespacedNames(ecr_ns, [(ecr_gid, label)])\n\t\t\t\n\t\t\tchr_grp_ids = []\n\t\t\tfor ch in self._chr_list + (\"MT\",):\n\t\t\t\tch_id = self._loki.chr_num[ch]\n\t\t\t\tself.log(\"processing Chromosome \" + ch + \" ...\")\n\t\t\t\tf = self.zfile(sp + \".chr\" + ch + \".phastCons.txt.gz\")\n\t\t\t\tcurr_band = 1\n\t\t\t\tnum_regions = 0\n\t\t\t\tdesc = \"ECRs for \" + sp + \" on Chromosome \" + ch\n\t\t\t\tchr_grp_ids.append(self.addTypedGroups(ecr_group_typeid, [(\"ecr_%s_chr%s\" % (sp, ch), desc)])[0])\n\t\t\t\tself.addGroupNamespacedNames(ecr_ns, [(chr_grp_ids[-1], \"ecr_%s_chr%s\" % (sp, ch))])\n\t\t\t\tband_grps = []\n\t\t\t\tgrp_rid = {}\n\t\t\t\tfor regions in self.getRegions(f):\n\t\t\t\t\tlabel = \"ecr_%s_chr%s_band%d\" % (sp, ch, curr_band)\n\t\t\t\t\tdesc = \"ECRs for \" + sp + \" on Chromosome \" + ch + \", Band %d\" % (curr_band,)\n\t\t\t\t\tnum_regions += len(regions)\n\t\t\t\t\t\n\t\t\t\t\tif regions:\n\t\t\t\t\t\tband_grps.append((label, desc))\n\t\t\t\t\t\n\t\t\t\t\t# Add the region itself\n\t\t\t\t\treg_ids = self.addTypedBiopolymers(ecr_typeid, ((self.getRegionName(sp, ch, r), '') for r in regions))\n\t\t\t\t\t# Add the name of the region\n\t\t\t\t\tself.addBiopolymerNamespacedNames(ecr_ns, zip(reg_ids, (self.getRegionName(sp, ch, r) for r in regions)))\n\t\t\t\t\t# Add the region Boundaries\n\t\t\t\t\t# This gives a generator that yields [(region_id, (chrom_id, start, stop)) ... ]\n\t\t\t\t\tregion_bound_gen = zip(((i,) for i in reg_ids), ((ch_id, r[0], r[1]) for r in regions))\n\t\t\t\t\tself.addBiopolymerLDProfileRegions(ecr_ldprofile_id, (tuple(itertools.chain(*c)) for c in region_bound_gen))\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tif regions:\n\t\t\t\t\t\tgrp_rid[band_grps[-1]] = reg_ids\n\t\t\t\t\t\t#Add the region to the group\n\t\t\t\t\t\t#self.addGroupBiopolymers(((band_gids[-1], r_id) for r_id in reg_ids))\n\t\t\t\t\t\n\t\t\t\t\tcurr_band += 1\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tband_gids = self.addTypedGroups(ecr_group_typeid, band_grps)\n\t\t\t\tself.addGroupNamespacedNames(ecr_ns, zip(band_gids, (r[0] for r in band_grps)))\n\t\t\t\tgid_rid = []\n\t\t\t\tfor i in xrange(len(band_gids)):\n\t\t\t\t\tgid_rid.extend(((band_gids[i], rid) for rid in grp_rid[band_grps[i]]))\n\t\t\t\t\n\t\t\t\tself.addGroupBiopolymers(gid_rid)\n\t\t\t\t\n\t\t\t\tself.addGroupRelationships(((chr_grp_ids[-1], b, rel_id, 1) for b in band_gids))\n\t\t\t\t\n\t\t\t\tself.log(\"OK (%d regions found in %d bands)\\n\" % (num_regions, curr_band - 1))\n\t\t\t\n\t\t\tself.addGroupRelationships(((ecr_gid, c, rel_id, 1) for c in chr_grp_ids))\n\t\t\t\n\t\t\tself.logPop(\"... 
OK\\n\")\n\t\t\n\t\t# store source metadata\n\t\tself.setSourceBuilds(None, 19) # TODO: check for latest FTP path rather than hardcoded /goldenPath/hg19/phastCons46way/\n\t#update()\n\t\n\t\n\tdef getRegionName(self, species, ch, region):\n\t\t\"\"\"\n\t\tReturns a string representation of the name\n\t\t\"\"\"\n\t\treturn species + \":chr\" + ch + \":\" + str(region[0]) + \"-\" + str(region[1])\n\t#getRegionName()\n\t\n\t\n\tdef getRegions(self, f):\n\t\t\"\"\"\n\t\tYields the regions that meets the thresholds with a given maximum gap\n\t\t\"\"\"\n\t\trunning_sum = 0\n\t\tn_pos = 0\n\t\tcurr_gap = 0\n\t\tcurr_pos = 1\n\t\tcurr_start = 1\n\t\tcurr_end = 0\n\t\tstep = 1\n\t\t\n\t\tline = f.next()\n\t\tcurr_band = []\n\t\t\n\t\tfor l in f:\n\t\t\ttry:\n\t\t\t\tp = float(l)\n\t\t\t\tif p >= self._min_pct:\n\t\t\t\t\t#If this is the 1st time we crossed the threshold, start the counters\n\t\t\t\t\tif curr_gap != 0 and running_sum / float(n_pos) < self._min_pct:\n\t\t\t\t\t\tif curr_end- curr_start >= self._min_sz:\n\t\t\t\t\t\t\tcurr_band.append((curr_start, curr_end))\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Restart the region tracking\n\t\t\t\t\t\trunning_sum = 0\n\t\t\t\t\t\tn_pos = 0\n\t\t\t\t\t\n\t\t\t\t\tif n_pos == 0:\n\t\t\t\t\t\tcurr_start = curr_pos\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\tcurr_end = curr_pos\n\t\t\t\t\trunning_sum += p\n\t\t\t\t\tn_pos += 1\n\t\t\t\t\tcurr_gap = 0\n\t\t\t\t# If this is true, we're searching a gap\t\t\t\t\n\t\t\t\telif n_pos != 0:\n\t\t\t\t\t#print curr_gap, curr_end - curr_start\n\t\t\t\t\t# If we are in an acceptable gap, don't add on to the end\n\t\t\t\t\tif curr_gap < self._max_gap:\n\t\t\t\t\t\trunning_sum += p\n\t\t\t\t\t\tn_pos += 1\n\t\t\t\t\t\tcurr_gap += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\t# If it's big enough, add it (we ran off the end of the gap)\n\t\t\t\t\t\tif curr_end - curr_start > self._min_sz:\n\t\t\t\t\t\t\tcurr_band.append((curr_start, curr_end))\n\t\t\t\t\t\tn_pos = 0\n\t\t\t\t\t\tcurr_start = 0\n\t\t\t\t\t\tcurr_end = 0\n\t\t\t\t\t\trunning_sum = 0\n\t\t\t\t\t\tcurr_gap = 0\n\t\t\t\t# otherwise, just keep on trucking\t\t\n\t\t\t\tcurr_pos += step\n\t\t\texcept ValueError:\n\t\t\t\t# At this point, we have a format line\n\t\t\t\td = dict((v.split('=',2) for v in l.split() if v.find('=') != -1))\n\t\t\t\t\n\t\t\t\t# If this is moving us to a different place, we have to restart,\n\t\t\t\t# o/w just keep on trucking\n\t\t\t\tif int(d['start']) != curr_pos or int(d['step']) != step:\n\t\t\t\t\t\n\t\t\t\t\tif curr_end - curr_start > self._min_sz and running_sum / float(n_pos) >= self._min_pct and curr_gap < self._max_gap:\n\t\t\t\t\t\tcurr_band.append((curr_start, curr_end))\n\t\t\t\t\t\n\t\t\t\t\tyield curr_band\n\t\t\t\t\tcurr_band = []\t\n\t\n\t\t\t\t\trunning_sum = 0\n\t\t\t\t\tn_pos = 0\n\t\t\t\t\tcurr_gap = 0\n\t\t\t\t\tcurr_pos = int(d['start'])\n\t\t\t\t\tcurr_start = int(d['start'])\n\t\t\t\t\tstep = int(d['step'])\n\t\t\t\t\tcurr_end = 0\n\t\t\t\t\n\t\t\n\t\t# Check on the last region...\n\t\tif curr_end - curr_start > self._min_sz and running_sum / float(n_pos) >= self._min_pct and curr_gap < self._max_gap:\n\t\t\t curr_band.append((curr_start, curr_end))\n\t\t\n\t\tyield curr_band\n\t#getRegions()\n\t\n#Source_ucsc_ecr\n","sub_path":"tags/2.0.0/loki/loaders/loki_source_ucsc_ecr.py","file_name":"loki_source_ucsc_ecr.py","file_ext":"py","file_size_in_byte":7823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"169744990","text":"\"\"\"\ngraph module defines the knowledge representations 
files\n\nA Graph has following methods:\n\n* adjacent(node_1, node_2)\n - returns true if node_1 and node_2 are directly connected or false otherwise\n* neighbors(node)\n - returns all nodes that is adjacency from node\n* add_node(node)\n - adds a new node to its internal data structure.\n - returns true if the node is added and false if the node already exists\n* remove_node\n - remove a node from its internal data structure\n - returns true if the node is removed and false if the node does not exist\n* add_edge\n - adds a new edge to its internal data structure\n - returns true if the edge is added and false if the edge already existed\n* remove_edge\n - remove an edge from its internal data structure\n - returns true if the edge is removed and false if the edge does not exist\n\"\"\"\n\nfrom io import open\nfrom operator import itemgetter\n\n\ndef construct_graph_from_file(graph, file_path):\n with open(file_path) as file:\n next(file)\n for line in file:\n graph.add_edge(getEdge(line))\n return graph\n\n\ndef getEdge(line):\n value = line.strip().split(\":\")\n return Edge(Node(int(value[0])), Node(int(value[1])), int(value[2]))\n\n\nclass Node(object):\n \"\"\"Node represents basic unit of graph\"\"\"\n\n def __init__(self, data):\n self.data = data\n\n def __str__(self):\n return 'Node({})'.format(self.data)\n\n def __repr__(self):\n return 'Node({})'.format(self.data)\n\n def __eq__(self, other_node):\n return self.data == other_node.data\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash(self.data)\n\n\nclass Edge(object):\n \"\"\"Edge represents basic unit of graph connecting between two edges\"\"\"\n\n def __init__(self, from_node, to_node, weight):\n self.from_node = from_node\n self.to_node = to_node\n self.weight = weight\n\n def __str__(self):\n return 'Edge(from {}, to {}, weight {})'.format(self.from_node, self.to_node, self.weight)\n\n def __repr__(self):\n return 'Edge(from {}, to {}, weight {})'.format(self.from_node, self.to_node, self.weight)\n\n def __eq__(self, other_node):\n return self.from_node == other_node.from_node and self.to_node == other_node.to_node and self.weight == other_node.weight\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.from_node, self.to_node, self.weight))\n\n\nclass AdjacencyList(object):\n \"\"\"\n AdjacencyList is one of the graph representation which uses adjacency list to\n store nodes and edges\n \"\"\"\n\n def __init__(self):\n # adjacencyList should be a dictonary of node to edges\n self.adjacency_list = {}\n\n def adjacent(self, node_1, node_2):\n if node_1 in self.adjacency_list:\n allEdges = []\n for edge in self.adjacency_list[node_1]:\n allEdges.append(edge.to_node)\n return node_2 in allEdges\n return False\n\n def neighbors(self, node):\n neighbors = []\n if node in self.adjacency_list:\n for edge in self.adjacency_list[node]:\n neighbors.append(edge.to_node)\n return neighbors\n\n def add_node(self, node):\n if node not in self.adjacency_list:\n self.adjacency_list[node] = []\n return True\n return False\n\n def remove_node(self, node):\n for nodes in self.adjacency_list:\n for edge in self.adjacency_list[nodes]:\n if edge.to_node.__eq__(node):\n self.remove_edge(edge)\n\n if node in self.adjacency_list:\n del self.adjacency_list[node]\n return True\n\n return False\n\n def add_edge(self, edge):\n if edge.from_node not in self.adjacency_list:\n self.add_node(edge.from_node)\n\n if edge not in 
self.adjacency_list[edge.from_node]:\n self.adjacency_list[edge.from_node].append(edge)\n return True\n\n return False\n\n def remove_edge(self, edge):\n if edge.from_node in self.adjacency_list:\n if edge in self.adjacency_list[edge.from_node]:\n self.adjacency_list[edge.from_node].remove(edge)\n return True\n return False\n\n\nclass AdjacencyMatrix(object):\n def __init__(self):\n # adjacency_matrix should be a two dimensions array of numbers that\n # represents how one node connects to another\n self.adjacency_matrix = []\n # in additional to the matrix, you will also need to store a list of Nodes\n # as separate list of nodes\n self.nodes = []\n\n def adjacent(self, node_1, node_2):\n if node_1 not in self.nodes or node_2 not in self.nodes:\n return False\n\n index_1 = self.__get_node_index(node_1)\n index_2 = self.__get_node_index(node_2)\n\n if self.adjacency_matrix[index_1][index_2] > 0:\n return True\n else:\n return False\n\n def neighbors(self, node):\n # print(self.adjacency_matrix)\n neighbors = []\n if node in self.nodes:\n index_1 = self.__get_node_index(node)\n for raw in range(0, len(self.adjacency_matrix[index_1])):\n # print(\"Node index is : \" + index_1.__str__() + \" And node is: \" +node.__str__())\n # print(self.adjacency_matrix[index_1])\n if self.adjacency_matrix[index_1][raw] > 0:\n neighbors.append(self.nodes[raw])\n # print(self.nodes[raw])\n # return neighbors.sort(key=lambda x: x.data)\n return sorted(neighbors, key=lambda x: x.data)\n\n def add_node(self, node):\n\n if node in self.nodes:\n return False\n\n # print(\"Adding node: \" + node.__str__())\n self.nodes.append(node)\n self.adjacency_matrix.extend([[0] * len(self.nodes)])\n\n for eachRow in self.adjacency_matrix:\n eachRow.extend([0])\n return True\n\n def remove_node(self, node):\n if node in self.nodes:\n index_1 = self.__get_node_index(node)\n self.nodes.remove(node)\n for col in self.adjacency_matrix:\n del col[index_1]\n # col.pop(index_1)\n del self.adjacency_matrix[index_1]\n return True\n\n return False\n\n def add_edge(self, edge):\n if edge.from_node not in self.nodes:\n self.add_node(edge.from_node)\n\n if edge.to_node not in self.nodes:\n self.add_node(edge.to_node)\n\n index_1 = self.__get_node_index(edge.from_node)\n index_2 = self.__get_node_index(edge.to_node)\n\n if self.adjacency_matrix[index_1][index_2] == 0:\n self.adjacency_matrix[index_1][index_2] = edge.weight\n return True\n return False\n\n def remove_edge(self, edge):\n if edge.from_node not in self.nodes and edge.to_node not in self.nodes:\n return False\n\n index_1 = self.__get_node_index(edge.from_node)\n index_2 = self.__get_node_index(edge.to_node)\n\n if self.adjacency_matrix[index_1][index_2] > 0:\n self.adjacency_matrix[index_1][index_2] = 0\n return True\n\n return False\n\n def __get_node_index(self, node):\n return self.nodes.index(node)\n\n\nclass ObjectOriented(object):\n \"\"\"ObjectOriented defines the edges and nodes as both list\"\"\"\n\n def __init__(self):\n # implement your own list of edges and nodes\n self.edges = []\n self.nodes = []\n\n def adjacent(self, node_1, node_2):\n # print(\"Tets1\")\n if node_1 in self.nodes:\n # print(\"Tets2\")\n for edge in self.edges:\n # print(\"Tets3\")\n if edge.from_node == node_1 and edge.to_node == node_2:\n return True\n return False\n\n def neighbors(self, node):\n neighbors = []\n for edge in self.edges:\n if node.__eq__(edge.from_node):\n if edge.from_node not in neighbors:\n neighbors.append(edge.to_node)\n return neighbors\n\n def add_node(self, node):\n if 
node not in self.nodes:\n self.nodes.append(node)\n return True\n return False\n\n def remove_node(self, node):\n if node in self.nodes:\n self.nodes.remove(node)\n for edge in self.edges:\n if node.__eq__(edge.to_node) or node.__eq__(edge.from_node):\n self.remove_edge(edge)\n return True\n return False\n\n def add_edge(self, edge):\n if edge.from_node not in self.nodes:\n self.add_node(edge.from_node)\n\n if edge not in self.edges:\n self.edges.append(edge)\n return True\n return False\n\n def remove_edge(self, edge):\n if edge in self.edges:\n self.edges.remove(edge)\n return True\n return False\n\n","sub_path":"Python/CSULA/AI/Lecture-Code/graph-representation/graphs/graphy06.py","file_name":"graphy06.py","file_ext":"py","file_size_in_byte":9009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"145819358","text":"\n\nfrom xai.brain.wordbase.nouns._drain import _DRAIN\n\n#calss header\nclass _DRAINING(_DRAIN, ):\n\tdef __init__(self,): \n\t\t_DRAIN.__init__(self)\n\t\tself.name = \"DRAINING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"drain\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_draining.py","file_name":"_draining.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"39505314","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import url\n#from events.views import (\n# EventListView, EventCreateView, EventUpdateView, EventDeleteView\n# )\nfrom curriculums.views import DataCreate, DataDetail, DataUpdate, DataList\n\nurlpatterns = [\n url(r'^datos/', DataCreate.as_view(), name='datos'),\n url(r'^detail/(?P\\d+)$', DataDetail.as_view(), name='detail'),\n url(r'^update/(?P\\d+)$', DataUpdate.as_view(), name='update_data'),\n url(r'^$', DataList.as_view(), name='list'),\n]\n","sub_path":"curriculums/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"278401371","text":"from pwn import *\n\n\n# Allows easy swapping betwen local/remote/debug modes\ndef start(argv=[], *a, **kw):\n if args.GDB: # Set GDBscript below\n return gdb.debug([exe] + argv, gdbscript=gdbscript, *a, **kw)\n elif args.REMOTE: # ('server', 'port')\n return remote(sys.argv[1], sys.argv[2], *a, **kw)\n else: # Run locally\n return process([exe] + argv, *a, **kw)\n\n\ndef find_ip(payload):\n # Launch process and send payload\n p = process(exe)\n p.sendlineafter(b':', payload)\n # Wait for the process to crash\n p.wait()\n # Print out the address of EIP/RIP at the time of crashing\n ip_offset = cyclic_find(p.corefile.pc) # x86\n # ip_offset = cyclic_find(p.corefile.read(p.corefile.sp, 4)) # x64\n info('located EIP/RIP offset at {a}'.format(a=ip_offset))\n return ip_offset\n\n\n# Specify your GDB script here for debugging\ngdbscript = '''\ninit-pwndbg\ncontinue\n'''.format(**locals())\n\n\n# Set up pwntools for the correct architecture\nexe = './ret2win_params'\n# This will automatically get context arch, bits, os etc\nelf = context.binary = ELF(exe, checksec=False)\n# Enable verbose logging so we can see exactly what is being sent (info/debug)\ncontext.log_level = 'debug'\n\n# ===========================================================\n# EXPLOIT GOES HERE\n# ===========================================================\n\n# Pass in pattern_size, get back EIP/RIP offset\noffset = find_ip(cyclic(200))\n\n# Start program\nio 
= start()\n\n# ROP object\nrop = ROP(elf)\nrop.hacked(0xdeadbeef, 0xc0debabe)\n\n# Build the payload\npayload = flat({\n offset: rop.chain()\n})\n\n# Save the payload to file\nwrite('payload', payload)\n\n# Send the payload\nio.sendlineafter(b':', payload)\n\n# Get flag\nio.interactive()\n","sub_path":"pwn/binary_exploitation_101/04-ret2win_with_params/32-bit/ropstar.py","file_name":"ropstar.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"221202668","text":"import re\nimport requests\nfrom bs4 import BeautifulSoup\n\n#상록원 3층\n#원산지 없애기\n#&sday=1552921200\ndef getMenu():\n url = \"http://dgucoop.dongguk.edu/mobile/menu.html?code=5\"\n res = requests.get(url)\n res.encoding = None\n html = res.text\n\n bs = BeautifulSoup(html, \"html.parser\")\n\n tags_td = bs.findAll(\"td\")\n menu = \"======= 상록원3층 =======\\n———\"+tags_td[1].text+\"———\\n\"\n for i in [0,1] :\n for j in range(4+i, len(tags_td)-2, 3) :\n if(i == 1 and j == 5) :\n menu = menu+\"\\n———\"+tags_td[2].text+\"———\\n\"\n text = tags_td[j].text\n print(text)\n if(len(text) == 0) :\n continue\n text = text.replace(\"₩ \", \" ₩\")\n text = re.sub(\"\", \"\",str(tags_td[j]))\n text = re.sub(\"\\(\\S*\\)\\r\\n\", \"\", text)\n text = re.sub(\"<.*?>\",\"\",text)\n menu = menu+text+\"\\n\"\n print(text)\n \n veget = re.sub(\"\", \"\",str(tags_td[10]))\n veget = veget.replace(\"*외부고객 -10000원
\", \"\").replace(\"-\", \" ₩\").replace(\"원₩ \", \"\\n₩\")\n veget = re.sub(\"<.*?>\",\"\",veget)\n print(veget)\n menu = menu+\"\\n———\"+tags_td[9].text+\"———\\n\"+veget\n return menu\n#print(getMenu())\n","sub_path":"sanglog3F.py","file_name":"sanglog3F.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"292176034","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/toil/utils/toilDebugFile.py\n# Compiled at: 2020-04-03 17:41:24\n# Size of source mod 2**32: 6338 bytes\n\"\"\"Debug tool for copying files contained in a toil jobStore.\n\"\"\"\nfrom __future__ import absolute_import\nimport logging, fnmatch, os.path\nfrom toil.lib.bioio import getBasicOptionParser\nfrom toil.lib.bioio import parseBasicOptions\nfrom toil.common import Toil, jobStoreLocatorHelp, Config\nfrom toil.version import version\nlogger = logging.getLogger(__name__)\n\ndef recursiveGlob(directoryname, glob_pattern):\n \"\"\"\n Walks through a directory and its subdirectories looking for files matching\n the glob_pattern and returns a list=[].\n\n :param directoryname: Any accessible folder name on the filesystem.\n :param glob_pattern: A string like \"*.txt\", which would find all text files.\n :return: A list=[] of absolute filepaths matching the glob pattern.\n \"\"\"\n directoryname = os.path.abspath(directoryname)\n matches = []\n for root, dirnames, filenames in os.walk(directoryname):\n for filename in fnmatch.filter(filenames, glob_pattern):\n absolute_filepath = os.path.join(root, filename)\n matches.append(absolute_filepath)\n\n return matches\n\n\ndef fetchJobStoreFiles(jobStore, options):\n \"\"\"\n Takes a list of file names as glob patterns, searches for these within a\n given directory, and attempts to take all of the files found and copy them\n into options.localFilePath.\n\n :param jobStore: A fileJobStore object.\n :param options.fetch: List of file glob patterns to search\n for in the jobStore and copy into options.localFilePath.\n :param options.localFilePath: Local directory to copy files into.\n :param options.jobStore: The path to the jobStore directory.\n \"\"\"\n for jobStoreFile in options.fetch:\n jobStoreHits = recursiveGlob(directoryname=(options.jobStore), glob_pattern=jobStoreFile)\n for jobStoreFileID in jobStoreHits:\n logger.debug('Copying job store file: %s to %s', jobStoreFileID, options.localFilePath[0])\n jobStore.readFile(jobStoreFileID, (os.path.join(options.localFilePath[0], os.path.basename(jobStoreFileID))),\n symlink=(options.useSymlinks))\n\n\ndef printContentsOfJobStore(jobStorePath, nameOfJob=None):\n \"\"\"\n Fetch a list of all files contained in the jobStore directory input if\n nameOfJob is not declared, otherwise it only prints out the names of files\n for that specific job for which it can find a match. 
Also creates a logFile\n containing this same record of job files in the working directory.\n\n :param jobStorePath: Directory path to recursively look for files.\n :param nameOfJob: Default is None, which prints out all files in the jobStore.\n If specified, it will print all jobStore files that have been written to the\n jobStore by that job.\n \"\"\"\n if nameOfJob:\n glob = '*' + nameOfJob + '*'\n logFile = nameOfJob + '_fileset.txt'\n else:\n glob = '*'\n logFile = 'jobstore_files.txt'\n nameOfJob = ''\n list_of_files = recursiveGlob(directoryname=jobStorePath, glob_pattern=glob)\n if os.path.exists(logFile):\n os.remove(logFile)\n for gfile in sorted(list_of_files):\n if not gfile.endswith('.new'):\n logger.debug(nameOfJob + 'File: %s', os.path.basename(gfile))\n with open(logFile, 'a+') as (f):\n f.write(os.path.basename(gfile))\n f.write('\\n')\n\n\ndef main():\n parser = getBasicOptionParser()\n parser.add_argument('jobStore', type=str,\n help=('The location of the job store used by the workflow.' + jobStoreLocatorHelp))\n parser.add_argument('--localFilePath', nargs=1,\n help='Location to which to copy job store files.')\n parser.add_argument('--fetch', nargs='+',\n help=\"List of job-store files to be copied locally.Use either explicit names (i.e. 'data.txt'), or specify glob patterns (i.e. '*.txt')\")\n parser.add_argument('--listFilesInJobStore', help='Prints a list of the current files in the jobStore.')\n parser.add_argument('--fetchEntireJobStore', help='Copy all job store files into a local directory.')\n parser.add_argument('--useSymlinks', help=\"Creates symlink 'shortcuts' of files in the localFilePath instead of hardlinking or copying, where possible. If this is not possible, it will copy the files (shutil.copyfile()).\")\n parser.add_argument('--version', action='version', version=version)\n options = parseBasicOptions(parser)\n config = Config()\n config.setOptions(options)\n jobStore = Toil.resumeJobStore(config.jobStore)\n logger.debug('Connected to job store: %s', config.jobStore)\n if options.fetch:\n logger.debug('Fetching local files: %s', options.fetch)\n fetchJobStoreFiles(jobStore=jobStore, options=options)\n else:\n if options.fetchEntireJobStore:\n logger.debug('Fetching all local files.')\n options.fetch = '*'\n fetchJobStoreFiles(jobStore=jobStore, options=options)\n if options.listFilesInJobStore:\n printContentsOfJobStore(jobStorePath=(options.jobStore))\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/toil-4.0.0-py3.6/toilDebugFile.cpython-36.py","file_name":"toilDebugFile.cpython-36.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"353875248","text":"#[0]: import library\nimport numpy as np\nimport pandas as pd\n#\nfrom sklearn.linear_model import Lasso,LinearRegression,Ridge,ElasticNet,TheilSenRegressor,HuberRegressor,RANSACRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.tree import DecisionTreeRegressor,ExtraTreeRegressor\nfrom sklearn.ensemble import AdaBoostRegressor,ExtraTreesRegressor,GradientBoostingRegressor,RandomForestRegressor\nfrom xgboost import XGBRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error\nimport itertools\nimport pickle\n\n#[1]: import data\n# 读取文件\ndata = pd.read_csv('../tiqu.csv')\ndata.head()\ndata.describe()\nY = data['y2'] # 只预测波美度\nvar_names = list(data.columns)\n#[var_names.remove(x) for x in ['y1','y2']]\nvarnames = 
['Num','x2','x4','temp']\nX = data[['Num','x2','x4','temp']]\nX_train, X_test, y_train, y_test = train_test_split(X,Y,\\\n test_size=0.2,random_state=44)\n\n#[2]: all models for regression\nregs = [\n ['Lasso',Lasso()],\n ['LinearRegression',LinearRegression()],\n ['Ridge',Ridge()],\n ['ElasticNet',ElasticNet()],\n ['TheilSenRegressor',TheilSenRegressor()],\n ['RANSACRegressor',RANSACRegressor()],\n ['HuberRegressor',HuberRegressor()],\n ['SVR',SVR(kernel='linear')],\n ['DecisionTreeRegressor',DecisionTreeRegressor()],\n ['ExtraTreeRegressor',ExtraTreeRegressor()],\n ['AdaBoostRegressor',AdaBoostRegressor(n_estimators=6)],\n ['ExtraTreesRegressor',ExtraTreesRegressor(n_estimators=6)],\n ['GradientBoostingRegressor',GradientBoostingRegressor(n_estimators=6)],\n ['RandomForestRegressor',RandomForestRegressor(n_estimators=6)],\n ['XGBRegressor',XGBRegressor(n_estimators=6,)],\n]\n#[3]: evaluate all models by score mse value\npreds = []\nfor reg_name,reg in regs:\n print(reg_name)\n reg.fit(X_train,y_train)\n y_pred = reg.predict(X_test)\n score = mean_absolute_error(y_test,y_pred)\n preds.append([reg_name,y_pred])\n\n# 对模型做各种组合寻找最优的方案\nfinal_results = []\nfor comb_length in range(1,len(regs)+1):\n print('Model Amount :',comb_length)\n results = []\n for comb in itertools.combinations(preds,comb_length):\n pred_sum = 0\n model_name = []\n for reg_name,pred in comb:\n pred_sum += pred\n model_name.append(reg_name)\n pred_sum /= comb_length\n model_name = '+'.join(model_name)\n score = mean_absolute_error(y_test,pred_sum)\n results.append([model_name,score])\n results = sorted(results,key=lambda x:x[1])\n #for model_name,score in results:\n #print(model_name,score)\n #print()\n final_results.append(results[0])\n\n# final result\nfinal_results = sorted(final_results,key=lambda x:x[1])\n\n#for model_name,score in final_results:\n# print(model_name,score)\n\n#[print(b) for b in zip(itertools.count(),[a[0] for a in regs])]\n# show best plant\nprint(\"the best model is :\")\nprint(final_results[0])\n# 保存较好的模型\ndef save_model(best_model_names,file_path ='ensemble_ml_y2.pkl'):\n model_names = best_model_names.split('+')\n all_model_names = [x[0] for x in regs] # 所有模型的名字\n print(model_names)\n models={}\n for model_name in model_names:\n index = all_model_names.index(model_name)\n models[model_name]=regs[index][1]\n # 打印该模型对应的score\n score = mean_absolute_error(y_test,preds[index][1])\n print(model_name,\" score:\",score)\n\n pickle.dump(models,open(file_path,'wb'),-1)\n return model_names\nif final_results[0][1] < 0.2:\n save_model(final_results[0][0])\n","sub_path":"Services/Ensemble/all_model.py","file_name":"all_model.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"519850541","text":"'''\nH-Index는 과학자의 생산성과 영향력을 나타내는 지표입니다. 어느 과학자의 H-Index를 나타내는 값인 h를 구하려고 합니다. 
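# For reference, a compact textbook H-Index computation. This is an
# illustrative sketch, not the author's solution below: sort the
# citation counts in descending order and keep the largest h whose
# h-th paper still has at least h citations.
def h_index_reference(citations):
    h = 0
    for i, c in enumerate(sorted(citations, reverse=True), start=1):
        if c >= i:
            h = i
    return h

assert h_index_reference([3, 0, 6, 1, 5]) == 3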
According to Wikipedia, the H-Index is computed as follows.\n\nIf, among the n papers a scientist has published, h papers have each been cited at least h times and the remaining papers have been cited h times or fewer, then the maximum such h is that scientist's H-Index.\n\nGiven an array citations containing the citation counts of the papers a scientist has published, write a solution function that returns that scientist's H-Index.\n\nExample: out of 5 papers, 3 or more papers cited at least 3 times and the remaining papers cited 3 times or fewer\n'''\n\ndef solution(citations):\n    citations.sort(reverse = True)\n    answer = int(0)\n    for i in range(int(sum(citations,0.0)/len(citations)) , citations[0]):\n        #mean ~ max\n        h_up = 0\n        h_down = 0\n        for j in citations:\n            if(j >= i):\n                h_up += 1\n            elif (j < i):\n                h_down += 1\n        if h_up > h_down and h_up >= answer:\n            answer = h_up\n    print(answer)\n    return answer\n\ncitations =\t[1524 , 2 , 999 , 790 , 540, 10 , 22]\n# \n#1524 999 790 540 22 10 2\nsolution(citations)\n","sub_path":"11_Sort/t03.py","file_name":"t03.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"576703938","text":"from selenium import webdriver\r\n\r\n# Note: change this to the absolute path of the chromedriver\r\n# (chromedriver.exe on Windows) that you downloaded above!\r\ndriver = 
webdriver.Chrome('C:/Users/user/Downloads/AnySign_Installer.exechromedriver_win32/chomedriver')\r\n\r\ndriver.get('http://naver.com')\r\ndriver.implicitly_wait(3)\r\ndriver.get_screenshot_as_file('naver_main.png')\r\n\r\ndriver.quit()\r\n","sub_path":"180508_selenium2.py","file_name":"180508_selenium2.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"350706764","text":"import csv\nimport itertools\nimport math\nimport os\nimport statistics\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport Divergence\n\n\ninput_dir = \"Inputs\"\ninput_dir_cen = \"InputsCentralized\"\ninput_dir_equ = \"InputsEqualized\"\n\nf = open(\"brackets.txt\", 'r')\nmin_val = int(f.readline())\nmax_val = int(f.readline())\nf.close()\n\nfor pw in range(1, 11):\n#for nbins in [4, 8, 16, 32, 64, 128, 256, 512, 1024]:\n dists = []\n jsds = []\n jsds_cen = []\n jsds_equ = []\n nbins = 2 ** pw\n print(\"N bins: \", nbins)\n\n # Record History\n print(\"Reading History...\")\n num_hists = 30\n for i in range(133, 133 + num_hists):\n with open(\"%s/%d.csv\" % (input_dir, i), 'r') as f:\n reader = csv.reader(f)\n records = (int(row[2]) for row in itertools.islice(reader, 0, None))\n distribution = Divergence.histogramNaive(min_val, max_val, records, nbins=nbins)\n dists += [distribution]\n\n # Test Rest Days\n print(\"Performing Tests...\")\n for i in range(133 + num_hists, 489):\n total_dist = Divergence.blendDistributions(dists)\n\n if not os.path.exists(os.path.abspath(\"%s/%d.csv\" % (input_dir, i))):\n continue\n\n # Calculate Real Distribution\n f = open(\"%s/%d.csv\" % (input_dir, i), 'r')\n reader = csv.reader(f)\n records = (int(row[2]) for row in itertools.islice(reader, 0, None))\n distribution = Divergence.histogramNaive(min_val, max_val, records, nbins=nbins)\n jsds += [Divergence.jsd(total_dist, distribution)]\n dists += [distribution]\n f.close()\n\n # Calculate Centralized Distribution\n f = open(\"%s/%d.csv\" % (input_dir_cen, i), 'r')\n reader = csv.reader(f)\n records = (int(row[2]) for row in itertools.islice(reader, 0, None))\n distribution = Divergence.histogramNaive(min_val, max_val, records, nbins=nbins)\n jsds_cen += [Divergence.jsd(total_dist, distribution)]\n f.close()\n\n # Calculate Equalized Distribution\n f = open(\"%s/%d.csv\" % (input_dir_equ, i), 'r')\n reader = csv.reader(f)\n records = (int(row[2]) for row in itertools.islice(reader, 0, None))\n distribution = Divergence.histogramNaive(min_val, max_val, records, nbins=nbins)\n jsds_equ += [Divergence.jsd(total_dist, distribution)]\n f.close()\n\n # Window Sliding Forward\n del dists[0]\n\n # Plot\n print(\"Normal Distribution(Real):\")\n print(\"mu: %f\" % statistics.mean(jsds))\n print(\"sigma: %f\" % math.sqrt(statistics.variance(jsds)))\n\n print(\"Normal Distribution(Centralized Cheating):\")\n print(\"mu: %f\" % statistics.mean(jsds_cen))\n print(\"sigma: %f\" % math.sqrt(statistics.variance(jsds_cen)))\n\n print(\"Normal Distribution(Equalized Cheating):\")\n print(\"mu: %f\" % statistics.mean(jsds_equ))\n print(\"sigma: %f\" % math.sqrt(statistics.variance(jsds_equ)))\n\n print(\"Plotting...\")\n figure = plt.figure(figsize=(1400/300, 1400/300), dpi=300)\n plt.plot(jsds, [0] * len(jsds), \"r.\", ms=1, label=\"normal\")\n plt.plot(jsds_cen, [0.5] * len(jsds), \"bo\", ms=1, label=\"centralized\")\n plt.plot(jsds_equ, [-0.5] * len(jsds), \"go\", ms=1, label=\"equalized\")\n plt.legend()\n plt.grid()\n 
plt.savefig(\"Emulate'(nbins=%d).png\" % nbins)\n print(\"Done.\")","sub_path":"OldStory/SyntheticStatic-Old/Emulate.py","file_name":"Emulate.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"470682382","text":"import requests\nfrom bs4 import BeautifulSoup\n\ninfo = \"date\", \"pict\"\nurl = 'https://weather.yahoo.co.jp/weather/jp/33/6610.html'\nlinks = {}\ndata = []\nres = requests.get(url)\nhtml = BeautifulSoup(res.text)\nlink = html.find(class_=\"forecastCity\").findAll(class_=info)\n\n\ndef weather():\n for count, i in enumerate(link):\n data.append(link[count].get_text())\n if count % 2 == 0:\n key = link[count].get_text()\n else:\n value = link[count].get_text()\n if count >= 1:\n links[key] = value\n return links, data","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"568201980","text":"import random\r\nlistt = []\r\n\r\nfor i in range(random.randint(5,10)):\r\n listt.append(random.randint(0,20))\r\n\r\nprint(listt)\r\n\r\ndef has_duplicates(item):\r\n for i in range(len(item)):\r\n count = item.count(item[i])\r\n if count > 1:\r\n return True\r\n return False\r\n\r\nisDuplicate = has_duplicates(listt)\r\nprint(f'Does it have duplicates? {isDuplicate}')\r\n","sub_path":"day 4 question 5.py","file_name":"day 4 question 5.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"392094243","text":"import cv2\r\nimport numpy as np\r\nimport os\r\nimport xlwt\r\nfrom xlwt import Workbook\r\n\r\nwb = Workbook()\r\nsheet1 = wb.add_sheet('Sheet 1')\r\n\r\nsheet1.write(0, 0, 'File Name')\r\nsheet1.write(0, 1, 'White %')\r\nsheet1.write(0, 2, 'Orange %')\r\nr = 1\r\ndirectory = \"Fedex\"\r\nfor path, subdirnames, filenames in os.walk(directory):\r\n for filename in filenames:\r\n\r\n img_path = os.path.join(path, filename)\r\n print(\"img_name:\", filename)\r\n test_img = cv2.imread(img_path)\r\n\r\n img = cv2.imread(img_path)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n rows,cols = img.shape\r\n\r\n pixels = []\r\n\r\n for i in range(rows):\r\n for j in range(cols):\r\n pixels.append(img[i,j])\r\n\r\n pixels = list(dict.fromkeys(pixels))\r\n\r\n pixels.sort()\r\n size = len(pixels)\r\n i = 0\r\n j = 0\r\n\r\n sum = 0\r\n for i in range(0, size-1):\r\n sum += pixels[i]\r\n\r\n threshold = int((int(pixels[0]) +int(pixels[size-1]))*0.5)\r\n threshold = int(sum/size)\r\n\r\n orange = 0\r\n white = 0\r\n\r\n for i in range(rows):\r\n for j in range(cols):\r\n if(img[i,j] > threshold):\r\n white+=1;\r\n else:\r\n orange+=1;\r\n\r\n white_p = int((white*100)/(white+orange))\r\n # white_p = int(str(round(white_p, 2)))\r\n orange_p = 100 - white_p\r\n print(white_p)\r\n\r\n sheet1.write(r, 0, filename)\r\n sheet1.write(r, 1, white_p)\r\n sheet1.write(r, 2, orange_p)\r\n r+=1\r\n print(orange_p)\r\n\r\nwb.save('Fedex.xls')","sub_path":"count_pixels.py","file_name":"count_pixels.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"177135148","text":"import math\nimport numpy\nfrom operator import itemgetter\nfrom numpy.linalg import norm\n\nEPSILON = 1e-6\n\ndef euclidean(vec1, vec2):\n diff = vec1 - vec2\n return math.sqrt(diff.dot(diff))\n\ndef cosine_sim(vec1, 
vec2):\n vec1 += EPSILON * numpy.ones(len(vec1))\n vec2 += EPSILON * numpy.ones(len(vec1))\n return vec1.dot(vec2)/(norm(vec1)*norm(vec2))\n\ndef assign_ranks(item_dict):\n ranked_dict = {}\n sorted_list = [(key, val) for (key, val) in sorted(item_dict.items(), key=itemgetter(1), reverse=True)]\n for i, (key, val) in enumerate(sorted_list):\n same_val_indices = []\n for j, (key2, val2) in enumerate(sorted_list):\n if val2 == val:\n same_val_indices.append(j+1)\n if len(same_val_indices) == 1:\n ranked_dict[key] = i+1\n else:\n ranked_dict[key] = 1.*sum(same_val_indices)/len(same_val_indices)\n return ranked_dict\n\ndef correlation(dict1, dict2):\n avg1 = 1.*sum([val for key, val in dict1.iteritems()])/len(dict1)\n avg2 = 1.*sum([val for key, val in dict2.iteritems()])/len(dict2)\n numr, den1, den2 = (0., 0., 0.)\n for val1, val2 in zip(dict1.itervalues(), dict2.itervalues()):\n numr += (val1 - avg1) * (val2 - avg2)\n den1 += (val1 - avg1) ** 2\n den2 += (val2 - avg2) ** 2\n return numr / math.sqrt(den1 * den2)\n\ndef spearmans_rho(ranked_dict1, ranked_dict2):\n assert len(ranked_dict1) == len(ranked_dict2)\n if len(ranked_dict1) == 0 or len(ranked_dict2) == 0:\n return 0.\n x_avg = 1.*sum([val for val in ranked_dict1.values()])/len(ranked_dict1)\n y_avg = 1.*sum([val for val in ranked_dict2.values()])/len(ranked_dict2)\n num, idx, idy = (0., 0., 0.)\n for key in ranked_dict1.keys():\n xi = ranked_dict1[key]\n yi = ranked_dict2[key]\n num += (xi-x_avg)*(yi-y_avg)\n idx += (xi-x_avg)**2\n idy += (yi-y_avg)**2\n return num/(math.sqrt(idx*idy))","sub_path":"src/package/ranking.py","file_name":"ranking.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"187026840","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\nimport keras\nfrom keras import backend as K\nfrom keras.models import Model, Sequential\nfrom keras.layers import Input, concatenate\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Convolution2D, Conv2D, MaxPooling2D, Conv2DTranspose, ZeroPadding2D\nfrom keras.optimizers import Adam, Adadelta, Adamax, Nadam, Adagrad, SGD, RMSprop\n\nSMOOTH = 1 # constant value for internal dice coefficient calculations\n\ndef dice_coef(y_true, y_pred, smooth=1):\n \"\"\"Dice coefficient function to use outside of the U-Net model for calculations.\"\"\"\n side = len(y_true[0])\n y_true_f = y_true.reshape(side * side)\n y_pred_f = y_pred.reshape(side * side)\n intersection = sum(y_true_f * y_pred_f)\n return (2. * intersection + smooth) / (sum(y_true_f) + sum(y_pred_f) + smooth)\n\ndef _dice_coef(y_true, y_pred, smooth=1):\n \"\"\"Dice coefficient for use with Tensorflow fitting. For INTERNAL use only.\"\"\"\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n\ndef _dice_coef_loss(y_true, y_pred, smooth=1):\n \"\"\"Dice coefficient loss. Simply just the negative dice coefficient. 
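# A hand-worked sanity check for the NumPy dice_coef defined above,
# using a tiny hypothetical 2x2 mask pair: one overlapping pixel, two
# true pixels, one predicted pixel, so the score with smooth=1 is
# (2*1 + 1) / (2 + 1 + 1) = 0.75.
import numpy as np
y_true = np.array([[1.0, 1.0], [0.0, 0.0]])
y_pred = np.array([[1.0, 0.0], [0.0, 0.0]])
# dice_coef(y_true, y_pred) evaluates to (2*1 + 1) / (2 + 1 + 1) = 0.75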
For INTERNAL use only.\"\"\"\n return -_dice_coef(y_true, y_pred, smooth)\n\nclass UNet:\n def __init__(self, optimizer=Adam, sample_width=128, sample_height=128,\n learning_rate=1e-3, weight_decay=1e-3, momentum=0.8):\n self.optimizer = optimizer\n self.sample_width = sample_width\n self.sample_height = sample_height\n self.loss_metric = _dice_coef_loss\n self.metrics = [_dice_coef]\n self.learning_rate = learning_rate\n self.weight_decay = weight_decay # Used only for Adam\n self.momentum = momentum # Used only for SGD\n self.history = None\n self.model = None; self._initializeArchitecture()\n \n def fit(self, train_x: np.array, train_y: np.array, batch_size=None, \n epochs=150, verbose=1, shuffle=True, validation_split=0.2):\n \"\"\"Fit the model and set the Keras history.\"\"\"\n if not batch_size:\n batch_size = int(len(train_x) / 4) # Initialize here, since we can't do it in parameters\n self.history = self.model.fit(train_x, train_y, batch_size=batch_size, \n epochs=epochs, verbose=verbose, shuffle=shuffle, \n validation_split=validation_split)\n \n def predict(self, test_x: np.array, verbose=1) -> np.array:\n \"\"\"Predict and return a mask for the test image.\"\"\"\n return self.model.predict(test_x, verbose=verbose)\n \n def plot(self, title='Model Accuracy'):\n \"\"\"Summarize the network history for accuracy.\"\"\"\n plt.plot(self.history.history['_dice_coef'])\n plt.plot(-np.array(self.history.history['val_loss']))\n plt.title(title)\n plt.ylabel('Dice coefficient')\n plt.xlabel('Epochs')\n plt.legend(['Training', 'Validation'], loc='upper left')\n plt.show()\n \n def _initializeArchitecture(self):\n \"\"\"Initialize the architecture for the U-Net.\"\"\"\n inputs = Input((self.sample_width, self.sample_height, 1))\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n drop1 = Dropout(0.5)(pool1)\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(drop1)\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n drop2 = Dropout(0.5)(pool2)\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(drop2)\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n drop3 = Dropout(0.3)(pool3)\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(drop3)\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n drop4 = Dropout(0.3)(pool4)\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(drop4)\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)\n up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), \n padding='same')(conv5), conv4], axis=3)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)\n up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), \n padding='same')(conv6), conv3], axis=3)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)\n up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), \n padding='same')(conv7), conv2], axis=3)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)\n up9 = 
concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), \n padding='same')(conv8), conv1], axis=3)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)\n conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)\n model = Model(inputs=[inputs], outputs=[conv10])\n # Compile the network based off of its optimizer. \n if self.optimizer is Adam:\n model.compile(optimizer=self.optimizer(lr=self.learning_rate, decay=self.weight_decay), \n loss=self.loss_metric, metrics=self.metrics)\n elif self.optimizer in [Adamax, Nadam, Adadelta]:\n model.compile(optimizer=self.optimizer(lr=self.learning_rate), \n loss=self.loss_metric, metrics=self.metrics)\n elif self.optimizer is SGD:\n model.compile(optimizer=self.optimizer(lr=self.learning_rate, momentum=self.momentum), \n loss=self.loss_metric, metrics=self.metrics)\n self.model = model","sub_path":"unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":6605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"81987414","text":"#!/usr/bin/python\nimport requests\nimport sys\nfrom BeautifulSoup import *\nfrom MaltegoTransform import *\n\nme=MaltegoTransform()\nuserID = sys.argv[1]\n\nmemberSite=BeautifulSoup(requests.get('http://meetup.com/members/'+userID).content)\n\nfor divTag in memberSite.findAll('div'):\n\ttry:\n\t\tif str(divTag['class']) == 'figureset-description margin-bottom':\n\t\t\tfor aTag in divTag.findAll('a'):\n\t\t\t\tif aTag.contents[0] != 'Member':\n\t\t\t\t\tobj=me.addEntity(\"maltego.Website\",aTag['href'])\n\t\t\t\t\tobj.setType(\"maltego.Website\")\n\t\t\t\t\tobj.setValue(aTag['href'])\n\t\t\t\n\t\t\t\t\tfor divTag2 in memberSite.findAll('div'):\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif str(divTag2['class']) == 'figureset-figure':\n\t\t\t\t\t\t\t\tif str(divTag2.a['title']).find(aTag.contents[0]) != -1:\n\t\t\t\t\t\t\t\t\tobj.iconURL = divTag2.img['src']\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tcontinue\n\t\t#\tprint .img['src']\n\texcept:\t\t\t\t\n\t\tcontinue\nme.addUIMessage(\"Meetup Group Finder Done!\")\nme.returnOutput()\n","sub_path":"findGroups.py","file_name":"findGroups.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"78764489","text":"import openpyxl\nimport re\n\nfile_sig_int = 'project_files/Tele2_TMS_Signal_integration_v5.7.xlsx'\nwb = openpyxl.load_workbook(file_sig_int, True)\n\n\ndef get_sheets_list(region):\n ws_list = list(filter(lambda i: re.search('^'+region.upper(), i), wb.sheetnames))\n return ws_list\n\n\ndef get_named_ranges(work_sheet):\n nr = wb.get_named_ranges()\n named_ranges = list(filter(lambda i: re.search('^'+work_sheet.lower(), i.name), nr))\n return named_ranges\n\n\ndef get_gy_peers(w_sheet):\n ws = wb[w_sheet]\n s1_gy = wb.defined_names[f'{w_sheet.lower()}_s1_gy'].attr_text\n rng = s1_gy[s1_gy.find('!') + 1:]\n gy_peers = []\n for row in ws[rng]:\n gy_peers.append({\"peerId\": row[13].value,\n \"hostName\": row[9].value,\n \"port\": int(row[11].value),\n \"bindAddress\": None,\n \"enabled\": True,\n \"watchdogTimeoutMs\": 30000\n })\n # Non-production RTUCG for testing purposes. 
Disabled by default\n gy_peers.append({\"peerId\": \"T2TST-RTUCG-01-2\",\n \"hostName\": \"10.78.245.57\",\n \"port\": 3878,\n \"bindAddress\": None,\n \"enabled\": False, # True,\n \"watchdogTimeoutMs\": 30000\n })\n return gy_peers\n\n\ndef get_radius_secret(w_sheet):\n ws = wb[w_sheet]\n rad_secret = ws['M6'].value\n return rad_secret\n\n\ndef get_gx_peers(w_sheet, p_type):\n ws = wb[w_sheet]\n name = [None, 'odd', 'even']\n pcrfs = {}\n for i in 1,2:\n gx = wb.defined_names[f'{w_sheet.lower()}_s{i}_gx'].attr_text\n rng = gx[gx.find('!') + 1:]\n pcrfs_list = []\n for row in ws[rng]:\n if row[-1].value == p_type:\n pcrfs_list.append({\n 'primIP': row[9].value,\n 'secIP': row[10].value,\n 'hostName': row[13].value,\n 'realm': row[14].value\n })\n pcrfs[name[i]] = pcrfs_list\n return pcrfs\n\n\ndef get_sig_nets():\n ranges = wb.defined_names.definedName\n sig_nets = {}\n for rng in ranges:\n if re.match('^\\D{3}_d\\d_s\\d_(radius|gx|gy)$', rng.name):\n dests = wb.defined_names[rng.name].destinations\n nets = []\n for coord in dests:\n ws = wb[coord[0]]\n for row in ws[coord[1]]:\n if row[-2].value:\n nets.append(row[-2].value.strip())\n sig_nets[rng.name] = nets\n return sig_nets","sub_path":"shared/sig_int.py","file_name":"sig_int.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"414771282","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom django.utils.safestring import mark_safe\nfrom time import sleep\n\nclass PageInfo(object):\n def __init__(self,currentPage,totalItems,perItems=5,pageNum=6):\n try:\n currentPage = int(currentPage)\n except Exception as e:\n currentPage = 1\n self.current_page = currentPage\n self.per_items = perItems\n self.total_items = totalItems\n self.page_num = pageNum\n\n @property\n def total_page(self):\n if not self.total_items:\n self.total_items = 0\n val = int(self.total_items / self.per_items) + 1 if self.total_items % self.per_items > 0 else self.total_items / self.per_items\n #print(int(self.total_items / self.per_items),'val',val)\n return val\n @property\n def start(self):\n val = (self.current_page -1 ) * self.per_items\n return val\n\n @property\n def end(self):\n val = self.current_page * self.per_items\n return val\n def pager(self):\n page_html = []\n page = self.current_page\n all_page_count = self.total_page\n total_items = self.total_items\n\n first_html = \"
<li><a href='/index/1/'>首页</a></li>\"\n page_html.append(first_html)\n\n if page <= 1:\n prev_html = \"<li><a href='javascript:void(0);'>上一页</a></li>\"\n else:\n prev_html = \"<li><a href='/index/%d/'>上一页</a></li>\" %(page-1,)\n page_html.append(prev_html)\n\n if all_page_count <11:\n begin = 0\n end = all_page_count\n else:\n if page < 6:\n begin = 0\n end = 11\n else:\n if page + 6 > all_page_count:\n begin = page - 6\n end = all_page_count\n else:\n begin = page - 6\n end = page + 5\n for i in range(int(begin),int(end)):\n print(i,'range')\n if page == i + 1:\n print(page,)\n a_html = \"<li class='active'><a href='/index/%d/'>%d</a></li>\" \\\n %(i +1 ,i+1,)\n print(a_html,3)\n else:\n a_html = \"<li><a href='/index/%d/'>%d</a></li>\" %(i+1,i+1,)\n page_html.append(a_html)\n\n if page + 1 > all_page_count:\n next_html = \"<li><a href='javascript:void(0);'>下一页</a></li>\"\n else:\n next_html = \"<li><a href='/index/%d/'>下一页</a></li>\" %(page + 1,)\n page_html.append(next_html)\n\n end_html = \"<li><a href='/index/%d/'>尾页</a></li>\" %(self.total_page,)\n page_html.append(end_html)\n\n end_html = \"<li><span>共%d页/%d 条数据</span></li>\" %(all_page_count,total_items,)\n page_html.append(end_html)\n\n page_string = mark_safe(''.join(page_html))\n return page_string\n\n\n","sub_path":"utils/pager.py","file_name":"pager.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"618194891","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 4 20:41:31 2019\n\n@author: inter000\n\"\"\"\n\nimport re\nimport numpy as np\nimport scipy.spatial.distance\n\n# Read the data file line by line (one sentence per line)\nwith open('sentences.txt', 'r') as file:\n propos = list(map(lambda x: x.lower(), file.readlines()))\n\n# Tokenize each sentence into words\npropos = list(map(lambda x: re.findall('[a-z]+[a-z]*', x), propos))\n\n# Assign every unique word its own unique number\nwords = dict()\nnum = 0\nfor _list in propos:\n for word in _list:\n if not (word in words):\n words[word] = num\n num += 1\n# Invert the dictionary, since the next step needs the word numbers as keys\nwords = {value: key for key, value in words.items()}\n\n# Build a matrix whose rows are sentences and whose columns are words\n# Each element is the number of occurrences of word j in sentence i\ncolumns_n = len(words)\nrows_n = len(propos)\nwords_mtx = np.zeros((rows_n, columns_n))\nfor i in range(rows_n):\n for j in range(columns_n):\n words_mtx[i][j] = propos[i].count(words[j])\n \n# Compute the cosine distance between the first row and all the others\ncosdist_res = [1] * rows_n\nfor i in range(1, rows_n):\n cosdist_res[i] = scipy.spatial.distance.cosine(words_mtx[0], words_mtx[i])\n\n# Write the results to a file\nfirst_ind = cosdist_res.index(min(cosdist_res))\ncosdist_res[first_ind] = 1\nsecond_ind = cosdist_res.index(min(cosdist_res))\nwith open('submission-1.txt', 'w') as file:\n file.write(str(first_ind) + ' ' + str(second_ind))","sub_path":"Math_and_Python_for_DA/week2/Task1_1.py","file_name":"Task1_1.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"51305459","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/webplotlib/views.py\n# Compiled at: 2011-05-24 18:47:12\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom webplotlib.chart_builders import create_chart_as_png_str\n\ndef index(request):\n return HttpResponseRedirect('/admin/')\n\n\ndef show_ts_plot_png(request):\n fake_data_dct = {'data': [\n [\n 1, 2, 1, 2, 3, -1, 4, -2, 2.5, 1.3]]}\n img_str = create_chart_as_png_str('timeseries', fake_data_dct, {}, '')\n response = HttpResponse(img_str, mimetype='image/png')\n return response\n\n\ndef show_bar_plot_png(request):\n fake_data_dct = {'data': [\n [\n 1, 2, 1, 2, 3, -11, 4, -2, 2.5, 1.3]]}\n img_str = create_chart_as_png_str('barchart', fake_data_dct, {}, '')\n response = HttpResponse(img_str, mimetype='image/png')\n return response","sub_path":"pycfiles/webplotlib-0.1-py2.6/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"377895216","text":"import random\r\nfrom discord.ext import commands\r\nimport links\r\nimport about_text as wm\r\nimport colors, dice\r\nimport asyncio\r\nimport discord\r\nimport aiohttp\r\n\r\n\r\nerrortxt = ('That is not formatted 
properly or valid positive integers weren\\'t used, ',\r\n 'the proper format is:\\n`[Prefix]minesweeper `\\n\\n',\r\n 'You can give me nothing for random columns, rows, and bombs.')\r\nerrortxt = ''.join(errortxt)\r\n\r\n\r\nclass Fun(commands.Cog):\r\n \"\"\"This category is about... well... Fun\"\"\"\r\n def __init__(self, client):\r\n self.guild = None\r\n self.author = None\r\n self.client = client\r\n\r\n @commands.command()\r\n async def ssp(self, ctx, args):\r\n \"\"\"Rock, paper, scissors\"\"\"\r\n ssp_choice = ['scissor', 'stone', 'paper']\r\n choice = random.choice(ssp_choice)\r\n\r\n if choice == 'scissor' and args == 'scissor':\r\n s = discord.Embed(title='✂ Scissors', description='Drawn 🙄', color=colors.fun)\r\n s.set_author(name='')\r\n await ctx.send(embed=s)\r\n elif choice == 'scissor' and args == 'stone':\r\n s = discord.Embed(title='✂ Scissors', description='You win 🎉', color=colors.fun)\r\n s.set_author(name='')\r\n await ctx.send(embed=s)\r\n elif choice == 'scissor' and args == 'paper':\r\n s = discord.Embed(title='✂ Scissors', description='You lose 😂', color=colors.fun)\r\n s.set_author(name='')\r\n await ctx.send(embed=s)\r\n\r\n elif choice == 'stone' and args == 'scissor':\r\n s = discord.Embed(title='Stone', description='You lose 😂', color=colors.fun)\r\n s.set_author(name='')\r\n await ctx.send(embed=s)\r\n elif choice == 'stone' and args == 'stone':\r\n s = discord.Embed(title='Stone', description='Drawn 🙄', color=colors.fun)\r\n s.set_author(name='')\r\n await ctx.send(embed=s)\r\n elif choice == 'stone' and args == 'paper':\r\n s = discord.Embed(title='Stone', description='You win 🎉', color=colors.fun)\r\n s.set_author(name='')\r\n await ctx.send(embed=s)\r\n\r\n elif choice == 'paper' and args == 'scissor':\r\n s = discord.Embed(title='📜 Paper', description='You win 🎉', color=colors.fun)\r\n s.set_author(name='')\r\n await ctx.send(embed=s)\r\n elif choice == 'paper' and args == 'stone':\r\n s = discord.Embed(title='📜 Paper', description='You lose 😂', color=colors.fun)\r\n s.set_author(name='')\r\n await ctx.send(embed=s)\r\n elif choice == 'paper' and args == 'paper':\r\n s = discord.Embed(title='📜 Paper', description='Drawn 🙄', color=colors.fun)\r\n s.set_author(name='')\r\n await ctx.send(embed=s)\r\n\r\n else:\r\n n = discord.Embed(title='Dont try to cheat', description='Invalid choice', color=colors.red)\r\n n.set_author(name='')\r\n await ctx.send(embed=n)\r\n\r\n @commands.command()\r\n async def minesweeper(self, ctx, columns=None, rows=None, bombs=None):\r\n if columns is None or rows is None and bombs is None:\r\n if columns is not None or rows is not None or bombs is not None:\r\n await ctx.send(errortxt)\r\n return\r\n else:\r\n # Gives a random range of columns and rows from 4-13 if no arguments are given\r\n # The amount of bombs depends on a random range from 5 to this formula:\r\n # ((columns * rows) - 1) / 2.5\r\n # This is to make sure the percentages of bombs at a given random board isn't too high\r\n columns = random.randint(4, 13)\r\n rows = random.randint(4, 13)\r\n bombs = columns * rows - 1\r\n bombs = bombs / 2.5\r\n bombs = round(random.randint(5, round(bombs)))\r\n try:\r\n columns = int(columns)\r\n rows = int(rows)\r\n bombs = int(bombs)\r\n except ValueError:\r\n await ctx.send(errortxt)\r\n return\r\n if columns > 13 or rows > 13:\r\n await ctx.send('The limit for the columns and rows are 13 due to discord limits...')\r\n return\r\n if columns < 1 or rows < 1 or bombs < 1:\r\n await ctx.send('The provided numbers cannot be 
zero or negative...')\r\n return\r\n if bombs + 1 > columns * rows:\r\n await ctx.send(\r\n ':boom:**BOOM**, you have more bombs than spaces on the grid or you attempted to make all of the spaces bombs!')\r\n return\r\n\r\n # Creates a list within a list and fills them with 0s, this is our makeshift grid\r\n grid = [[0 for num in range(columns)] for num in range(rows)]\r\n\r\n # Loops for the amount of bombs there will be\r\n loop_count = 0\r\n while loop_count < bombs:\r\n x = random.randint(0, columns - 1)\r\n y = random.randint(0, rows - 1)\r\n # We use B as a variable to represent a Bomb (this will be replaced with emotes later)\r\n if grid[y][x] == 0:\r\n grid[y][x] = 'B'\r\n loop_count = loop_count + 1\r\n # It will loop again if a bomb is already selected at a random point\r\n if grid[y][x] == 'B':\r\n pass\r\n\r\n # The while loop will go though every point though our makeshift grid\r\n pos_x = 0\r\n pos_y = 0\r\n while pos_x * pos_y < columns * rows and pos_y < rows:\r\n # We need to predefine this for later\r\n adj_sum = 0\r\n # Checks the surrounding points of our \"grid\"\r\n for (adj_y, adj_x) in [(0, 1), (0, -1), (1, 0), (-1, 0), (1, 1), (-1, 1), (1, -1), (-1, -1)]:\r\n # There will be index errors, we can just simply ignore them by using a try and exception block\r\n try:\r\n if grid[adj_y + pos_y][adj_x + pos_x] == 'B' and adj_y + pos_y > -1 and adj_x + pos_x > -1:\r\n # adj_sum will go up by 1 if a surrounding point has a bomb\r\n adj_sum = adj_sum + 1\r\n except Exception as error:\r\n pass\r\n # Since we don't want to change the Bomb variable into a number,\r\n # the point that the loop is in will only change if it isn't \"B\"\r\n if grid[pos_y][pos_x] != 'B':\r\n grid[pos_y][pos_x] = adj_sum\r\n # Increases the X values until it is more than the columns\r\n # If the while loop does not have \"pos_y < rows\" will index error\r\n if pos_x == columns - 1:\r\n pos_x = 0\r\n pos_y = pos_y + 1\r\n else:\r\n pos_x = pos_x + 1\r\n\r\n # Builds the string to be Discord-ready\r\n string_builder = []\r\n for the_rows in grid:\r\n string_builder.append(''.join(map(str, the_rows)))\r\n string_builder = '\\n'.join(string_builder)\r\n # Replaces the numbers and B for the respective emotes and spoiler tags\r\n string_builder = string_builder.replace('0', '||:zero:||')\r\n string_builder = string_builder.replace('1', '||:one:||')\r\n string_builder = string_builder.replace('2', '||:two:||')\r\n string_builder = string_builder.replace('3', '||:three:||')\r\n string_builder = string_builder.replace('4', '||:four:||')\r\n string_builder = string_builder.replace('5', '||:five:||')\r\n string_builder = string_builder.replace('6', '||:six:||')\r\n string_builder = string_builder.replace('7', '||:seven:||')\r\n string_builder = string_builder.replace('8', '||:eight:||')\r\n final = string_builder.replace('B', '||:bomb:||')\r\n\r\n percentage = columns * rows\r\n percentage = bombs / percentage\r\n percentage = 100 * percentage\r\n percentage = round(percentage, 2)\r\n\r\n embed = discord.Embed(title='\\U0001F642 Minesweeper \\U0001F635', color=discord.Colour.blue())\r\n embed.add_field(name='Columns:', value=columns, inline=True)\r\n embed.add_field(name='Rows:', value=rows, inline=True)\r\n embed.add_field(name='Total Spaces:', value=columns * rows, inline=True)\r\n embed.add_field(name='\\U0001F4A3 Count:', value=bombs, inline=True)\r\n embed.add_field(name='\\U0001F4A3 Percentage:', value=f'{percentage}%', inline=True)\r\n embed.add_field(name='Requested by:', value=ctx.author.display_name, 
inline=True)\r\n await ctx.send(content=f'\\U0000FEFF\\n{final}', embed=embed)\r\n\r\n @minesweeper.error\r\n async def minesweeper_error(self, ctx, error):\r\n await ctx.send(errortxt)\r\n return\r\n\r\n @commands.command()\r\n async def rolldice(self, ctx):\r\n \"\"\"Roll some dice\"\"\"\r\n dice_ = [f'{dice.dice_1}',\r\n f'{dice.dice_2}',\r\n f'{dice.dice_3}',\r\n f'{dice.dice_4}',\r\n f'{dice.dice_5}',\r\n f'{dice.dice_6}']\r\n\r\n rolldice = discord.Embed(description=f'You rolled a {random.choice(dice_)}',\r\n color=colors.fun)\r\n rolldice.set_author(name='Roll a dice', icon_url=links.giveaway_fun)\r\n rolldice.set_footer(text=wm.footer)\r\n await ctx.send(embed=rolldice)\r\n\r\n @commands.command(pass_context=True)\r\n async def coinflip(self, ctx):\r\n \"\"\"Wanna bet? Flip a coin!\"\"\"\r\n flip = random.choice([\r\n f'https://upload.wikimedia.org/wikipedia/de/thumb/8/80/2_euro_coin_Eu_serie_1.png/220px-2_euro_coin_Eu_serie_1.png',\r\n f'https://www.zwei-euro.com/wp-content/uploads/2019/02/DE-2002.gif'])\r\n flipcoin = discord.Embed()\r\n flipcoin.colour = 0x12423\r\n flipcoin.set_thumbnail(\r\n url=\"https://media1.tenor.com/images/938e1fc4fcf2e136855fd0e83b1e8a5f/tenor.gif?itemid=5017733\")\r\n flipcoin1 = await ctx.send(embed=flipcoin)\r\n coin = discord.Embed()\r\n coin.set_thumbnail(url=f'{flip}')\r\n await asyncio.sleep(2)\r\n await flipcoin1.delete()\r\n await ctx.send(embed=coin)\r\n\r\n @commands.command()\r\n async def tournament(self, ctx, tc1: discord.Member, tc2: discord.Member, tc3: discord.Member, tc4: discord.Member):\r\n \"\"\"Uno game\"\"\"\r\n try:\r\n user = [tc1, tc2, tc3, tc4]\r\n hitu1 = f'{tc1} choose a card!'\r\n hitu2 = f'{tc2} choose a card!'\r\n hitu3 = f'{tc3} choose a card!'\r\n hitu4 = f'{tc4} choose a card!'\r\n rndmc = ['https://i.pinimg.com/originals/9b/bb/70/9bbb7015af1bcd420ee07d89048cebf7.jpg',\r\n 'https://pics.me.me/thumb_earth-angry-german-kid-spellcastor-tuner-he-rages-about-lag-and-52634494.png',\r\n 'https://www.memesmonkey.com/images/memesmonkey/cb/cbc69b7a454ec9f50fa0616ca3d4d4d9.jpeg',\r\n 'https://i.imgur.com/gq8aDzq.jpg',\r\n 'https://i.redd.it/gqse7u1cudw31.png',\r\n 'https://i.imgur.com/yeD5fGI.gif',\r\n 'https://images-na.ssl-images-amazon.com/images/I/51jxIccbroL._AC_.jpg',\r\n 'https://images-cdn.9gag.com/photo/aDzZ1LO_460s.jpg']\r\n\r\n fight = discord.Embed(description=f'{tc1} vs. {tc2} vs. {tc3} vs. 
{tc4}')\r\n fight.set_author(name='Battle', icon_url=links.battle)\r\n fight.set_thumbnail(url='https://media3.giphy.com/media/dw5SDFsmqFhYs/giphy.gif')\r\n fight.set_footer(text=wm.footer)\r\n fight1 = await ctx.send(embed=fight)\r\n\r\n hit = discord.Embed(title=hitu1, color=colors.fun)\r\n hit.set_image(url=random.choice(rndmc))\r\n hit_ = await ctx.send(embed=hit)\r\n await asyncio.sleep(7)\r\n\r\n hit2 = discord.Embed(title=hitu2, color=colors.fun)\r\n hit2.set_image(url=random.choice(rndmc))\r\n hit2_ = await ctx.send(embed=hit2)\r\n await asyncio.sleep(7)\r\n\r\n hit3 = discord.Embed(title=hitu3, color=colors.fun)\r\n hit3.set_image(url=random.choice(rndmc))\r\n hit3_ = await ctx.send(embed=hit3)\r\n await asyncio.sleep(7)\r\n\r\n hit4 = discord.Embed(title=hitu4, color=colors.fun)\r\n hit4.set_image(url=random.choice(rndmc))\r\n hit4_ = await ctx.send(embed=hit4)\r\n await asyncio.sleep(7)\r\n\r\n hit5 = discord.Embed(title=hitu1, color=colors.fun)\r\n hit5.set_image(url=random.choice(rndmc))\r\n hit5_ = await ctx.send(embed=hit5)\r\n await asyncio.sleep(7)\r\n\r\n hit6 = discord.Embed(title=hitu2, color=colors.fun)\r\n hit6.set_image(url=random.choice(rndmc))\r\n hit6_ = await ctx.send(embed=hit6)\r\n await asyncio.sleep(7)\r\n\r\n hit7 = discord.Embed(title=hitu3, color=colors.fun)\r\n hit7.set_image(url=random.choice(rndmc))\r\n hit7_ = await ctx.send(embed=hit7)\r\n await asyncio.sleep(7)\r\n\r\n hit8 = discord.Embed(title=hitu4, color=colors.fun)\r\n hit8.set_image(url=random.choice(rndmc))\r\n hit8_ = await ctx.send(embed=hit8)\r\n await asyncio.sleep(7)\r\n\r\n hit9 = discord.Embed(title=hitu2, color=colors.fun)\r\n hit9.set_image(url=random.choice(rndmc))\r\n hit9_ = await ctx.send(embed=hit9)\r\n await asyncio.sleep(7)\r\n\r\n hit10 = discord.Embed(title=hitu1, color=colors.fun)\r\n hit10.set_image(url=random.choice(rndmc))\r\n hit10_ = await ctx.send(embed=hit10)\r\n await asyncio.sleep(7)\r\n\r\n hit11 = discord.Embed(title=hitu2, color=colors.fun)\r\n hit11.set_image(url=random.choice(rndmc))\r\n hit11_ = await ctx.send(embed=hit11)\r\n await asyncio.sleep(7)\r\n\r\n hit12 = discord.Embed(title=hitu1, color=colors.fun)\r\n hit12.set_image(url=random.choice(rndmc))\r\n hit12_ = await ctx.send(embed=hit12)\r\n await asyncio.sleep(7)\r\n\r\n await fight1.delete()\r\n await hit_.delete()\r\n await hit2_.delete()\r\n await hit3_.delete()\r\n await hit4_.delete()\r\n await hit5_.delete()\r\n await hit6_.delete()\r\n await hit7_.delete()\r\n await hit8_.delete()\r\n await hit9_.delete()\r\n await hit10_.delete()\r\n await hit12_.delete()\r\n winner = discord.Embed(title=f'{random.choice(user)} WINS!!!\\n', description=f'{tc1}'\r\n f' VS. 
'\r\n f'{tc2}'\r\n f'explore more commands with /help',\r\n color=colors.red)\r\n winner.set_thumbnail(\r\n url='https://cdna.artstation.com/p/assets/images/images/015/814/178/original/jean-baptiste-gabert-pokemonmockup.gif?1549763590')\r\n winner.set_footer(text=wm.footer)\r\n await ctx.send(embed=winner)\r\n except:\r\n error = discord.Embed(title='Cant find any user', description='User ```<@user>``')\r\n await ctx.send(embed=error)\r\n\r\n @commands.command(name=\"whoami\")\r\n async def whoami(self, ctx):\r\n \"\"\"Tells you who you are\"\"\"\r\n await ctx.send(f\"You are {ctx.message.author.name}\")\r\n\r\n @commands.command()\r\n async def hack(self, ctx, member: discord.Member):\r\n random_id = ['20390940',\r\n '20930948',\r\n '09479398',\r\n '03984988',\r\n '94883099',\r\n '98477490',\r\n '37729902',\r\n '98765421',\r\n '93893893',\r\n '08589498',\r\n '88489920',\r\n '84990201',\r\n '94789435',\r\n '98839897',\r\n '49732974',\r\n '97398394',\r\n '80489033',\r\n '98479883',\r\n '97878820',\r\n '08839004',\r\n '98308934',\r\n '09029389',\r\n '98308483',\r\n '84083887',\r\n '08480388',\r\n '98408036',\r\n '39729993',\r\n '39383479',\r\n '47859789',\r\n '48749749',\r\n '70909585']\r\n passwords = ['Hisj09',\r\n 'o093y*gh',\r\n 'Im43fpiN10&',\r\n '3i9eiih8',\r\n 'sok30wok',\r\n '39iwi9i',\r\n '3kw903ewo',\r\n 'ekw0kw0',\r\n 'wokkwooks',\r\n 'k0okwok',\r\n 'ko8928',\r\n '30ij9i7',\r\n '49ie990ko',\r\n '30ke0eo',\r\n '3003eokeo',\r\n '30ek9eki9',\r\n '3oke0emiddj',\r\n '30e0w0lks8',\r\n '48jeijsiji8',\r\n '49kred8',\r\n '82u2waji',\r\n '22ll0la',\r\n '30ks0so0',\r\n '55omf09',\r\n '30309oe',\r\n '10009',\r\n '10993k',\r\n '10020oski',\r\n '20keosoo20',\r\n 'bd489475998',\r\n '4u8dhig7t',\r\n 'o4j9uerri8',\r\n '4eud9i4u',\r\n '4ue9re9'\r\n ]\r\n y = passwords\r\n x = random_id\r\n message = await ctx.send(content=f\"🔄 Hacking {member}\")\r\n await asyncio.sleep(5)\r\n await message.edit(content=\"❌ Firewall blocking access\")\r\n await asyncio.sleep(5)\r\n await message.edit(content=\"✅ Firewall hacked\")\r\n await asyncio.sleep(5)\r\n await message.edit(content=f\"💸 Apple Account password is {random.choice(y)}\")\r\n await asyncio.sleep(5)\r\n await message.edit(content=f\"〽 Credit Card ID is {random.choice(x)}\")\r\n await asyncio.sleep(5)\r\n await message.edit(content=f\"💫 Discord ID is {random.choice(x)}\")\r\n await asyncio.sleep(5)\r\n await message.edit(content=f\"🔄 Google Account password is {random.choice(y)}\")\r\n await asyncio.sleep(5)\r\n await message.edit(content=f\"🔄 Microsoft Account password is {random.choice(y)}\")\r\n await asyncio.sleep(5)\r\n await message.edit(content=f\"🔄 Bank Lock Code is {random.choice(x)}\")\r\n await asyncio.sleep(5)\r\n await message.edit(content=\"🔄 Covering all traces\")\r\n await asyncio.sleep(5)\r\n await message.edit(content=\"🔄 Destroying browser memory\")\r\n await asyncio.sleep(5)\r\n await message.edit(content=f\"✅ Finished hacking {member}\")\r\n\r\n @commands.command()\r\n async def say(self, ctx, *, message):\r\n \"\"\"Makes the bot say something\"\"\"\r\n await ctx.message.delete()\r\n await ctx.send(message)\r\n\r\n @commands.command(name=\"republicAnthem\")\r\n async def republicAnthem(self, ctx):\r\n \"\"\"Plays the Reppy Anthem\"\"\"\r\n await ctx.send(f\"https://www.youtube.com/watch?v=aQ_zW_PgWeA%22\")\r\n\r\n @commands.command(name=\"separatistAnthem\")\r\n async def separatistAnthem(self, ctx):\r\n \"\"\"Plays the Seppy Anthem\"\"\"\r\n await ctx.send(f\"https://www.youtube.com/watch?v=0IBW9mT_PxM&t=2s%22\")\r\n\r\n 
@commands.command()\r\n async def num(self, ctx: commands.Context):\r\n \"\"\"Number guessing-game\"\"\"\r\n await ctx.send(ctx.message.author.mention + ' Would you like to play \"guess number\" game?')\r\n randomnum = random.randint(1, 100)\r\n attempts = 5\r\n\r\n def check(m):\r\n return (m.author == ctx.author\r\n and m.channel == ctx.channel)\r\n\r\n while attempts > 0:\r\n guess = \"\"\r\n\r\n while not guess.isdigit():\r\n await ctx.send(ctx.author.mention + ', write a natural number from 1 to 100 or q (quit)')\r\n\r\n try:\r\n msg = await self.client.wait_for('message', timeout=15, check=check)\r\n guess = msg.content\r\n except asyncio.TimeoutError:\r\n await ctx.send('Timeout exceed, quitting...')\r\n return\r\n except ValueError:\r\n pass\r\n\r\n quitwords = ('q', 'quit', 'exit')\r\n if guess in quitwords:\r\n await ctx.send('Quitting...')\r\n return\r\n\r\n guess = int(guess)\r\n\r\n if guess < randomnum:\r\n await ctx.send('It is bigger')\r\n elif guess > randomnum:\r\n await ctx.send('It is smaller')\r\n else:\r\n await ctx.send(f'Ladies and gentlemen, {ctx.author.mention} got it. My number was: {randomnum}')\r\n return\r\n\r\n attempts -= 1\r\n\r\n await ctx.send(f'You failed! My number was {randomnum}')\r\n\r\n @commands.command(aliases=['8ball', 'ball'])\r\n async def _8ball(self, ctx, *, question):\r\n \"\"\"Answers for your life\"\"\"\r\n\r\n responses = ['As I see it, yes.',\r\n 'Ask again later.',\r\n 'Better not tell you now.',\r\n 'Cannot predict now.',\r\n 'Concentrate and ask again.',\r\n 'Don’t count on it.',\r\n 'It is certain.',\r\n 'It is decidedly so.',\r\n 'Most likely.',\r\n 'My reply is no.',\r\n 'My sources say no.',\r\n 'Outlook not so good.',\r\n 'Outlook good.',\r\n 'Reply hazy, try again.',\r\n 'Signs point to yes.',\r\n 'Very doubtful.',\r\n 'Without a doubt.',\r\n 'Yes.',\r\n 'Yes – definitely.',\r\n 'You may rely on it.'\r\n ]\r\n q = (\"Question: \" + question)\r\n a = (\"Answer: \" + random.choice(responses))\r\n embed = discord.Embed(\r\n title=q,\r\n description=a,\r\n colour=discord.Colour.blue()\r\n )\r\n\r\n await ctx.send(embed=embed)\r\n\r\n @commands.command(aliases=[\"facepalm\"])\r\n async def fp(self, ctx):\r\n \"\"\"Idiotic memes to laugh at\"\"\"\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(f\"https://www.reddit.com/r/facepalm/top.json\") as response:\r\n j = await response.json()\r\n\r\n data = j[\"data\"][\"children\"][random.randint(0, 25)][\"data\"]\r\n image_url = data[\"url\"]\r\n title = data[\"title\"]\r\n em = discord.Embed(description=f\"[**{title}**]({image_url})\", colour=discord.Colour.blue())\r\n em.set_image(url=image_url)\r\n em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested by {ctx.author}\")\r\n await ctx.send(embed=em)\r\n\r\n @commands.command(aliases=[\"maymay\", \"memes\"])\r\n async def meme(self, ctx):\r\n \"\"\"Em... well... 
memes?\"\"\"\r\n\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(f\"https://www.reddit.com/r/memes/top.json\") as response:\r\n j = await response.json()\r\n\r\n data = j[\"data\"][\"children\"][random.randint(0, 25)][\"data\"]\r\n image_url = data[\"url\"]\r\n title = data[\"title\"]\r\n em = discord.Embed(description=f\"[**{title}**]({image_url})\", colour=discord.Colour.blue())\r\n em.set_image(url=image_url)\r\n em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested by {ctx.author}\")\r\n await ctx.send(embed=em)\r\n\r\n @commands.command(aliases=[\"prequelmeme\", \"pre\"])\r\n async def prequel(self, ctx):\r\n \"\"\"Star Wars memes\"\"\"\r\n\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(f\"https://www.reddit.com/r/PrequelMemes/top.json\") as response:\r\n j = await response.json()\r\n\r\n data = j[\"data\"][\"children\"][random.randint(0, 25)][\"data\"]\r\n image_url = data[\"url\"]\r\n title = data[\"title\"]\r\n em = discord.Embed(description=f\"[**{title}**]({image_url})\", colour=discord.Colour.blue())\r\n em.set_image(url=image_url)\r\n em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested by {ctx.author}\")\r\n await ctx.send(embed=em)\r\n\r\n @commands.command(aliases=[\"ot\"])\r\n async def OTmemes(self, ctx):\r\n \"\"\"Star Wars memes\"\"\"\r\n\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(f\"https://www.reddit.com/r/OTMemes/top.json\") as response:\r\n j = await response.json()\r\n\r\n data = j[\"data\"][\"children\"][random.randint(0, 25)][\"data\"]\r\n image_url = data[\"url\"]\r\n title = data[\"title\"]\r\n em = discord.Embed(description=f\"[**{title}**]({image_url})\", colour=discord.Colour.blue())\r\n em.set_image(url=image_url)\r\n em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested by {ctx.author}\")\r\n await ctx.send(embed=em)\r\n\r\n @commands.command(aliases=['knockout'])\r\n async def KO(self, ctx, member):\r\n \"\"\"Punch the person you hate\"\"\"\r\n\r\n gifslist = ['https://media.tenor.com/images/e302b70e805f045816c100a92325b824/tenor.gif',\r\n 'https://media1.tenor.com/images/3da22c373b5506939514773ad496b170/tenor.gif?itemid=11751811',\r\n 'https://media1.tenor.com/images/b3dddda27a439a9951fdd0de5a0644e6/tenor.gif?itemid=15872871',\r\n 'https://media1.tenor.com/images/3b0d7cc04fb09adb1ccc96a23b98dd86/tenor.gif?itemid=6032176',\r\n 'https://media1.tenor.com/images/97248cf32942f467c4a049acbae8981e/tenor.gif?itemid=3555140',\r\n 'https://media1.tenor.com/images/c7dece5cdd4cee237e232e0c5d955042/tenor.gif?itemid=4902914']\r\n gifs = random.choice(gifslist)\r\n\r\n embed = discord.Embed(\r\n description=f\"{member} Has been Knocked Out!\",\r\n colour=discord.Colour.blue()\r\n )\r\n embed.set_image(url=gifs)\r\n await ctx.send(embed=embed)\r\n\r\n @commands.command()\r\n async def gunfight(self, ctx, user: discord.Member):\r\n \"\"\"Who is quicker at drawing his gun?\"\"\"\r\n global response\r\n choices = ['fire', 'draw', 'shoot', 'bang', 'pull', 'boom']\r\n gun = random.choice(choices)\r\n if ctx.message.author == user:\r\n await ctx.send(\"**You can't fight yourself!**\")\r\n else:\r\n await ctx.send(f\"{user.mention} **Do you accept the challenge?** ``yes``** or** ``no``?\")\r\n\r\n def check(m):\r\n return m.channel == ctx.channel and m.author == user\r\n\r\n if ctx.message.author != user:\r\n try:\r\n response = await self.client.wait_for('message', check=check, timeout=15)\r\n except:\r\n await ctx.send(f\"**Looks like {user.mention} 
doesn't want to play :frowning:**\")\r\n tr = random.randrange(5)\r\n\r\n if response.content.lower() == \"yes\":\r\n await ctx.send(f\"{user.mention} **has accepted the challenge** :slight_smile:\")\r\n await asyncio.sleep(2)\r\n await ctx.send(\"**Get Ready, it will start at any moment!**\")\r\n await asyncio.sleep(tr)\r\n await ctx.send(f\"**Type** ``{gun}`` **now!**\")\r\n\r\n if response.content.lower() == \"no\":\r\n await ctx.send(f\"{user.mention} has declined your request :frowning:\")\r\n\r\n user1 = ctx.author\r\n user2 = user\r\n\r\n def check(n):\r\n return n.author == user1 or n.author == user2\r\n\r\n message = await self.client.wait_for(\"message\", check=check)\r\n if message.author == user1:\r\n if message.content == gun:\r\n await ctx.send(f\"{user1.mention} **Has Won!**\")\r\n\r\n else:\r\n if message.content == gun:\r\n await ctx.send(f\"{user2.mention} **Has Won!**\")\r\n\r\n @commands.command(name='..')\r\n async def command(self, ctx):\r\n \"\"\"Indeed...\"\"\"\r\n await ctx.send(\"Indeed...\")\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Fun(bot))\r\n","sub_path":"cog_stuff.py","file_name":"cog_stuff.py","file_ext":"py","file_size_in_byte":28577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"478084619","text":"#Write a program that asks the user to enter a positive integer n. Assuming that this integer is in seconds,\n# your program should convert the number of seconds into days, hours, minutes, and seconds and prints them\nuserSeconds = input('Enter the value: ')\n# seconds / 60 = minutes | minutes / 60 = hours | hours / 24 = days\n# seconds -> days = seconds / 86400\nuserIntSeconds = int(userSeconds)\n\n#Days (minutes * hours * days)\nminHrDays = 60 * 60 * 24\ndays = userIntSeconds //(minHrDays)\nsecondsAfterDays = userIntSeconds % minHrDays\n\n#Hours (minutes * hours)\nminHr = 60 * 60\nhours = secondsAfterDays //(minHr)\nsecondsAfterHours = secondsAfterDays % minHr\n\n#Minutes (minutes)\nmMin = 60\nminutes = secondsAfterHours // mMin\nsecondsAfterMinutes = secondsAfterHours % mMin\n\nprint(days,\"Days\", hours, \"Hours\", minutes, \"Minutes\", secondsAfterMinutes, \"Seconds\")\n","sub_path":"CSE1309x/secondsToDays.py","file_name":"secondsToDays.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"131585461","text":"\n# coding: utf-8\n\n# In[26]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pydicom\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nimport csv\nimport os\nfrom imgaug import augmenters as iaa\nimport random\nfrom skimage.transform import resize\nfrom skimage import measure\nfrom matplotlib.patches import Rectangle\n\n\n# In[27]:\n\n\n# Making dictonary to save all the pnemonia locations \ninfection_dict = {}\nwith open(r\"C:\\Users\\ashu1\\OneDrive\\Desktop\\CS 688\\all\\stage_1_train_labels.csv\", \"r\") as csvfile:\n reader = csv.reader(csvfile)\n next(reader, None)\n for row in reader:\n PatientID = row[0]\n Locations = row[1:5]\n Label = row[5]\n if Label == '1':\n Locations = [int(i) for i in Locations]\n if PatientID in infection_dict:\n infection_dict[PatientID].append(Locations)\n else:\n infection_dict[PatientID] = [Locations]\n\n\n# In[28]:\n\n\n# Splitting the dataset into train, test and validation set in the ratio of 80:10:10 \ndata = os.listdir(r'C:\\Users\\ashu1\\OneDrive\\Desktop\\CS 
688\\all\\train')\nrandom.shuffle(data)\ntrain_data = data[:int((len(data)+1)*.80)] \nremaining_data = data[int(len(data)*.80+1):] \nvalidation_data = remaining_data[:int((len(remaining_data)+1)*.50)] \ntest_data = remaining_data[int(len(remaining_data)*.50+1):]\n\n\n# In[29]:\n\n\n# Generate images for traing the model\nclass generator(keras.utils.Sequence):\n \n def __init__(self, directory, filenames, infection_dict = None, batch_size = 32, image_size = 256, shuffle = True, augment = False, predict = False):\n self.folder = directory\n self.filenames = filenames\n self.pneumonia_locations = infection_dict\n self.batch_size = batch_size\n self.image_size = image_size\n self.shuffle = shuffle\n self.augment = augment\n self.predict = predict\n self.on_epoch_end()\n \n def __load__(self, filename):\n img = pydicom.dcmread(os.path.join(self.folder, filename)).pixel_array\n # creating mask of 0's for bounding box\n msk = np.zeros(img.shape)\n filename = filename.split('.')[0]\n if filename in infection_dict:\n for location in infection_dict[filename]:\n x, y, w, h = location\n # writting 1's on the mask where infection is present in the image\n msk[y:y+h, x:x+w] = 1\n # image augmentation\n if self.augment and random.random() > 0.5:\n img = np.fliplr(img)\n msk = np.fliplr(msk)\n img = resize(img, (self.image_size, self.image_size), mode='reflect')\n msk = resize(msk, (self.image_size, self.image_size), mode='reflect') > 0.5\n img = np.expand_dims(img, -1)\n msk = np.expand_dims(msk, -1) \n return img, msk\n \n def __loadpredict__(self, filename):\n img = pydicom.dcmread(os.path.join(self.folder, filename)).pixel_array\n img = resize(img, (self.image_size, self.image_size), mode='reflect')\n img = np.expand_dims(img, -1)\n return img\n \n def __getitem__(self, index):\n filenames = self.filenames[index*self.batch_size:(index+1)*self.batch_size]\n if self.predict:\n imgs = [self.__loadpredict__(filename) for filename in filenames]\n imgs = np.array(imgs)\n return imgs, filenames\n else:\n items = [self.__load__(filename) for filename in filenames]\n imgs, msks = zip(*items)\n imgs = np.array(imgs)\n msks = np.array(msks)\n return imgs, msks\n \n def on_epoch_end(self):\n if self.shuffle:\n random.shuffle(self.filenames)\n \n def __len__(self):\n if self.predict:\n return int(np.ceil(len(self.filenames) / self.batch_size))\n else:\n return int(len(self.filenames) / self.batch_size)\n\n\n# In[30]:\n\n\nMOMENTUM = 0.95\nN_BLOCKS = 4\nDEPTH = 4\nCHANNELS = 32\nIMAGE_SIZE = 256\nINPUT_SIZE = 256\nEPOCH = 5\nBATCH_SIZE = 1\n\n\n# In[31]:\n\n\n# Prepare training and validation data for training and validation\ndirectory = r'C:\\Users\\ashu1\\OneDrive\\Desktop\\CS 688\\all\\train'\ntrain_gen = generator(directory, train_data, infection_dict, batch_size = BATCH_SIZE, image_size = IMAGE_SIZE, shuffle = True, augment = True, predict = False)\nvalidation_gen = generator(directory, validation_data, infection_dict, batch_size = BATCH_SIZE, image_size = IMAGE_SIZE, shuffle = True, predict = False)\n\n\n# In[32]:\n\n\ndef create_network(input_size, channels, n_blocks = N_BLOCKS, depth = DEPTH):\n # input\n inputs = keras.Input(shape=(input_size, input_size, 1))\n x = keras.layers.Conv2D(channels, 3, padding='same', use_bias=False)(inputs)\n # Residual block\n for d in range(depth):\n channels = channels * 2\n x = create_downsample(channels, x)\n for b in range(n_blocks):\n x = create_resblock(channels, x)\n # Output\n x = keras.layers.BatchNormalization(momentum=MOMENTUM)(x)\n x = 
keras.layers.LeakyReLU(0)(x)\n x = keras.layers.Conv2D(1, 1, activation='sigmoid')(x)\n outputs = keras.layers.UpSampling2D(2**depth)(x)\n model = keras.Model(inputs=inputs, outputs=outputs)\n return model\n\n\n# In[33]:\n\n\n# Creating Residual Block\ndef create_resblock(channels, inputs):\n x = keras.layers.BatchNormalization(momentum = MOMENTUM)(inputs)\n x = keras.layers.Conv2D(channels, 3, padding = 'same', use_bias=False)(x)\n x = keras.layers.BatchNormalization(momentum = MOMENTUM)(x)\n x = keras.layers.LeakyReLU(0)(x)\n x = keras.layers.Conv2D(channels, 3, padding = 'same', use_bias=False)(x)\n return keras.layers.add([x, inputs])\n\n\n# In[34]:\n\n\ndef create_downsample(channels, inputs):\n x = keras.layers.BatchNormalization(momentum = MOMENTUM)(inputs)\n x = keras.layers.LeakyReLU(0)(x)\n x = keras.layers.Conv2D(channels, 1, padding = 'same', use_bias=False)(x)\n x = keras.layers.MaxPool2D(2)(x)\n return x\n\n\n# In[35]:\n\n\n# Defining intersecion over union loss\ndef iou_loss(y_true, y_pred):\n y_true = tf.reshape(y_true, [-1])\n y_pred = tf.reshape(y_pred, [-1])\n intersection = tf.reduce_sum(y_true * y_pred)\n score = (intersection + 1.) / (tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) - intersection + 1.)\n return 1 - score\n\n\n# In[36]:\n\n\n# Calculating loss as a combination of iou and binary cross entropy\ndef iou_bce_loss(y_true, y_pred):\n return 0.5 * keras.losses.binary_crossentropy(y_true, y_pred) + 0.5 * iou_loss(y_true, y_pred)\n\n\n# In[37]:\n\n\n# Defining accuray metrics\ndef mean_iou(y_true, y_pred):\n y_pred = tf.round(y_pred)\n intersect = tf.reduce_sum(y_true * y_pred, axis=[1, 2, 3])\n union = tf.reduce_sum(y_true, axis=[1, 2, 3]) + tf.reduce_sum(y_pred, axis=[1, 2, 3])\n smooth = tf.ones(tf.shape(intersect))\n return tf.reduce_mean((intersect + smooth) / (union - intersect + smooth))\n\n\n# In[38]:\n\n\n# Definine cosine annealing for learning rate\ndef cosine_annealing(x):\n # learn_rate = 0.09\n lr = 0.001\n epochs = EPOCH\n return lr*(np.cos(np.pi*x/epochs)+1.)/2.0\nlearning_rate = tf.keras.callbacks.LearningRateScheduler(cosine_annealing)\n\n\n# In[39]:\n\n\n# Creatimg network\nmodel = create_network(input_size = INPUT_SIZE, channels = CHANNELS, n_blocks = N_BLOCKS, depth = DEPTH)\n\n# Compiling model\nmodel.compile(optimizer='adam', loss = iou_bce_loss, metrics=['accuracy', mean_iou])\n\n# Fitting the model\nhistory = model.fit_generator(train_gen, validation_data = validation_gen, callbacks = [learning_rate], epochs = EPOCH, shuffle = True)\n\n\n# In[12]:\n\n\n# Plotting accuray and loss of both training and validation data\nplt.figure(figsize=(15,5))\nplt.subplot(121)\nplt.plot(history.epoch, history.history[\"acc\"], label=\"Train accuracy\")\nplt.plot(history.epoch, history.history[\"val_acc\"], label=\"Validation accuracy\")\nplt.legend()\nplt.subplot(122)\nplt.plot(history.epoch, history.history[\"loss\"], label=\"Train loss\")\nplt.plot(history.epoch, history.history[\"val_loss\"], label=\"Validation loss\")\nplt.legend()\nplt.show()\n\n\n# In[13]:\n\n\n# Prepare testing data for prediction\ntest_gen = generator(directory, test_data, None, batch_size = BATCH_SIZE, image_size = IMAGE_SIZE, shuffle = False, predict = True)\n\n\n# In[14]:\n\n\n# Making dictionary to save the predicted infection locations of test data\ndict = {}\nfor imgs, filenames in test_gen:\n preds = model.predict(imgs)\n for pred, filename in zip(preds, filenames):\n pred = resize(pred, (1024, 1024), mode = 'reflect')\n comp = pred[:, :, 0] > 0.5\n comp = 
measure.label(comp)\n predictionString = ''\n for region in measure.regionprops(comp):\n y, x, y2, x2 = region.bbox\n height = y2 - y\n width = x2 - x\n conf = np.mean(pred[y:y+height, x:x+width])\n if conf >= 0.7: # If the confidence is greater than 0.7 then it will be written in Prediction String \n predictionString += str(conf) + ' ' + str(x) + ' ' + str(y) + ' ' + str(width) + ' ' + str(height) + ' '\n filename = filename.split('.')[0]\n dict[filename] = predictionString\n if len(dict) >= len(test_data):\n break\n\n\n# In[18]:\n\n\nsub = pd.DataFrame.from_dict(dict,orient='index')\nsub.index.names = ['patientId']\nsub.columns = ['PredictionString']\nsub.to_csv('submission.csv')\n\n\n# In[19]:\n\n\nsub = sub.reset_index()\n#sub.head(5)\n\n\n# In[20]:\n\n\nsub['Result'] = np.where(sub['PredictionString']== \"\", '0', '1')\n\n\n# In[21]:\n\n\nsub['Confidence'], sub['x'], sub['y'], sub['width'], sub['height'] = sub['PredictionString'].str.split(' ', 4).str\n\n\n# In[22]:\n\n\nsub = sub.drop(['PredictionString'], axis = 1)\nsub.head(5)\n\n\n# In[ ]:\n\n\n# Code for bounding box and generator was referred from one of the github repository.\n#https://github.com/yannistannier/kaggle-Pneumonia-Detection-Challenge/blob/master/Keras-Pneumonia.ipynb\n\n","sub_path":"CNN_Final.py","file_name":"CNN_Final.py","file_ext":"py","file_size_in_byte":9920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"377185719","text":"# Copyright 2020 Canonical Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For further info, check https://github.com/canonical/charmcraft\n\n\"\"\"Tests for the Store API layer (code in store/store.py).\"\"\"\n\nimport logging\nfrom unittest.mock import patch, call, MagicMock\n\nimport pytest\n\nfrom charmcraft.commands.store.store import Store\n\n\n@pytest.fixture\ndef client_mock():\n client_mock = MagicMock()\n with patch('charmcraft.commands.store.store.Client', lambda: client_mock):\n yield client_mock\n\n\ndef test_login(client_mock):\n \"\"\"Simple login case.\"\"\"\n store = Store()\n result = store.login()\n assert client_mock.mock_calls == [\n call.clear_credentials(),\n call.get('/v1/whoami'),\n ]\n assert result is None\n\n\ndef test_logout(client_mock):\n \"\"\"Simple logout case.\"\"\"\n store = Store()\n result = store.logout()\n assert client_mock.mock_calls == [\n call.clear_credentials(),\n ]\n assert result is None\n\n\ndef test_whoami(client_mock):\n \"\"\"Simple whoami case.\"\"\"\n store = Store()\n auth_response = {'display-name': 'John Doe', 'username': 'jdoe', 'id': '-1'}\n client_mock.get.return_value = auth_response\n\n result = store.whoami()\n\n assert client_mock.mock_calls == [\n call.get('/v1/whoami'),\n ]\n assert result.name == 'John Doe'\n assert result.username == 'jdoe'\n assert result.userid == '-1'\n\n\ndef test_register_name(client_mock):\n \"\"\"Simple whoami case.\"\"\"\n store = Store()\n result = store.register_name('testname')\n\n assert client_mock.mock_calls == [\n 
call.post('/v1/charm', {'name': 'testname'}),\n ]\n assert result is None\n\n\ndef test_list_registered_names_empty(client_mock):\n \"\"\"List registered names getting an empty response.\"\"\"\n store = Store()\n\n auth_response = {'charms': []}\n client_mock.get.return_value = auth_response\n\n result = store.list_registered_names()\n\n assert client_mock.mock_calls == [\n call.get('/v1/charm')\n ]\n assert result == []\n\n\ndef test_list_registered_names_multiple(client_mock):\n \"\"\"List registered names getting a multiple response.\"\"\"\n store = Store()\n\n auth_response = {'charms': [\n {'name': 'name1', 'private': False, 'status': 'status1'},\n {'name': 'name2', 'private': True, 'status': 'status2'},\n ]}\n client_mock.get.return_value = auth_response\n\n result = store.list_registered_names()\n\n assert client_mock.mock_calls == [\n call.get('/v1/charm')\n ]\n item1, item2 = result\n assert item1.name == 'name1'\n assert not item1.private\n assert item1.status == 'status1'\n assert item2.name == 'name2'\n assert item2.private\n assert item2.status == 'status2'\n\n\ndef test_upload_straightforward(client_mock, caplog):\n \"\"\"The full and successful upload case.\"\"\"\n caplog.set_level(logging.DEBUG, logger=\"charmcraft.commands\")\n store = Store()\n\n # the first response, for when pushing bytes\n test_upload_id = 'test-upload-id'\n client_mock.push.return_value = test_upload_id\n\n # the second response, for telling the store it was pushed\n test_status_url = 'https://store.c.c/status'\n client_mock.post.return_value = {'status-url': test_status_url}\n\n # the third response, status ok (note the patched UPLOAD_ENDING_STATUSES below)\n test_revision = 123\n test_status_ok = 'test-status'\n status_response = {'revisions': [{'status': test_status_ok, 'revision': test_revision}]}\n client_mock.get.return_value = status_response\n\n test_status_resolution = 'test-ok-or-not'\n fake_statuses = {test_status_ok: test_status_resolution}\n test_charm_name = 'test-name'\n test_filepath = 'test-filepath'\n with patch.dict('charmcraft.commands.store.store.UPLOAD_ENDING_STATUSES', fake_statuses):\n result = store.upload(test_charm_name, test_filepath)\n\n # check all client calls\n assert client_mock.mock_calls == [\n call.push(test_filepath),\n call.post('/v1/charm/{}/revisions'.format(test_charm_name), {'upload-id': test_upload_id}),\n call.get(test_status_url),\n ]\n\n # check result (build after patched ending struct)\n assert result.ok == test_status_resolution\n assert result.status == test_status_ok\n assert result.revision == test_revision\n\n # check logs\n expected = [\n \"Upload test-upload-id started, got status url https://store.c.c/status\",\n \"Status checked: \" + str(status_response),\n ]\n assert expected == [rec.message for rec in caplog.records]\n\n\ndef test_upload_polls_status(client_mock, caplog):\n \"\"\"Upload polls status url until the end is indicated.\"\"\"\n caplog.set_level(logging.DEBUG, logger=\"charmcraft.commands\")\n store = Store()\n\n # first and second response, for pushing bytes and let the store know about it\n test_upload_id = 'test-upload-id'\n client_mock.push.return_value = test_upload_id\n test_status_url = 'https://store.c.c/status'\n client_mock.post.return_value = {'status-url': test_status_url}\n\n # the status checking response, will answer something not done yet twice, then ok\n test_revision = 123\n test_status_ok = 'test-status'\n status_response_1 = {'revisions': [{'status': 'still-scanning', 'revision': None}]}\n status_response_2 = 
{'revisions': [{'status': 'more-revisions', 'revision': None}]}\n status_response_3 = {'revisions': [{'status': test_status_ok, 'revision': test_revision}]}\n client_mock.get.side_effect = [status_response_1, status_response_2, status_response_3]\n\n test_status_resolution = 'clean and crispy'\n fake_statuses = {test_status_ok: test_status_resolution}\n with patch.dict('charmcraft.commands.store.store.UPLOAD_ENDING_STATUSES', fake_statuses):\n with patch('charmcraft.commands.store.store.POLL_DELAY', 0.01):\n result = store.upload('some-name', 'some-filepath')\n\n # check the status-checking client calls (kept going until third one)\n assert client_mock.mock_calls[2:] == [\n call.get(test_status_url),\n call.get(test_status_url),\n call.get(test_status_url),\n ]\n\n # check result which must have values from final result\n assert result.ok == test_status_resolution\n assert result.status == test_status_ok\n assert result.revision == test_revision\n\n # check logs\n expected = [\n \"Upload test-upload-id started, got status url https://store.c.c/status\",\n \"Status checked: \" + str(status_response_1),\n \"Status checked: \" + str(status_response_2),\n \"Status checked: \" + str(status_response_3),\n ]\n assert expected == [rec.message for rec in caplog.records]\n","sub_path":"tests/commands/test_store_api.py","file_name":"test_store_api.py","file_ext":"py","file_size_in_byte":7123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"427589613","text":"# -*- coding: utf-8 -*-\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n'''\nP1\n'''\n\ne=1.776\n\n\n'''\nWe first define f and the helpers that compute the values of\nk1, k2 and k3, which are then combined to obtain y_{n+1}.\n'''\ndef f(y, v,eta=e):\n return v, -y-eta*(y**2-1)*v\n\ndef get_k1(y_n, v_n, h, f):\n f_eval = f(y_n, v_n)\n return h * f_eval[0], h * f_eval[1]\n\ndef get_k2(y_n, v_n, h, f):\n k1 = get_k1(y_n, v_n, h, f)\n f_eval = f(y_n + k1[0]/2, v_n + k1[1]/2)\n return h * f_eval[0], h * f_eval[1]\n\ndef get_k3(y_n, v_n, h,f):\n k1=get_k1(y_n, v_n, h, f)\n k2=get_k2(y_n, v_n, h, f)\n f_eval=f(y_n-k1[0]+2*k2[0],v_n -k1[1]+2*k2[1])\n return h*f_eval[0],h*f_eval[1]\n\ndef rk3_step(y_n, v_n, h, f):\n # Kutta's third-order rule: y_{n+1} = y_n + (k1 + 4*k2 + k3)/6\n k1 = get_k1(y_n, v_n, h, f)\n k2 = get_k2(y_n, v_n, h, f)\n k3 = get_k3(y_n, v_n, h, f)\n y_n1 = y_n + (1/6.)*(k1[0] + 4*k2[0] + k3[0])\n v_n1 = v_n + (1/6.)*(k1[1] + 4*k2[1] + k3[1])\n return y_n1, v_n1\n\n\n'''\nWe set the number of steps, which also fixes the\nsize of the arrays for the variables y and dy/ds = v\n'''\n\nN_steps = 50000\nh = 20*np.pi / N_steps\ny= np.zeros(N_steps)\nv = np.zeros(N_steps)\ny2= np.zeros(N_steps)\nv2= np.zeros(N_steps)\n\ny[0] = 0.1\nv[0] = 0\nfor i in range(1, N_steps):\n y[i], v[i] = rk3_step(y[i-1], v[i-1], h, f)\n\n'''\nPlotting\n'''\n\nt_rk= [h * i for i in range(N_steps)]\nplt.figure(1)\nplt.figure(1).clf()\nplt.plot(y, v, 'y')\nplt.title(\"$ \\ Condiciones \\ iniciales \\ y(s)=0.1 \\; \\ dy/ds=0$\", fontsize=15)\nplt.xlabel('$y(s)$',fontsize=15)\nplt.ylabel('$dy/ds$',fontsize=15)\nplt.savefig(\"fig1.png\")\nplt.show()\n\nplt.figure(2)\nplt.figure(2).clf()\n\ny2[0]=4\nv2[0]=0\nfor i in range(1,N_steps):\n y2[i],v2[i]= rk3_step(y2[i-1], v2[i-1], h, f)\n\nplt.plot(y2,v2,'g')\nplt.title(\"$ \\ Condiciones \\ iniciales \\ y(s)=4 \\ ; \\ dy/ds=0$ \", fontsize=15)\nplt.xlabel('$y(s)$',fontsize=15)\nplt.ylabel('$dy/ds$',fontsize=15)\nplt.savefig(\"fig2.png\")\nplt.show()\n\nplt.figure(3)\nplt.figure(3).clf()\n\nplt.plot(t_rk,y2,'r', label= \"$ \\ condiciones \\ iniciales \\ y(s)=4 \\ ; \\ 
dy/ds=0$\")\nplt.legend()\nplt.plot(t_rk,y,'b', label = \"$ \\ condiciones \\ iniciales \\ y(s)=0.1 \\ ; \\ dy/ds=0$\")\nplt.legend()\nplt.title(\" $ \\ Y \\ vs \\ S$\",fontsize=15)\nplt.ylabel('$y(s)$',fontsize=15)\nplt.xlabel(\"$s$\",fontsize=15)\nplt.savefig(\"fig3.png\")\n\nplt.show()","sub_path":"P1.py","file_name":"P1.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"158225251","text":"\"\"\"\n PackageExportAMBA\n \n (C)2012-2018 FIS Front Arena\n \n Handles the AMBA file export\n \n 20120514 Richard Ludwig\n\n\n Extension Manager extension type FAMBADefinition it is possible to have AMBA settings per Object type.\n \n An example for FInstrument:\n \n [PackageTagger]FInstrument:Configurator =\n add_fields={{instrument,strike_price},{instrument,free_text}}\n add_referring={leg,insaddr}\n add_ref_fields={{trade,insaddr,instrument,instype}}\n nice_enum_names=1\n remove_fields={{instrument,free_text}}\n remove_ref_fields={{instrument,settle_calnbr,calendar,calid}}\n show_all_fields=0\n show_protection=1\n show_seqnbr=0\n use_regional_settings=0\n utc_timestamps=1\n \n\"\"\"\nimport acm\nimport FRunScriptGUI\nimport urllib\nimport PackageDependents\nreload(PackageDependents)\n\ntry:\n import PackageFilter\nexcept:\n PackageFilter = None\n\ndef ExportCommonObjectAsXMLCB(eii):\n commonobj = eii.ExtensionObject() \n shell = eii.Parameter('shell')\n follow = str(eii.MenuExtension().At( \"Follow\")) == \"True\"\n ExportCommonObject(shell, commonobj, follow = follow)\n\n\ndef ExportCommonObjectAsXMLCBFromAppMenu(eii):\n commonobj = eii.ExtensionObject().CurrentObject()\n shell = eii.Parameter('shell')\n ExportCommonObject(shell, commonobj)\n\ndef getAMBADefinition(commonobject):\n context = acm.GetDefaultContext()\n configName = context.GetExtension('FParameters', 'FObject', 'Configurator').Value().At('FAMBADefinitionName')\n\n par = context.GetExtension('FAMBADefinition', commonobject.Class(), configName )\n if par:\n return par\n else:\n emptyPar = acm.FAMBADefinition()\n emptyPar.Name('Empty')\n\n return emptyPar\n\n\ndef generateFilename( objectList, package):\n # Create a nice filename\n filename = \"mixedobjects\"\n if package != None:\n filename = package\n \n elif len(objectList) == 1: # and hasattr(objectList[0],'Name'):\n cls = objectList[0].Class()\n try:\n if cls.UniqueNameAttribute():\n method = cls.UniqueNameAttribute().GetMethod().Name()\n else:\n method = cls.UniqueAttribute().GetMethod().Name()\n except:\n method = 'StringKey'\n \n metCall = getattr(objectList[0], str(method))\n if metCall == None:\n metCall = getattr(objectList[0], 'Oid')\n uniqueName = metCall()\n \n filename = urllib.quote( (\"%s_%s\"%(objectList[0].ClassName(), uniqueName)).replace(' ', '_'), ' @.')\n\n elif objectList[0].ClassName() == objectList[-1].ClassName():\n filename = objectList[0].ClassName()\n \n return filename\n\n\ndef WriteToFile(commonobjects, formatterclass, filename, typeUpdate=False):\n\n gen = acm.FAMBAMessageGenerator()\n taggedmessformatter = formatterclass()\n if str(filename).endswith('.xml'):\n try:\n output = acm.FCharacterOutputFileStream(filename)\n\n ambamessages = acm.FAMBAMessage()\n ambamessages.Type(\"MESSAGES\")\n for commonobject in commonobjects:\n # Set FAMBADefinition\n par = getAMBADefinition(commonobject)\n gen.Parameters(par.Value())\n \n objMessage = gen.Generate(commonobject)\n messageType = objMessage.At('TYPE')\n objMessage.RemoveKeyString('TYPE')\n if typeUpdate:\n 
objMessage.AtPut('TYPE', 'UPDATE_%s'%messageType)\n else:\n objMessage.AtPut('TYPE', 'INSERT_%s'%messageType)\n \n ambamessages.AddMessage(objMessage)\n \n taggedmessformatter.FormatStream(output, ambamessages)\n finally:\n if output != None:\n output.Close()\n else: \n try:\n output = acm.FCharacterOutputFileStream(filename)\n \n for commonobject in commonobjects:\n # Set FAMBADefinition\n par = getAMBADefinition(commonobject)\n gen.Parameters(par.Value())\n\n objMessage = gen.Generate(commonobject)\n messageType = objMessage.At('TYPE')\n objMessage.RemoveKeyString('TYPE')\n\n if typeUpdate:\n objMessage.AtPut('TYPE', 'UPDATE_%s'%messageType)\n else:\n objMessage.AtPut('TYPE', 'INSERT_%s'%messageType)\n\n taggedmessformatter.FormatStream(output, objMessage)\n finally:\n if output != None:\n output.Close()\n \n \ndef __UpdateReferenceSet(object, visited, dependencies):\n \n if object not in visited and object != None:\n # Marking object type as visited.\n visited.add(object)\n\n # Adding references defined in this package.\n for reference in PackageDependents.Dependents(object):\n __UpdateReferenceSet(reference, visited, dependencies)\n\n # Adding references out of object to list, if not already present.\n for reference in object.ReferencesOut():\n __UpdateReferenceSet(reference, visited, dependencies)\n \n # Adding object to list (after its dependencies). \n dependencies.append(object)\n\n # Adding objects which depend on object.\n for reference in PackageDependents.Depends(object):\n __UpdateReferenceSet(reference, visited, dependencies)\n\n\ndef followDependents(objects):\n visited = set()\n dependencies = list()\n \n for object in objects:\n __UpdateReferenceSet(object, visited, dependencies)\n\n return dependencies\n\n \ndef ExportCommonObject(shell, commonobj, package = None, follow=False):\n if not commonobj:\n acm.UX().Dialogs().MessageBoxInformation(shell, 'No FCommonObject Selected!')\n return\n\n fileSelection = FRunScriptGUI.OutputFileSelection(\"AMBA text files (*.txt)|*.txt|XML Files (*.xml)|*.xml|All files (*.*)|*.*||\")\n \n objectList = [obj for obj in commonobj if hasattr(obj, 'ClassName')] # Strip header string objects\n if follow:\n objectList = followDependents(objectList)\n # Create a nice filename\n if package != None:\n fileSelection.SelectedFile(package)\n elif len(objectList) == 1: # and hasattr(objectList[0],'Name'):\n cls = objectList[0].Class()\n try:\n if cls.UniqueNameAttribute():\n method = cls.UniqueNameAttribute().GetMethod().Name()\n else:\n method = cls.UniqueAttribute().GetMethod().Name()\n except:\n method = 'StringKey'\n \n metCall = getattr(objectList[0], str(method))\n if metCall == None:\n metCall = getattr(objectList[0], 'Oid')\n uniqueName = metCall()\n \n filename = urllib.quote( (\"%s_%s\"%(objectList[0].ClassName(), uniqueName)).replace(' ', '_'), ' @.')\n\n fileSelection.SelectedFile(filename)\n elif objectList[0].ClassName() == objectList[-1].ClassName():\n fileSelection.SelectedFile(objectList[0].ClassName())\n\n if PackageFilter:\n reload(PackageFilter)\n objectList = PackageFilter.Filter(objectList)\n\n # Open file requester\n if acm.UX().Dialogs().BrowseForFile(shell, fileSelection ):\n if str(fileSelection.SelectedFile()).endswith('.xml'):\n WriteToFile(objectList, acm.FTaggedMessageXMLFormatter, fileSelection.SelectedFile(), typeUpdate=False)\n else:\n WriteToFile(objectList, acm.FTaggedMessageMBFormatter, fileSelection.SelectedFile(), 
typeUpdate=False)\n","sub_path":"Extensions/Packager/FPythonCode/PackageExportAMBA.py","file_name":"PackageExportAMBA.py","file_ext":"py","file_size_in_byte":7640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"113140465","text":"#!/usr/bin/env python\nimport os\nimport shutil\nimport subprocess\nimport errno\nimport re\nimport argparse\n\nimport executor\nimport executor.concurrent\n\nimport subsequences\n\ndef resequence_copy_point_clouds(source_folder, prefix, results_path, type, subsequence):\n destination_path = os.path.join(results_path, 'PointClouds', type)\n subsequences.make_dir_path(destination_path)\n for (filename, original_filename) in zip(subsequences.file_names(source_folder, prefix, '.mat'), subsequence):\n resequenced_name = prefix + os.path.splitext(original_filename)[0] + '.mat'\n shutil.copy(os.path.join(source_folder, filename), os.path.join(destination_path, resequenced_name))\n\ndef resequence_files(ms_results_folder, subsequence, results_path):\n point_clouds_folder = os.path.join(ms_results_folder, 'SuperPixels')\n resequence_copy_point_clouds(point_clouds_folder, 'points_sparse_', results_path, 'Sparse', subsequence)\n resequence_copy_point_clouds(point_clouds_folder, 'points_dense_linear_', results_path, 'DenseLinear', subsequence)\n resequence_copy_point_clouds(point_clouds_folder, 'points_dense_foreground_', results_path, 'DenseForeground', subsequence)\n resequence_copy_point_clouds(point_clouds_folder, 'points_dense_global_', results_path, 'DenseGlobal', subsequence)\n\ndef main(args):\n # Copy files to results\n results_path = os.path.join(subsequences._OUTPUT_PATH, 'DepthReconstruction')\n subsequences.clear_directory(results_path)\n for (subsequence, start, end, subsampling) in subsequences.generate_subsequences_from_args(args):\n bm_results_folder_name = subsequences.results_folder_name(args.size, start, args.length, subsampling)\n dir = os.path.join(subsequences._DATASETS_PATH, args.name)\n ms_results_folder = subsequences.motseg_results_folder_path(dir, bm_results_folder_name, len(subsequence))\n resequence_files(ms_results_folder, subsequence, results_path)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n subsequences.add_subsequence_args(parser)\n args = parser.parse_args()\n\n main(args)\n","sub_path":"video_popup/resequence_depth.py","file_name":"resequence_depth.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"474829270","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n=========================\r\n @name: Pyoro\r\n @author: Ptijuju22\r\n @date: 10/04/2018\r\n @version: 1.1\r\n=========================\r\n\"\"\"\r\n\r\nfrom gui.eventable_widget import Eventable_widget\r\nfrom gui.text import Text\r\n\r\nclass Clickable_text(Text, Eventable_widget):\r\n \"\"\" Clickable text widget \"\"\"\r\n\r\n DEFAULT_KWARGS = {\r\n \"onClickTextColor\": (200, 200, 200, 255),\r\n \"onMiddleClickTextColor\": (100, 100, 100, 255),\r\n \"onRightClickTextColor\": (220, 220, 220, 255),\r\n \"onHoverTextColor\": (230, 230, 230, 255),\r\n \"disableTextColor\": (240, 240, 240, 235)\r\n }\r\n\r\n def __init__(self, activity, pos, text, **kwargs):\r\n Clickable_text.updateDefaultKwargs(kwargs)\r\n Text.__init__(self, activity, pos, text, **kwargs)\r\n Eventable_widget.__init__(self, activity, pos, **self.kwargs)\r\n\r\n def update(self, deltaTime):\r\n if not self.kwargs[\"enable\"]:\r\n 
self.font.fgcolor = self.kwargs[\"disableTextColor\"]\r\n elif self.clicked:\r\n self.font.fgcolor = self.kwargs[\"onClickTextColor\"]\r\n elif self.rightClicked:\r\n self.font.fgcolor = self.kwargs[\"onRightClickTextColor\"]\r\n elif self.middleClicked:\r\n self.font.fgcolor = self.kwargs[\"onMiddleClickTextColor\"]\r\n elif self.hovered:\r\n self.font.fgcolor = self.kwargs[\"onHoverTextColor\"]\r\n else:\r\n self.font.fgcolor = self.kwargs[\"textColor\"]\r\n Text.update(self, deltaTime)","sub_path":"gui/clickable_text.py","file_name":"clickable_text.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"70640422","text":"items = [\n ('product 1', 46),\n ('product 2', 12),\n ('product 3', 34),\n ('product 4', 9),\n]\n\n\n# prices = []\n# for item in items:\n# prices.append(item[1])\n\nx = map(lambda item: item[1], items)\nfor item in x:\n print(item)\n","sub_path":"Mosh Python Course/Hello world/map_function.py","file_name":"map_function.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"30966416","text":"\"\"\"empty message\n\nRevision ID: e2d0f8a5e06a\nRevises: 2f3bf2f7913d\nCreate Date: 2018-01-04 13:57:08.245183\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e2d0f8a5e06a'\ndown_revision = '2f3bf2f7913d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('contacts', schema=None) as batch_op:\n batch_op.drop_constraint('unique_contact_email', type_='unique')\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('contacts', schema=None) as batch_op:\n batch_op.create_unique_constraint('unique_contact_email', ['email'])\n\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/e2d0f8a5e06a_.py","file_name":"e2d0f8a5e06a_.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"501961156","text":"import os\nimport sys\nimport subprocess\nimport time\nfrom subprocess import DEVNULL\n\n\ndef startRedisAgain(isTest):\n try:\n output = subprocess.check_output(['redis-cli', 'ping']) # Cant put full path for Windows compatibility\n if 'PONG' in str(output):\n print('[LOG] Redis detected and running -> OK', flush=True)\n return True\n if 'No such file or directory:' in str(output):\n print(\"[ERROR] can't execute redis: No such file or directory\")\n print(output)\n except FileNotFoundError:\n if isTest:\n return True\n print(\"[ERROR] Redis is not installed or wasn't found on the system\")\n return False\n\n\ndef redis_sanity_check(isTest):\n try:\n return startRedisAgain(isTest)\n except subprocess.CalledProcessError:\n print('[LOG] CalledProcessError but will try to start Redis mannualy', flush=True)\n subprocess.Popen(['nohup', 'redis-server', '--protected-mode no'], stdout=DEVNULL)\n time.sleep(3)\n if startRedisAgain(isTest):\n return True\n print('[ERROR] Redis didnt answered, is redis installed ?', flush=True)\n return False\n\n\ndef startDjango(settings_path='tipboard.webserver.settings'):\n \"\"\" Start the django with DJANGO_SETTINGS_MODULE path added in env \"\"\"\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_path)\n if redis_sanity_check(isTest='test' in sys.argv[1]):\n from django.core.management import execute_from_command_line\n try:\n return execute_from_command_line(sys.argv)\n except NotImplementedError:\n print('Django is not installed')\n return -1\n\n\ndef show_help():\n print('''\n Usage:\n -h, or help \\t\\t=> show help usage\n -r, or runserver\\t=> start the tipboard server\n -s, or sensors \\t=> start sensors located in src/sensors ''', flush=True)\n return 0\n\n\ndef main_as_pkg():\n \"\"\" to become a python package and go to pypi, started in ../setup.py \"\"\"\n return startDjango(settings_path='src.tipboard.webserver.settings')\n\n\nif __name__ == '__main__':\n argv = sys.argv[1]\n sys.path.insert(0, os.getcwd())\n if argv in ('sensors', '-s'):\n from src.sensors.sensors_main import scheduleYourSensors\n\n scheduleYourSensors()\n elif argv in ('test', 'runserver', 'migrate', 'shell', 'collectstatic', 'findstatic'):\n sys.exit(startDjango())\n sys.exit(show_help())\n","sub_path":"src/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"386683614","text":"import os\nimport random\nimport textwrap\n\nfrom PIL import Image, ImageDraw, ImageFont\nFONT_FILE = 'fonts/HaginCapsMedium-Medium.otf'\nFONT_SIZE = 48\nFONT = ImageFont.truetype(FONT_FILE, FONT_SIZE)\nSIZE = SIZE_X, SIZE_Y = (1024, 640)\nLINE_HEIGHT = FONT_SIZE * 1.2\nWIDTH_LIM = 1000\n\n\nBACKGROUNDS = [\n {\"filename\": \"backs/back1.jpg\", \"colour\": 0, \"position\": (0.5, 0.25),},\n {\"filename\": \"backs/back2.jpg\", \"colour\": 255, \"position\": (0.5, 0.55),},\n {\"filename\": \"backs/back3.jpg\", \"colour\": 255, \"position\": (0.5, 0.3)},\n]\n\ndef main():\n # where to put generated images?\n try:\n os.mkdir('images')\n except:\n pass # already 
exists :)\n\n    # load the list of quotes\n    with open('quotes/quotes.txt') as file:\n        quotes = read(file)\n    # make the images! mwahahaha!\n    for i, (quote, author) in enumerate(quotes):\n        print(f'drawing {quote}')\n        background = random.choice(BACKGROUNDS)\n        img = make_image(quote, author, background)\n        img.save(f'images/quote-{i:02d}.png')\n\ndef read(file):\n    quotes = []\n    for line in file:\n        quote, *authors = line.strip().split(\" ~ \")\n        author = authors[0] if authors else None\n        quotes.append((quote, author))\n    return quotes\n\ndef make_image(quote, author, bg):\n    # start with the background\n    img = Image.open(bg['filename'])\n    draw = ImageDraw.Draw(img)\n\n    # prepare the text colour\n    colour = tuple([bg['colour']]*3+[255])\n    \n    # put the quote in the image:\n    # where?\n    x, y = bg['position']\n    line = LINE_HEIGHT\n    x *= SIZE_X\n    y *= SIZE_Y\n\n    # split the quote up if necessary:\n    for i in range(1, 4+1):\n        try:\n            # integer division so textwrap gets an int width\n            wquote = \"\\n\".join(textwrap.wrap(quote, width=len(quote)//i))\n            put_text_centered(draw, (x, y), wquote, FONT, colour)\n            break\n        except Exception:\n            continue\n    if author:\n        put_text_centered(draw, (x, y+line*(1+0.5*i)), author, FONT, colour)\n    #done!\n    return img\n\ndef put_text_centered(draw, pos, text, font, colour):\n    center_x, center_y = int(pos[0]), int(pos[1])\n    w, h = draw.multiline_textsize(text, font=font)\n    if w > WIDTH_LIM:\n        raise Exception('text too wide!')\n    corner_x = center_x - (w // 2)\n    corner_y = center_y - (h // 2)\n    draw.multiline_text((corner_x, corner_y), text, font=font, fill=colour, align='center')\n\nif __name__ == '__main__':\n    main()","sub_path":"create-images.py","file_name":"create-images.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}{"seq_id":"422608860","text":"\"\"\"\nCreates `gen` directory with all necessary files.\n\"\"\"\nimport glob\nimport logging\nfrom typing import List\nimport execjs\nimport constants\nfrom copy_file import copy_file\nfrom safe_create_directory import safe_create_directory\n\n\ndef gen_all_files(\n    parser: execjs.ExternalRuntime, std_dir: str, out_dir: str, style_file: str\n) -> List[str]:\n    \"\"\"\n    Recursively generates all doc files and puts them into the `gen` directory.\n    \"\"\"\n    all_file_names: List[str] = []\n\n    for filename in glob.iglob(\"**/*\" + constants.FILE_EXT, recursive=True):\n        out_file_name = (\n            filename.replace(std_dir + \"/\", \"\")\n            .replace(\"/\", \"-\")\n            .replace(\"-src-\", \"-\")\n            .replace(constants.FILE_EXT, \".html\")\n        )\n        logging.info(\"Generating: %s\", out_file_name)\n        try:\n            __gen_file(parser, filename, out_file_name, out_dir, style_file)\n            all_file_names.append(out_file_name.replace(\".html\", \"\"))\n        except execjs.Error as err:\n            logging.info(\"Could not generate: %s\", out_file_name)\n            logging.info(\"Got an exception: %s\", str(err))\n\n    return all_file_names\n\n\ndef __gen_file(\n    parser: execjs.ExternalRuntime,\n    path: str,\n    out_name: str,\n    out_dir: str,\n    style_file: str,\n) -> None:\n    \"\"\"\n    Generates an HTML file from the Enso source file provided with `path` and saves\n    it as `out_name`.\n    \"\"\"\n    enso_file = open(path, \"r\")\n    stylesheet_link = ''\n    parsed = parser.call(constants.PARSE_AST_METHOD, enso_file.read())\n    enso_file.close()\n    html_file = open(out_dir + \"/\" + out_name, \"w\")\n    if len(parsed) == 0:\n        parsed = parser.call(\n            constants.PARSE_PURE_METHOD,\n            \"\\n\\n*Enso Reference Viewer.*\\n\\nNo documentation available for chosen source file.\",\n        )\n    
html_file.write(stylesheet_link + parsed.replace(\"display: flex\", \"display: none\"))\n html_file.close()\n\n\ndef init_gen_dir(name: str, style_file: str) -> None:\n \"\"\"\n Creates the `gen` directory with all necessary files.\n \"\"\"\n safe_create_directory(name)\n stylesheet_file: str = \"/\" + style_file\n favicon_file: str = \"favicon.ico\"\n copy_file(constants.IN_DIR + stylesheet_file, name + stylesheet_file)\n copy_file(favicon_file, name + \"/\" + favicon_file)\n\n\ndef init_parser(parser_file: str) -> execjs.ExternalRuntime:\n \"\"\"\n Compiles the JS parser to call from within Python code.\n \"\"\"\n parser = open(constants.IN_DIR + \"/\" + parser_file, \"r\").read()\n parser = execjs.compile(parser)\n return parser\n","sub_path":"src/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"624787470","text":"from unittest import TestCase\nfrom unittest.mock import patch\nimport unittest.mock\nimport sud\nimport io\n\n# May Chau\n# A01080616\n# 2019-03-11\n\n\nclass TestWillRestart(TestCase):\n @patch('builtins.input', return_value=\"Y\")\n def test_will_restart_true(self, mock_input):\n self.assertTrue(sud.will_restart())\n\n @patch('builtins.input', return_value=\"N\")\n def test_will_restart_false(self, mock_input):\n self.assertFalse(sud.will_restart())\n\n @unittest.mock.patch(\"sys.stdout\",new_callable=io.StringIO)\n @patch('builtins.input', side_effect=[\"\", \"N\"])\n def test_will_restart_empty_input(self, mock_input, mock_output):\n sud.will_restart()\n expected_output = \"Please retry\\n\"\n self.assertEqual(mock_output.getvalue(), expected_output)\n\n @unittest.mock.patch(\"sys.stdout\", new_callable=io.StringIO)\n @patch('builtins.input', side_effect=[\"abc\", \"N\"])\n def test_will_restart_invalid_input(self, mock_input, mock_output):\n sud.will_restart()\n expected_output = \"Please retry\\n\"\n self.assertEqual(mock_output.getvalue(), expected_output)\n\n","sub_path":"SUD - RuPaul Drag Race/test_will_restart.py","file_name":"test_will_restart.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"563135440","text":"from rdkit import Chem\nfrom rdkit.Chem import Crippen\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\n\nfile_input = '/home/lin/Downloads/reference.sdf'\nfile_input_sulfone = '/home/lin/Downloads/sulfone10000.sdf'\nfile_input_acid = '/home/lin/Downloads/acid10000.sdf'\nsupplier = Chem.SDMolSupplier(file_input)\nsupplier_sulfone = Chem.SDMolSupplier(file_input_sulfone)\nsupplier_acid = Chem.SDMolSupplier(file_input_acid)\nmollogp = []\nmolaf = []\nmolpre = []\nfor x in supplier:\n mollogp.append(float(Crippen.MolLogP(x)))\n molpre.append(float(x.GetProp('pre')))\n molaf.append(float(x.GetProp('af')))\n\nX = []\nX_sulfone = []\nX_acid = []\nfor x in supplier_sulfone:\n X_acid.append([float(x.GetProp('pre')), float(Crippen.MolLogP(x))])\nfor x in supplier_acid:\n X_acid.append([float(x.GetProp('pre')), float(Crippen.MolLogP(x))])\nY = []\nfor i in range(len(mollogp)):\n X.append([molpre[i], mollogp[i]])\n Y.append(molaf[i])\nX = sm.add_constant(X)\nX_acid = sm.add_constant(X_acid)\nmodel = sm.OLS(Y, X)\nresults = model.fit()\nprint(results.summary())\nplt.scatter(results.predict(X), Y, color='black',label='Ref',s=80)\nplt.scatter(results.predict(X_acid[0]), results.predict(X_acid[0]), 
color='red',label='sulfone',s=80)\nplt.scatter(results.predict(X_acid[1:]), results.predict(X_acid[1:]), color='blue',label='acid',s=80)\nplt.legend()\n#plt.plot(clf.predict(X), clf.predict(X), color='blue',linewidth=3)\nplt.show()","sub_path":"xlogp.py","file_name":"xlogp.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"152694976","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom numpy.linalg import inv\r\n\r\nnum = 1001\r\nstd = 5\r\n\r\n\r\n# x : x-coordinate data\r\n# y1 : (clean) y-coordinate data\r\n# y2 : (noisy) y-coordinate data\r\n\r\ndef fun(x):\r\n # f = np.sin(x) * (1 / (1 + np.exp(-x)))\r\n f = np.abs(x) * np.sin(x)\r\n\r\n return f\r\n\r\n\r\ndef modelGenerate(x, parameter_num):\r\n model = np.empty((len(x), parameter_num))\r\n for i in range(len(x)):\r\n parameter_row = np.empty(parameter_num)\r\n for j in range(parameter_num):\r\n parameter_row[j] = np.power(x[i], j)\r\n\r\n model[i, :] = parameter_row[:]\r\n\r\n return model\r\n\r\n\r\ndef A(model, parameter_num, lamb):\r\n #i_matrix = np.identity(parameter_num) * np.sqrt(lamb)\r\n i_matrix = np.identity(parameter_num) * np.sqrt(lamb)\r\n\r\n return np.concatenate((model, i_matrix), axis=0)\r\n\r\n\r\ndef B(y, parameter_num):\r\n zero_matrix = np.zeros([parameter_num])\r\n\r\n return np.concatenate((y, zero_matrix), axis=0)\r\n\r\n\r\ndef leastSquare(left, right):\r\n return np.dot(np.dot(inv(np.dot(left.T, left)), left.T), right)\r\n\r\n\r\ndef energy(a, b, parameter, lamb):\r\n return np.sqrt(np.sum(np.power((a - b), 2))) + np.sqrt(np.sum(np.power(parameter, 2))) * lamb\r\n\r\n\r\n\r\nn = np.random.rand(num)\r\nnn = n - np.mean(n)\r\nx = np.linspace(-10, 10, num)\r\ny1 = fun(x) # clean points\r\ny2 = y1 + nn * std # noisy points\r\n\r\np1 = plt.subplot(2,1,1)\r\np1.set_title('clean points')\r\nplt.plot(x, y1, 'b.')\r\nplt.axis('off')\r\np2 = plt.subplot(2,1,2)\r\nplt.plot(x, y2, 'k.')\r\nplt.axis('off')\r\nplt.show()\r\n\r\n\r\nfor parameter_num in range(6, 16):\r\n model = modelGenerate(x, parameter_num)\r\n noLambdaParameter = leastSquare(model, y2)\r\n y3 = np.dot(model, noLambdaParameter)\r\n\r\n plt.title('parameter num = %d' % parameter_num)\r\n plt.plot(x, y3, 'b.')\r\n plt.axis('off')\r\n plt.show()\r\n\r\n\r\ndef result(parameter_num):\r\n model = modelGenerate(x, parameter_num)\r\n noLambdaParameter = leastSquare(model, y2)\r\n y3 = np.dot(model, noLambdaParameter)\r\n\r\n energyList = []\r\n energyListX = []\r\n\r\n for j in range(20):\r\n lamb = pow(2, j - 5)\r\n left = A(model, parameter_num, lamb)\r\n right = B(y2, parameter_num)\r\n withLambdaParameter = leastSquare(left, right)\r\n y4 = np.dot(model, withLambdaParameter)\r\n energyList.append(energy(y4, y2, withLambdaParameter, lamb))\r\n energyListX.append(lamb)\r\n\r\n p3 = plt.subplot(3, 1, 1)\r\n p3.set_title('parameter num = %d - WITHOUT LAMBDA-' % parameter_num)\r\n plt.plot(x, y3)\r\n plt.axis('off')\r\n\r\n p4 = plt.subplot(3, 1, 2)\r\n p4.set_title('lambda = pow (2, %d) - WITH LAMBDA-' % (j-5))\r\n plt.plot(x, y4)\r\n plt.axis('off')\r\n\r\n p5 = plt.subplot(3, 1, 3)\r\n p5.set_title('parameter num = %d, lambda = pow(2, %d)' % (parameter_num, j - 5))\r\n plt.plot(x, y3, 'r.', x, y4, 'k.')\r\n plt.axis('off')\r\n plt.show()\r\n\r\n return energyList, energyListX\r\n\r\n\r\n# 6 ~ 16\r\n## Plot the data with 8 parameter and changing lambda ($2^{-5}$, $2^{-4}$, $2^{-3}$, $\\cdots$, $2^{12}$, $2^{13}$, $2^{14}$)\r\nenergyList, 
energyListX = result(6)\r\n### Plot the energy changed by lambda\r\nplt.plot(energyListX, energyList, 'r.')\r\nplt.show()\r\n\r\n\r\n\r\n","sub_path":"assignment12/assignment12.py","file_name":"assignment12.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"164793085","text":"import networkx as nx\nfrom pymongo import MongoClient\nimport matplotlib.pyplot as plt\n\n\ndef get_id_by_link(link):\n client = MongoClient()\n db = client[\"vk_db\"]\n collection = db[\"name_id_weight\"]\n result = collection.find({\"link\": link})\n for res in result:\n return res[\"id\"]\n\nif __name__ == \"__main__\":\n\n G = nx.Graph()\n reposts = []\n data = []\n client = MongoClient()\n db = client[\"vk_db\"]\n collection = db[\"name_id_weight\"]\n result = collection.find()\n for res in result:\n data.append(res)\n\n db=client[\"reposts\"]\n collection = db[\"general\"]\n answer = collection.find()\n for res in answer:\n reposts.append(res)\n\n for each in data:\n G.add_node(each[\"id\"])\n G.node[each[\"id\"]]['weight'] = each[\"weight\"]\n\n for each in reposts:\n G.add_edge(get_id_by_link(each[\"link\"]), each[\"owner\"], weight=each[\"times\"])\n\n nx.write_graphml(G,'so.graphml')\n\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"502719194","text":"from flask import Flask, render_template\nfrom utils import get_data\n\napp = Flask(__name__)\n\n\ndef count_words(line):\n num = 0\n for _ in line.split():\n num += 1\n return num\n\n\n@app.route('/')\ndef get_home_page():\n return render_template(\"home.html\", data=get_data())\n\n\n@app.route('/alarm')\ndef get_alarm_page():\n line = get_data()[0]['text']\n name = get_data()[0]['title']\n num = count_words(line)\n return render_template(\"alarm.html\", line=line, num=num, name=name)\n\n\n@app.route('/headphones')\ndef get_headphones_page():\n line = get_data()[1]['text']\n name = get_data()[1]['title']\n num = count_words(line)\n return render_template(\"headphones.html\", line=line, num=num, name=name)\n\n\n@app.route('/iPod')\ndef get_ipod_page():\n line = get_data()[2]['text']\n name = get_data()[2]['title']\n num = count_words(line)\n return render_template(\"iPod.html\", line=line, num=num, name=name)\n\n\n@app.route('/calculator')\ndef get_calculator_page():\n line = get_data()[3]['text']\n name = get_data()[3]['title']\n num = count_words(line)\n return render_template(\"calculator.html\", line=line, num=num, name=name)\n\n\n@app.route('/coffeemaker')\ndef get_coffeemaker_page():\n line = get_data()[4]['text']\n name = get_data()[4]['title']\n num = count_words(line)\n return render_template(\"coffeemaker.html\", line=line, num=num, name=name)\n\n\n@app.route('/battery_charger')\ndef get_battery_charger_page():\n line = get_data()[5]['text']\n name = get_data()[5]['title']\n num = count_words(line)\n return render_template(\"battery_charger.html\", line=line, num=num, name=name)\n\n\n@app.route('/author')\ndef get_author_page():\n return render_template(\"author.html\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"flask_lesson_intro/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"436075921","text":"import sys\nfrom ortools.sat.python import cp_model\nfrom 
ortools.constraint_solver import routing_enums_pb2\nfrom ortools.constraint_solver import pywrapcp\nimport math\nfrom random import randint, random, sample\nfrom time import sleep, time\nimport csv\nimport codecs\nimport json\nfrom classes import Intersection, Endpoint, Node, Outage, Car, Route, Routes\nfrom rtree import Node\nfrom dynamicdistances import get_distance_matrix\nfrom distances import euclidean_distance\nfrom weather_learning import predict_fix_times\nimport datetime\nfrom subprocess import Popen\nfrom statistics import median, stdev, mean\n\n\n# intersections.csv contains the necessary intersection data\n# USA-road contains the TIMES between the intersections\ndef get_routing_data():\n intersection_dictionary = { }\n f = open('./routes/pathfinding/intersections.csv', 'r')\n reader = csv.reader(f, delimiter=',')\n for row in reader:\n intersection_dictionary[int(row[1])] = Intersection(row[0], int(row[1]), float(row[2]), float(row[3]))\n f = open('./routes/pathfinding/USA-road-t.NY.gr', 'r')\n for row in f:\n words = row.split(' ')\n if len(words) == 4 and words[0] == 'a' and int(words[1]) in intersection_dictionary and int(words[2]) in intersection_dictionary:\n intersection_dictionary[int(words[1])].endpoints[int(words[2])] = Endpoint(int(words[2]), int(words[3]))\n return intersection_dictionary\n\ndef define_outages(intersection_dictionary, outages, prediction_times):\n outage_objects = []\n i = 0\n for row in outages:\n outage_objects.append(Outage(i, float(row['Latitude']), float(row['Longitude']), intersection_dictionary[7440], row['TimeStampCrawled'], prediction_times[i][0], prediction_times[i][1]))\n i += 1\n i = 0\n for outage in outage_objects:\n for intersection in intersection_dictionary.values():\n current_outage_distance = euclidean_distance(outage.latitude, outage.longitude, outage.closest_intersection.latitude, outage.closest_intersection.longitude)\n potential_outage_distance = euclidean_distance(outage.latitude, outage.longitude, intersection.latitude, intersection.longitude)\n if potential_outage_distance < current_outage_distance:\n outage.closest_intersection = intersection\n i += 1\n return outage_objects\n\ndef get_distance_to_outages(routes, data, outage_index):\n new_routes = []\n for route in routes:\n for i in range(len(route.paths)):\n new_paths = []\n total_time = 0.0\n for j in range(len(route.paths)):\n if i == j:\n new_paths.append(Route(route.paths[i].vehicle_id, outage_index, route.paths[i].previous_indices + [outage_index], route.paths[i].route_time + data[route.paths[i].current_index][outage_index]))\n total_time += data[route.paths[i].current_index][outage_index]\n else:\n new_paths.append(route.paths[j])\n total_time += route.paths[j].route_time\n new_routes.append(Routes(new_paths, total_time))\n return new_routes\n \n\ndef insertion_based_routing(data, outages, prune_constant):\n start = time()\n def get_time(elem):\n return elem.max_time\n\n starting_vehicles = [Route(index, len(outages) + 1 + index, [len(outages) + 1 + index], 0.0) for index in range(len(data) - len(outages) - 1)]\n routes = [Routes(starting_vehicles, 0.0)]\n for outage_index in range(1, len(outages) + 1):\n routes = get_distance_to_outages(routes, data, outage_index)\n routes.sort(key=get_time)\n routes = routes[:prune_constant * (outage_index + 1)]\n \n min_route = min(routes, key=get_time)\n end = time()\n difference = end - start\n return print_info(min_route, difference)\n\ndef get_rtree_routes(routes, outage_index, outage, cars_to_check, prune_constant):\n def 
get_distance(elem):\n return elem[1]\n\n distances = []\n for car in cars_to_check:\n distances.append((car.id, math.sqrt((car.latitude - outage.latitude) ** 2 + (car.longitude - outage.longitude) ** 2) + 0.1))\n distances.sort(key=get_distance)\n distances = distances[:prune_constant]\n\n new_routes = []\n for route in routes:\n for edge in distances:\n new_paths = []\n total_time = 0.0\n for i in range(len(route.paths)):\n if route.paths[i].vehicle_id == edge[0]:\n new_paths.append(Route(route.paths[i].vehicle_id, outage_index, route.paths[i].previous_indices + [outage_index], route.paths[i].route_time + edge[1]))\n total_time += edge[1]\n else:\n new_paths.append(route.paths[i])\n total_time += route.paths[i].route_time\n new_routes.append(Routes(new_paths, total_time))\n return new_routes\n\ndef rtree_based_routing(data, outages, prune_constant, vehicles):\n start = time()\n def get_time(elem):\n return elem.max_time\n\n starting_vehicles = [Route(index, len(outages) + 1 + index, [len(outages) + 1 + index], 0.0) for index in range(len(data) - len(outages) - 1)]\n routes = [Routes(starting_vehicles, 0.0)]\n rtree = Node(True, 0)\n for vehicle in vehicles:\n rtree.insert_new_value(vehicle, prune_constant)\n for outage_index in range(len(outages)):\n node = rtree.insert_new_value(outages[outage_index], prune_constant)\n values = rtree.return_all_values(node)\n cars_to_check = [value for value in values if value.is_car]\n routes = get_rtree_routes(routes, outage_index, outages[outage_index], cars_to_check, prune_constant)\n routes.sort(key=get_time)\n routes = routes[:prune_constant * (outage_index + 1)]\n \n min_route = min(routes, key=get_time)\n end = time()\n difference = end - start\n return print_info(min_route, difference)\n\ndef print_info(route, time_difference):\n times = []\n outages = []\n\n for path in route.paths:\n times.append(path.route_time)\n outages.append(len(path.previous_indices) - 1)\n\n average_time = mean(times)\n median_time = median(times)\n min_time = min(times)\n max_time = max(times)\n standard_deviation_time = stdev(times)\n\n average_outages = mean(outages)\n median_outages = median(outages)\n min_outages = min(outages)\n max_outages = max(outages)\n standard_deviation_outages = stdev(outages)\n\n return [time_difference, average_time, median_time, min_time, max_time, standard_deviation_time,\n average_outages, median_outages, min_outages, max_outages, standard_deviation_outages]\n\n\ndef get_outages():\n f = open('./outages1.json', 'r')\n return json.load(f)\n\ndef create_recursive_rtree(rtree, index):\n if rtree.is_leaf:\n values_dict = { }\n for i in range(len(rtree.values)):\n values_dict[i] = rtree.values[i].convert_to_dict()\n rtree.values = values_dict\n return { index: rtree.convert_to_dict() }\n return { index: rtree.convert_to_dict(), \"left\": create_recursive_rtree(rtree.nodes[0], index + 1), \"right\": create_recursive_rtree(rtree.nodes[1], index + 1) }\n\ndef assemble_rtree(outages):\n rtree = Node(True, 0)\n for outage in outages:\n rtree.insert_new_value(outage)\n return rtree\n\ndef main(number_of_cars, outages=None, type_of_prediction=\"actual\", prune_constant=1):\n if outages == None:\n outages = get_outages()\n intersection_dictionary = get_routing_data()\n prediction_times = predict_fix_times(type_of_prediction, outages)\n outages = define_outages(intersection_dictionary, outages, prediction_times)\n\n return_object = { }\n for i in range(5):\n starting_intersections = sample(list(intersection_dictionary.values()), number_of_cars)\n 
vehicles = [Car(index, starting_intersections[index].latitude, starting_intersections[index].longitude, starting_intersections[index]) for index in range(len(starting_intersections))]\n distance_matrix = get_distance_matrix([None] + outages + vehicles, intersection_dictionary)\n insertion_return_object = insertion_based_routing(distance_matrix, outages, prune_constant)\n rtree_return_object = rtree_based_routing(distance_matrix, outages, prune_constant, vehicles)\n return_object[i] = insertion_return_object + rtree_return_object\n \n #json_object = json.dumps(create_recursive_rtree(rtree, 0))\n print(json.dumps(return_object))\n\nmain(int(sys.argv[1]), json.loads(sys.argv[2]), str(sys.argv[3]), int(sys.argv[4]))","sub_path":"dynamicrouting.py","file_name":"dynamicrouting.py","file_ext":"py","file_size_in_byte":8042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"482358758","text":"class Node:\n def __init__(self,value):\n self.value=value\n self.left=None\n self.right=None\n\n def preorder(self):\n root=self\n stack=[]\n while root:\n if not root.left:\n print(root.value)\n root=root.right\n else:\n curr=root.left\n while curr.right and curr.right!=root:\n curr=curr.right\n if curr.right==root:\n curr.right=None\n root=root.right\n else:\n curr.right=root\n print(root.value)\n root=root.left\n \n def insert(self,value):\n if self.value>value:\n if self.left:\n self.left.insert(value)\n else:\n self.left=Node(value)\n elif self.value